diff --git a/.github/workflows/validate_pr.yml b/.github/workflows/validate_pr.yml
index 3b7cda508..c86bae734 100644
--- a/.github/workflows/validate_pr.yml
+++ b/.github/workflows/validate_pr.yml
@@ -12,12 +12,12 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
- go-version: 1.22
+ go-version: '1.24'
cache: false
- name: golangci-lint
- uses: golangci/golangci-lint-action@v4
+ uses: golangci/golangci-lint-action@v6
with:
- version: v1.54
+ version: v1.64
unit_tests:
name: Unit-Tests
runs-on: ubuntu-latest
@@ -25,7 +25,7 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
- go-version: 1.22
+ go-version: '1.24'
- name: Unit Tests
run: make test
build:
@@ -35,6 +35,6 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
- go-version: 1.22
+ go-version: '1.24'
- name: Build All
run: make all
diff --git a/.golangci.yml b/.golangci.yml
index fde3d7766..3b4741751 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -6,9 +6,11 @@ run:
- require_docker
output:
- format: line-number
+ formats:
+ - format: line-number
linters:
+ disable-all: true
enable:
- goimports
- revive
@@ -16,7 +18,5 @@ linters:
- gofmt
linters-settings:
- errcheck:
- exclude: .errcheck-exclude
goimports:
local-prefixes: "github.com/cortexproject/cortex-tools"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index ad0ebea9d..639dc5ae9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,15 @@
Order should be `CHANGE`, `FEATURE`, `ENHANCEMENT`, and `BUGFIX`
+## v0.20.1
+* [CHANGE] Upgrade cortex to v1.20.1
+
+## v0.19.1
+* [CHANGE] Upgrade cortex to v1.19.1
+
+## v0.18.1
+* [CHANGE] Upgrade cortex to v1.18.1
+
## v0.17.0
* [CHANGE] Upgrade cortex to v1.17.0
diff --git a/changelogs/v0.18.1.md b/changelogs/v0.18.1.md
new file mode 100644
index 000000000..dee82e0bf
--- /dev/null
+++ b/changelogs/v0.18.1.md
@@ -0,0 +1,33 @@
+# v0.18.1 Release
+
+## Changes
+
+* [CHANGE] Upgrade cortex to v1.18.1
+
+## Installation
+
+## cortextool
+
+```console
+# download the binary (adapt os and arch as needed)
+$ curl -fSL -o "cortextool" "https://github.com/cortexproject/cortex-tools/releases/download/v0.18.1/cortextool_0.18.1_linux_x86_64"
+
+# make it executable
+$ chmod a+x "cortextool"
+
+# have fun :)
+$ ./cortextool --help
+```
+
+## benchtool
+
+```console
+# download the binary (adapt os and arch as needed)
+$ curl -fSL -o "benchtool" "https://github.com/cortexproject/cortex-tools/releases/download/v0.18.1/benchtool_0.18.1_linux_x86_64"
+
+# make it executable
+$ chmod a+x "benchtool"
+
+# have fun :)
+$ ./benchtool --help
+```
diff --git a/changelogs/v0.19.1.md b/changelogs/v0.19.1.md
new file mode 100644
index 000000000..c820114b9
--- /dev/null
+++ b/changelogs/v0.19.1.md
@@ -0,0 +1,33 @@
+# v0.19.1 Release
+
+## Changes
+
+* [CHANGE] Upgrade cortex to v1.19.1
+
+## Installation
+
+## cortextool
+
+```console
+# download the binary (adapt os and arch as needed)
+$ curl -fSL -o "cortextool" "https://github.com/cortexproject/cortex-tools/releases/download/v0.19.1/cortextool_0.19.1_linux_x86_64"
+
+# make it executable
+$ chmod a+x "cortextool"
+
+# have fun :)
+$ ./cortextool --help
+```
+
+## benchtool
+
+```console
+# download the binary (adapt os and arch as needed)
+$ curl -fSL -o "benchtool" "https://github.com/cortexproject/cortex-tools/releases/download/v0.19.1/benchtool_0.19.1_linux_x86_64"
+
+# make it executable
+$ chmod a+x "benchtool"
+
+# have fun :)
+$ ./benchtool --help
+```
diff --git a/changelogs/v0.20.1.md b/changelogs/v0.20.1.md
new file mode 100644
index 000000000..22716acdd
--- /dev/null
+++ b/changelogs/v0.20.1.md
@@ -0,0 +1,33 @@
+# v0.20.1 Release
+
+## Changes
+
+* [CHANGE] Upgrade cortex to v1.20.1
+
+## Installation
+
+## cortextool
+
+```console
+# download the binary (adapt os and arch as needed)
+$ curl -fSL -o "cortextool" "https://github.com/cortexproject/cortex-tools/releases/download/v0.20.1/cortextool_0.20.1_linux_x86_64"
+
+# make it executable
+$ chmod a+x "cortextool"
+
+# have fun :)
+$ ./cortextool --help
+```
+
+## benchtool
+
+```console
+# download the binary (adapt os and arch as needed)
+$ curl -fSL -o "benchtool" "https://github.com/cortexproject/cortex-tools/releases/download/v0.20.1/benchtool_0.20.1_linux_x86_64"
+
+# make it executable
+$ chmod a+x "benchtool"
+
+# have fun :)
+$ ./benchtool --help
+```
diff --git a/cmd/blockscopy/main.go b/cmd/blockscopy/main.go
index ee0c2990e..13f057437 100644
--- a/cmd/blockscopy/main.go
+++ b/cmd/blockscopy/main.go
@@ -19,7 +19,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/concurrency"
- "github.com/oklog/ulid"
+ "github.com/oklog/ulid/v2"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
diff --git a/cmd/sim/main.go b/cmd/sim/main.go
index 746b5c918..e46d1baad 100644
--- a/cmd/sim/main.go
+++ b/cmd/sim/main.go
@@ -116,8 +116,8 @@ func run(k int, sizer func(float64) int) {
fmt.Printf("%d, %d, %d, %d, %f, %f\n",
k,
- int(min(nodeSeries)),
- int(max(nodeSeries)),
+ int(minFloat64(nodeSeries)),
+ int(maxFloat64(nodeSeries)),
int(stat.Mean(nodeSeries, nil)),
stat.StdDev(nodeSeries, nil),
float64(maxAffectedTenants)/float64(numTenants))
@@ -168,7 +168,7 @@ func shuffleShard(entropy *rand.Rand, shardSize, numReplicas int) []int {
return ids
}
-func min(fs []float64) float64 {
+func minFloat64(fs []float64) float64 {
result := math.MaxFloat64
for _, f := range fs {
if f < result {
@@ -178,7 +178,7 @@ func min(fs []float64) float64 {
return result
}
-func max(fs []float64) float64 {
+func maxFloat64(fs []float64) float64 {
result := 0.0
for _, f := range fs {
if f > result {
diff --git a/go.mod b/go.mod
index 2413c861b..975b539f7 100644
--- a/go.mod
+++ b/go.mod
@@ -1,15 +1,15 @@
module github.com/cortexproject/cortex-tools
-go 1.22.2
+go 1.24.0
require (
- cloud.google.com/go/storage v1.41.0
+ cloud.google.com/go/storage v1.50.0
github.com/alecthomas/chroma v0.7.0
- github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30
- github.com/cortexproject/cortex v1.18.1
+ github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b
+ github.com/cortexproject/cortex v1.20.1
github.com/go-kit/log v0.2.1
github.com/gogo/protobuf v1.3.2
- github.com/golang/snappy v0.0.4
+ github.com/golang/snappy v1.0.0
github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b
github.com/google/go-github/v32 v32.1.0
github.com/gorilla/mux v1.8.1
@@ -17,20 +17,20 @@ require (
github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1
github.com/matttproud/golang_protobuf_extensions v1.0.4
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db
- github.com/oklog/ulid v1.3.1
- github.com/opentracing-contrib/go-stdlib v1.0.0
+ github.com/oklog/ulid/v2 v2.1.1
+ github.com/opentracing-contrib/go-stdlib v1.1.0
github.com/pkg/errors v0.9.1
- github.com/prometheus/alertmanager v0.27.0
- github.com/prometheus/client_golang v1.19.1
- github.com/prometheus/common v0.55.0
- github.com/prometheus/prometheus v0.54.0-rc.0
+ github.com/prometheus/alertmanager v0.28.1
+ github.com/prometheus/client_golang v1.23.2
+ github.com/prometheus/common v0.66.1
+ github.com/prometheus/prometheus v0.306.0
github.com/sirupsen/logrus v1.9.3
- github.com/stretchr/testify v1.9.0
- github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd
- github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647
+ github.com/stretchr/testify v1.11.1
+ github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3
+ github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb
github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5
- golang.org/x/sync v0.8.0
- google.golang.org/api v0.188.0
+ golang.org/x/sync v0.16.0
+ google.golang.org/api v0.239.0
gopkg.in/alecthomas/kingpin.v2 v2.2.6
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
@@ -38,73 +38,89 @@ require (
)
require (
- cloud.google.com/go v0.115.0 // indirect
- cloud.google.com/go/auth v0.7.0 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
- cloud.google.com/go/compute/metadata v0.4.0 // indirect
- cloud.google.com/go/iam v1.1.10 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 // indirect
- github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
+ cel.dev/expr v0.23.1 // indirect
+ cloud.google.com/go v0.120.0 // indirect
+ cloud.google.com/go/auth v0.16.2 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
+ cloud.google.com/go/compute/metadata v0.7.0 // indirect
+ cloud.google.com/go/iam v1.5.2 // indirect
+ cloud.google.com/go/monitoring v1.24.2 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 // indirect
+ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0 // indirect
github.com/VictoriaMetrics/fastcache v1.12.2 // indirect
+ github.com/alecthomas/kingpin/v2 v2.4.0 // indirect
github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1 // indirect
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
- github.com/aws/aws-sdk-go v1.55.5 // indirect
- github.com/aws/aws-sdk-go-v2 v1.16.16 // indirect
- github.com/aws/aws-sdk-go-v2/config v1.15.1 // indirect
- github.com/aws/aws-sdk-go-v2/credentials v1.12.20 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 // indirect
- github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 // indirect
- github.com/aws/smithy-go v1.13.3 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.38.3 // indirect
+ github.com/aws/aws-sdk-go-v2/config v1.29.15 // indirect
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.68 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 // indirect
+ github.com/aws/smithy-go v1.23.0 // indirect
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/bradfitz/gomemcache v0.0.0-20230905024940-24af94b03874 // indirect
- github.com/cespare/xxhash v1.1.0 // indirect
+ github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf // indirect
+ github.com/caio/go-tdigest v3.1.0+incompatible // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
+ github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
+ github.com/cristalhq/hedgedhttp v0.9.1 // indirect
github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dennwc/varint v1.0.0 // indirect
+ github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 // indirect
github.com/dlclark/regexp2 v1.2.0 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/edsrzf/mmap-go v1.1.0 // indirect
- github.com/efficientgo/core v1.0.0-rc.2 // indirect
- github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd // indirect
+ github.com/edsrzf/mmap-go v1.2.0 // indirect
+ github.com/efficientgo/core v1.0.0-rc.3 // indirect
+ github.com/efficientgo/tools/extkingpin v0.0.0-20230505153745-6b7392939a60 // indirect
+ github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
+ github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb // indirect
- github.com/fatih/color v1.16.0 // indirect
+ github.com/fatih/color v1.18.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
- github.com/fsnotify/fsnotify v1.7.0 // indirect
+ github.com/fsnotify/fsnotify v1.9.0 // indirect
+ github.com/go-chi/chi/v5 v5.2.2 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
- github.com/go-logr/logr v1.4.2 // indirect
+ github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
- github.com/go-openapi/errors v0.22.0 // indirect
- github.com/go-openapi/jsonpointer v0.21.0 // indirect
+ github.com/go-openapi/errors v0.22.1 // indirect
+ github.com/go-openapi/jsonpointer v0.21.1 // indirect
github.com/go-openapi/jsonreference v0.21.0 // indirect
github.com/go-openapi/loads v0.22.0 // indirect
github.com/go-openapi/runtime v0.28.0 // indirect
github.com/go-openapi/spec v0.21.0 // indirect
github.com/go-openapi/strfmt v0.23.0 // indirect
- github.com/go-openapi/swag v0.23.0 // indirect
+ github.com/go-openapi/swag v0.23.1 // indirect
github.com/go-openapi/validate v0.24.0 // indirect
- github.com/goccy/go-json v0.10.3 // indirect
- github.com/gogo/googleapis v1.4.0 // indirect
+ github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
+ github.com/gobwas/glob v0.2.3 // indirect
+ github.com/goccy/go-json v0.10.5 // indirect
+ github.com/gogo/googleapis v1.4.1 // indirect
github.com/gogo/status v1.1.1 // indirect
- github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
- github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
+ github.com/golang-jwt/jwt/v5 v5.2.2 // indirect
+ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac // indirect
github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 // indirect
@@ -112,134 +128,173 @@ require (
github.com/gonum/internal v0.0.0-20181124074243-f884aa714029 // indirect
github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 // indirect
github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 // indirect
- github.com/google/btree v1.1.2 // indirect
- github.com/google/go-cmp v0.6.0 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
- github.com/google/s2a-go v0.1.7 // indirect
+ github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.12.5 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/gosimple/slug v1.1.1 // indirect
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect
- github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
- github.com/hashicorp/consul/api v1.29.2 // indirect
+ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect
+ github.com/hashicorp/consul/api v1.32.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
+ github.com/hashicorp/go-metrics v0.5.4 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
- github.com/hashicorp/go-sockaddr v1.0.6 // indirect
+ github.com/hashicorp/go-sockaddr v1.0.7 // indirect
+ github.com/hashicorp/go-version v1.7.0 // indirect
github.com/hashicorp/golang-lru v0.6.0 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
- github.com/hashicorp/memberlist v0.5.1 // indirect
+ github.com/hashicorp/memberlist v0.5.3 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
- github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
- github.com/klauspost/compress v1.17.9 // indirect
- github.com/klauspost/cpuid/v2 v2.2.8 // indirect
+ github.com/klauspost/compress v1.18.0 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/knadh/koanf/maps v0.1.2 // indirect
+ github.com/knadh/koanf/providers/confmap v1.0.0 // indirect
+ github.com/knadh/koanf/v2 v2.2.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
- github.com/mattn/go-colorable v0.1.13 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
+ github.com/mattn/go-colorable v0.1.14 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mdlayher/socket v0.5.1 // indirect
+ github.com/mdlayher/vsock v1.2.1 // indirect
github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a // indirect
- github.com/miekg/dns v1.1.61 // indirect
+ github.com/miekg/dns v1.1.66 // indirect
+ github.com/minio/crc64nvme v1.0.1 // indirect
github.com/minio/md5-simd v1.1.2 // indirect
- github.com/minio/minio-go/v7 v7.0.75 // indirect
+ github.com/minio/minio-go/v7 v7.0.93 // indirect
github.com/minio/sha256-simd v1.0.1 // indirect
+ github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
+ github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/ncw/swift v1.0.53 // indirect
- github.com/oklog/run v1.1.0 // indirect
- github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e // indirect
+ github.com/oklog/run v1.2.0 // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 // indirect
+ github.com/opentracing-contrib/go-grpc v0.1.2 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
+ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0 // indirect
- github.com/prometheus/client_model v0.6.1 // indirect
- github.com/prometheus/common/sigv4 v0.1.0 // indirect
- github.com/prometheus/exporter-toolkit v0.11.0 // indirect
- github.com/prometheus/procfs v0.15.1 // indirect
+ github.com/prometheus-community/prom-label-proxy v0.11.1 // indirect
+ github.com/prometheus/client_model v0.6.2 // indirect
+ github.com/prometheus/exporter-toolkit v0.14.0 // indirect
+ github.com/prometheus/otlptranslator v0.0.2 // indirect
+ github.com/prometheus/procfs v0.16.1 // indirect
+ github.com/prometheus/sigv4 v0.2.0 // indirect
+ github.com/puzpuzpuz/xsync/v3 v3.5.1 // indirect
github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be // indirect
- github.com/redis/rueidis v1.0.14-go1.18 // indirect
- github.com/rs/xid v1.5.0 // indirect
+ github.com/rantav/go-grpc-channelz v0.0.4 // indirect
+ github.com/redis/rueidis v1.0.61 // indirect
+ github.com/rs/xid v1.6.0 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
- github.com/sercand/kuberesolver/v4 v4.0.0 // indirect
+ github.com/segmentio/fasthash v1.0.3 // indirect
+ github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771 // indirect
+ github.com/sercand/kuberesolver/v5 v5.1.1 // indirect
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c // indirect
- github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect
+ github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 // indirect
github.com/soheilhy/cmux v0.1.5 // indirect
github.com/sony/gobreaker v1.0.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
- github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b // indirect
+ github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264 // indirect
+ github.com/tinylib/msgp v1.3.0 // indirect
+ github.com/tjhop/slog-gokit v0.1.4 // indirect
github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect
github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
- github.com/vimeo/galaxycache v0.0.0-20210323154928-b7e5d71c067a // indirect
+ github.com/vimeo/galaxycache v1.3.1 // indirect
github.com/weaveworks/promrus v1.2.0 // indirect
- go.etcd.io/etcd/api/v3 v3.5.15 // indirect
- go.etcd.io/etcd/client/pkg/v3 v3.5.15 // indirect
- go.etcd.io/etcd/client/v3 v3.5.15 // indirect
- go.mongodb.org/mongo-driver v1.14.0 // indirect
+ github.com/xhit/go-str2duration/v2 v2.1.0 // indirect
+ go.etcd.io/etcd/api/v3 v3.5.17 // indirect
+ go.etcd.io/etcd/client/pkg/v3 v3.5.17 // indirect
+ go.etcd.io/etcd/client/v3 v3.5.17 // indirect
+ go.mongodb.org/mongo-driver v1.17.4 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/collector/pdata v1.12.0 // indirect
- go.opentelemetry.io/collector/semconv v0.105.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
- go.opentelemetry.io/contrib/propagators/autoprop v0.53.0 // indirect
- go.opentelemetry.io/contrib/propagators/aws v1.28.0 // indirect
- go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect
- go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 // indirect
- go.opentelemetry.io/contrib/propagators/ot v1.28.0 // indirect
- go.opentelemetry.io/otel v1.28.0 // indirect
- go.opentelemetry.io/otel/bridge/opentracing v1.28.0 // indirect
- go.opentelemetry.io/otel/metric v1.28.0 // indirect
- go.opentelemetry.io/otel/sdk v1.28.0 // indirect
- go.opentelemetry.io/otel/trace v1.28.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/collector/component v1.35.0 // indirect
+ go.opentelemetry.io/collector/confmap v1.35.0 // indirect
+ go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 // indirect
+ go.opentelemetry.io/collector/consumer v1.35.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.35.0 // indirect
+ go.opentelemetry.io/collector/internal/telemetry v0.129.0 // indirect
+ go.opentelemetry.io/collector/pdata v1.35.0 // indirect
+ go.opentelemetry.io/collector/pipeline v0.129.0 // indirect
+ go.opentelemetry.io/collector/processor v1.35.0 // indirect
+ go.opentelemetry.io/collector/semconv v0.128.0 // indirect
+ go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 // indirect
+ go.opentelemetry.io/contrib/detectors/gcp v1.35.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
+ go.opentelemetry.io/contrib/propagators/autoprop v0.61.0 // indirect
+ go.opentelemetry.io/contrib/propagators/aws v1.36.0 // indirect
+ go.opentelemetry.io/contrib/propagators/b3 v1.36.0 // indirect
+ go.opentelemetry.io/contrib/propagators/jaeger v1.36.0 // indirect
+ go.opentelemetry.io/contrib/propagators/ot v1.36.0 // indirect
+ go.opentelemetry.io/otel v1.36.0 // indirect
+ go.opentelemetry.io/otel/bridge/opentracing v1.36.0 // indirect
+ go.opentelemetry.io/otel/log v0.12.2 // indirect
+ go.opentelemetry.io/otel/metric v1.36.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.36.0 // indirect
+ go.opentelemetry.io/otel/sdk/metric v1.36.0 // indirect
+ go.opentelemetry.io/otel/trace v1.36.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.3.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- go.uber.org/zap v1.21.0 // indirect
+ go.uber.org/zap v1.27.0 // indirect
+ go.yaml.in/yaml/v2 v2.4.2 // indirect
go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect
- go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect
- golang.org/x/crypto v0.25.0 // indirect
- golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect
- golang.org/x/mod v0.19.0 // indirect
- golang.org/x/net v0.27.0 // indirect
- golang.org/x/oauth2 v0.21.0 // indirect
- golang.org/x/sys v0.22.0 // indirect
- golang.org/x/text v0.16.0 // indirect
- golang.org/x/time v0.6.0 // indirect
- golang.org/x/tools v0.23.0 // indirect
- google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b // indirect
- google.golang.org/grpc v1.65.0 // indirect
- google.golang.org/protobuf v1.34.2 // indirect
- k8s.io/apimachinery v0.30.2 // indirect
- k8s.io/client-go v0.30.2 // indirect
+ go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
+ golang.org/x/crypto v0.41.0 // indirect
+ golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
+ golang.org/x/mod v0.26.0 // indirect
+ golang.org/x/net v0.43.0 // indirect
+ golang.org/x/oauth2 v0.30.0 // indirect
+ golang.org/x/sys v0.35.0 // indirect
+ golang.org/x/text v0.28.0 // indirect
+ golang.org/x/time v0.12.0 // indirect
+ golang.org/x/tools v0.35.0 // indirect
+ google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
+ google.golang.org/grpc v1.73.0 // indirect
+ google.golang.org/protobuf v1.36.8 // indirect
+ k8s.io/apimachinery v0.33.1 // indirect
+ k8s.io/client-go v0.33.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect
- k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect
+ k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
+ sigs.k8s.io/yaml v1.4.0 // indirect
)
+// Using cortex fork of weaveworks/common
+replace github.com/weaveworks/common => github.com/cortexproject/weaveworks-common v0.0.0-20250902164925-0315015a8b9f
+
// Override since git.apache.org is down. The docs say to fetch from github.
replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999
// Use fork of gocql that has gokit logs and Prometheus metrics.
replace github.com/gocql/gocql => github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85
-// Using a 3rd-party branch for custom dialer - see https://github.com/bradfitz/gomemcache/pull/86
-replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab
-
// Replace memberlist with Grafana's fork which includes some fixes that haven't been merged upstream yet
-replace github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b
+replace github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe
// Same version being used by thanos
replace github.com/vimeo/galaxycache => github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e
@@ -256,7 +311,11 @@ replace github.com/googleapis/gnostic => github.com/google/gnostic v0.6.9
// https://github.com/thanos-io/thanos/blob/fdeea3917591fc363a329cbe23af37c6fff0b5f0/go.mod#L265
replace gopkg.in/alecthomas/kingpin.v2 => github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497
-// replace github.com/sercand/kuberesolver => github.com/sercand/kuberesolver/v5 v5.1.1
+// Wait for fix for https://github.com/grpc/grpc-go/pull/8504.
+replace google.golang.org/grpc => google.golang.org/grpc v1.71.2
+
+// See https://github.com/envoyproxy/go-control-plane/issues/1083 as this version introduces checksum mismatch.
+exclude github.com/envoyproxy/go-control-plane/envoy v1.32.3
-// Pin kuberesolver/v5 to support new grpc version. Need to upgrade kuberesolver version on weaveworks/common.
-replace github.com/sercand/kuberesolver/v4 => github.com/sercand/kuberesolver/v5 v5.1.1
+// TODO: update it in next PR
+replace github.com/prometheus/otlptranslator => github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588
diff --git a/go.sum b/go.sum
index a1938b949..bc7918edd 100644
--- a/go.sum
+++ b/go.sum
@@ -1,4 +1,9 @@
-cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg=
+cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg=
+cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cel.dev/expr v0.23.1 h1:K4KOtPCJQjVggkARsjG9RWXP6O4R73aHeJMa/dmCQQg=
+cel.dev/expr v0.23.1/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
@@ -36,72 +41,111 @@ cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRY
cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM=
cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I=
cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
-cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
-cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
+cloud.google.com/go v0.110.2/go.mod h1:k04UEeEtb6ZBRTv3dZz4CeJC3jKGxyhl0sAiVVquxiw=
+cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go v0.110.6/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI=
+cloud.google.com/go v0.120.0 h1:wc6bgG9DHyKqF5/vQvX1CiZrtHnxJjBlKUyF9nP6meA=
+cloud.google.com/go v0.120.0/go.mod h1:/beW32s8/pGRuj4IILWQNd4uuebeT4dkOhKmkfit64Q=
cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4=
cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw=
cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E=
+cloud.google.com/go/accessapproval v1.7.1/go.mod h1:JYczztsHRMK7NTXb6Xw+dwbs/WnOJxbo/2mTI+Kgg68=
cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o=
cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE=
cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM=
+cloud.google.com/go/accesscontextmanager v1.7.0/go.mod h1:CEGLewx8dwa33aDAZQujl7Dx+uYhS0eay198wB/VumQ=
+cloud.google.com/go/accesscontextmanager v1.8.0/go.mod h1:uI+AI/r1oyWK99NN8cQ3UK76AMelMzgZCvJfsi2c+ps=
+cloud.google.com/go/accesscontextmanager v1.8.1/go.mod h1:JFJHfvuaTC+++1iL1coPiG1eu5D24db2wXCDWDjIrxo=
cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw=
cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY=
cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg=
cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ=
+cloud.google.com/go/aiplatform v1.36.1/go.mod h1:WTm12vJRPARNvJ+v6P52RDHCNe4AhvjcIZ/9/RRHy/k=
+cloud.google.com/go/aiplatform v1.37.0/go.mod h1:IU2Cv29Lv9oCn/9LkFiiuKfwrRTq+QQMbW+hPCxJGZw=
+cloud.google.com/go/aiplatform v1.45.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA=
+cloud.google.com/go/aiplatform v1.48.0/go.mod h1:Iu2Q7sC7QGhXUeOhAj/oCK9a+ULz1O4AotZiqjQ8MYA=
cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI=
cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4=
+cloud.google.com/go/analytics v0.17.0/go.mod h1:WXFa3WSym4IZ+JiKmavYdJwGG/CvpqiqczmL59bTD9M=
cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE=
+cloud.google.com/go/analytics v0.19.0/go.mod h1:k8liqf5/HCnOUkbawNtrWWc+UAzyDlW89doe8TtoDsE=
+cloud.google.com/go/analytics v0.21.2/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo=
+cloud.google.com/go/analytics v0.21.3/go.mod h1:U8dcUtmDmjrmUTnnnRnI4m6zKn/yaA5N9RlEkYFHpQo=
cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk=
cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc=
cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8=
+cloud.google.com/go/apigateway v1.6.1/go.mod h1:ufAS3wpbRjqfZrzpvLC2oh0MFlpRJm2E/ts25yyqmXA=
cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc=
cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04=
cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8=
+cloud.google.com/go/apigeeconnect v1.6.1/go.mod h1:C4awq7x0JpLtrlQCr8AzVIzAaYgngRqWf9S5Uhg+wWs=
cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY=
cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM=
+cloud.google.com/go/apigeeregistry v0.6.0/go.mod h1:BFNzW7yQVLZ3yj0TKcwzb8n25CFBri51GVGOEUcgQsc=
+cloud.google.com/go/apigeeregistry v0.7.1/go.mod h1:1XgyjZye4Mqtw7T9TsY4NW10U7BojBvG4RMD+vRDrIw=
cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU=
cloud.google.com/go/apikeys v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI=
+cloud.google.com/go/apikeys v0.6.0/go.mod h1:kbpXu5upyiAlGkKrJgQl8A0rKNNJ7dQ377pdroRSSi8=
cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno=
cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak=
cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84=
+cloud.google.com/go/appengine v1.7.0/go.mod h1:eZqpbHFCqRGa2aCdope7eC0SWLV1j0neb/QnMJVWx6A=
+cloud.google.com/go/appengine v1.7.1/go.mod h1:IHLToyb/3fKutRysUlFO0BPt5j7RiQ45nrzEJmKTo6E=
+cloud.google.com/go/appengine v1.8.1/go.mod h1:6NJXGLVhZCN9aQ/AEDvmfzKEfoYBlfB80/BHiKVputY=
cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4=
cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0=
+cloud.google.com/go/area120 v0.7.0/go.mod h1:a3+8EUD1SX5RUcCs3MY5YasiO1z6yLiNLRiFrykbynY=
cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k=
+cloud.google.com/go/area120 v0.8.1/go.mod h1:BVfZpGpB7KFVNxPiQBuHkX6Ed0rS51xIgmGyjrAfzsg=
cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ=
cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk=
cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0=
cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc=
+cloud.google.com/go/artifactregistry v1.11.1/go.mod h1:lLYghw+Itq9SONbCa1YWBoWs1nOucMH0pwXN1rOBZFI=
cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ=
+cloud.google.com/go/artifactregistry v1.12.0/go.mod h1:o6P3MIvtzTOnmvGagO9v/rOjjA0HmhJ+/6KAXrmYDCI=
+cloud.google.com/go/artifactregistry v1.13.0/go.mod h1:uy/LNfoOIivepGhooAUpL1i30Hgee3Cu0l4VTWHUC08=
+cloud.google.com/go/artifactregistry v1.14.1/go.mod h1:nxVdG19jTaSTu7yA7+VbWL346r3rIdkZ142BSQqhn5E=
cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o=
cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s=
cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0=
cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ=
cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY=
cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo=
+cloud.google.com/go/asset v1.12.0/go.mod h1:h9/sFOa4eDIyKmH6QMpm4eUK3pDojWnUhTgJlk762Hg=
+cloud.google.com/go/asset v1.13.0/go.mod h1:WQAMyYek/b7NBpYq/K4KJWcRqzoalEsxz/t/dTk4THw=
+cloud.google.com/go/asset v1.14.1/go.mod h1:4bEJ3dnHCqWCDbWJ/6Vn7GVI9LerSi7Rfdi03hd+WTQ=
cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY=
cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw=
cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI=
cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo=
cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0=
cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E=
-cloud.google.com/go/auth v0.7.0 h1:kf/x9B3WTbBUHkC+1VS8wwwli9TzhSt0vSTVBmMR8Ts=
-cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw=
-cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
-cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
+cloud.google.com/go/assuredworkloads v1.11.1/go.mod h1:+F04I52Pgn5nmPG36CWFtxmav6+7Q+c5QyJoL18Lry0=
+cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=
+cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=
+cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
+cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0=
cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8=
cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8=
cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM=
cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU=
+cloud.google.com/go/automl v1.13.1/go.mod h1:1aowgAHWYZU27MybSCFiukPO7xnyawv7pt3zK4bheQE=
cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc=
cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI=
cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss=
+cloud.google.com/go/baremetalsolution v1.1.1/go.mod h1:D1AV6xwOksJMV4OSlWHtWuFNZZYujJknMAP4Qa27QIA=
cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE=
cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE=
cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g=
+cloud.google.com/go/batch v1.3.1/go.mod h1:VguXeQKXIYaeeIYbuozUmBR13AfL4SJP7IltNPS+A4A=
cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4=
cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8=
cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM=
+cloud.google.com/go/beyondcorp v0.5.0/go.mod h1:uFqj9X+dSfrheVp7ssLTaRHd2EHqSL4QZmH4e8WXGGU=
+cloud.google.com/go/beyondcorp v0.6.1/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4=
+cloud.google.com/go/beyondcorp v1.0.0/go.mod h1:YhxDWw946SCbmcWo3fAhw3V4XZMSpQ/VYfcKGAEU8/4=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -111,34 +155,53 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7
cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA=
cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw=
cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc=
+cloud.google.com/go/bigquery v1.47.0/go.mod h1:sA9XOgy0A8vQK9+MWhEQTY6Tix87M/ZurWFIxmF9I/E=
cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac=
+cloud.google.com/go/bigquery v1.49.0/go.mod h1:Sv8hMmTFFYBlt/ftw2uN6dFdQPzBlREY9yBh7Oy7/4Q=
+cloud.google.com/go/bigquery v1.50.0/go.mod h1:YrleYEh2pSEbgTBZYMJ5SuSr0ML3ypjRB1zgf7pvQLU=
+cloud.google.com/go/bigquery v1.52.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
+cloud.google.com/go/bigquery v1.53.0/go.mod h1:3b/iXjRQGU4nKa87cXeg6/gogLjO8C6PmuM8i5Bi/u4=
cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY=
cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s=
cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI=
cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y=
cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss=
+cloud.google.com/go/billing v1.13.0/go.mod h1:7kB2W9Xf98hP9Sr12KfECgfGclsH3CQR0R08tnRlRbc=
+cloud.google.com/go/billing v1.16.0/go.mod h1:y8vx09JSSJG02k5QxbycNRrN7FGZB6F3CAcgum7jvGA=
cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM=
cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI=
cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0=
cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk=
cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q=
+cloud.google.com/go/binaryauthorization v1.6.1/go.mod h1:TKt4pa8xhowwffiBmbrbcxijJRZED4zrqnwZ1lKH51U=
cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg=
cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590=
cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8=
+cloud.google.com/go/certificatemanager v1.7.1/go.mod h1:iW8J3nG6SaRYImIa+wXQ0g8IgoofDFRp5UMzaNk1UqI=
cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk=
cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk=
cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE=
+cloud.google.com/go/channel v1.12.0/go.mod h1:VkxCGKASi4Cq7TbXxlaBezonAYpp1GCnKMY6tnMQnLU=
+cloud.google.com/go/channel v1.16.0/go.mod h1:eN/q1PFSl5gyu0dYdmxNXscY/4Fi7ABmeHCJNf/oHmc=
cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U=
cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA=
+cloud.google.com/go/cloudbuild v1.6.0/go.mod h1:UIbc/w9QCbH12xX+ezUsgblrWv+Cv4Tw83GiSMHOn9M=
cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg=
+cloud.google.com/go/cloudbuild v1.9.0/go.mod h1:qK1d7s4QlO0VwfYn5YuClDGg2hfmLZEb4wQGAbIgL1s=
+cloud.google.com/go/cloudbuild v1.10.1/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
+cloud.google.com/go/cloudbuild v1.13.0/go.mod h1:lyJg7v97SUIPq4RC2sGsz/9tNczhyv2AjML/ci4ulzU=
cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM=
cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk=
cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA=
+cloud.google.com/go/clouddms v1.6.1/go.mod h1:Ygo1vL52Ov4TBZQquhz5fiw2CQ58gvu+PlS6PVXCpZI=
cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY=
cloud.google.com/go/cloudtasks v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI=
cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4=
cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI=
cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y=
+cloud.google.com/go/cloudtasks v1.10.0/go.mod h1:NDSoTLkZ3+vExFEWu2UJV1arUyzVDAiZtdWcsUyNwBs=
+cloud.google.com/go/cloudtasks v1.11.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM=
+cloud.google.com/go/cloudtasks v1.12.1/go.mod h1:a9udmnou9KO2iulGscKR0qBYjreuX8oHwpmFsKspEvM=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
@@ -150,23 +213,37 @@ cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x
cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU=
cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE=
cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
-cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA=
cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
+cloud.google.com/go/compute v1.19.3/go.mod h1:qxvISKp/gYnXkSAD1ppcSOveRAmzxicEv/JlizULFrI=
+cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
+cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU=
cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
-cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c=
-cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M=
+cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
+cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
+cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
+cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
+cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY=
cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck=
cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w=
+cloud.google.com/go/contactcenterinsights v1.9.1/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM=
+cloud.google.com/go/contactcenterinsights v1.10.0/go.mod h1:bsg/R7zGLYMVxFFzfh9ooLTruLRCG9fnzhH9KznHhbM=
cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg=
cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo=
cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4=
+cloud.google.com/go/container v1.14.0/go.mod h1:3AoJMPhHfLDxLvrlVWaK57IXzaPnLaZq63WX59aQBfM=
+cloud.google.com/go/container v1.15.0/go.mod h1:ft+9S0WGjAyjDggg5S06DXj+fHJICWg8L7isCQe9pQA=
+cloud.google.com/go/container v1.22.1/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4=
+cloud.google.com/go/container v1.24.0/go.mod h1:lTNExE2R7f+DLbAN+rJiKTisauFCaoDq6NURZ83eVH4=
cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I=
cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4=
cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI=
+cloud.google.com/go/containeranalysis v0.9.0/go.mod h1:orbOANbwk5Ejoom+s+DUCTTJ7IBdBQJDcSylAx/on9s=
+cloud.google.com/go/containeranalysis v0.10.1/go.mod h1:Ya2jiILITMY68ZLPaogjmOMNkwsDrWBSTyBubGXO7j0=
cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0=
cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs=
cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc=
@@ -174,39 +251,63 @@ cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H
cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM=
cloud.google.com/go/datacatalog v1.8.1/go.mod h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M=
cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0=
+cloud.google.com/go/datacatalog v1.13.0/go.mod h1:E4Rj9a5ZtAxcQJlEBTLgMTphfP11/lNaAshpoBgemX8=
+cloud.google.com/go/datacatalog v1.14.0/go.mod h1:h0PrGtlihoutNMp/uvwhawLQ9+c63Kz65UFqh49Yo+E=
+cloud.google.com/go/datacatalog v1.14.1/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4=
+cloud.google.com/go/datacatalog v1.16.0/go.mod h1:d2CevwTG4yedZilwe+v3E3ZBDRMobQfSG/a6cCCN5R4=
cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM=
cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ=
cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE=
+cloud.google.com/go/dataflow v0.9.1/go.mod h1:Wp7s32QjYuQDWqJPFFlnBKhkAtiFpMTdg00qGbnIHVw=
cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo=
cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE=
cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0=
cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA=
+cloud.google.com/go/dataform v0.7.0/go.mod h1:7NulqnVozfHvWUBpMDfKMUESr+85aJsC/2O0o3jWPDE=
+cloud.google.com/go/dataform v0.8.1/go.mod h1:3BhPSiw8xmppbgzeBbmDvmSWlwouuJkXsXsb8UBih9M=
cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38=
cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w=
cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8=
+cloud.google.com/go/datafusion v1.7.1/go.mod h1:KpoTBbFmoToDExJUso/fcCiguGDk7MEzOWXUsJo0wsI=
cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I=
cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ=
cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM=
+cloud.google.com/go/datalabeling v0.8.1/go.mod h1:XS62LBSVPbYR54GfYQsPXZjTW8UxCK2fkDciSrpRFdY=
cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA=
cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A=
cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ=
+cloud.google.com/go/dataplex v1.6.0/go.mod h1:bMsomC/aEJOSpHXdFKFGQ1b0TDPIeL28nJObeO1ppRs=
+cloud.google.com/go/dataplex v1.8.1/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
+cloud.google.com/go/dataplex v1.9.0/go.mod h1:7TyrDT6BCdI8/38Uvp0/ZxBslOslP2X2MPDucliyvSE=
cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s=
cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI=
cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4=
+cloud.google.com/go/dataproc/v2 v2.0.1/go.mod h1:7Ez3KRHdFGcfY7GcevBbvozX+zyWGcwLJvvAMwCaoZ4=
cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo=
cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA=
cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c=
+cloud.google.com/go/dataqna v0.8.1/go.mod h1:zxZM0Bl6liMePWsHA8RMGAfmTG34vJMapbHAxQ5+WA8=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM=
+cloud.google.com/go/datastore v1.11.0/go.mod h1:TvGxBIHCS50u8jzG+AW/ppf87v1of8nwzFNgEZU1D3c=
+cloud.google.com/go/datastore v1.12.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastore v1.12.1/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
+cloud.google.com/go/datastore v1.13.0/go.mod h1:KjdB88W897MRITkvWWJrg2OUtrR5XVj1EoLgSp6/N70=
cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo=
cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ=
cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g=
cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4=
cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs=
+cloud.google.com/go/datastream v1.7.0/go.mod h1:uxVRMm2elUSPuh65IbZpzJNMbuzkcvu5CjMqVIUHrww=
+cloud.google.com/go/datastream v1.9.1/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q=
+cloud.google.com/go/datastream v1.10.0/go.mod h1:hqnmr8kdUBmrnk65k5wNRoHSCYksvpdZIcZIEl8h43Q=
cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c=
cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s=
cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI=
+cloud.google.com/go/deploy v1.8.0/go.mod h1:z3myEJnA/2wnB4sgjqdMfgxCA0EqC3RBTNcVPs93mtQ=
+cloud.google.com/go/deploy v1.11.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g=
+cloud.google.com/go/deploy v1.13.0/go.mod h1:tKuSUV5pXbn67KiubiUNUejqLs4f5cxxiCNCeyl0F2g=
cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4=
cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0=
cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8=
@@ -214,57 +315,86 @@ cloud.google.com/go/dialogflow v1.18.0/go.mod h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz
cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0=
cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM=
cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4=
+cloud.google.com/go/dialogflow v1.32.0/go.mod h1:jG9TRJl8CKrDhMEcvfcfFkkpp8ZhgPz3sBGmAUYJ2qE=
+cloud.google.com/go/dialogflow v1.38.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4=
+cloud.google.com/go/dialogflow v1.40.0/go.mod h1:L7jnH+JL2mtmdChzAIcXQHXMvQkE3U4hTaNltEuxXn4=
cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM=
cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q=
cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4=
+cloud.google.com/go/dlp v1.10.1/go.mod h1:IM8BWz1iJd8njcNcG0+Kyd9OPnqnRNkDV8j42VT5KOI=
cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU=
cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU=
cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k=
cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4=
cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM=
+cloud.google.com/go/documentai v1.18.0/go.mod h1:F6CK6iUH8J81FehpskRmhLq/3VlwQvb7TvwOceQ2tbs=
+cloud.google.com/go/documentai v1.20.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E=
+cloud.google.com/go/documentai v1.22.0/go.mod h1:yJkInoMcK0qNAEdRnqY/D5asy73tnPe88I1YTZT+a8E=
cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y=
cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg=
cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE=
+cloud.google.com/go/domains v0.9.1/go.mod h1:aOp1c0MbejQQ2Pjf1iJvnVyT+z6R6s8pX66KaCSDYfE=
cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk=
cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w=
cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc=
+cloud.google.com/go/edgecontainer v1.0.0/go.mod h1:cttArqZpBB2q58W/upSG++ooo6EsblxDIolxa3jSjbY=
+cloud.google.com/go/edgecontainer v1.1.1/go.mod h1:O5bYcS//7MELQZs3+7mabRqoWQhXCzenBu0R8bz2rwk=
cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU=
cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI=
cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8=
cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M=
+cloud.google.com/go/essentialcontacts v1.6.2/go.mod h1:T2tB6tX+TRak7i88Fb2N9Ok3PvY3UNbUsMag9/BARh4=
cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc=
cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw=
cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw=
+cloud.google.com/go/eventarc v1.11.0/go.mod h1:PyUjsUKPWoRBCHeOxZd/lbOOjahV41icXyUY5kSTvVY=
+cloud.google.com/go/eventarc v1.12.1/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI=
+cloud.google.com/go/eventarc v1.13.0/go.mod h1:mAFCW6lukH5+IZjkvrEss+jmt2kOdYlN8aMx3sRJiAI=
cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w=
cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI=
cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs=
+cloud.google.com/go/filestore v1.6.0/go.mod h1:di5unNuss/qfZTw2U9nhFqo8/ZDSc466dre85Kydllg=
+cloud.google.com/go/filestore v1.7.1/go.mod h1:y10jsorq40JJnjR/lQ8AfFbbcGlw3g+Dp8oN7i7FjV4=
cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE=
+cloud.google.com/go/firestore v1.11.0/go.mod h1:b38dKhgzlmNNGTNZZwe7ZRFEuRab1Hay3/DBsIGKKy4=
cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk=
cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg=
cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY=
cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08=
cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw=
+cloud.google.com/go/functions v1.12.0/go.mod h1:AXWGrF3e2C/5ehvwYo/GH6O5s09tOPksiKhz+hH8WkA=
+cloud.google.com/go/functions v1.13.0/go.mod h1:EU4O007sQm6Ef/PwRsI8N2umygGqPBS/IZQKBQBcJ3c=
+cloud.google.com/go/functions v1.15.1/go.mod h1:P5yNWUTkyU+LvW/S9O6V+V423VZooALQlqoXdoPz5AE=
cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM=
cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA=
cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w=
cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM=
cloud.google.com/go/gaming v1.9.0/go.mod h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0=
+cloud.google.com/go/gaming v1.10.1/go.mod h1:XQQvtfP8Rb9Rxnxm5wFVpAp9zCQkJi2bLIb7iHGwB3s=
cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60=
cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo=
cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg=
+cloud.google.com/go/gkebackup v1.3.0/go.mod h1:vUDOu++N0U5qs4IhG1pcOnD1Mac79xWy6GoBFlWCWBU=
cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o=
cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A=
cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw=
+cloud.google.com/go/gkeconnect v0.8.1/go.mod h1:KWiK1g9sDLZqhxB2xEuPV8V9NYzrqTUmQR9shJHpOZw=
cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0=
cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0=
cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E=
+cloud.google.com/go/gkehub v0.12.0/go.mod h1:djiIwwzTTBrF5NaXCGv3mf7klpEMcST17VBTVVDcuaw=
+cloud.google.com/go/gkehub v0.14.1/go.mod h1:VEXKIJZ2avzrbd7u+zeMtW00Y8ddk/4V9511C9CQGTY=
cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA=
cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI=
cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y=
+cloud.google.com/go/gkemulticloud v0.6.1/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw=
+cloud.google.com/go/gkemulticloud v1.0.0/go.mod h1:kbZ3HKyTsiwqKX7Yw56+wUGwwNZViRnxWK2DVknXWfw=
cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc=
+cloud.google.com/go/grafeas v0.3.0/go.mod h1:P7hgN24EyONOTMyeJH6DxG4zD7fwiYa5Q6GUgyFSOU8=
cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM=
cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o=
cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo=
+cloud.google.com/go/gsuiteaddons v1.6.1/go.mod h1:CodrdOqRZcLp5WOwejHWYBjZvfY0kOphkAKpF/3qdZY=
cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc=
@@ -273,99 +403,156 @@ cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQE
cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE=
cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY=
cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY=
-cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI=
-cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/iam v1.0.1/go.mod h1:yR3tmSL8BcZB4bxByRv2jkSIahVmCtfKZwLYGBalRE8=
+cloud.google.com/go/iam v1.1.0/go.mod h1:nxdHjaKfCr7fNYx/HJMM8LgiMugmveWlkatear5gVyk=
+cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU=
+cloud.google.com/go/iam v1.5.2 h1:qgFRAGEmd8z6dJ/qyEchAuL9jpswyODjA2lS+w234g8=
+cloud.google.com/go/iam v1.5.2/go.mod h1:SE1vg0N81zQqLzQEwxL2WI6yhetBdbNQuTvIKCSkUHE=
cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc=
cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A=
cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk=
+cloud.google.com/go/iap v1.7.0/go.mod h1:beqQx56T9O1G1yNPph+spKpNibDlYIiIixiqsQXxLIo=
+cloud.google.com/go/iap v1.7.1/go.mod h1:WapEwPc7ZxGt2jFGB/C/bm+hP0Y6NXzOYGjpPnmMS74=
+cloud.google.com/go/iap v1.8.1/go.mod h1:sJCbeqg3mvWLqjZNsI6dfAtbbV1DL2Rl7e1mTyXYREQ=
cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM=
cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY=
cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4=
+cloud.google.com/go/ids v1.4.1/go.mod h1:np41ed8YMU8zOgv53MMMoCntLTn2lF+SUzlM+O3u/jw=
cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs=
cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g=
cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o=
+cloud.google.com/go/iot v1.6.0/go.mod h1:IqdAsmE2cTYYNO1Fvjfzo9po179rAtJeVGUvkLN3rLE=
+cloud.google.com/go/iot v1.7.1/go.mod h1:46Mgw7ev1k9KqK1ao0ayW9h0lI+3hxeanz+L1zmbbbk=
cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA=
cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg=
cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0=
+cloud.google.com/go/kms v1.8.0/go.mod h1:4xFEhYFqvW+4VMELtZyxomGSYtSQKzM178ylFW4jMAg=
cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w=
+cloud.google.com/go/kms v1.10.0/go.mod h1:ng3KTUtQQU9bPX3+QGLsflZIHlkbn8amFAMY63m8d24=
+cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI=
+cloud.google.com/go/kms v1.11.0/go.mod h1:hwdiYC0xjnWsKQQCQQmIQnS9asjYVSK6jtXm+zFqXLM=
+cloud.google.com/go/kms v1.12.1/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
+cloud.google.com/go/kms v1.15.0/go.mod h1:c9J991h5DTl+kg7gi3MYomh12YEENGrf48ee/N/2CDM=
cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic=
cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI=
cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE=
cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8=
cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY=
+cloud.google.com/go/language v1.10.1/go.mod h1:CPp94nsdVNiQEt1CNjF5WkTcisLiHPyIbMhvR8H2AW0=
cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8=
cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08=
cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo=
+cloud.google.com/go/lifesciences v0.9.1/go.mod h1:hACAOd1fFbCGLr/+weUKRAJas82Y4vrL3O5326N//Wc=
cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw=
cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M=
+cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
+cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE=
cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc=
cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
+cloud.google.com/go/longrunning v0.4.2/go.mod h1:OHrnaYyLUV6oqwh0xiS7e5sLQhP1m0QU9R+WhGDMgIQ=
+cloud.google.com/go/longrunning v0.5.0/go.mod h1:0JNuqRShmscVAhIACGtskSAWtqtOoPkwP0YF1oVEchc=
+cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc=
+cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
+cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE=
cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM=
cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA=
+cloud.google.com/go/managedidentities v1.6.1/go.mod h1:h/irGhTN2SkZ64F43tfGPMbHnypMbu4RB3yl8YcuEak=
cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI=
cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw=
+cloud.google.com/go/maps v0.7.0/go.mod h1:3GnvVl3cqeSvgMcpRlQidXsPYuDGQ8naBis7MVzpXsY=
+cloud.google.com/go/maps v1.3.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
+cloud.google.com/go/maps v1.4.0/go.mod h1:6mWTUv+WhnOwAgjVsSW2QPPECmW+s3PcRyOa9vgG/5s=
cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4=
cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w=
cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I=
+cloud.google.com/go/mediatranslation v0.8.1/go.mod h1:L/7hBdEYbYHQJhX2sldtTO5SZZ1C1vkapubj0T2aGig=
cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE=
cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM=
cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA=
cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY=
cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM=
+cloud.google.com/go/memcache v1.10.1/go.mod h1:47YRQIarv4I3QS5+hoETgKO40InqzLP6kpNLvyXuyaA=
cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY=
cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s=
cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8=
cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI=
cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo=
+cloud.google.com/go/metastore v1.11.1/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
+cloud.google.com/go/metastore v1.12.0/go.mod h1:uZuSo80U3Wd4zi6C22ZZliOUJ3XeM/MlYi/z5OAOWRA=
cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk=
cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4=
cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w=
+cloud.google.com/go/monitoring v1.13.0/go.mod h1:k2yMBAB1H9JT/QETjNkgdCGD9bPF712XiLTVr+cBrpw=
+cloud.google.com/go/monitoring v1.15.1/go.mod h1:lADlSAlFdbqQuwwpaImhsJXu1QSdd3ojypXrFSMr2rM=
+cloud.google.com/go/monitoring v1.24.2 h1:5OTsoJ1dXYIiMiuL+sYscLc9BumrL3CarVLL7dd7lHM=
+cloud.google.com/go/monitoring v1.24.2/go.mod h1:x7yzPWcgDRnPEv3sI+jJGBkwl5qINf+6qY4eq0I9B4U=
cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA=
cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o=
cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM=
cloud.google.com/go/networkconnectivity v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8=
cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E=
+cloud.google.com/go/networkconnectivity v1.11.0/go.mod h1:iWmDD4QF16VCDLXUqvyspJjIEtBR/4zq5hwnY2X3scM=
+cloud.google.com/go/networkconnectivity v1.12.1/go.mod h1:PelxSWYM7Sh9/guf8CFhi6vIqf19Ir/sbfZRUwXh92E=
cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8=
cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4=
cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY=
+cloud.google.com/go/networkmanagement v1.8.0/go.mod h1:Ho/BUGmtyEqrttTgWEe7m+8vDdK74ibQc+Be0q7Fof0=
cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ=
cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU=
cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k=
+cloud.google.com/go/networksecurity v0.8.0/go.mod h1:B78DkqsxFG5zRSVuwYFRZ9Xz8IcQ5iECsNrPn74hKHU=
+cloud.google.com/go/networksecurity v0.9.1/go.mod h1:MCMdxOKQ30wsBI1eI659f9kEp4wuuAueoC9AJKSPWZQ=
cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY=
cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34=
cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA=
cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0=
cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE=
+cloud.google.com/go/notebooks v1.8.0/go.mod h1:Lq6dYKOYOWUCTvw5t2q1gp1lAp0zxAxRycayS0iJcqQ=
+cloud.google.com/go/notebooks v1.9.1/go.mod h1:zqG9/gk05JrzgBt4ghLzEepPHNwE5jgPcHZRKhlC1A8=
cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4=
cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs=
cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI=
+cloud.google.com/go/optimization v1.4.1/go.mod h1:j64vZQP7h9bO49m2rVaTVoNM0vEBEN5eKPUPbZyXOrk=
cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA=
cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk=
cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ=
+cloud.google.com/go/orchestration v1.8.1/go.mod h1:4sluRF3wgbYVRqz7zJ1/EUNc90TTprliq9477fGobD8=
cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE=
cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc=
cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc=
+cloud.google.com/go/orgpolicy v1.11.0/go.mod h1:2RK748+FtVvnfuynxBzdnyu7sygtoZa1za/0ZfpOs1M=
+cloud.google.com/go/orgpolicy v1.11.1/go.mod h1:8+E3jQcpZJQliP+zaFfayC2Pg5bmhuLK755wKhIIUCE=
cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs=
cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg=
cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo=
cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw=
cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw=
+cloud.google.com/go/osconfig v1.12.0/go.mod h1:8f/PaYzoS3JMVfdfTubkowZYGmAhUCjjwnjqWI7NVBc=
+cloud.google.com/go/osconfig v1.12.1/go.mod h1:4CjBxND0gswz2gfYRCUoUzCm9zCABp91EeTtWXyz0tE=
cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E=
cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU=
cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70=
cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo=
cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs=
+cloud.google.com/go/oslogin v1.10.1/go.mod h1:x692z7yAue5nE7CsSnoG0aaMbNoRJRXO4sn73R+ZqAs=
cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0=
cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA=
cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk=
+cloud.google.com/go/phishingprotection v0.8.1/go.mod h1:AxonW7GovcA8qdEk13NfHq9hNx5KPtfxXNeUxTDxB6I=
cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg=
cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE=
cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw=
+cloud.google.com/go/policytroubleshooter v1.6.0/go.mod h1:zYqaPTsmfvpjm5ULxAyD/lINQxJ0DDsnWOP/GZ7xzBc=
+cloud.google.com/go/policytroubleshooter v1.7.1/go.mod h1:0NaT5v3Ag1M7U5r0GfDCpUFkWd9YqpubBWsQlhanRv0=
+cloud.google.com/go/policytroubleshooter v1.8.0/go.mod h1:tmn5Ir5EToWe384EuboTcVQT7nTag2+DuH3uHmKd1HU=
cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0=
cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI=
cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg=
+cloud.google.com/go/privatecatalog v0.8.0/go.mod h1:nQ6pfaegeDAq/Q5lrfCQzQLhubPiZhSaNhIgfJlnIXs=
+cloud.google.com/go/privatecatalog v0.9.1/go.mod h1:0XlDXW2unJXdf9zFz968Hp35gl/bhF4twwpXZAW50JA=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -373,8 +560,13 @@ cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjp
cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI=
cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0=
cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8=
+cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4=
+cloud.google.com/go/pubsub v1.32.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
+cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc=
cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg=
cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k=
+cloud.google.com/go/pubsublite v1.7.0/go.mod h1:8hVMwRXfDfvGm3fahVbtDbiLePT3gpoiJYJY+vxWxVM=
+cloud.google.com/go/pubsublite v1.8.1/go.mod h1:fOLdU4f5xldK4RGJrBMm+J7zMWNj/k4PxwEZXy39QS0=
cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4=
cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o=
cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk=
@@ -382,77 +574,109 @@ cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7d
cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE=
cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U=
cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.0/go.mod h1:19wVj/fs5RtYtynAPJdDTb69oW0vNHYDBTbB4NvMD9c=
+cloud.google.com/go/recaptchaenterprise/v2 v2.7.2/go.mod h1:kR0KjsJS7Jt1YSyWFkseQ756D45kaYNTlDPPaRAvDBU=
cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg=
cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4=
cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac=
+cloud.google.com/go/recommendationengine v0.8.1/go.mod h1:MrZihWwtFYWDzE6Hz5nKcNz3gLizXVIDI/o3G1DLcrE=
cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg=
cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c=
cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs=
cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70=
cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ=
+cloud.google.com/go/recommender v1.10.1/go.mod h1:XFvrE4Suqn5Cq0Lf+mCP6oBHD/yRMA8XxP5sb7Q7gpA=
cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y=
cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A=
cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA=
cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM=
cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ=
+cloud.google.com/go/redis v1.13.1/go.mod h1:VP7DGLpE91M6bcsDdMuyCm2hIpB6Vp2hI090Mfd1tcg=
cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA=
cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0=
cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots=
+cloud.google.com/go/resourcemanager v1.6.0/go.mod h1:YcpXGRs8fDzcUl1Xw8uOVmI8JEadvhRIkoXXUNVYcVo=
+cloud.google.com/go/resourcemanager v1.7.0/go.mod h1:HlD3m6+bwhzj9XCouqmeiGuni95NTrExfhoSrkC/3EI=
+cloud.google.com/go/resourcemanager v1.9.1/go.mod h1:dVCuosgrh1tINZ/RwBufr8lULmWGOkPS8gL5gqyjdT8=
cloud.google.com/go/resourcesettings v1.3.0/go.mod h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU=
cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg=
cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA=
+cloud.google.com/go/resourcesettings v1.6.1/go.mod h1:M7mk9PIZrC5Fgsu1kZJci6mpgN8o0IUzVx3eJU3y4Jw=
cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4=
cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY=
cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc=
cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y=
cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14=
+cloud.google.com/go/retail v1.14.1/go.mod h1:y3Wv3Vr2k54dLNIrCzenyKG8g8dhvhncT2NcNjb/6gE=
cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do=
cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo=
cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM=
+cloud.google.com/go/run v0.9.0/go.mod h1:Wwu+/vvg8Y+JUApMwEDfVfhetv30hCG4ZwDR/IXl2Qg=
+cloud.google.com/go/run v1.2.0/go.mod h1:36V1IlDzQ0XxbQjUx6IYbw8H3TJnWvhii963WW3B/bo=
cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s=
cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI=
cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk=
cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44=
cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc=
+cloud.google.com/go/scheduler v1.9.0/go.mod h1:yexg5t+KSmqu+njTIh3b7oYPheFtBWGcbVUYF1GGMIc=
+cloud.google.com/go/scheduler v1.10.1/go.mod h1:R63Ldltd47Bs4gnhQkmNDse5w8gBRrhObZ54PxgR2Oo=
cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA=
cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4=
cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4=
cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU=
+cloud.google.com/go/secretmanager v1.11.1/go.mod h1:znq9JlXgTNdBeQk9TBW/FnR/W4uChEKGeqQWAJ8SXFw=
cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4=
cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0=
cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU=
cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q=
cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA=
cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8=
+cloud.google.com/go/security v1.13.0/go.mod h1:Q1Nvxl1PAgmeW0y3HTt54JYIvUdtcpYKVfIB8AOMZ+0=
+cloud.google.com/go/security v1.15.1/go.mod h1:MvTnnbsWnehoizHi09zoiZob0iCHVcL4AUBj76h9fXA=
cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU=
cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc=
cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk=
cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk=
cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0=
+cloud.google.com/go/securitycenter v1.19.0/go.mod h1:LVLmSg8ZkkyaNy4u7HCIshAngSQ8EcIRREP3xBnyfag=
+cloud.google.com/go/securitycenter v1.23.0/go.mod h1:8pwQ4n+Y9WCWM278R8W3nF65QtY172h4S8aXyI9/hsQ=
cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU=
cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s=
+cloud.google.com/go/servicecontrol v1.10.0/go.mod h1:pQvyvSRh7YzUF2efw7H87V92mxU8FnFDawMClGCNuAA=
cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc=
+cloud.google.com/go/servicecontrol v1.11.1/go.mod h1:aSnNNlwEFBY+PWGQ2DoM0JJ/QUXqV5/ZD9DOLB7SnUk=
cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs=
cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg=
cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4=
cloud.google.com/go/servicedirectory v1.7.0/go.mod h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U=
cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY=
+cloud.google.com/go/servicedirectory v1.9.0/go.mod h1:29je5JjiygNYlmsGz8k6o+OZ8vd4f//bQLtvzkPPT/s=
+cloud.google.com/go/servicedirectory v1.10.1/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
+cloud.google.com/go/servicedirectory v1.11.0/go.mod h1:Xv0YVH8s4pVOwfM/1eMTl0XJ6bzIOSLDt8f8eLaGOxQ=
cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco=
cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo=
cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc=
+cloud.google.com/go/servicemanagement v1.8.0/go.mod h1:MSS2TDlIEQD/fzsSGfCdJItQveu9NXnUniTrq/L8LK4=
cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E=
cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU=
cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec=
+cloud.google.com/go/serviceusage v1.6.0/go.mod h1:R5wwQcbOWsyuOfbP9tGdAnCAc6B9DRwPG1xtWMDeuPA=
cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4=
cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw=
cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A=
+cloud.google.com/go/shell v1.7.1/go.mod h1:u1RaM+huXFaTojTbW4g9P5emOrrmLE69KrxqQahKn4g=
cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos=
cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk=
+cloud.google.com/go/spanner v1.45.0/go.mod h1:FIws5LowYz8YAE1J8fOS7DJup8ff7xJeetWEo5REA2M=
+cloud.google.com/go/spanner v1.47.0/go.mod h1:IXsJwVW2j4UKs0eYDqodab6HgGuA1bViSqW4uH9lfUI=
cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM=
cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ=
cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0=
cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco=
cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0=
+cloud.google.com/go/speech v1.15.0/go.mod h1:y6oH7GhqCaZANH7+Oe0BhgIogsNInLlz542tg3VqeYI=
+cloud.google.com/go/speech v1.17.1/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
+cloud.google.com/go/speech v1.19.0/go.mod h1:8rVNzU43tQvxDaGvqOhpDqgkJTFowBpDvCJ14kGlJYo=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
@@ -463,82 +687,118 @@ cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq
cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc=
cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s=
cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y=
-cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0=
-cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80=
+cloud.google.com/go/storage v1.29.0/go.mod h1:4puEjyTKnku6gfKoTfNOU/W+a9JyuVNxjpS5GBrB8h4=
+cloud.google.com/go/storage v1.30.1/go.mod h1:NfxhC0UJE1aXSx7CIIbCf7y9HKT7BiccwkR7+P7gN8E=
+cloud.google.com/go/storage v1.50.0 h1:3TbVkzTooBvnZsk7WaAQfOsNrdoM8QHusXA1cpk6QJs=
+cloud.google.com/go/storage v1.50.0/go.mod h1:l7XeiD//vx5lfqE3RavfmU9yvk5Pp0Zhcv482poyafY=
cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w=
cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I=
cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4=
+cloud.google.com/go/storagetransfer v1.8.0/go.mod h1:JpegsHHU1eXg7lMHkvf+KE5XDJ7EQu0GwNJbbVGanEw=
+cloud.google.com/go/storagetransfer v1.10.0/go.mod h1:DM4sTlSmGiNczmV6iZyceIh2dbs+7z2Ayg6YAiQlYfA=
cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw=
cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g=
cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM=
cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA=
cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c=
+cloud.google.com/go/talent v1.6.2/go.mod h1:CbGvmKCG61mkdjcqTcLOkb2ZN1SrQI8MDyma2l7VD24=
cloud.google.com/go/texttospeech v1.4.0/go.mod h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8=
cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4=
cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc=
+cloud.google.com/go/texttospeech v1.7.1/go.mod h1:m7QfG5IXxeneGqTapXNxv2ItxP/FS0hCZBwXYqucgSk=
cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ=
cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg=
cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM=
+cloud.google.com/go/tpu v1.6.1/go.mod h1:sOdcHVIgDEEOKuqUoi6Fq53MKHJAtOwtz0GuKsWSH3E=
cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28=
cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y=
cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA=
+cloud.google.com/go/trace v1.9.0/go.mod h1:lOQqpE5IaWY0Ixg7/r2SjixMuc6lfTFeO4QGM4dQWOk=
+cloud.google.com/go/trace v1.10.1/go.mod h1:gbtL94KE5AJLH3y+WVpfWILmqgc6dXcqgNXdOPAQTYk=
+cloud.google.com/go/trace v1.11.6 h1:2O2zjPzqPYAHrn3OKl029qlqG6W8ZdYaOWRyr8NgMT4=
+cloud.google.com/go/trace v1.11.6/go.mod h1:GA855OeDEBiBMzcckLPE2kDunIpC72N+Pq8WFieFjnI=
cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs=
cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg=
+cloud.google.com/go/translate v1.5.0/go.mod h1:29YDSYveqqpA1CQFD7NQuP49xymq17RXNaUDdc0mNu0=
cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.7.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos=
+cloud.google.com/go/translate v1.8.1/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
+cloud.google.com/go/translate v1.8.2/go.mod h1:d1ZH5aaOA0CNhWeXeC8ujd4tdCFw8XoNWRljklu5RHs=
cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk=
cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw=
+cloud.google.com/go/video v1.12.0/go.mod h1:MLQew95eTuaNDEGriQdcYn0dTwf9oWiA4uYebxM5kdg=
cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk=
+cloud.google.com/go/video v1.14.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.15.0/go.mod h1:SkgaXwT+lIIAKqWAJfktHT/RbgjSuY6DobxEp0C5yTQ=
+cloud.google.com/go/video v1.17.1/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
+cloud.google.com/go/video v1.19.0/go.mod h1:9qmqPqw/Ib2tLqaeHgtakU+l5TcJxCJbhFXM7UJjVzU=
cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU=
cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4=
cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M=
cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU=
cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU=
+cloud.google.com/go/videointelligence v1.11.1/go.mod h1:76xn/8InyQHarjTWsBR058SmlPCwQjgcvoW0aZykOvo=
cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0=
cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo=
cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo=
cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY=
cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E=
cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY=
+cloud.google.com/go/vision/v2 v2.7.0/go.mod h1:H89VysHy21avemp6xcf9b9JvZHVehWbET0uT/bcuY/0=
+cloud.google.com/go/vision/v2 v2.7.2/go.mod h1:jKa8oSYBWhYiXarHPvP4USxYANYUEdEsQrloLjrSwJU=
cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE=
cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g=
cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc=
+cloud.google.com/go/vmmigration v1.6.0/go.mod h1:bopQ/g4z+8qXzichC7GW1w2MjbErL54rk3/C843CjfY=
+cloud.google.com/go/vmmigration v1.7.1/go.mod h1:WD+5z7a/IpZ5bKK//YmT9E047AD+rjycCAvyMxGJbro=
cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208=
cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8=
+cloud.google.com/go/vmwareengine v0.3.0/go.mod h1:wvoyMvNWdIzxMYSpH/R7y2h5h3WFkx6d+1TIsP39WGY=
+cloud.google.com/go/vmwareengine v0.4.1/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
+cloud.google.com/go/vmwareengine v1.0.0/go.mod h1:Px64x+BvjPZwWuc4HdmVhoygcXqEkGHXoa7uyfTgSI0=
cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w=
cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8=
cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes=
+cloud.google.com/go/vpcaccess v1.7.1/go.mod h1:FogoD46/ZU+JUBX9D606X21EnxiszYi2tArQwLY4SXs=
cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE=
cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg=
cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc=
cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A=
cloud.google.com/go/webrisk v1.8.0/go.mod h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg=
+cloud.google.com/go/webrisk v1.9.1/go.mod h1:4GCmXKcOa2BZcZPn6DCEvE7HypmEJcJkr4mtM+sqYPc=
cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo=
cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ=
cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng=
+cloud.google.com/go/websecurityscanner v1.6.1/go.mod h1:Njgaw3rttgRHXzwCB8kgCYqv5/rGpFCsBOvPbYgszpg=
cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0=
cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M=
cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M=
cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA=
cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw=
+cloud.google.com/go/workflows v1.11.1/go.mod h1:Z+t10G1wF7h8LgdY/EmRcQY8ptBD/nvofaL6FqlET6g=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8=
git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc=
-github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0/go.mod h1:iZDifYGJTIgIIkYRNWPENUnqx6bJ2xnSDFI2tjwZNuY=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1 h1:Wc1ml6QlJs2BHQ/9Bqu1jiyggbsSjramq2oUmp5WeIo=
+github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1/go.mod h1:Ot/6aikWnKWi4l9QB7qVSwa8iMphQNqkWALMoNT3rzM=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1 h1:B+blDbyVIG3WaikNxPnhPiJ1MThR03b3vKGtER95TP4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.1/go.mod h1:JdM5psgjfBf5fo2uWOZhflPWyDBZ/O/CNAH9CtsuZE4=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=
+github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 h1:FPKJS1T+clwv+OLGt13a8UjqeRuh0O4SJ3lUriThc+4=
+github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1/go.mod h1:j2chePtV91HrC22tGoRX3sGY42uF13WzmmV80/OdVAA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 h1:LkHbJbgF3YyvC53aqYGR+wWQDn2Rdp9AQdGndf9QvY4=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0/go.mod h1:QyiQdW4f4/BIfB8ZutZ2s+28RAgfa/pT+zS++ZHyM1I=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 h1:bXwSugBiSbgtz7rOtbfGf+woewp4f06orW9OP5BjHLA=
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70=
-github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0/go.mod h1:T5RfihdXtBDxt1Ch2wobif3TvzTdumDy29kahv6AV9A=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0 h1:IfFdxTUDiV58iZqPKgyWiz4X4fCxZeQ1pTQPImLYXpY=
-github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.0/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU=
-github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0 h1:LR0kAX9ykz8G4YgLCaRDVJ3+n43R8MneB5dTy2konZo=
+github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.0/go.mod h1:DWAciXemNf++PQJLeXUB4HHH5OpsAh12HZnu2wXE1jA=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1 h1:lhZdRq7TIx0GJQvSyX2Si406vrYsov2FXGp/RnSEtcs=
+github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.1/go.mod h1:8cl44BDmi+effbARHMQjgOKA2AYvcohNm7KEt42mSV8=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=
+github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJe7PpYPXT5A29ZkwJaPqcva7BVeemZOZs=
+github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU=
@@ -547,21 +807,22 @@ github.com/DATA-DOG/go-sqlmock v1.4.1/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0 h1:ErKg/3iS1AKcTkf3yixlZ54f9U1rljCkQyEXWUnIUxc=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.27.0/go.mod h1:yAZHSGnqScoU556rBOVkwLze6WP5N+U11RHuWaGVxwY=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0 h1:5IT7xOdq17MtcdtL/vtl6mGfzhaq4m4vpollPRmlsBQ=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.50.0/go.mod h1:ZV4VOm0/eHR06JLrXWe09068dHpr3TRpY9Uo7T+anuA=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0 h1:nNMpRpnkWDAaqcpxMJvxa/Ud98gjbYwayJY4/9bdjiU=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.50.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0 h1:wbMd4eG/fOhsCa6+IP8uEDvWF5vl7rNoUWmP5f72Tbs=
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.52.0/go.mod h1:gdIm9TxRk5soClCwuB0FtdXsbqtw0aqPwBEurK9tPkw=
github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM=
github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk=
-github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA=
-github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q=
-github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
-github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VictoriaMetrics/fastcache v1.12.2 h1:N0y9ASrJ0F6h0QaC3o6uJb3NIZ9VKLjCM7NQbSmF7WI=
github.com/VictoriaMetrics/fastcache v1.12.2/go.mod h1:AmC+Nzz1+3G2eCPapF6UcsnkThDcMsQicp4xDukwJYI=
-github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
-github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY=
github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
@@ -576,6 +837,8 @@ github.com/alecthomas/colour v0.0.0-20160524082231-60882d9e2721/go.mod h1:QO9JBo
github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497 h1:aDITxVUQ/3KBhpVWX57Vo9ntGTxoRw1F0T6/x/tRzNU=
github.com/alecthomas/kingpin v1.3.8-0.20210301060133-17f40c25f497/go.mod h1:b6br6/pDFSfMkBgC96TbpOji05q5pa+v5rIlS0Y6XtI=
github.com/alecthomas/kingpin/v2 v2.3.1/go.mod h1:oYL5vtsvEHZGHxU7DMp32Dvx+qL+ptGn6lWaot2vCNE=
+github.com/alecthomas/kingpin/v2 v2.4.0 h1:f48lwail6p8zpO1bC4TxtqACaGqHYA22qkHjHpqDjYY=
+github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
github.com/alecthomas/kong v0.1.17-0.20190424132513-439c674f7ae0/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI=
github.com/alecthomas/kong v0.2.1-0.20190708041108-0548c6b1afae/go.mod h1:+inYUSluD+p4L8KdviBSgzcqEjUQOfC5fQDRFuc36lI=
github.com/alecthomas/kong-hcl v0.1.8-0.20190615233001-b21fea9723c8/go.mod h1:MRgZdU3vrFd05IQ89AxUZ0aYdF39BYoNFa324SodPCA=
@@ -588,23 +851,20 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
-github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg=
-github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
-github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
-github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
-github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA=
-github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0=
-github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible h1:9gWa46nstkJ9miBReJcN8Gq34cBFbzSpQZVVT9N09TM=
-github.com/aliyun/aliyun-oss-go-sdk v2.2.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0=
+github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs=
+github.com/alicebob/miniredis/v2 v2.35.0 h1:QwLphYqCEAo1eu1TqPRN2jgVMPBweeQcR21jeqDCONI=
+github.com/alicebob/miniredis/v2 v2.35.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM=
+github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible h1:8psS8a+wKfiLt1iVDX79F7Y6wUM49Lcha2FMXt4UM8g=
+github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0=
-github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
-github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apache/arrow/go/v11 v11.0.0/go.mod h1:Eg5OsL5H+e299f7u5ssuXsuHQVEGC4xei5aX110hRiI=
+github.com/apache/arrow/go/v12 v12.0.0/go.mod h1:d+tV/eHZZ7Dz7RPrFKtPK02tpr+c9/PEd/zm8mDS9Vg=
github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
@@ -614,56 +874,45 @@ github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
-github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
-github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
-github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
-github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
-github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
-github.com/aws/aws-sdk-go-v2 v1.16.0/go.mod h1:lJYcuZZEHWNIb6ugJjbQY1fykdoobWbOS7kJYb4APoI=
-github.com/aws/aws-sdk-go-v2 v1.16.16 h1:M1fj4FE2lB4NzRb9Y0xdWsn2P0+2UHVxwKyOa4YJNjk=
-github.com/aws/aws-sdk-go-v2 v1.16.16/go.mod h1:SwiyXi/1zTUZ6KIAmLK5V5ll8SiURNUYOqTerZPaF9k=
-github.com/aws/aws-sdk-go-v2/config v1.15.1 h1:hTIZFepYESYyowQUBo47lu69WSxsYqGUILY9Nu8+7pY=
-github.com/aws/aws-sdk-go-v2/config v1.15.1/go.mod h1:MZHGbuW2WnqIOQQBKu2ZkhTjuutZSTnn56TDq4QyydE=
-github.com/aws/aws-sdk-go-v2/credentials v1.11.0/go.mod h1:EdV1ZFgtZ4XM5RDHWcRWK8H+xW5duNVBqWj2oLu7tRo=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.20 h1:9+ZhlDY7N9dPnUmf7CDfW9In4sW5Ff3bh7oy4DzS1IE=
-github.com/aws/aws-sdk-go-v2/credentials v1.12.20/go.mod h1:UKY5HyIux08bbNA7Blv4PcXQ8cTkGh7ghHMFklaviR4=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.1/go.mod h1:Yph0XsTbQ5GGZ2+mO1a03P/SO9fdX3t1nejIp2tq79g=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17 h1:r08j4sbZu/RVi+BNxkBJwPMUYY3P8mgSDuKkZ/ZN1lE=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.17/go.mod h1:yIkQcCDYNsZfXpd5UX2Cy+sWA1jPgIhGTw9cOBzfVnQ=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.7/go.mod h1:oB9nZcxH1cGq7NPGurVJwxrO2vmJ9mmEBayCwcAlmT8=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23 h1:s4g/wnzMf+qepSNgTvaQQHNxyMLKSawNhKCPNy++2xY=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.23/go.mod h1:2DFxAQ9pfIRy0imBCJv+vZ2X6RKxves6fbnEuSry6b4=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.1/go.mod h1:K4vz7lRYCyLYpYAMCLObODahFgARdD3YVa0MvQte9Co=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17 h1:/K482T5A3623WJgWT8w1yRAFK4RzGzEl7y39yhtn9eA=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.17/go.mod h1:pRwaTYCJemADaqCbUAxltMoHKata7hmB5PjEXeu0kfg=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8 h1:adr3PfiggFtqgFofAMUFCtdvwzpf3QxPES4ezK4M3iI=
-github.com/aws/aws-sdk-go-v2/internal/ini v1.3.8/go.mod h1:wLbQYt36AJqaRZUQiCNXzbtkNigyPfKHrotHuIDiCy8=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.1/go.mod h1:2Hhr9Eh1gJzDatwACX/ozAZ/ljq5vzvPRu5cdu25tzc=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17 h1:Jrd/oMh0PKQc6+BowB+pLEwLIgaQF29eYbe7E1Av9Ug=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.17/go.mod h1:4nYOrY41Lrbk2170/BGkcJKBhws9Pfn8MG3aGqjjeFI=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.1/go.mod h1:CvFTucADIx7U/M44vjLs/ZttpQHdpxwK+62+dUGhDeY=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.23 h1:pwvCchFUEnlceKIgPUouBJwK81aCkQ8UDMORfeFtW10=
-github.com/aws/aws-sdk-go-v2/service/sso v1.11.23/go.mod h1:/w0eg9IhFGjGyyncHIQrXtU8wvNsTJOP0R6PPj0wf80=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5 h1:GUnZ62TevLqIoDyHeiWj2P7EqaosgakBKVvWriIdLQY=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.5/go.mod h1:csZuQY65DAdFBt1oIjO5hhBR49kQqop4+lcuCjf2arA=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.1/go.mod h1:Aq2/Qggh2oemSfyHH+EO4UBbgWG6zFCXLHYI4ILTY7w=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.19 h1:9pPi0PsFNAGILFfPCk8Y0iyEBGc6lu6OQ97U7hmdesg=
-github.com/aws/aws-sdk-go-v2/service/sts v1.16.19/go.mod h1:h4J3oPZQbxLhzGnk+j9dfYHi5qIOVJ5kczZd658/ydM=
-github.com/aws/smithy-go v1.11.1/go.mod h1:3xHYmszWVx2c0kIwQeEVf9uSm4fYZt67FBJnwub1bgM=
-github.com/aws/smithy-go v1.13.3 h1:l7LYxGuzK6/K+NzJ2mC+VvLUbae0sL3bXU//04MkmnA=
-github.com/aws/smithy-go v1.13.3/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA=
-github.com/baidubce/bce-sdk-go v0.9.111 h1:yGgtPpZYUZW4uoVorQ4xnuEgVeddACydlcJKW87MDV4=
-github.com/baidubce/bce-sdk-go v0.9.111/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
+github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
+github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk=
+github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
+github.com/aws/aws-sdk-go-v2/config v1.29.15 h1:I5XjesVMpDZXZEZonVfjI12VNMrYa38LtLnw4NtY5Ss=
+github.com/aws/aws-sdk-go-v2/config v1.29.15/go.mod h1:tNIp4JIPonlsgaO5hxO372a6gjhN63aSWl2GVl5QoBQ=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.68 h1:cFb9yjI02/sWHBSYXAtkamjzCuRymvmeFmt0TC0MbYY=
+github.com/aws/aws-sdk-go-v2/credentials v1.17.68/go.mod h1:H6E+jBzyqUu8u0vGaU6POkK3P0NylYEeRZ6ynBpMqIk=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
+github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1 h1:MXUnj1TKjwQvotPPHFMfynlUljcpl5UccMrkiauKdWI=
+github.com/aws/aws-sdk-go-v2/service/dynamodb v1.50.1/go.mod h1:fe3UQAYwylCQRlGnihsqU/tTQkrc2nrW/IhWYwlW9vg=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6 h1:34ojKW9OV123FZ6Q8Nua3Uwy6yVTcshZ+gLE4gpMDEs=
+github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.11.6/go.mod h1:sXXWh1G9LKKkNbuR0f0ZPd/IvDXlMGiag40opt4XEgY=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0 h1:eRhU3Sh8dGbaniI6B+I48XJMrTPRkK4DKo+vqIxziOU=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.0/go.mod h1:paNLV18DZ6FnWE/bd06RIKPDIFpjuvCkGKWTG/GDBeM=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
+github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.20 h1:oIaQ1e17CSKaWmUTu62MtraRWVIosn/iONMuZt0gbqc=
+github.com/aws/aws-sdk-go-v2/service/sts v1.33.20/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
+github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
+github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/baidubce/bce-sdk-go v0.9.230 h1:HzELBKiD7QAgYqZ1qHZexoI2A3Lo/6zYGQFvcUbS5cA=
+github.com/baidubce/bce-sdk-go v0.9.230/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps=
github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0=
-github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
-github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
-github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -675,13 +924,13 @@ github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
-github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
-github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
+github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf h1:TqhNAT4zKbTdLa62d2HDBFdvgSbIGB3eJE8HqhgiL9I=
+github.com/bradfitz/gomemcache v0.0.0-20250403215159-8d39553ac7cf/go.mod h1:r5xuitiExdLAJ09PR7vBVENGvp4ZuTBeWTGtxuX3K+c=
+github.com/caio/go-tdigest v3.1.0+incompatible h1:uoVMJ3Q5lXmVLCCqaMGHLBWnbGoN6Lpu7OAUPR60cds=
+github.com/caio/go-tdigest v3.1.0+incompatible/go.mod h1:sHQM/ubZStBUmF1WbB8FAm8q9GjDajLC5T7ydxE3JHI=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -703,41 +952,38 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I=
github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng=
-github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b h1:ga8SEFjZ60pxLcmhnThWgvH2wg8376yUJmPhEH4H3kw=
-github.com/cncf/xds/go v0.0.0-20240423153145-555b57ec207b/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv1aFbZMiM9vblcSArJRf2Irls=
+github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/coreos/etcd v3.3.25+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cortexproject/cortex v1.18.1 h1:mos5R1cTpZH3789Eer4DhpBJazCGILekDMZczlU7bWo=
-github.com/cortexproject/cortex v1.18.1/go.mod h1:BY4ZlFSP+2orh8IAjROR71rA/733hJbS4YczQNO7BnY=
-github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/cortexproject/cortex v1.20.1 h1:hBFBvmEPLXJk2g9wozOttcNPNFbsCAhhlE5n74bg/E4=
+github.com/cortexproject/cortex v1.20.1/go.mod h1:jlIdsPKX/bnvTcihtXYOmmNr9IBCOI4PnrTSMRmGWbQ=
+github.com/cortexproject/weaveworks-common v0.0.0-20250902164925-0315015a8b9f h1:hDM26bY51+giykKRIH8TUYzEy7fn62iKfTW7vfgDuNw=
+github.com/cortexproject/weaveworks-common v0.0.0-20250902164925-0315015a8b9f/go.mod h1:bls8PY13xoOKkZuRhhDdR2rNk4pfdGWCR6k2jF9s9+4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs=
+github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI=
github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964 h1:y5HC9v93H5EPKqaS1UYVg1uYah5Xf51mBfIoWehClUQ=
github.com/danwakefield/fnmatch v0.0.0-20160403171240-cbb64ac3d964/go.mod h1:Xd9hchkHSWYkEqJwUGisez3G1QY8Ryz0sdWrLPMGjLk=
@@ -747,84 +993,81 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE=
github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4=
-github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo=
+github.com/dgryski/go-metro v0.0.0-20200812162917-85c65e2d0165/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
+github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33 h1:ucRHb6/lvW/+mTEIGbvhcYU3S8+uSNkuMjx/qZFfhtM=
+github.com/dgryski/go-metro v0.0.0-20250106013310-edb8663e5e33/go.mod h1:c9O8+fpSOX1DM8cPNSkX/qsBWdkD4yd2dpciOWQjpBw=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/digitalocean/godo v1.157.0 h1:ReELaS6FxXNf8gryUiVH0wmyUmZN8/NCmBX4gXd3F0o=
+github.com/digitalocean/godo v1.157.0/go.mod h1:tYeiWY5ZXVpU48YaFv0M5irUFHXGorZpDNm7zzdWMzM=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/dlclark/regexp2 v1.1.6/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/dlclark/regexp2 v1.2.0 h1:8sAhBGEM0dRWogWqWyQeIJnxjWO6oIjl8FKqREDsGfk=
github.com/dlclark/regexp2 v1.2.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
-github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE=
-github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.3.0+incompatible h1:ffS62aKWupCWdvcee7nBU9fhnmknOqDPaJAMtfK0ImQ=
+github.com/docker/docker v28.3.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
-github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
-github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
-github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
-github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
-github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
-github.com/efficientgo/core v1.0.0-rc.2 h1:7j62qHLnrZqO3V3UA0AqOGd5d5aXV3AX6m/NZBHp78I=
-github.com/efficientgo/core v1.0.0-rc.2/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps=
+github.com/edsrzf/mmap-go v1.2.0 h1:hXLYlkbaPzt1SaQk+anYwKSRNhufIDCchSPkUD6dD84=
+github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
+github.com/efficientgo/core v1.0.0-rc.3 h1:X6CdgycYWDcbYiJr1H1+lQGzx13o7bq3EUkbB9DsSPc=
+github.com/efficientgo/core v1.0.0-rc.3/go.mod h1:FfGdkzWarkuzOlY04VY+bGfb1lWrjaL6x/GLcQ4vJps=
github.com/efficientgo/e2e v0.14.1-0.20230710114240-c316eb95ae5b h1:8VX23BNufsa4KCqnnEonvI3yrou2Pjp8JLcbdVn0Fs8=
github.com/efficientgo/e2e v0.14.1-0.20230710114240-c316eb95ae5b/go.mod h1:plsKU0YHE9uX+7utvr7SiDtVBSHJyEfHRO4UnUgDmts=
-github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd h1:VaYzzXeUbC5fVheskcKVNOyJMEYD+HgrJNzIAg/mRIM=
-github.com/efficientgo/tools/extkingpin v0.0.0-20220817170617-6c25e3b627dd/go.mod h1:ZV0utlglOczUWv3ih2AbqPSoLoFzdplUYxwV62eZi6Q=
-github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE=
+github.com/efficientgo/tools/extkingpin v0.0.0-20230505153745-6b7392939a60 h1:JZLv+76vd7yTN049X7FRbDgvsXrbl7cDaZW12KQdebE=
+github.com/efficientgo/tools/extkingpin v0.0.0-20230505153745-6b7392939a60/go.mod h1:0rmhYYrjSfDaVnd8ubwq5vRc1epHv00KkiNrvWuxo+s=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
-github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34=
github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI=
-github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI=
-github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0=
+github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
+github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
+github.com/envoyproxy/go-control-plane/envoy v1.32.2/go.mod h1:eR2SOX2IedqlPvmiKjUH7Wu//S602JKI7HPC/L3SRq8=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
+github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
+github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo=
github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w=
github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss=
-github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A=
github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
+github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
+github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM=
github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
-github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
-github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88=
-github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY=
+github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
-github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
-github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
-github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
-github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
+github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
+github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474 h1:TufioMBjkJ6/Oqmlye/ReuxHFS35HyLmypj/BNy/8GY=
github.com/fullstorydev/emulators/storage v0.0.0-20240401123056-edc69752f474/go.mod h1:PQwxF4UU8wuL+srGxr3BOhIW5zXqgucwVlO/nPZLsxw=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
+github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
+github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g=
github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks=
github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY=
@@ -837,7 +1080,6 @@ github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
@@ -851,18 +1093,19 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
-github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
-github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
+github.com/go-openapi/errors v0.22.1 h1:kslMRRnK7NCb/CvR1q1VWuEQCEIsBGn5GgKD9e+HYhU=
+github.com/go-openapi/errors v0.22.1/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
@@ -876,18 +1119,21 @@ github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMg
github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M=
-github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
-github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0=
-github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM=
+github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg=
-github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
+github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
+github.com/go-zookeeper/zk v1.0.4 h1:DPzxraQx7OrPyXq2phlGlNSIyWEsAox0RJmjTseMV6I=
+github.com/go-zookeeper/zk v1.0.4/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
@@ -896,40 +1142,37 @@ github.com/gobwas/ws v1.1.0-rc.5/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIb
github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk=
github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
-github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
-github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
-github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
+github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
-github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E=
+github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0=
github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
-github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=
-github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
+github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogo/status v1.0.3/go.mod h1:SavQ51ycCLnc7dGyJxp8YAmudx8xqiVrRf+6IXRsugc=
github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM=
github.com/gogo/status v1.1.1 h1:DuHXlSFHNKqTQ+/ACf5Vs6r4X/dH2EgIzR9Vr+H65kg=
github.com/gogo/status v1.1.1/go.mod h1:jpG3dM5QPcqu19Hg8lkUhBFBa3TcLs1DG7+2Jqci7oU=
-github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
-github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
+github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
+github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
@@ -958,10 +1201,10 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v1.0.0 h1:Oy607GVXHs7RtbggtPBnr2RmDArIsAefDwvrdWvRhGs=
+github.com/golang/snappy v1.0.0/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50=
github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18=
@@ -978,11 +1221,11 @@ github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b h1:fbskpz/cPqWH8VqkQ7LJ
github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b/go.mod h1:Z4GIJBJO3Wa4gD4vbwQxXXZ+WHmW6E9ixmNrwvs0iZs=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
-github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
-github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
-github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -998,16 +1241,15 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II=
github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -1031,12 +1273,14 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
-github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
+github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18=
+github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
-github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM=
+github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
+github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
+github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -1046,8 +1290,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY
github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg=
github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
-github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
+github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
@@ -1058,54 +1302,49 @@ github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK
github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo=
github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY=
github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
-github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
-github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
+github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
+github.com/googleapis/gax-go/v2 v2.10.0/go.mod h1:4UOEnMCrxsSqQ940WnTiD6qJ63le2ev3xfyagutxiPw=
+github.com/googleapis/gax-go/v2 v2.11.0/go.mod h1:DxmR61SGKkGLa2xigwuZIQpkCI2S5iydzRfb3peWZJI=
+github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
+github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud v1.13.0 h1:8iY9d1DAbzMW6Vok1AxbbK5ZaUjzMp0tdyt4fX9IeJ0=
-github.com/gophercloud/gophercloud v1.13.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
+github.com/gophercloud/gophercloud/v2 v2.7.0 h1:o0m4kgVcPgHlcXiWAjoVxGd8QCmvM5VU+YM71pFbn0E=
+github.com/gophercloud/gophercloud/v2 v2.7.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk=
github.com/gorilla/csrf v1.6.0/go.mod h1:7tSf8kmjNYr7IWDCYhd3U8Ck34iQ/Yw5CJu7bAkHEGI=
github.com/gorilla/handlers v1.4.1/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
-github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/gosimple/slug v1.1.1 h1:fRu/digW+NMwBIP+RmviTK97Ho/bEj/C9swrCspN3D4=
github.com/gosimple/slug v1.1.1/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0=
github.com/grafana-tools/sdk v0.0.0-20220203092117-edae16afa87b h1:R9LID2XreyUOQfJ/NKLGuYOF4/Wz6ljmYFAhlOaHVQ4=
github.com/grafana-tools/sdk v0.0.0-20220203092117-edae16afa87b/go.mod h1:AHHlOEv1+GGQ3ktHMlhuTUwo3zljV3QJbC0+8o2kn+4=
github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1 h1:Qf+/W3Tup0nO21tgJmO14WJK0yyrm4L2UJipZP+Zoow=
github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1/go.mod h1:uPG2nyK4CtgNDmWv7qyzYcdI+S90kHHRWvHnBtEMBXM=
-github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b h1:UlCBLaqvS4wVYNrMKSfqTBVsed/EOY9dnenhYZMUnrA=
-github.com/grafana/memberlist v0.2.5-0.20211201083710-c7bc8e9df94b/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU=
+github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248=
github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI=
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk=
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 h1:sGm2vDRFUrQJO/Veii4h4zG2vvqG6uWNkBHSTqXOZk0=
+github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2/go.mod h1:wd1YpapPLivG6nQgbf7ZkG1hhSOXDhhn4MLTknx2aAc=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
-github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
-github.com/hashicorp/consul/api v1.29.2 h1:aYyRn8EdE2mSfG14S1+L9Qkjtz8RzmaWh6AcNGRNwPw=
-github.com/hashicorp/consul/api v1.29.2/go.mod h1:0YObcaLNDSbtlgzIRtmRXI1ZkeuK0trCBxwZQ4MYnIk=
-github.com/hashicorp/consul/proto-public v0.6.2 h1:+DA/3g/IiKlJZb88NBn0ZgXrxJp2NlvCZdEyl+qxvL0=
-github.com/hashicorp/consul/proto-public v0.6.2/go.mod h1:cXXbOg74KBNGajC+o8RlA502Esf0R9prcoJgiOX/2Tg=
-github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/consul/api v1.32.0 h1:5wp5u780Gri7c4OedGEPzmlUEzi0g2KyiPphSr6zjVg=
+github.com/hashicorp/consul/api v1.32.0/go.mod h1:Z8YgY0eVPukT/17ejW+l+C7zJmKwgPHtjU1q16v/Y40=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg=
github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s=
@@ -1124,6 +1363,8 @@ github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
+github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
@@ -1134,22 +1375,19 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU=
github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
-github.com/hashicorp/go-sockaddr v1.0.6 h1:RSG8rKU28VTUTvEKghe5gIhIQpv8evvNpnDEyqO4u9I=
-github.com/hashicorp/go-sockaddr v1.0.6/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI=
+github.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=
+github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.6.0 h1:uL2shRDx7RTrOrTCUZEGP/wJUFiUI8QT6E7z5o8jga4=
@@ -1158,48 +1396,31 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
-github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1avVOnu2cfms8VAiD8lUq3vWI8mTocOXN/w=
-github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec h1:+YBzb977VrmffaCX/OBm17dEVJUcWn5dW+eqs3aIJ/A=
+github.com/hashicorp/nomad/api v0.0.0-20241218080744-e3ac00f30eec/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
-github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I=
-github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufWh844gUw1az5OUvaeq8=
-github.com/hodgesds/perf-utils v0.0.8/go.mod h1:F6TfvsbtrF88i++hou29dTXlI2sfsJv+gRZDtmTJkAs=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible h1:tKTaPHNVwikS3I1rdyf1INNvgJXWSf/+TzqsiGbrgnQ=
-github.com/huaweicloud/huaweicloud-sdk-go-obs v3.23.3+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
-github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/hetznercloud/hcloud-go/v2 v2.21.1 h1:IH3liW8/cCRjfJ4cyqYvw3s1ek+KWP8dl1roa0lD8JM=
+github.com/hetznercloud/hcloud-go/v2 v2.21.1/go.mod h1:XOaYycZJ3XKMVWzmqQ24/+1V7ormJHmPdck/kxrNnQA=
+github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.4+incompatible h1:yNjwdvn9fwuN6Ouxr0xHM0cVu03YMUWUyFmu2van/Yc=
+github.com/huaweicloud/huaweicloud-sdk-go-obs v3.25.4+incompatible/go.mod h1:l7VUhRbTKCzdOacdT4oWCwATKyvZqUOlOqr0Ous3k4s=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
+github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
-github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/ionos-cloud/sdk-go/v6 v6.0.4 h1:4LoWeM7WtcDqYDjlntqQ3fD6XaENlCw2YqiVWkHQbNA=
github.com/ionos-cloud/sdk-go/v6 v6.0.4/go.mod h1:UE3V/2DjnqD5doOqtjYqzJRMpI1RiwrvuuSEPX1pdnk=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
-github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
-github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
-github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
-github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
-github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -1207,25 +1428,31 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
+github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=
+github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE=
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
-github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
-github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
-github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/knadh/koanf/maps v0.1.2 h1:RBfmAW5CnZT+PJ1CVc1QSJKf4Xu9kxfQgYVQSu8hpbo=
+github.com/knadh/koanf/maps v0.1.2/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI=
+github.com/knadh/koanf/providers/confmap v1.0.0 h1:mHKLJTE7iXEys6deO5p6olAiZdG5zwp8Aebir+/EaRE=
+github.com/knadh/koanf/providers/confmap v1.0.0/go.mod h1:txHYHiI2hAtF0/0sCmcuol4IDcuQbKTybiB1nOcUo1A=
+github.com/knadh/koanf/v2 v2.2.1 h1:jaleChtw85y3UdBnI0wCqcg1sj1gPoz6D3caGNHtrNE=
+github.com/knadh/koanf/v2 v2.2.1/go.mod h1:PSFru3ufQgTsI7IF+95rf9s8XA1+aHxKuO/W+dPoHEY=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00=
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -1246,27 +1473,28 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8=
-github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
-github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso=
-github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ=
-github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg=
+github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353 h1:X/79QL0b4YJVO5+OsPH9rF2u428CIrGL/jLmPsoOQQ4=
+github.com/leesper/go_rng v0.0.0-20190531154944-a612b043e353/go.mod h1:N0SVk0uhy+E1PZ3C9ctsPRlvOPAFPkCNlcPBDkt0N3U=
+github.com/linode/linodego v1.52.2 h1:N9ozU27To1LMSrDd8WvJZ5STSz1eGYdyLnxhAR/dIZg=
+github.com/linode/linodego v1.52.2/go.mod h1:bI949fZaVchjWyKIA08hNyvAcV6BAS+PM2op3p7PAWA=
github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA=
github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o=
-github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
+github.com/lyft/protoc-gen-star/v2 v2.0.3/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
+github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
-github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
-github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
@@ -1275,51 +1503,52 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
-github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA=
+github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc=
-github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
-github.com/mdlayher/netlink v0.0.0-20190828143259-340058475d09/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
-github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
-github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY=
-github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8=
+github.com/mdlayher/socket v0.5.1 h1:VZaqt6RkGkt2OE9l3GcC6nZkqD3xKeQLyfleW/uBcos=
+github.com/mdlayher/socket v0.5.1/go.mod h1:TjPLHI1UgwEv5J1B5q0zTZq12A/6H7nKmtTanQE37IQ=
+github.com/mdlayher/vsock v1.2.1 h1:pC1mTJTvjo1r9n9fbm7S1j04rCgCzhCOS5DY0zqHlnQ=
+github.com/mdlayher/vsock v1.2.1/go.mod h1:NRfCibel++DgeMD8z/hP+PPTjlNJsdPOmxcnENvE+SE=
github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a h1:0usWxe5SGXKQovz3p+BiQ81Jy845xSMu2CWKuXsXuUM=
github.com/metalmatze/signal v0.0.0-20210307161603-1c9aa721a97a/go.mod h1:3OETvrxfELvGsU2RoGGWercfeZ4bCL3+SOwzIWtJH/Q=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
-github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
-github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
+github.com/miekg/dns v1.1.66 h1:FeZXOS3VCVsKnEAd+wBkjMC3D2K+ww66Cq3VnCINuJE=
+github.com/miekg/dns v1.1.66/go.mod h1:jGFzBsSNbJw6z1HYut1RKBKHA9PBdxeHrZG8J+gC2WE=
github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY=
github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE=
+github.com/minio/crc64nvme v1.0.1 h1:DHQPrYPdqK7jQG/Ls5CTBZWeex/2FMS3G5XGkycuFrY=
+github.com/minio/crc64nvme v1.0.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.75 h1:0uLrB6u6teY2Jt+cJUVi9cTvDRuBKWSRzSAcznRkwlE=
-github.com/minio/minio-go/v7 v7.0.75/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
+github.com/minio/minio-go/v7 v7.0.93 h1:lAB4QJp8Nq3vDMOU0eKgMuyBiEGMNlXQ5Glc8qAxqSU=
+github.com/minio/minio-go/v7 v7.0.93/go.mod h1:71t2CqDt3ThzESgZUlU1rBN54mksGGlkLcFgguDnnAc=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
+github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -1329,72 +1558,60 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ=
-github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60=
+github.com/mozillazg/go-httpheader v0.4.0 h1:aBn6aRXtFzyDLZ4VIRLsZbbJloagQfMnCiYgOq6hK4w=
+github.com/mozillazg/go-httpheader v0.4.0/go.mod h1:PuT8h0pw6efvp8ZeUec1Rs7dwjK08bt6gKSReGMqtdA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
-github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
-github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
-github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
-github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/ncw/swift v1.0.53 h1:luHjjTNtekIEvHg5KdAFIBaH7bWfNkefwFnpDffSIks=
github.com/ncw/swift v1.0.53/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E=
-github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
-github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
-github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
-github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU=
+github.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=
+github.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk=
-github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0=
-github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
+github.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=
+github.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=
+github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
+github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0 h1:2pzb6bC/AAfciC9DN+8d7Y8Rsk8ZPCfp/ACTfZu87FQ=
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/exp/metrics v0.129.0/go.mod h1:tIE4dzdxuM7HnFeYA6sj5zfLuUA/JxzQ+UDl1YrHvQw=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0 h1:ydkfqpZ5BWZfEJEs7OUhTHW59og5aZspbUYxoGcAEok=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.129.0/go.mod h1:oA+49dkzmhUx0YFC9JXGuPPSBL0TOTp6jkv7qSr2n0Q=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0 h1:AOVxBvCZfTPj0GLGqBVHpAnlC9t9pl1JXUQXymHliiY=
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.129.0/go.mod h1:0CAJ32V/bCUBhNTEvnN9wlOG5IsyZ+Bmhe9e3Eri7CU=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0 h1:yDLSAoIi3jNt4R/5xN4IJ9YAg1rhOShgchlO/ESv8EY=
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/deltatocumulativeprocessor v0.129.0/go.mod h1:IXQHbTPxqNcuu44FvkyvpYJ6Qy4wh4YsCVkKsp0Flzo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc=
-github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg=
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
-github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
+github.com/opentracing-contrib/go-grpc v0.1.2 h1:MP16Ozc59kqqwn1v18aQxpeGZhsBanJ2iurZYaQSZ+g=
+github.com/opentracing-contrib/go-grpc v0.1.2/go.mod h1:glU6rl1Fhfp9aXUHkE36K2mR4ht8vih0ekOVlWKEUHM=
github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
-github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=
-github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
-github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
-github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
+github.com/opentracing-contrib/go-stdlib v1.1.0 h1:cZBWc4pA4e65tqTJddbflK435S0tDImj6c9BMvkdUH0=
+github.com/opentracing-contrib/go-stdlib v1.1.0/go.mod h1:S0p+X9p6dcBkoMTL+Qq2VOvxKs9ys5PpYWXWqlCS0bQ=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
-github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
-github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
-github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
-github.com/oracle/oci-go-sdk/v65 v65.41.1 h1:+lbosOyNiib3TGJDvLq1HwEAuFqkOjPJDIkyxM15WdQ=
-github.com/oracle/oci-go-sdk/v65 v65.41.1/go.mod h1:MXMLMzHnnd9wlpgadPkdlkZ9YrwQmCOmbX5kjVEJodw=
-github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI=
-github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
-github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
+github.com/oracle/oci-go-sdk/v65 v65.93.1 h1:lIvy/6aQOUenQI+cxXH1wDBJeXFPO9Du3CaomXeYFaY=
+github.com/oracle/oci-go-sdk/v65 v65.93.1/go.mod h1:u6XRPsw9tPziBh76K7GrrRXPa8P8W3BQeqJ6ZZt9VLA=
+github.com/ovh/go-ovh v1.9.0 h1:6K8VoL3BYjVV3In9tPJUdT7qMx9h0GExN9EXx1r2kKE=
+github.com/ovh/go-ovh v1.9.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
+github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY=
+github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY=
github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI=
-github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
@@ -1403,103 +1620,106 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
+github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0 h1:owfYHh79h8Y5HvNMGyww+DaVwo10CKiRW1RQrrZzIwg=
-github.com/prometheus-community/prom-label-proxy v0.8.1-0.20240127162815-c1195f9aabc0/go.mod h1:rT989D4UtOcfd9tVqIZRVIM8rkg+9XbreBjFNEKXvVI=
-github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I=
-github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE=
+github.com/prometheus-community/prom-label-proxy v0.11.1 h1:jX+m+BQCNM0z3/P6V6jVxbiDKgugvk91SaICD6bVhT4=
+github.com/prometheus-community/prom-label-proxy v0.11.1/go.mod h1:uTeQW+wZ/VPV1LL3IPfvUE++wR2nPLex+Y4RE38Cpis=
+github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA=
+github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi/PPlt39vo9LQUHxM=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
-github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
-github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
-github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
+github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
+github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
+github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
-github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
-github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
-github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
-github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
+github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
+github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
github.com/prometheus/exporter-toolkit v0.8.2/go.mod h1:00shzmJL7KxcsabLWcONwpyNEuWhREOnFqZW7vadFS0=
-github.com/prometheus/exporter-toolkit v0.11.0 h1:yNTsuZ0aNCNFQ3aFTD2uhPOvr4iD7fdBvKPAEGkNf+g=
-github.com/prometheus/exporter-toolkit v0.11.0/go.mod h1:BVnENhnNecpwoTLiABx7mrPB/OLRIgN74qlQbV+FK1Q=
-github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc=
+github.com/prometheus/exporter-toolkit v0.14.0 h1:NMlswfibpcZZ+H0sZBiTjrA3/aBFHkNZqE+iCj5EmRg=
+github.com/prometheus/exporter-toolkit v0.14.0/go.mod h1:Gu5LnVvt7Nr/oqTBUC23WILZepW0nffNo10XdhQcwWA=
+github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588 h1:QlySqDdSESgWDePeAYskbbcKKdowI26m9aU9zloHyYE=
+github.com/prometheus/otlptranslator v0.0.0-20250620074007-94f535e0c588/go.mod h1:P8AwMgdD7XEr6QRUJ2QWLpiAZTgTE2UYgjlu3svompI=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
-github.com/prometheus/prometheus v0.54.0-rc.0 h1:OWyFAuGkQTJOcWOgMHw6HnVjjT3Nv3ZeVo6reb+amy4=
-github.com/prometheus/prometheus v0.54.0-rc.0/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY=
+github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
+github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
+github.com/prometheus/prometheus v0.306.0 h1:Q0Pvz/ZKS6vVWCa1VSgNyNJlEe8hxdRlKklFg7SRhNw=
+github.com/prometheus/prometheus v0.306.0/go.mod h1:7hMSGyZHt0dcmZ5r4kFPJ/vxPQU99N5/BGwSPDxeZrQ=
+github.com/prometheus/sigv4 v0.2.0 h1:qDFKnHYFswJxdzGeRP63c4HlH3Vbn1Yf/Ao2zabtVXk=
+github.com/prometheus/sigv4 v0.2.0/go.mod h1:D04rqmAaPPEUkjRQxGqjoxdyJuyCh6E0M18fZr0zBiE=
+github.com/puzpuzpuz/xsync/v3 v3.5.1 h1:GJYJZwO6IdxN/IKbneznS6yPkVC+c3zyY/j19c++5Fg=
+github.com/puzpuzpuz/xsync/v3 v3.5.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be h1:ta7tUOvsPHVHGom5hKW5VXNc2xZIkfCKP8iaqOyYtUQ=
github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be/go.mod h1:MIDFMn7db1kT65GmV94GzpX9Qdi7N/pQlwb+AN8wh+Q=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
-github.com/redis/rueidis v1.0.14-go1.18 h1:dGir5z8w8X1ex7JWO/Zx2FMBrZgQ8Yjm+lw9fPLSNGw=
-github.com/redis/rueidis v1.0.14-go1.18/go.mod h1:HGekzV3HbmzFmRK6j0xic8Z9119+ECoGMjeN1TV1NYU=
+github.com/rantav/go-grpc-channelz v0.0.4 h1:8GvqhA6siQVBsZYzal3yHhyJ9YiHEJx7RtSH2Jvm9Co=
+github.com/rantav/go-grpc-channelz v0.0.4/go.mod h1:HodrRmnnH1zXcEEfK7EJrI23YMPMT7uvyAYkq2JUIcI=
+github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
+github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
+github.com/redis/rueidis v1.0.61 h1:AkbCMeTyjFSQraGaNYncg3unMCTYGr6Y8WOqGhDOQu4=
+github.com/redis/rueidis v1.0.61/go.mod h1:Lkhr2QTgcoYBhxARU7kJRO8SyVlgUuEkcJO1Y8MCluA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
-github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=
+github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=
github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w=
github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU=
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0=
+github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sercand/kuberesolver v2.1.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ=
+github.com/segmentio/fasthash v1.0.3 h1:EI9+KE1EwvMLBWwjpRDc+fEM+prwxDYbslddQGtrmhM=
+github.com/segmentio/fasthash v1.0.3/go.mod h1:waKX8l2N8yckOgmSsXJi7x1ZfdKZ4x7KRMzBtS3oedY=
+github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771 h1:emzAzMZ1L9iaKCTxdy3Em8Wv4ChIAGnfiz18Cda70g4=
+github.com/seiflotfy/cuckoofilter v0.0.0-20240715131351-a2f2c23f1771/go.mod h1:bR6DqgcAl1zTcOX8/pE2Qkj9XO00eCNqmKb7lXP8EAg=
github.com/sercand/kuberesolver v2.4.0+incompatible/go.mod h1:lWF3GL0xptCB/vCiJPl/ZshwPsX/n4Y7u0CW9E7aQIQ=
github.com/sercand/kuberesolver/v5 v5.1.1 h1:CYH+d67G0sGBj7q5wLK61yzqJJ8gLLC8aeprPTHb6yY=
github.com/sercand/kuberesolver/v5 v5.1.1/go.mod h1:Fs1KbKhVRnB2aDWN12NjKCB+RgYMWZJ294T3BtmVCpQ=
@@ -1507,38 +1727,26 @@ github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c h1:aqg5Vm5dwtvL+YgDpBcK1ITf3o96N/K7/wsRXQnUTEs=
github.com/shurcooL/httpfs v0.0.0-20230704072500-f1e31cf0ba5c/go.mod h1:owqhoLW1qZoYLZzLnBw+QkPP9WZnjlSWihhxAJC1+/M=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU=
-github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw=
-github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8=
+github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92 h1:OfRzdxCzDhp+rsKWXuOO2I/quKMJ/+TQwVbIP/gltZg=
+github.com/shurcooL/vfsgen v0.0.0-20230704071429-0000e147ea92/go.mod h1:7/OT02F6S6I7v6WXb+IjhMuZEYfH/RJ5RwEWnEo5BMg=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sony/gobreaker v1.0.0 h1:feX5fGGXSl3dYd4aHZItw+FpHLvvoaqkawKjVNiFMNQ=
github.com/sony/gobreaker v1.0.0/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
-github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
-github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
-github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
+github.com/stackitcloud/stackit-sdk-go/core v0.17.2 h1:jPyn+i8rkp2hM80+hOg0B/1EVRbMt778Tr5RWyK1m2E=
+github.com/stackitcloud/stackit-sdk-go/core v0.17.2/go.mod h1:8KIw3czdNJ9sdil9QQimxjR6vHjeINFrRv0iZ67wfn0=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@@ -1555,22 +1763,26 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/tencentyun/cos-go-sdk-v5 v0.7.40 h1:W6vDGKCHe4wBACI1d2UgE6+50sJFhRWU4O8IB2ozzxM=
-github.com/tencentyun/cos-go-sdk-v5 v0.7.40/go.mod h1:4dCEtLHGh8QPxHEkgq+nFaky7yZxQuYwgSJM87icDaw=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
+github.com/tencentyun/cos-go-sdk-v5 v0.7.66 h1:O4O6EsozBoDjxWbltr3iULgkI7WPj/BFNlYTXDuE64E=
+github.com/tencentyun/cos-go-sdk-v5 v0.7.66/go.mod h1:8+hG+mQMuRP/OIS9d83syAvXvrMj9HhkND6Q1fLghw0=
github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng=
github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM=
-github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd h1:YBDmfk3k/eOYLfP4SR/vZdXi5/65pqWPmR9Do2WjkRM=
-github.com/thanos-io/objstore v0.0.0-20240622095743-1afe5d4bc3cd/go.mod h1:3ukSkG4rIRUGkKM4oIz+BSuUx2e3RlQVVv3Cc3W+Tv4=
-github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b h1:V06gjM1OFiJydoClwiGOMCpBWLSpxa5FZBvBc3coQg4=
-github.com/thanos-io/promql-engine v0.0.0-20240718195911-cdbd6dfed36b/go.mod h1:Gtv7CJIxGyiGsT+bNDg4nOAsL/bVKLlpfOZUSLSyYfY=
-github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647 h1:0qjB7yYBB4LeGw+BWVrEsPMHabYgXjfh2pD2vkuRa9s=
-github.com/thanos-io/thanos v0.35.2-0.20240722172812-990a60b72647/go.mod h1:4QL7wA5z+Uh4tE6fm4Ar+nqQKgAxWzdOWdcBBjABUvo=
-github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU=
-github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3 h1:P301Anc27aVL7Ls88el92j+qW3PJp8zmiDl+kOUZv3A=
+github.com/thanos-io/objstore v0.0.0-20250722142242-922b22272ee3/go.mod h1:uDHLkMKOGDAnlN75EAz8VrRzob1+VbgYSuUleatWuF0=
+github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264 h1:sOmANo4XVhem4VgvI9w05DBwqMex/qw+cDjuHW2FKWw=
+github.com/thanos-io/promql-engine v0.0.0-20250924193140-e9123dc11264/go.mod h1:MOFN0M1nDMcWZg1t4iF39sOard/K4SWgO/HHSODeDIc=
+github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb h1:z/ePbn3lo/D4vdHGH8hpa2kgH9M6iLq0kOFtZwuelKM=
+github.com/thanos-io/thanos v0.39.3-0.20250729120336-88d0ae8071cb/go.mod h1:gGUG3TDEoRSjTFVs/QO6QnQIILRgNF0P9l7BiiMfmHw=
+github.com/tinylib/msgp v1.3.0 h1:ULuf7GPooDaIlbyvgAxBV/FI7ynli6LZ1/nVUNu+0ww=
+github.com/tinylib/msgp v1.3.0/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0=
+github.com/tjhop/slog-gokit v0.1.4 h1:uj/vbDt3HaF0Py8bHPV4ti/s0utnO0miRbO277FLBKM=
+github.com/tjhop/slog-gokit v0.1.4/go.mod h1:Bbu5v2748qpAWH7k6gse/kw3076IJf6owJmh7yArmJs=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
@@ -1578,19 +1790,19 @@ github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs=
github.com/vultr/govultr/v2 v2.17.2/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI=
-github.com/weaveworks/common v0.0.0-20210913144402-035033b78a78/go.mod h1:YU9FvnS7kUnRt6HY10G+2qHkwzP3n3Vb1XsXDsJTSp8=
-github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5 h1:nORobjToZAvi54wcuUXLq+XG2Rsr0XEizy5aHBHvqWQ=
-github.com/weaveworks/common v0.0.0-20230728070032-dd9e68f319d5/go.mod h1:rgbeLfJUtEr+G74cwFPR1k/4N0kDeaeSv/qhUNE4hm8=
github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M=
github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xhit/go-str2duration v1.2.0/go.mod h1:3cPSlfZlUHVlneIVfePFWcJZsuwf+P1v2SRTV4cUmp4=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc=
+github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
+github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78 h1:ilQV1hzziu+LLM3zUTJ0trRztfwgjqKnBWNtSRkbmwM=
+github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -1602,22 +1814,18 @@ github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v3.3.25+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/api/v3 v3.5.15 h1:3KpLJir1ZEBrYuV2v+Twaa/e2MdDCEZ/70H+lzEiwsk=
-go.etcd.io/etcd/api/v3 v3.5.15/go.mod h1:N9EhGzXq58WuMllgH9ZvnEr7SI9pS0k0+DHZezGp7jM=
+go.etcd.io/etcd/api/v3 v3.5.17 h1:cQB8eb8bxwuxOilBpMJAEo8fAONyrdXTHUNcMd8yT1w=
+go.etcd.io/etcd/api/v3 v3.5.17/go.mod h1:d1hvkRuXkts6PmaYk2Vrgqbv7H4ADfAKhyJqHNLJCB4=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/pkg/v3 v3.5.15 h1:fo0HpWz/KlHGMCC+YejpiCmyWDEuIpnTDzpJLB5fWlA=
-go.etcd.io/etcd/client/pkg/v3 v3.5.15/go.mod h1:mXDI4NAOwEiszrHCb0aqfAYNCrZP4e9hRca3d1YK8EU=
+go.etcd.io/etcd/client/pkg/v3 v3.5.17 h1:XxnDXAWq2pnxqx76ljWwiQ9jylbpC4rvkAeRVOUKKVw=
+go.etcd.io/etcd/client/pkg/v3 v3.5.17/go.mod h1:4DqK1TKacp/86nJk4FLQqo6Mn2vvQFBmruW3pP14H/w=
go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/client/v3 v3.5.15 h1:23M0eY4Fd/inNv1ZfU3AxrbbOdW79r9V9Rl62Nm6ip4=
-go.etcd.io/etcd/client/v3 v3.5.15/go.mod h1:CLSJxrYjvLtHsrPKsy7LmZEE+DK2ktfd2bN4RhBMwlU=
-go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
-go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
-go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
-go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
+go.etcd.io/etcd/client/v3 v3.5.17 h1:o48sINNeWz5+pjy/Z0+HKpj/xSnBkuVhVvXkjEXbqZY=
+go.etcd.io/etcd/client/v3 v3.5.17/go.mod h1:j2d4eXTHWkT2ClBgnnEPm/Wuu7jsqku41v9DZ3OtjQo=
+go.mongodb.org/mongo-driver v1.17.4 h1:jUorfmVzljjr0FLzYQsGP8cgN/qzzxlY9Vh0C9KFXVw=
+go.mongodb.org/mongo-driver v1.17.4/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -1627,72 +1835,125 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
-go.opentelemetry.io/collector/pdata v1.12.0 h1:Xx5VK1p4VO0md8MWm2icwC1MnJ7f8EimKItMWw46BmA=
-go.opentelemetry.io/collector/pdata v1.12.0/go.mod h1:MYeB0MmMAxeM0hstCFrCqWLzdyeYySim2dG6pDT6nYI=
-go.opentelemetry.io/collector/semconv v0.105.0 h1:8p6dZ3JfxFTjbY38d8xlQGB1TQ3nPUvs+D0RERniZ1g=
-go.opentelemetry.io/collector/semconv v0.105.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
-go.opentelemetry.io/contrib/propagators/autoprop v0.53.0 h1:4zaVLcJ5mvYw0vlk63TX62qS4qty/4jAY1BKZ1usu18=
-go.opentelemetry.io/contrib/propagators/autoprop v0.53.0/go.mod h1:RPlvYtxp5D8PKnRzyPM+rwMQrvzdlfA49Sgworkg7aQ=
-go.opentelemetry.io/contrib/propagators/aws v1.28.0 h1:acyTl4oyin/iLr5Nz3u7p/PKHUbLh42w/fqg9LblExk=
-go.opentelemetry.io/contrib/propagators/aws v1.28.0/go.mod h1:5WgIv6yG9DvLlSY2uIHrYSeVVwCDCqp4jhwinNNyeT4=
-go.opentelemetry.io/contrib/propagators/b3 v1.28.0 h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y=
-go.opentelemetry.io/contrib/propagators/b3 v1.28.0/go.mod h1:DWRkzJONLquRz7OJPh2rRbZ7MugQj62rk7g6HRnEqh0=
-go.opentelemetry.io/contrib/propagators/jaeger v1.28.0 h1:xQ3ktSVS128JWIaN1DiPGIjcH+GsvkibIAVRWFjS9eM=
-go.opentelemetry.io/contrib/propagators/jaeger v1.28.0/go.mod h1:O9HIyI2kVBrFoEwQZ0IN6PHXykGoit4mZV2aEjkTRH4=
-go.opentelemetry.io/contrib/propagators/ot v1.28.0 h1:rmlG+2pc5k5M7Y7izDrxAHZUIwDERdGMTD9oMV7llMk=
-go.opentelemetry.io/contrib/propagators/ot v1.28.0/go.mod h1:MNgXIn+UrMbNGpd7xyckyo2LCHIgCdmdjEE7YNZGG+w=
-go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
-go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
-go.opentelemetry.io/otel/bridge/opentracing v1.28.0 h1:erHvOxIUFnSXj/HuS5SqaKe2CbWSBskONXm2bEBxYgc=
-go.opentelemetry.io/otel/bridge/opentracing v1.28.0/go.mod h1:ZMOFThPtIKYiVqzKrU53s41j25Cj27KySyu5Az5jRPU=
-go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
-go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
-go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
-go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
-go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
-go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/collector/component v1.35.0 h1:JpvBukEcEUvJ/TInF1KYpXtWEP+C7iYkxCHKjI0o7BQ=
+go.opentelemetry.io/collector/component v1.35.0/go.mod h1:hU/ieWPxWbMAacODCSqem5ZaN6QH9W5GWiZ3MtXVuwc=
+go.opentelemetry.io/collector/component/componentstatus v0.129.0 h1:ejpBAt7hXAAZiQKcSxLvcy8sj8SjY4HOLdoXIlW6ybw=
+go.opentelemetry.io/collector/component/componentstatus v0.129.0/go.mod h1:/dLPIxn/tRMWmGi+DPtuFoBsffOLqPpSZ2IpEQzYtwI=
+go.opentelemetry.io/collector/component/componenttest v0.129.0 h1:gpKkZGCRPu3Yn0U2co09bMvhs17yLFb59oV8Gl9mmRI=
+go.opentelemetry.io/collector/component/componenttest v0.129.0/go.mod h1:JR9k34Qvd/pap6sYkPr5QqdHpTn66A5lYeYwhenKBAM=
+go.opentelemetry.io/collector/confmap v1.35.0 h1:U4JDATAl4PrKWe9bGHbZkoQXmJXefWgR2DIkFvw8ULQ=
+go.opentelemetry.io/collector/confmap v1.35.0/go.mod h1:qX37ExVBa+WU4jWWJCZc7IJ+uBjb58/9oL+/ctF1Bt0=
+go.opentelemetry.io/collector/confmap/xconfmap v0.129.0 h1:Q/+pJKrkCaMPSoSAH2BpC3UZCh+5hTiFkh/bdy5yChk=
+go.opentelemetry.io/collector/confmap/xconfmap v0.129.0/go.mod h1:RNMnlay2meJDXcKjxiLbST9/YAhKLJlj0kZCrJrLGgw=
+go.opentelemetry.io/collector/consumer v1.35.0 h1:mgS42yh1maXBIE65IT4//iOA89BE+7xSUzV8czyevHg=
+go.opentelemetry.io/collector/consumer v1.35.0/go.mod h1:9sSPX0hDHaHqzR2uSmfLOuFK9v3e9K3HRQ+fydAjOWs=
+go.opentelemetry.io/collector/consumer/consumertest v0.129.0 h1:kRmrAgVvPxH5c/rTaOYAzyy0YrrYhQpBNkuqtDRrgeU=
+go.opentelemetry.io/collector/consumer/consumertest v0.129.0/go.mod h1:JgJKms1+v/CuAjkPH+ceTnKeDgUUGTQV4snGu5wTEHY=
+go.opentelemetry.io/collector/consumer/xconsumer v0.129.0 h1:bRyJ9TGWwnrUnB5oQGTjPhxpVRbkIVeugmvks22bJ4A=
+go.opentelemetry.io/collector/consumer/xconsumer v0.129.0/go.mod h1:pbe5ZyPJrtzdt/RRI0LqfT1GVBiJLbtkDKx3SBRTiTY=
+go.opentelemetry.io/collector/featuregate v1.35.0 h1:c/XRtA35odgxVc4VgOF/PTIk7ajw1wYdQ6QI562gzd4=
+go.opentelemetry.io/collector/featuregate v1.35.0/go.mod h1:Y/KsHbvREENKvvN9RlpiWk/IGBK+CATBYzIIpU7nccc=
+go.opentelemetry.io/collector/internal/telemetry v0.129.0 h1:jkzRpIyMxMGdAzVOcBe8aRNrbP7eUrMq6cxEHe0sbzA=
+go.opentelemetry.io/collector/internal/telemetry v0.129.0/go.mod h1:riAPlR2LZBV7VEx4LicOKebg3N1Ja3izzkv5fl1Lhiw=
+go.opentelemetry.io/collector/pdata v1.35.0 h1:ck6WO6hCNjepADY/p9sT9/rLECTLO5ukYTumKzsqB/E=
+go.opentelemetry.io/collector/pdata v1.35.0/go.mod h1:pttpb089864qG1k0DMeXLgwwTFLk+o3fAW9I6MF9tzw=
+go.opentelemetry.io/collector/pdata/pprofile v0.129.0 h1:DgZTvjOGmyZRx7Or80hz8XbEaGwHPkIh2SX1A5eXttQ=
+go.opentelemetry.io/collector/pdata/pprofile v0.129.0/go.mod h1:uUBZxqJNOk6QIMvbx30qom//uD4hXJ1K/l3qysijMLE=
+go.opentelemetry.io/collector/pdata/testdata v0.129.0 h1:n1QLnLOtrcAR57oMSVzmtPsQEpCc/nE5Avk1xfuAkjY=
+go.opentelemetry.io/collector/pdata/testdata v0.129.0/go.mod h1:RfY5IKpmcvkS2IGVjl9jG9fcT7xpQEBWpg9sQOn/7mY=
+go.opentelemetry.io/collector/pipeline v0.129.0 h1:Mp7RuKLizLQJ0381eJqKQ0zpgkFlhTE9cHidpJQIvMU=
+go.opentelemetry.io/collector/pipeline v0.129.0/go.mod h1:TO02zju/K6E+oFIOdi372Wk0MXd+Szy72zcTsFQwXl4=
+go.opentelemetry.io/collector/processor v1.35.0 h1:YOfHemhhodYn4BnPjN7kWYYDhzPVqRkyHCaQ8mAlavs=
+go.opentelemetry.io/collector/processor v1.35.0/go.mod h1:cWHDOpmpAaVNCc9K9j2/okZoLIuP/EpGGRNhM4JGmFM=
+go.opentelemetry.io/collector/processor/processortest v0.129.0 h1:r5iJHdS7Ffdb2zmMVYx4ahe92PLrce5cas/AJEXivkY=
+go.opentelemetry.io/collector/processor/processortest v0.129.0/go.mod h1:gdf8GzyzjGoDTA11+CPwC4jfXphtC+B7MWbWn+LIWXc=
+go.opentelemetry.io/collector/processor/xprocessor v0.129.0 h1:V3Zgd+YIeu3Ij3DPlGtzdcTwpqOQIqQVcL5jdHHS7sc=
+go.opentelemetry.io/collector/processor/xprocessor v0.129.0/go.mod h1:78T+AP5NO137W/E+SibQhaqOyS67fR+IN697b4JFh00=
+go.opentelemetry.io/collector/semconv v0.128.0 h1:MzYOz7Vgb3Kf5D7b49pqqgeUhEmOCuT10bIXb/Cc+k4=
+go.opentelemetry.io/collector/semconv v0.128.0/go.mod h1:OPXer4l43X23cnjLXIZnRj/qQOjSuq4TgBLI76P9hns=
+go.opentelemetry.io/contrib/bridges/otelzap v0.11.0 h1:u2E32P7j1a/gRgZDWhIXC+Shd4rLg70mnE7QLI/Ssnw=
+go.opentelemetry.io/contrib/bridges/otelzap v0.11.0/go.mod h1:pJPCLM8gzX4ASqLlyAXjHBEYxgbOQJ/9bidWxD6PEPQ=
+go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
+go.opentelemetry.io/contrib/detectors/gcp v1.35.0 h1:bGvFt68+KTiAKFlacHW6AhA56GF2rS0bdD3aJYEnmzA=
+go.opentelemetry.io/contrib/detectors/gcp v1.35.0/go.mod h1:qGWP8/+ILwMRIUf9uIVLloR1uo5ZYAslM4O6OqUi1DA=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0 h1:lREC4C0ilyP4WibDhQ7Gg2ygAQFP8oR07Fst/5cafwI=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.61.0/go.mod h1:HfvuU0kW9HewH14VCOLImqKvUgONodURG7Alj/IrnGI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/contrib/propagators/autoprop v0.61.0 h1:cxOVDJ30qfzV27G5p9WMtJUB/3cXC0iL+u9EV1fSOws=
+go.opentelemetry.io/contrib/propagators/autoprop v0.61.0/go.mod h1:Y+xiUbWetg65vAroDZcIzJ5wyPNWRH32EoIV9rIaa0g=
+go.opentelemetry.io/contrib/propagators/aws v1.36.0 h1:Txhy/1LZIbbnutftc5pdU8Y9vOQuAkuIOFXuLsdDejs=
+go.opentelemetry.io/contrib/propagators/aws v1.36.0/go.mod h1:M3A0491jGFPNHU8b3zEW7r/gtsMpGOsFUO3WL+SZ1xw=
+go.opentelemetry.io/contrib/propagators/b3 v1.36.0 h1:xrAb/G80z/l5JL6XlmUMSD1i6W8vXkWrLfmkD3w/zZo=
+go.opentelemetry.io/contrib/propagators/b3 v1.36.0/go.mod h1:UREJtqioFu5awNaCR8aEx7MfJROFlAWb6lPaJFbHaG0=
+go.opentelemetry.io/contrib/propagators/jaeger v1.36.0 h1:SoCgXYF4ISDtNyfLUzsGDaaudZVTx2yJhOyBO0+/GYk=
+go.opentelemetry.io/contrib/propagators/jaeger v1.36.0/go.mod h1:VHu48l0YTRKSObdPQ+Sb8xMZvdnJlN7yhHuHoPgNqHM=
+go.opentelemetry.io/contrib/propagators/ot v1.36.0 h1:UBoZjbx483GslNKYK2YpfvePTJV4BHGeFd8+b7dexiM=
+go.opentelemetry.io/contrib/propagators/ot v1.36.0/go.mod h1:adDDRry19/n9WoA7mSCMjoVJcmzK/bZYzX9SR+g2+W4=
+go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
+go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
+go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
+go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
+go.opentelemetry.io/otel/bridge/opentracing v1.36.0 h1:GWGmcYhMCu6+K/Yz5KWSETU/esd/mkVGx+77uKtLjpk=
+go.opentelemetry.io/otel/bridge/opentracing v1.36.0/go.mod h1:bW7xTHgtWSNqY8QjhqXzloXBkw3iQIa8uBqCF/0EUbc=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
+go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc=
+go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E=
+go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989 h1:4JF7oY9CcHrPGfBLijDcXZyCzGckVEyOjuat5ktmQRg=
+go.opentelemetry.io/otel/log/logtest v0.0.0-20250526142609-aa5bd0e64989/go.mod h1:NToOxLDCS1tXDSB2dIj44H9xGPOpKr0csIN+gnuihv4=
+go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
+go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
+go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
+go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
+go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
+go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
+go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
+go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
+go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
+go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
+go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
+go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
+go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
-go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
-go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
-go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
+go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
+go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
go4.org/intern v0.0.0-20230525184215-6c62f75575cb h1:ae7kzL5Cfdmcecbh22ll7lYP3iuUdnfnhiPcSaDgH/8=
go4.org/intern v0.0.0-20230525184215-6c62f75575cb/go.mod h1:Ycrt6raEcnF5FTsLiLKkhBTO6DPX3RCUCUVnks3gFJU=
-go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 h1:WJhcL4p+YeDxmZWg141nRm7XC8IDmhz7lk5GpadO1Sg=
go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -1701,9 +1962,24 @@ golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20221012134737-56aed061732a/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
+golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
+golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
+golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
+golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
+golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
+golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
+golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
+golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1719,8 +1995,8 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20220827204233-334a2380cb91/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE=
-golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
-golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4=
+golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1734,7 +2010,6 @@ golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeap
golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
golang.org/x/image v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=
-golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -1763,17 +2038,18 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
-golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
+golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -1784,11 +2060,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -1831,14 +2104,26 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
-golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
+golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
+golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
+golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
+golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
+golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
+golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
+golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
+golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
+golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1864,12 +2149,14 @@ golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri
golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
-golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
+golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
+golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
+golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1880,26 +2167,29 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
+golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
+golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181128092732-4ed8d59d0b35/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1907,15 +2197,12 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1924,7 +2211,6 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1995,21 +2281,48 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
-golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
+golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
+golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
-golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
-golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
+golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
+golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
+golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
+golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
+golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
+golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
+golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
+golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
+golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -2022,32 +2335,36 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
+golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
+golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
-golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
+golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
@@ -2060,7 +2377,6 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -2069,14 +2385,12 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
-golang.org/x/tools v0.0.0-20200216192241-b320d3a0f5a2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
@@ -2109,8 +2423,12 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s=
-golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
-golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
+golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
+golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
+golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
+golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -2119,17 +2437,16 @@ golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
-golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0=
gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY=
gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo=
-google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -2185,10 +2502,16 @@ google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/
google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI=
-google.golang.org/api v0.188.0 h1:51y8fJ/b1AaaBRJr4yWm96fPcuxSo0JcegXE3DaHQHw=
-google.golang.org/api v0.188.0/go.mod h1:VR0d+2SIiWOYG3r/jdm7adPW9hI2aRv9ETOSCQ9Beag=
+google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0=
+google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg=
+google.golang.org/api v0.118.0/go.mod h1:76TtD3vkgmZ66zZzp72bUUklpmQmKlhh6sYtIjYK+5E=
+google.golang.org/api v0.122.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
+google.golang.org/api v0.124.0/go.mod h1:xu2HQurE5gi/3t1aFCvhPD781p0a3p11sdunTJ2BlP4=
+google.golang.org/api v0.125.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.126.0/go.mod h1:mBwVAtz+87bEN6CbA1GtZPDOqY2R5ONPqJeIlvyo4Aw=
+google.golang.org/api v0.239.0 h1:2hZKUnFZEy81eugPs4e2XzIJ5SOwQg0G82bpXD65Puo=
+google.golang.org/api v0.239.0/go.mod h1:cOVEm2TpdAGHL2z+UwyS+kmlGr3bVWQQ6sYEqkKje50=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
@@ -2196,12 +2519,10 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
-google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -2322,61 +2643,59 @@ google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ
google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA=
google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw=
+google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
-google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b h1:dSTjko30weBaMj3eERKc0ZVXW4GudCswM3m+P++ukU0=
-google.golang.org/genproto v0.0.0-20240708141625-4ad9e859172b/go.mod h1:FfBgJBJg9GcpPvKIuHSZ/aE1g2ecGL74upMzGZjiGEY=
-google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
-google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d/go.mod h1:mw8MG/Qz5wfgYr6VqVCiZcHe/GJEfI+oGGDCohaVgB0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b h1:04+jVzTs2XBnOZcPsLnmrTGqltqJbZQ1Ey26hjYdQQ0=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240708141625-4ad9e859172b/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
-google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
-google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
-google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
-google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
-google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
-google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
-google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
-google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
-google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
-google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
-google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
-google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
-google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
-google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
-google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
-google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
-google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
-google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI=
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
-google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
-google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
-google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
-google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
-google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
+google.golang.org/genproto v0.0.0-20230320184635-7606e756e683/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
+google.golang.org/genproto v0.0.0-20230323212658-478b75c54725/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230330154414-c0448cd141ea/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230331144136-dcfb400f0633/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230403163135-c38d8f061ccd/go.mod h1:UUQDJDOlWu4KYeJZffbWgBkS1YFobzKbLVfK69pe0Ak=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto v0.0.0-20230525234025-438c736192d0/go.mod h1:9ExIQyXL5hZrHzQceCwuSYwZZ5QZBazOcprJ5rgs3lY=
+google.golang.org/genproto v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:zqTuNwFlFRsw5zIts5VnzLQxSRqh+CGOTVMlYbY0Eyk=
+google.golang.org/genproto v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto v0.0.0-20230629202037-9506855d4529/go.mod h1:xZnkP7mREFX5MORlOPEzLMr+90PPZQ2QWzrVTWfAq64=
+google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y=
+google.golang.org/genproto v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:0ggbjUrZYpy1q+ANUS30SEoGZ53cdfwtbuG7Ptgy108=
+google.golang.org/genproto v0.0.0-20230803162519-f966b187b2e5/go.mod h1:oH/ZOT02u4kWEp7oYBGYFFkCdKS/uYR9Z7+0/xuuFp8=
+google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
+google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234020-1aefcd67740a/go.mod h1:ts19tUU+Z0ZShN1y3aPyq2+O3d5FUNNgT6FtOzmrNn8=
+google.golang.org/genproto/googleapis/api v0.0.0-20230525234035-dd9d682886f9/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230629202037-9506855d4529/go.mod h1:vHYtlOoi6TsQ3Uk2yxR7NI5z8uoV+3pZtR4jmHIkRig=
+google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo=
+google.golang.org/genproto/googleapis/api v0.0.0-20241202173237-19429a94021a/go.mod h1:jehYqy3+AhJU9ve55aNOaSml7wUXjF9x6z2LcCfpAhY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
+google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
+google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
+google.golang.org/genproto/googleapis/bytestream v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:ylj+BE99M198VPbBh6A8d9n3w8fChvyLK3wwBOjXBFA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234015-3fc162c6f38a/go.mod h1:xURIpW9ES5+/GZhnV6beoEtxQrnkRGIfP5VQG2tCBLc=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230526203410-71b5a4ffd15e/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230530153820-e85fd2cbaebc/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:8mL13HKkDa+IuJ8yruA3ci0q+0vsUz4m//+ottjwS5o=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230803162519-f966b187b2e5/go.mod h1:zBEcrKX2ZOcEkHWxBPAIvYUWOKKMIhYcmNiUIu2ji3I=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241118233622-e639e219e697/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/grpc v1.71.2 h1:KnzCueW4s+8ojAPZ+NnyZAELjsIMJGteKjKejieEC7M=
+google.golang.org/grpc v1.71.2/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -2393,27 +2712,32 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
+google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
+gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
+gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -2424,13 +2748,15 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=
+gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -2439,29 +2765,34 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
-k8s.io/api v0.30.2 h1:+ZhRj+28QT4UOH+BKznu4CBgPWgkXO7XAvMcMl0qKvI=
-k8s.io/api v0.30.2/go.mod h1:ULg5g9JvOev2dG0u2hig4Z7tQ2hHIuS+m8MNZ+X6EmI=
-k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg=
-k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
-k8s.io/client-go v0.30.2 h1:sBIVJdojUNPDU/jObC+18tXWcTJVcwyqS9diGdWHk50=
-k8s.io/client-go v0.30.2/go.mod h1:JglKSWULm9xlJLx4KCkfLLQ7XwtlbflV6uFFSHTMgVs=
+k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw=
+k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw=
+k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4=
+k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4=
+k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
-k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak=
-k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y=
+k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
+modernc.org/cc/v3 v3.37.0/go.mod h1:vtL+3mdHx/wcj3iEGz84rQa8vEqR6XM84v5Lcvfph20=
+modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc=
modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw=
+modernc.org/ccgo/v3 v3.0.0-20220904174949-82d86e1b6d56/go.mod h1:YSXjPL62P2AMSxBphRHPn7IkzhVHqkvOnRKAKh+W6ZI=
modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ=
modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws=
modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo=
+modernc.org/ccgo/v3 v3.16.13-0.20221017192402-261537637ce8/go.mod h1:fUB3Vn0nVPReA+7IG7yZDfjv1TMWjhQP8gCxrFAtL5g=
+modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
@@ -2471,30 +2802,42 @@ modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU=
modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA=
modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0=
modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s=
+modernc.org/libc v1.17.4/go.mod h1:WNg2ZH56rDEwdropAJeZPQkXmDwh+JCA1s/htl6r2fA=
+modernc.org/libc v1.18.0/go.mod h1:vj6zehR5bfc98ipowQOM2nIDUZnVew/wNC/2tOGS+q0=
+modernc.org/libc v1.20.3/go.mod h1:ZRfIaEkgrYgZDl6pa4W39HgN5G/yDW+NRmNKZBDFrk0=
+modernc.org/libc v1.21.4/go.mod h1:przBsL5RDOZajTVslkugzLBj1evTue36jEomFQOoYuI=
+modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw=
modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.3.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.4.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
+modernc.org/memory v1.5.0/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU=
modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4=
+modernc.org/sqlite v1.18.2/go.mod h1:kvrTLEWgxUcHa2GfHBQtanR1H9ht3hTJNtKpzH9k1u0=
modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw=
+modernc.org/tcl v1.13.2/go.mod h1:7CLiGIPo1M8Rv1Mitpv5akc2+8fxUd2y2UzC/MfMzy0=
modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
+sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
-sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
-sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/pkg/analyse/ruler.go b/pkg/analyse/ruler.go
index 7ea4c9a1b..eedddb2b1 100644
--- a/pkg/analyse/ruler.go
+++ b/pkg/analyse/ruler.go
@@ -31,11 +31,11 @@ func ParseMetricsInRuleGroup(mir *MetricsInRuler, group rwrulefmt.RuleGroup, ns
)
for _, rule := range group.Rules {
- if rule.Record.Value != "" {
- ruleMetrics[rule.Record.Value] = struct{}{}
+ if rule.Record != "" {
+ ruleMetrics[rule.Record] = struct{}{}
}
- query := rule.Expr.Value
+ query := rule.Expr
expr, err := parser.ParseExpr(query)
if err != nil {
parseErrors = append(parseErrors, errors.Wrapf(err, "query=%v", query))
diff --git a/pkg/backfill/backfill.go b/pkg/backfill/backfill.go
index eca0e65b9..6abf9f430 100644
--- a/pkg/backfill/backfill.go
+++ b/pkg/backfill/backfill.go
@@ -4,13 +4,13 @@ import (
"context"
"fmt"
"io"
+ "log/slog"
"strconv"
"strings"
"text/tabwriter"
"time"
"github.com/alecthomas/units"
- "github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
@@ -87,7 +87,7 @@ func CreateBlocks(input IteratorCreator, mint, maxt int64, maxSamplesInAppender
for t := mint; t <= maxt; t = t + blockDuration {
err := func() error {
- w, err := tsdb.NewBlockWriter(log.NewNopLogger(), outputDir, blockDuration)
+ w, err := tsdb.NewBlockWriter(slog.New(slog.DiscardHandler), outputDir, blockDuration)
if err != nil {
return errors.Wrap(err, "block writer")
}
diff --git a/pkg/bench/query_runner.go b/pkg/bench/query_runner.go
index 53a8d5994..53024e05c 100644
--- a/pkg/bench/query_runner.go
+++ b/pkg/bench/query_runner.go
@@ -245,7 +245,7 @@ func (q *queryRunner) resolveAddrs() error {
defer cancel()
// If some of the dns resolution fails, log the error.
- if err := q.dnsProvider.Resolve(ctx, []string{q.cfg.Endpoint}); err != nil {
+ if err := q.dnsProvider.Resolve(ctx, []string{q.cfg.Endpoint}, true); err != nil {
level.Error(q.logger).Log("msg", "failed to resolve addresses", "err", err)
}
diff --git a/pkg/bench/write_runner.go b/pkg/bench/write_runner.go
index d19cbbff1..0875d5e65 100644
--- a/pkg/bench/write_runner.go
+++ b/pkg/bench/write_runner.go
@@ -223,7 +223,7 @@ func (w *WriteBenchmarkRunner) resolveAddrs() error {
defer cancel()
// If some of the dns resolution fails, log the error.
- if err := w.dnsProvider.Resolve(ctx, []string{w.cfg.Endpoint}); err != nil {
+ if err := w.dnsProvider.Resolve(ctx, []string{w.cfg.Endpoint}, true); err != nil {
level.Error(w.logger).Log("msg", "failed to resolve addresses", "err", err)
}
diff --git a/pkg/commands/block_gen.go b/pkg/commands/block_gen.go
index 45878af62..13a776074 100644
--- a/pkg/commands/block_gen.go
+++ b/pkg/commands/block_gen.go
@@ -2,8 +2,8 @@ package commands
import (
"context"
+ "log/slog"
"os"
- "sort"
"time"
"github.com/go-kit/log"
@@ -96,7 +96,7 @@ func (f *BlockGenCommand) run(_ *kingpin.ParseContext) error {
currentBlockID = blockID(currentTs, blockSize)
level.Info(logger).Log("msg", "starting new block", "block_id", currentBlockID, "blocks_left", lastBlockID-currentBlockID+1)
- w, err = tsdb.NewBlockWriter(log.NewNopLogger(), f.Cfg.BlockDir, blockSize)
+ w, err = tsdb.NewBlockWriter(slog.New(slog.DiscardHandler), f.Cfg.BlockDir, blockSize)
if err != nil {
return err
}
@@ -108,13 +108,10 @@ func (f *BlockGenCommand) run(_ *kingpin.ParseContext) error {
for _, s := range timeSeries {
var ref storage.SeriesRef
- labels := prompbLabelsToLabelsLabels(s.Labels)
- sort.Slice(labels, func(i, j int) bool {
- return labels[i].Name < labels[j].Name
- })
+ lbls := prompbLabelsToLabelsLabels(s.Labels)
for _, sample := range s.Samples {
- ref, err = app.Append(ref, labels, sample.Timestamp, sample.Value)
+ ref, err = app.Append(ref, lbls, sample.Timestamp, sample.Value)
if err != nil {
return err
}
@@ -139,10 +136,9 @@ func blockID(ts, blockSize int64) int64 {
}
func prompbLabelsToLabelsLabels(in []prompb.Label) labels.Labels {
- out := make(labels.Labels, len(in))
- for idx := range in {
- out[idx].Name = in[idx].Name
- out[idx].Value = in[idx].Value
+ b := labels.NewBuilder(labels.EmptyLabels())
+ for _, l := range in {
+ b.Set(l.Name, l.Value)
}
- return out
+ return b.Labels()
}
diff --git a/pkg/commands/bucket_validation.go b/pkg/commands/bucket_validation.go
index eabf91af0..c9ad1bc94 100644
--- a/pkg/commands/bucket_validation.go
+++ b/pkg/commands/bucket_validation.go
@@ -60,8 +60,8 @@ func (c *retryingBucketClient) withRetries(f func() error) error {
}
}
-func (c *retryingBucketClient) Upload(ctx context.Context, name string, r io.Reader) error {
- return c.withRetries(func() error { return c.Bucket.Upload(ctx, name, r) })
+func (c *retryingBucketClient) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error {
+ return c.withRetries(func() error { return c.Bucket.Upload(ctx, name, r, opts...) })
}
func (c *retryingBucketClient) Exists(ctx context.Context, name string) (bool, error) {
@@ -115,7 +115,7 @@ func (b *BucketValidationCommand) validate(_ *kingpin.ParseContext) error {
b.logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
ctx := context.Background()
- bucketClient, err := bucket.NewClient(ctx, b.cfg, "testClient", b.logger, prometheus.DefaultRegisterer)
+ bucketClient, err := bucket.NewClient(ctx, b.cfg, nil, "testClient", b.logger, prometheus.DefaultRegisterer)
if err != nil {
return errors.Wrap(err, "failed to create the bucket client")
}
diff --git a/pkg/commands/remote_read.go b/pkg/commands/remote_read.go
index e1df21a18..b04aa0b00 100644
--- a/pkg/commands/remote_read.go
+++ b/pkg/commands/remote_read.go
@@ -22,6 +22,7 @@ import (
"github.com/prometheus/prometheus/prompb"
"github.com/prometheus/prometheus/promql/parser"
"github.com/prometheus/prometheus/storage/remote"
+ "github.com/prometheus/prometheus/tsdb/chunkenc"
log "github.com/sirupsen/logrus"
"gopkg.in/alecthomas/kingpin.v2"
@@ -143,11 +144,11 @@ func (i *timeSeriesIterator) Labels() (l labels.Labels) {
}
series := i.ts[i.posSeries]
- i.labels = make(labels.Labels, len(series.Labels))
- for posLabel := range series.Labels {
- i.labels[posLabel].Name = series.Labels[posLabel].Name
- i.labels[posLabel].Value = series.Labels[posLabel].Value
+ b := labels.NewBuilder(labels.EmptyLabels())
+ for _, lbl := range series.Labels {
+ b.Set(lbl.Name, lbl.Value)
}
+ i.labels = b.Labels()
i.labelsSeriesPos = i.posSeries
return i.labels
}
@@ -252,12 +253,33 @@ func (c *RemoteReadCommand) prepare() (query func(context.Context) ([]*prompb.Ti
return func(ctx context.Context) ([]*prompb.TimeSeries, error) {
log.Infof("Querying time from=%s to=%s with selector=%s", from.Format(time.RFC3339), to.Format(time.RFC3339), c.selector)
- resp, err := readClient.Read(ctx, pbQuery)
+ ss, err := readClient.Read(ctx, pbQuery, false)
if err != nil {
return nil, err
}
- return resp.Timeseries, nil
+ var result []*prompb.TimeSeries
+ for ss.Next() {
+ series := ss.At()
+ ts := &prompb.TimeSeries{}
+ series.Labels().Range(func(l labels.Label) {
+ ts.Labels = append(ts.Labels, prompb.Label{Name: l.Name, Value: l.Value})
+ })
+ it := series.Iterator(nil)
+ for it.Next() == chunkenc.ValFloat {
+ t, v := it.At()
+ ts.Samples = append(ts.Samples, prompb.Sample{Timestamp: t, Value: v})
+ }
+ if err := it.Err(); err != nil {
+ return nil, err
+ }
+ result = append(result, ts)
+ }
+ if err := ss.Err(); err != nil {
+ return nil, err
+ }
+
+ return result, nil
}, from, to, nil
}
diff --git a/pkg/commands/rules.go b/pkg/commands/rules.go
index 5d5acc89b..849f8cf5f 100644
--- a/pkg/commands/rules.go
+++ b/pkg/commands/rules.go
@@ -653,7 +653,7 @@ func (r *RuleCommand) prepare(_ *kingpin.ParseContext) error {
}
// Do not apply the aggregation label to excluded rule groups.
- applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
+ applyTo := func(group rwrulefmt.RuleGroup, _ rulefmt.Rule) bool {
_, excluded := r.aggregationLabelExcludedRuleGroupsList[group.Name]
return !excluded
}
@@ -774,11 +774,11 @@ func checkDuplicates(groups []rwrulefmt.RuleGroup) []compareRuleType {
return duplicates
}
-func ruleMetric(rule rulefmt.RuleNode) string {
- if rule.Alert.Value != "" {
- return rule.Alert.Value
+func ruleMetric(rule rulefmt.Rule) string {
+ if rule.Alert != "" {
+ return rule.Alert
}
- return rule.Record.Value
+ return rule.Record
}
// End taken from https://github.com/prometheus/prometheus/blob/8c8de46003d1800c9d40121b4a5e5de8582ef6e1/cmd/promtool/main.go#L403
diff --git a/pkg/commands/rules_test.go b/pkg/commands/rules_test.go
index 706912955..f67a76a7e 100644
--- a/pkg/commands/rules_test.go
+++ b/pkg/commands/rules_test.go
@@ -5,7 +5,6 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/stretchr/testify/assert"
- "gopkg.in/yaml.v3"
"github.com/cortexproject/cortex-tools/pkg/rules/rwrulefmt"
)
@@ -21,14 +20,14 @@ func TestCheckDuplicates(t *testing.T) {
in: []rwrulefmt.RuleGroup{{
RuleGroup: rulefmt.RuleGroup{
Name: "rulegroup",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "up"},
- Expr: yaml.Node{Value: "up==1"},
+ Record: "up",
+ Expr: "up==1",
},
{
- Record: yaml.Node{Value: "down"},
- Expr: yaml.Node{Value: "up==0"},
+ Record: "down",
+ Expr: "up==0",
},
},
},
@@ -41,14 +40,14 @@ func TestCheckDuplicates(t *testing.T) {
in: []rwrulefmt.RuleGroup{{
RuleGroup: rulefmt.RuleGroup{
Name: "rulegroup",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "up"},
- Expr: yaml.Node{Value: "up==1"},
+ Record: "up",
+ Expr: "up==1",
},
{
- Record: yaml.Node{Value: "up"},
- Expr: yaml.Node{Value: "up==0"},
+ Record: "up",
+ Expr: "up==0",
},
},
},
diff --git a/pkg/rules/compare.go b/pkg/rules/compare.go
index 97660a59b..4e1350d49 100644
--- a/pkg/rules/compare.go
+++ b/pkg/rules/compare.go
@@ -102,10 +102,10 @@ func CompareGroups(groupOne, groupTwo rwrulefmt.RuleGroup) error {
return nil
}
-func rulesEqual(a, b *rulefmt.RuleNode) bool {
- if a.Alert.Value != b.Alert.Value ||
- a.Record.Value != b.Record.Value ||
- a.Expr.Value != b.Expr.Value ||
+func rulesEqual(a, b *rulefmt.Rule) bool {
+ if a.Alert != b.Alert ||
+ a.Record != b.Record ||
+ a.Expr != b.Expr ||
a.For != b.For {
return false
}
@@ -131,9 +131,9 @@ func rulesEqual(a, b *rulefmt.RuleNode) bool {
// CompareNamespaces returns the differences between the two provided
// namespaces
-func CompareNamespaces(original, new RuleNamespace) NamespaceChange {
+func CompareNamespaces(original, updated RuleNamespace) NamespaceChange {
result := NamespaceChange{
- Namespace: new.Namespace,
+ Namespace: updated.Namespace,
State: Unchanged,
GroupsUpdated: []UpdatedRuleGroup{},
GroupsCreated: []rwrulefmt.RuleGroup{},
@@ -145,7 +145,7 @@ func CompareNamespaces(original, new RuleNamespace) NamespaceChange {
origMap[g.Name] = g
}
- for _, newGroup := range new.Groups {
+ for _, newGroup := range updated.Groups {
origGroup, found := origMap[newGroup.Name]
if !found {
result.State = Updated
diff --git a/pkg/rules/compare_test.go b/pkg/rules/compare_test.go
index 40671686f..18b45603f 100644
--- a/pkg/rules/compare_test.go
+++ b/pkg/rules/compare_test.go
@@ -4,7 +4,6 @@ import (
"testing"
"github.com/prometheus/prometheus/model/rulefmt"
- yaml "gopkg.in/yaml.v3"
"github.com/cortexproject/cortex-tools/pkg/rules/rwrulefmt"
)
@@ -12,21 +11,21 @@ import (
func Test_rulesEqual(t *testing.T) {
tests := []struct {
name string
- a *rulefmt.RuleNode
- b *rulefmt.RuleNode
+ a *rulefmt.Rule
+ b *rulefmt.Rule
want bool
}{
{
name: "rule_node_identical",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"c": "d", "a": "b"},
Labels: nil,
},
@@ -34,53 +33,53 @@ func Test_rulesEqual(t *testing.T) {
},
{
name: "rule_node_diff",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "two"},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "two",
+ Expr: "up",
},
want: false,
},
{
name: "rule_node_annotations_diff",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b"},
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one", Column: 10},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"c": "d"},
},
want: false,
},
{
name: "rule_node_annotations_nil_diff",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b"},
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one", Column: 10},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
Annotations: nil,
},
want: false,
},
{
name: "rule_node_yaml_diff",
- a: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ a: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
},
- b: &rulefmt.RuleNode{
- Record: yaml.Node{Value: "one", Column: 10},
- Expr: yaml.Node{Value: "up"},
+ b: &rulefmt.Rule{
+ Record: "one",
+ Expr: "up",
},
want: true,
},
@@ -106,10 +105,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -119,10 +118,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -136,10 +135,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -149,16 +148,16 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -172,10 +171,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -188,10 +187,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -208,10 +207,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -224,10 +223,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -245,10 +244,10 @@ func TestCompareGroups(t *testing.T) {
groupOne: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
@@ -261,10 +260,10 @@ func TestCompareGroups(t *testing.T) {
groupTwo: rwrulefmt.RuleGroup{
RuleGroup: rulefmt.RuleGroup{
Name: "example_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: "one"},
- Expr: yaml.Node{Value: "up"},
+ Record: "one",
+ Expr: "up",
Annotations: map[string]string{"a": "b", "c": "d"},
Labels: nil,
},
diff --git a/pkg/rules/parser_test.go b/pkg/rules/parser_test.go
index 8879a5193..067c284fc 100644
--- a/pkg/rules/parser_test.go
+++ b/pkg/rules/parser_test.go
@@ -28,7 +28,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "example_rule_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
@@ -59,7 +59,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "example_rule_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
@@ -74,7 +74,7 @@ func TestParseFiles(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "other_example_rule_group",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
// currently the tests only check length
},
diff --git a/pkg/rules/rules.go b/pkg/rules/rules.go
index fd68d3a78..674ff86df 100644
--- a/pkg/rules/rules.go
+++ b/pkg/rules/rules.go
@@ -35,13 +35,13 @@ func (r RuleNamespace) LintExpressions() (int, int, error) {
for i, group := range r.Groups {
for j, rule := range group.Rules {
log.WithFields(log.Fields{"rule": getRuleName(rule)}).Debugf("linting %s", "PromQL")
- exp, err := parseFn(rule.Expr.Value)
+ exp, err := parseFn(rule.Expr)
if err != nil {
return count, mod, err
}
count++
- if rule.Expr.Value != exp.String() {
+ if rule.Expr != exp.String() {
log.WithFields(log.Fields{
"rule": getRuleName(rule),
"currentExpr": rule.Expr,
@@ -49,7 +49,7 @@ func (r RuleNamespace) LintExpressions() (int, int, error) {
}).Debugf("expression differs")
mod++
- r.Groups[i].Rules[j].Expr.Value = exp.String()
+ r.Groups[i].Rules[j].Expr = exp.String()
}
}
}
@@ -70,10 +70,10 @@ func (r RuleNamespace) CheckRecordingRules(strict bool) int {
for _, group := range r.Groups {
for _, rule := range group.Rules {
// Assume if there is a rule.Record that this is a recording rule.
- if rule.Record.Value == "" {
+ if rule.Record == "" {
continue
}
- name = rule.Record.Value
+ name = rule.Record
log.WithFields(log.Fields{"rule": name}).Debugf("linting recording rule name")
chunks := strings.Split(name, ":")
if len(chunks) < reqChunks {
@@ -93,7 +93,7 @@ func (r RuleNamespace) CheckRecordingRules(strict bool) int {
// AggregateBy modifies the aggregation rules in groups to include a given Label.
// If the applyTo function is provided, the aggregation is applied only to rules
// for which the applyTo function returns true.
-func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool) (int, int, error) {
+func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.Rule) bool) (int, int, error) {
// `count` represents the number of rules we evaluated.
// `mod` represents the number of rules we modified - a modification can either be a lint or adding the
// label in the aggregation.
@@ -113,7 +113,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
}
log.WithFields(log.Fields{"rule": getRuleName(rule)}).Debugf("evaluating...")
- exp, err := parser.ParseExpr(rule.Expr.Value)
+ exp, err := parser.ParseExpr(rule.Expr)
if err != nil {
return count, mod, err
}
@@ -125,14 +125,14 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
parser.Inspect(exp, f)
// Only modify the ones that actually changed.
- if rule.Expr.Value != exp.String() {
+ if rule.Expr != exp.String() {
log.WithFields(log.Fields{
"rule": getRuleName(rule),
"currentExpr": rule.Expr,
"afterExpr": exp.String(),
}).Debugf("expression differs")
mod++
- r.Groups[i].Rules[j].Expr.Value = exp.String()
+ r.Groups[i].Rules[j].Expr = exp.String()
}
}
}
@@ -142,7 +142,7 @@ func (r RuleNamespace) AggregateBy(label string, applyTo func(group rwrulefmt.Ru
// exprNodeInspectorFunc returns a PromQL inspector.
// It modifies most PromQL expressions to include a given label.
-func exprNodeInspectorFunc(rule rulefmt.RuleNode, label string) func(node parser.Node, path []parser.Node) error {
+func exprNodeInspectorFunc(rule rulefmt.Rule, label string) func(node parser.Node, path []parser.Node) error {
return func(node parser.Node, _ []parser.Node) error {
var err error
switch n := node.(type) {
@@ -234,12 +234,12 @@ func (r RuleNamespace) Validate() []error {
func ValidateRuleGroup(g rwrulefmt.RuleGroup) []error {
var errs []error
for i, r := range g.Rules {
- for _, err := range r.Validate() {
+ for _, err := range r.Validate(rulefmt.RuleNode{}) {
var ruleName string
- if r.Alert.Value != "" {
- ruleName = r.Alert.Value
+ if r.Alert != "" {
+ ruleName = r.Alert
} else {
- ruleName = r.Record.Value
+ ruleName = r.Record
}
errs = append(errs, &rulefmt.Error{
Group: g.Name,
@@ -253,10 +253,10 @@ func ValidateRuleGroup(g rwrulefmt.RuleGroup) []error {
return errs
}
-func getRuleName(r rulefmt.RuleNode) string {
- if r.Record.Value != "" {
- return r.Record.Value
+func getRuleName(r rulefmt.Rule) string {
+ if r.Record != "" {
+ return r.Record
}
- return r.Alert.Value
+ return r.Alert
}
diff --git a/pkg/rules/rules_test.go b/pkg/rules/rules_test.go
index 76f31e8e7..5da7893c0 100644
--- a/pkg/rules/rules_test.go
+++ b/pkg/rules/rules_test.go
@@ -5,7 +5,6 @@ import (
"github.com/prometheus/prometheus/model/rulefmt"
"github.com/stretchr/testify/require"
- yaml "gopkg.in/yaml.v3"
"gotest.tools/assert"
"github.com/cortexproject/cortex-tools/pkg/rules/rwrulefmt"
@@ -15,7 +14,7 @@ func TestAggregateBy(t *testing.T) {
tt := []struct {
name string
rn RuleNamespace
- applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.RuleNode) bool
+ applyTo func(group rwrulefmt.RuleGroup, rule rulefmt.Rule) bool
expectedExpr []string
count, modified int
expect error
@@ -31,8 +30,8 @@ func TestAggregateBy(t *testing.T) {
Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
- Name: "WithoutAggregation", Rules: []rulefmt.RuleNode{
- {Alert: yaml.Node{Value: "WithoutAggregation"}, Expr: yaml.Node{Value: "up != 1"}},
+ Name: "WithoutAggregation", Rules: []rulefmt.Rule{
+ {Alert: "WithoutAggregation", Expr: "up != 1"},
},
},
},
@@ -48,11 +47,10 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "SkipWithout",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "SkipWithout"},
- Expr: yaml.Node{
- Value: `
+ Alert: "SkipWithout",
+ Expr: `
min without(alertmanager) (
rate(prometheus_notifications_errors_total{job="default/prometheus"}[5m])
/
@@ -60,7 +58,6 @@ func TestAggregateBy(t *testing.T) {
)
* 100
> 3`,
- },
},
},
},
@@ -77,16 +74,14 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "WithAggregation",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "WithAggregation"},
- Expr: yaml.Node{
- Value: `
+ Alert: "WithAggregation",
+ Expr: `
sum(rate(cortex_prometheus_rule_evaluation_failures_total[1m])) by (namespace, job)
/
sum(rate(cortex_prometheus_rule_evaluations_total[1m])) by (namespace, job)
> 0.01`,
- },
},
},
},
@@ -103,15 +98,11 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "CountAggregation",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{
- Value: "CountAggregation",
- },
- Expr: yaml.Node{
- Value: `
+ Alert: "CountAggregation",
+ Expr: `
count(count by (gitVersion) (label_replace(kubernetes_build_info{job!~"kube-dns|coredns"},"gitVersion","$1","gitVersion","(v[0-9]*.[0-9]*.[0-9]*).*"))) > 1`,
- },
},
},
},
@@ -128,10 +119,10 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "BinaryExpressions",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "VectorMatching"},
- Expr: yaml.Node{Value: `count by (cluster, node) (sum by (node, cpu, cluster) (node_cpu_seconds_total{job="default/node-exporter"} * on (namespace, instance) group_left (node) node_namespace_pod:kube_pod_info:))`},
+ Alert: "VectorMatching",
+ Expr: `count by (cluster, node) (sum by (node, cpu, cluster) (node_cpu_seconds_total{job="default/node-exporter"} * on (namespace, instance) group_left (node) node_namespace_pod:kube_pod_info:))`,
},
},
},
@@ -148,35 +139,27 @@ func TestAggregateBy(t *testing.T) {
{
RuleGroup: rulefmt.RuleGroup{
Name: "CountAggregation",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{
- Value: "CountAggregation",
- },
- Expr: yaml.Node{
- Value: `count by (namespace) (test_series) > 1`,
- },
+ Alert: "CountAggregation",
+ Expr: `count by (namespace) (test_series) > 1`,
},
},
},
}, {
RuleGroup: rulefmt.RuleGroup{
Name: "CountSkipped",
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{
- Value: "CountSkipped",
- },
- Expr: yaml.Node{
- Value: `count by (namespace) (test_series) > 1`,
- },
+ Alert: "CountSkipped",
+ Expr: `count by (namespace) (test_series) > 1`,
},
},
},
},
},
},
- applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.RuleNode) bool {
+ applyTo: func(group rwrulefmt.RuleGroup, _ rulefmt.Rule) bool {
return group.Name != "CountSkipped"
},
expectedExpr: []string{`count by (namespace, cluster) (test_series) > 1`, `count by (namespace) (test_series) > 1`},
@@ -196,7 +179,7 @@ func TestAggregateBy(t *testing.T) {
expectedIdx := 0
for _, g := range tc.rn.Groups {
for _, r := range g.Rules {
- require.Equal(t, tc.expectedExpr[expectedIdx], r.Expr.Value)
+ require.Equal(t, tc.expectedExpr[expectedIdx], r.Expr)
expectedIdx++
}
}
@@ -236,8 +219,8 @@ func TestLintExpressions(t *testing.T) {
{
name: "with a complex expression",
expr: `sum by (cluster, namespace) (sum_over_time((rate(loki_distributor_bytes_received_total{job=~".*/distributor"}[1m]) * 60)[1h:1m])) / 1e+09 / 5 * 1 > (sum by (cluster, namespace) (memcached_limit_bytes{job=~".+/memcached"}) / 1e+09)`,
- expected: `sum by (cluster, namespace) (sum_over_time((rate(loki_distributor_bytes_received_total{job=~".*/distributor"}[1m]) * 60)[1h:1m])) / 1e+09 / 5 * 1 > (sum by (cluster, namespace) (memcached_limit_bytes{job=~".+/memcached"}) / 1e+09)`,
- count: 1, modified: 0,
+ expected: `sum by (cluster, namespace) (sum_over_time((rate(loki_distributor_bytes_received_total{job=~".*/distributor"}[1m]) * 60)[1h:1m])) / 1000000000 / 5 * 1 > (sum by (cluster, namespace) (memcached_limit_bytes{job=~".+/memcached"}) / 1000000000)`,
+ count: 1, modified: 1,
err: "",
},
{
@@ -254,10 +237,10 @@ func TestLintExpressions(t *testing.T) {
r := RuleNamespace{Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Alert: yaml.Node{Value: "AName"},
- Expr: yaml.Node{Value: tc.expr},
+ Alert: "AName",
+ Expr: tc.expr,
},
},
},
@@ -266,7 +249,7 @@ func TestLintExpressions(t *testing.T) {
}
c, m, err := r.LintExpressions()
- rexpr := r.Groups[0].Rules[0].Expr.Value
+ rexpr := r.Groups[0].Rules[0].Expr
require.Equal(t, tc.count, c)
require.Equal(t, tc.modified, m)
@@ -324,10 +307,10 @@ func TestCheckRecordingRules(t *testing.T) {
Groups: []rwrulefmt.RuleGroup{
{
RuleGroup: rulefmt.RuleGroup{
- Rules: []rulefmt.RuleNode{
+ Rules: []rulefmt.Rule{
{
- Record: yaml.Node{Value: tc.ruleName},
- Expr: yaml.Node{Value: "rate(some_metric_total)[5m]"}},
+ Record: tc.ruleName,
+ Expr: "rate(some_metric_total)[5m]"},
},
},
},
diff --git a/vendor/cel.dev/expr/.bazelversion b/vendor/cel.dev/expr/.bazelversion
new file mode 100644
index 000000000..13c50892b
--- /dev/null
+++ b/vendor/cel.dev/expr/.bazelversion
@@ -0,0 +1,2 @@
+7.3.2
+# Keep this pinned version in parity with cel-go
diff --git a/vendor/cel.dev/expr/.gitattributes b/vendor/cel.dev/expr/.gitattributes
new file mode 100644
index 000000000..3de1ec213
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitattributes
@@ -0,0 +1,2 @@
+*.pb.go linguist-generated=true
+*.pb.go -diff -merge
diff --git a/vendor/cel.dev/expr/.gitignore b/vendor/cel.dev/expr/.gitignore
new file mode 100644
index 000000000..0d4fed27c
--- /dev/null
+++ b/vendor/cel.dev/expr/.gitignore
@@ -0,0 +1,2 @@
+bazel-*
+MODULE.bazel.lock
diff --git a/vendor/cel.dev/expr/BUILD.bazel b/vendor/cel.dev/expr/BUILD.bazel
new file mode 100644
index 000000000..37d8adc95
--- /dev/null
+++ b/vendor/cel.dev/expr/BUILD.bazel
@@ -0,0 +1,34 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+go_library(
+ name = "expr",
+ srcs = [
+ "checked.pb.go",
+ "eval.pb.go",
+ "explain.pb.go",
+ "syntax.pb.go",
+ "value.pb.go",
+ ],
+ importpath = "cel.dev/expr",
+ visibility = ["//visibility:public"],
+ deps = [
+ "@org_golang_google_genproto_googleapis_rpc//status:go_default_library",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//runtime/protoimpl",
+ "@org_golang_google_protobuf//types/known/anypb",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":expr",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/cel.dev/expr/CODE_OF_CONDUCT.md b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..59908e2d8
--- /dev/null
+++ b/vendor/cel.dev/expr/CODE_OF_CONDUCT.md
@@ -0,0 +1,25 @@
+# Contributor Code of Conduct
+## Version 0.1.1 (adapted from 0.3b-angular)
+
+As contributors and maintainers of the Common Expression Language
+(CEL) project, we pledge to respect everyone who contributes by
+posting issues, updating documentation, submitting pull requests,
+providing feedback in comments, and any other activities.
+
+Communication through any of CEL's channels (GitHub, Gitter, IRC,
+mailing lists, Google+, Twitter, etc.) must be constructive and never
+resort to personal attacks, trolling, public or private harassment,
+insults, or other unprofessional conduct.
+
+We promise to extend courtesy and respect to everyone involved in this
+project regardless of gender, gender identity, sexual orientation,
+disability, age, race, ethnicity, religion, or level of experience. We
+expect anyone contributing to the project to do the same.
+
+If any member of the community violates this code of conduct, the
+maintainers of the CEL project may take action, removing issues,
+comments, and PRs or blocking accounts as deemed appropriate.
+
+If you are subject to or witness unacceptable behavior, or have any
+other concerns, please email us at
+[cel-conduct@google.com](mailto:cel-conduct@google.com).
diff --git a/vendor/cel.dev/expr/CONTRIBUTING.md b/vendor/cel.dev/expr/CONTRIBUTING.md
new file mode 100644
index 000000000..8f5fd5c31
--- /dev/null
+++ b/vendor/cel.dev/expr/CONTRIBUTING.md
@@ -0,0 +1,32 @@
+# How to Contribute
+
+We'd love to accept your patches and contributions to this project. There are a
+few guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to <https://cla.developers.google.com/> to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult
+[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
+information on using pull requests.
+
+## What to expect from maintainers
+
+Expect maintainers to respond to new issues or pull requests within a week.
+For outstanding and ongoing issues and particularly for long-running
+pull requests, expect the maintainers to review within a week of a
+contributor asking for a new review. There is no commitment to resolution --
+merging or closing a pull request, or fixing or closing an issue -- because some
+issues will require more discussion than others.
diff --git a/vendor/cel.dev/expr/GOVERNANCE.md b/vendor/cel.dev/expr/GOVERNANCE.md
new file mode 100644
index 000000000..0a525bc17
--- /dev/null
+++ b/vendor/cel.dev/expr/GOVERNANCE.md
@@ -0,0 +1,43 @@
+# Project Governance
+
+This document defines the governance process for the CEL language. CEL is
+Google-developed, but openly governed. Major contributors to the CEL
+specification and its corresponding implementations constitute the CEL
+Language Council. New members may be added by a unanimous vote of the
+Council.
+
+The MAINTAINERS.md file lists the members of the CEL Language Council, and
+unofficially indicates the "areas of expertise" of each member with respect
+to the publicly available CEL repos.
+
+## Code Changes
+
+Code changes must follow the standard pull request (PR) model documented in the
+CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
+maintainer. The maintainer reserves the right to request that any feature
+request (FR) or PR be reviewed by the language council.
+
+## Syntax and Semantic Changes
+
+Syntactic and semantic changes must be reviewed by the CEL Language Council.
+Maintainers may also request language council review at their discretion.
+
+The review process is as follows:
+
+- Create a Feature Request in the CEL-Spec repo. The feature description will
+ serve as an abstract for the detailed design document.
+- Co-develop a design document with the Language Council.
+- Once the proposer gives the design document approval, the document will be
+ linked to the FR in the CEL-Spec repo and opened for comments to members of
+ the cel-lang-discuss@googlegroups.com.
+- The Language Council will review the design doc at the next council meeting
+ (once every three weeks) and the council decision included in the document.
+
+If the proposal is approved, the spec will be updated by a maintainer (if
+applicable) and a rationale will be included in the CEL-Spec wiki to ensure
+future developers may follow CEL's growth and direction over time.
+
+Approved proposals may be implemented by the proposer or by the maintainers as
+the parties see fit. At the discretion of the maintainer, changes from the
+approved design are permitted during implementation if they improve the user
+experience and clarity of the feature.
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/cel.dev/expr/LICENSE
similarity index 100%
rename from vendor/github.com/aws/aws-sdk-go/LICENSE.txt
rename to vendor/cel.dev/expr/LICENSE
diff --git a/vendor/cel.dev/expr/MAINTAINERS.md b/vendor/cel.dev/expr/MAINTAINERS.md
new file mode 100644
index 000000000..1ed2eb8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/MAINTAINERS.md
@@ -0,0 +1,13 @@
+# CEL Language Council
+
+| Name | Company | Area of Expertise |
+|-----------------|--------------|-------------------|
+| Alfred Fuller | Facebook | cel-cpp, cel-spec |
+| Jim Larson | Google | cel-go, cel-spec |
+| Matthais Blume | Google | cel-spec |
+| Tristan Swadell | Google | cel-go, cel-spec |
+
+## Emeritus
+
+* Sanjay Ghemawat (Google)
+* Wolfgang Grieskamp (Facebook)
diff --git a/vendor/cel.dev/expr/MODULE.bazel b/vendor/cel.dev/expr/MODULE.bazel
new file mode 100644
index 000000000..85ac9ff61
--- /dev/null
+++ b/vendor/cel.dev/expr/MODULE.bazel
@@ -0,0 +1,74 @@
+module(
+ name = "cel-spec",
+)
+
+bazel_dep(
+ name = "bazel_skylib",
+ version = "1.7.1",
+)
+bazel_dep(
+ name = "gazelle",
+ version = "0.39.1",
+ repo_name = "bazel_gazelle",
+)
+bazel_dep(
+ name = "googleapis",
+ version = "0.0.0-20241220-5e258e33.bcr.1",
+ repo_name = "com_google_googleapis",
+)
+bazel_dep(
+ name = "googleapis-cc",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-java",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "googleapis-go",
+ version = "1.0.0",
+)
+bazel_dep(
+ name = "protobuf",
+ version = "27.0",
+ repo_name = "com_google_protobuf",
+)
+bazel_dep(
+ name = "rules_cc",
+ version = "0.0.17",
+)
+bazel_dep(
+ name = "rules_go",
+ version = "0.53.0",
+ repo_name = "io_bazel_rules_go",
+)
+bazel_dep(
+ name = "rules_java",
+ version = "7.6.5",
+)
+bazel_dep(
+ name = "rules_proto",
+ version = "7.0.2",
+)
+bazel_dep(
+ name = "rules_python",
+ version = "0.35.0",
+)
+
+### PYTHON ###
+python = use_extension("@rules_python//python/extensions:python.bzl", "python")
+python.toolchain(
+ ignore_root_user_error = True,
+ python_version = "3.11",
+)
+
+go_sdk = use_extension("@io_bazel_rules_go//go:extensions.bzl", "go_sdk")
+go_sdk.download(version = "1.22.0")
+
+go_deps = use_extension("@bazel_gazelle//:extensions.bzl", "go_deps")
+go_deps.from_file(go_mod = "//:go.mod")
+use_repo(
+ go_deps,
+ "org_golang_google_genproto_googleapis_rpc",
+ "org_golang_google_protobuf",
+)
diff --git a/vendor/cel.dev/expr/README.md b/vendor/cel.dev/expr/README.md
new file mode 100644
index 000000000..42d67f87c
--- /dev/null
+++ b/vendor/cel.dev/expr/README.md
@@ -0,0 +1,71 @@
+# Common Expression Language
+
+The Common Expression Language (CEL) implements common semantics for expression
+evaluation, enabling different applications to more easily interoperate.
+
+Key Applications
+
+* Security policy: organizations have complex infrastructure and need common
+ tooling to reason about the system as a whole
+* Protocols: expressions are a useful data type and require interoperability
+ across programming languages and platforms.
+
+
+Guiding philosophy:
+
+1. Keep it small & fast.
+ * CEL evaluates in linear time, is mutation free, and not Turing-complete.
+ This limitation is a feature of the language design, which allows the
+ implementation to evaluate orders of magnitude faster than equivalently
+ sandboxed JavaScript.
+2. Make it extensible.
+ * CEL is designed to be embedded in applications, and allows for
+ extensibility via its context which allows for functions and data to be
+ provided by the software that embeds it.
+3. Developer-friendly.
+ * The language is approachable to developers. The initial spec was based
+ on the experience of developing Firebase Rules and usability testing
+ many prior iterations.
+ * The library itself and accompanying toolings should be easy to adopt by
+ teams that seek to integrate CEL into their platforms.
+
+The required components of a system that supports CEL are:
+
+* The textual representation of an expression as written by a developer. It is
+ of similar syntax to expressions in C/C++/Java/JavaScript
+* A representation of the program's abstract syntax tree (AST).
+* A compiler library that converts the textual representation to the binary
+ representation. This can be done ahead of time (in the control plane) or
+ just before evaluation (in the data plane).
+* A context containing one or more typed variables, often protobuf messages.
+ Most use-cases will use `attribute_context.proto`
+* An evaluator library that takes the binary format in the context and
+ produces a result, usually a Boolean.
+
+For use cases which require persistence or cross-process communication, it is
+highly recommended to serialize the type-checked expression as a protocol
+buffer. The CEL team will maintain canonical protocol buffers for ASTs and
+will keep these versions identical and wire-compatible in perpetuity:
+
+* [CEL canonical](https://github.com/google/cel-spec/tree/master/proto/cel/expr)
+* [CEL v1alpha1](https://github.com/googleapis/googleapis/tree/master/google/api/expr/v1alpha1)
+
+
+Example of boolean conditions and object construction:
+
+``` c
+// Condition
+account.balance >= transaction.withdrawal
+ || (account.overdraftProtection
+ && account.overdraftLimit >= transaction.withdrawal - account.balance)
+
+// Object construction
+common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
+```
+
+For more detail, see:
+
+* [Introduction](doc/intro.md)
+* [Language Definition](doc/langdef.md)
+
+Released under the [Apache License](LICENSE).
diff --git a/vendor/cel.dev/expr/WORKSPACE b/vendor/cel.dev/expr/WORKSPACE
new file mode 100644
index 000000000..b6dc9ed67
--- /dev/null
+++ b/vendor/cel.dev/expr/WORKSPACE
@@ -0,0 +1,145 @@
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+ name = "io_bazel_rules_go",
+ sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ "https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
+ ],
+)
+
+http_archive(
+ name = "bazel_gazelle",
+ sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
+ urls = [
+ "https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ "https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
+ ],
+)
+
+http_archive(
+ name = "rules_proto",
+ sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
+ strip_prefix = "rules_proto-4.0.0-3.20.0",
+ urls = [
+ "https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
+ ],
+)
+
+# googleapis as of 09/16/2024
+http_archive(
+ name = "com_google_googleapis",
+ strip_prefix = "googleapis-4082d5e51e8481f6ccc384cacd896f4e78f19dee",
+ sha256 = "57319889d47578b3c89bf1b3f34888d796a8913d63b32d750a4cd12ed303c4e8",
+ urls = [
+ "https://github.com/googleapis/googleapis/archive/4082d5e51e8481f6ccc384cacd896f4e78f19dee.tar.gz",
+ ],
+)
+
+# protobuf
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
+ strip_prefix = "protobuf-3.21.5",
+ urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
+)
+
+# googletest
+http_archive(
+ name = "com_google_googletest",
+ urls = ["https://github.com/google/googletest/archive/master.zip"],
+ strip_prefix = "googletest-master",
+)
+
+# gflags
+http_archive(
+ name = "com_github_gflags_gflags",
+ sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
+ strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
+ urls = [
+ "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
+ ],
+)
+
+# glog
+http_archive(
+ name = "com_google_glog",
+ sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
+ strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
+ urls = [
+ "https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ "https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
+ ],
+)
+
+# absl
+http_archive(
+ name = "com_google_absl",
+ strip_prefix = "abseil-cpp-master",
+ urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
+)
+
+load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
+load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
+load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
+load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
+load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
+
+switched_rules_by_language(
+ name = "com_google_googleapis_imports",
+ cc = True,
+)
+
+# Do *not* call *_dependencies(), etc, yet. See comment at the end.
+
+# Generated Google APIs protos for Golang
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_api",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/api",
+ sum = "h1:YcyjlL1PRr2Q17/I0dPk2JmYS5CDXfcdb2Z3YRioEbw=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# Generated Google APIs protos for Golang 08/26/2024
+go_repository(
+ name = "org_golang_google_genproto_googleapis_rpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/genproto/googleapis/rpc",
+ sum = "h1:2035KHhUv+EpyB+hWgJnaWKJOdX1E95w2S8Rr4uWKTs=",
+ version = "v0.0.0-20240826202546-f6391c0de4c7",
+)
+
+# gRPC deps
+go_repository(
+ name = "org_golang_google_grpc",
+ build_file_proto_mode = "disable_global",
+ importpath = "google.golang.org/grpc",
+ tag = "v1.49.0",
+)
+
+go_repository(
+ name = "org_golang_x_net",
+ importpath = "golang.org/x/net",
+ sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
+ version = "v0.0.0-20190311183353-d8887717615a",
+)
+
+go_repository(
+ name = "org_golang_x_text",
+ importpath = "golang.org/x/text",
+ sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
+ version = "v0.3.2",
+)
+
+# Run the dependencies at the end. These will silently try to import some
+# of the above repositories but at different versions, so ours must come first.
+go_rules_dependencies()
+go_register_toolchains(version = "1.19.1")
+gazelle_dependencies()
+rules_proto_dependencies()
+rules_proto_toolchains()
+protobuf_deps()
diff --git a/vendor/cel.dev/expr/WORKSPACE.bzlmod b/vendor/cel.dev/expr/WORKSPACE.bzlmod
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/cel.dev/expr/checked.pb.go b/vendor/cel.dev/expr/checked.pb.go
new file mode 100644
index 000000000..bb225c8ab
--- /dev/null
+++ b/vendor/cel.dev/expr/checked.pb.go
@@ -0,0 +1,1432 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/checked.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Type_PrimitiveType int32
+
+const (
+ Type_PRIMITIVE_TYPE_UNSPECIFIED Type_PrimitiveType = 0
+ Type_BOOL Type_PrimitiveType = 1
+ Type_INT64 Type_PrimitiveType = 2
+ Type_UINT64 Type_PrimitiveType = 3
+ Type_DOUBLE Type_PrimitiveType = 4
+ Type_STRING Type_PrimitiveType = 5
+ Type_BYTES Type_PrimitiveType = 6
+)
+
+// Enum value maps for Type_PrimitiveType.
+var (
+ Type_PrimitiveType_name = map[int32]string{
+ 0: "PRIMITIVE_TYPE_UNSPECIFIED",
+ 1: "BOOL",
+ 2: "INT64",
+ 3: "UINT64",
+ 4: "DOUBLE",
+ 5: "STRING",
+ 6: "BYTES",
+ }
+ Type_PrimitiveType_value = map[string]int32{
+ "PRIMITIVE_TYPE_UNSPECIFIED": 0,
+ "BOOL": 1,
+ "INT64": 2,
+ "UINT64": 3,
+ "DOUBLE": 4,
+ "STRING": 5,
+ "BYTES": 6,
+ }
+)
+
+func (x Type_PrimitiveType) Enum() *Type_PrimitiveType {
+ p := new(Type_PrimitiveType)
+ *p = x
+ return p
+}
+
+func (x Type_PrimitiveType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_PrimitiveType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[0].Descriptor()
+}
+
+func (Type_PrimitiveType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[0]
+}
+
+func (x Type_PrimitiveType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_PrimitiveType.Descriptor instead.
+func (Type_PrimitiveType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type Type_WellKnownType int32
+
+const (
+ Type_WELL_KNOWN_TYPE_UNSPECIFIED Type_WellKnownType = 0
+ Type_ANY Type_WellKnownType = 1
+ Type_TIMESTAMP Type_WellKnownType = 2
+ Type_DURATION Type_WellKnownType = 3
+)
+
+// Enum value maps for Type_WellKnownType.
+var (
+ Type_WellKnownType_name = map[int32]string{
+ 0: "WELL_KNOWN_TYPE_UNSPECIFIED",
+ 1: "ANY",
+ 2: "TIMESTAMP",
+ 3: "DURATION",
+ }
+ Type_WellKnownType_value = map[string]int32{
+ "WELL_KNOWN_TYPE_UNSPECIFIED": 0,
+ "ANY": 1,
+ "TIMESTAMP": 2,
+ "DURATION": 3,
+ }
+)
+
+func (x Type_WellKnownType) Enum() *Type_WellKnownType {
+ p := new(Type_WellKnownType)
+ *p = x
+ return p
+}
+
+func (x Type_WellKnownType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Type_WellKnownType) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_checked_proto_enumTypes[1].Descriptor()
+}
+
+func (Type_WellKnownType) Type() protoreflect.EnumType {
+ return &file_cel_expr_checked_proto_enumTypes[1]
+}
+
+func (x Type_WellKnownType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Type_WellKnownType.Descriptor instead.
+func (Type_WellKnownType) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+type CheckedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReferenceMap map[int64]*Reference `protobuf:"bytes,2,rep,name=reference_map,json=referenceMap,proto3" json:"reference_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ TypeMap map[int64]*Type `protobuf:"bytes,3,rep,name=type_map,json=typeMap,proto3" json:"type_map,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ SourceInfo *SourceInfo `protobuf:"bytes,5,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+ ExprVersion string `protobuf:"bytes,6,opt,name=expr_version,json=exprVersion,proto3" json:"expr_version,omitempty"`
+ Expr *Expr `protobuf:"bytes,4,opt,name=expr,proto3" json:"expr,omitempty"`
+}
+
+func (x *CheckedExpr) Reset() {
+ *x = CheckedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CheckedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CheckedExpr) ProtoMessage() {}
+
+func (x *CheckedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CheckedExpr.ProtoReflect.Descriptor instead.
+func (*CheckedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CheckedExpr) GetReferenceMap() map[int64]*Reference {
+ if x != nil {
+ return x.ReferenceMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetTypeMap() map[int64]*Type {
+ if x != nil {
+ return x.TypeMap
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+func (x *CheckedExpr) GetExprVersion() string {
+ if x != nil {
+ return x.ExprVersion
+ }
+ return ""
+}
+
+func (x *CheckedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+type Type struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to TypeKind:
+ //
+ // *Type_Dyn
+ // *Type_Null
+ // *Type_Primitive
+ // *Type_Wrapper
+ // *Type_WellKnown
+ // *Type_ListType_
+ // *Type_MapType_
+ // *Type_Function
+ // *Type_MessageType
+ // *Type_TypeParam
+ // *Type_Type
+ // *Type_Error
+ // *Type_AbstractType_
+ TypeKind isType_TypeKind `protobuf_oneof:"type_kind"`
+}
+
+func (x *Type) Reset() {
+ *x = Type{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type) ProtoMessage() {}
+
+func (x *Type) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type.ProtoReflect.Descriptor instead.
+func (*Type) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *Type) GetTypeKind() isType_TypeKind {
+ if m != nil {
+ return m.TypeKind
+ }
+ return nil
+}
+
+func (x *Type) GetDyn() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Dyn); ok {
+ return x.Dyn
+ }
+ return nil
+}
+
+func (x *Type) GetNull() structpb.NullValue {
+ if x, ok := x.GetTypeKind().(*Type_Null); ok {
+ return x.Null
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Type) GetPrimitive() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Primitive); ok {
+ return x.Primitive
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWrapper() Type_PrimitiveType {
+ if x, ok := x.GetTypeKind().(*Type_Wrapper); ok {
+ return x.Wrapper
+ }
+ return Type_PRIMITIVE_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetWellKnown() Type_WellKnownType {
+ if x, ok := x.GetTypeKind().(*Type_WellKnown); ok {
+ return x.WellKnown
+ }
+ return Type_WELL_KNOWN_TYPE_UNSPECIFIED
+}
+
+func (x *Type) GetListType() *Type_ListType {
+ if x, ok := x.GetTypeKind().(*Type_ListType_); ok {
+ return x.ListType
+ }
+ return nil
+}
+
+func (x *Type) GetMapType() *Type_MapType {
+ if x, ok := x.GetTypeKind().(*Type_MapType_); ok {
+ return x.MapType
+ }
+ return nil
+}
+
+func (x *Type) GetFunction() *Type_FunctionType {
+ if x, ok := x.GetTypeKind().(*Type_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+func (x *Type) GetMessageType() string {
+ if x, ok := x.GetTypeKind().(*Type_MessageType); ok {
+ return x.MessageType
+ }
+ return ""
+}
+
+func (x *Type) GetTypeParam() string {
+ if x, ok := x.GetTypeKind().(*Type_TypeParam); ok {
+ return x.TypeParam
+ }
+ return ""
+}
+
+func (x *Type) GetType() *Type {
+ if x, ok := x.GetTypeKind().(*Type_Type); ok {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Type) GetError() *emptypb.Empty {
+ if x, ok := x.GetTypeKind().(*Type_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *Type) GetAbstractType() *Type_AbstractType {
+ if x, ok := x.GetTypeKind().(*Type_AbstractType_); ok {
+ return x.AbstractType
+ }
+ return nil
+}
+
+type isType_TypeKind interface {
+ isType_TypeKind()
+}
+
+type Type_Dyn struct {
+ Dyn *emptypb.Empty `protobuf:"bytes,1,opt,name=dyn,proto3,oneof"`
+}
+
+type Type_Null struct {
+ Null structpb.NullValue `protobuf:"varint,2,opt,name=null,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Type_Primitive struct {
+ Primitive Type_PrimitiveType `protobuf:"varint,3,opt,name=primitive,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_Wrapper struct {
+ Wrapper Type_PrimitiveType `protobuf:"varint,4,opt,name=wrapper,proto3,enum=cel.expr.Type_PrimitiveType,oneof"`
+}
+
+type Type_WellKnown struct {
+ WellKnown Type_WellKnownType `protobuf:"varint,5,opt,name=well_known,json=wellKnown,proto3,enum=cel.expr.Type_WellKnownType,oneof"`
+}
+
+type Type_ListType_ struct {
+ ListType *Type_ListType `protobuf:"bytes,6,opt,name=list_type,json=listType,proto3,oneof"`
+}
+
+type Type_MapType_ struct {
+ MapType *Type_MapType `protobuf:"bytes,7,opt,name=map_type,json=mapType,proto3,oneof"`
+}
+
+type Type_Function struct {
+ Function *Type_FunctionType `protobuf:"bytes,8,opt,name=function,proto3,oneof"`
+}
+
+type Type_MessageType struct {
+ MessageType string `protobuf:"bytes,9,opt,name=message_type,json=messageType,proto3,oneof"`
+}
+
+type Type_TypeParam struct {
+ TypeParam string `protobuf:"bytes,10,opt,name=type_param,json=typeParam,proto3,oneof"`
+}
+
+type Type_Type struct {
+ Type *Type `protobuf:"bytes,11,opt,name=type,proto3,oneof"`
+}
+
+type Type_Error struct {
+ Error *emptypb.Empty `protobuf:"bytes,12,opt,name=error,proto3,oneof"`
+}
+
+type Type_AbstractType_ struct {
+ AbstractType *Type_AbstractType `protobuf:"bytes,14,opt,name=abstract_type,json=abstractType,proto3,oneof"`
+}
+
+func (*Type_Dyn) isType_TypeKind() {}
+
+func (*Type_Null) isType_TypeKind() {}
+
+func (*Type_Primitive) isType_TypeKind() {}
+
+func (*Type_Wrapper) isType_TypeKind() {}
+
+func (*Type_WellKnown) isType_TypeKind() {}
+
+func (*Type_ListType_) isType_TypeKind() {}
+
+func (*Type_MapType_) isType_TypeKind() {}
+
+func (*Type_Function) isType_TypeKind() {}
+
+func (*Type_MessageType) isType_TypeKind() {}
+
+func (*Type_TypeParam) isType_TypeKind() {}
+
+func (*Type_Type) isType_TypeKind() {}
+
+func (*Type_Error) isType_TypeKind() {}
+
+func (*Type_AbstractType_) isType_TypeKind() {}
+
+type Decl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to DeclKind:
+ //
+ // *Decl_Ident
+ // *Decl_Function
+ DeclKind isDecl_DeclKind `protobuf_oneof:"decl_kind"`
+}
+
+func (x *Decl) Reset() {
+ *x = Decl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl) ProtoMessage() {}
+
+func (x *Decl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl.ProtoReflect.Descriptor instead.
+func (*Decl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Decl) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *Decl) GetDeclKind() isDecl_DeclKind {
+ if m != nil {
+ return m.DeclKind
+ }
+ return nil
+}
+
+func (x *Decl) GetIdent() *Decl_IdentDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Ident); ok {
+ return x.Ident
+ }
+ return nil
+}
+
+func (x *Decl) GetFunction() *Decl_FunctionDecl {
+ if x, ok := x.GetDeclKind().(*Decl_Function); ok {
+ return x.Function
+ }
+ return nil
+}
+
+type isDecl_DeclKind interface {
+ isDecl_DeclKind()
+}
+
+type Decl_Ident struct {
+ Ident *Decl_IdentDecl `protobuf:"bytes,2,opt,name=ident,proto3,oneof"`
+}
+
+type Decl_Function struct {
+ Function *Decl_FunctionDecl `protobuf:"bytes,3,opt,name=function,proto3,oneof"`
+}
+
+func (*Decl_Ident) isDecl_DeclKind() {}
+
+func (*Decl_Function) isDecl_DeclKind() {}
+
+type Reference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ OverloadId []string `protobuf:"bytes,3,rep,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Value *Constant `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Reference) Reset() {
+ *x = Reference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Reference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Reference) ProtoMessage() {}
+
+func (x *Reference) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Reference.ProtoReflect.Descriptor instead.
+func (*Reference) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Reference) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Reference) GetOverloadId() []string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return nil
+}
+
+func (x *Reference) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+type Type_ListType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ElemType *Type `protobuf:"bytes,1,opt,name=elem_type,json=elemType,proto3" json:"elem_type,omitempty"`
+}
+
+func (x *Type_ListType) Reset() {
+ *x = Type_ListType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_ListType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_ListType) ProtoMessage() {}
+
+func (x *Type_ListType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_ListType.ProtoReflect.Descriptor instead.
+func (*Type_ListType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Type_ListType) GetElemType() *Type {
+ if x != nil {
+ return x.ElemType
+ }
+ return nil
+}
+
+type Type_MapType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ KeyType *Type `protobuf:"bytes,1,opt,name=key_type,json=keyType,proto3" json:"key_type,omitempty"`
+ ValueType *Type `protobuf:"bytes,2,opt,name=value_type,json=valueType,proto3" json:"value_type,omitempty"`
+}
+
+func (x *Type_MapType) Reset() {
+ *x = Type_MapType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_MapType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_MapType) ProtoMessage() {}
+
+func (x *Type_MapType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_MapType.ProtoReflect.Descriptor instead.
+func (*Type_MapType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *Type_MapType) GetKeyType() *Type {
+ if x != nil {
+ return x.KeyType
+ }
+ return nil
+}
+
+func (x *Type_MapType) GetValueType() *Type {
+ if x != nil {
+ return x.ValueType
+ }
+ return nil
+}
+
+type Type_FunctionType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ResultType *Type `protobuf:"bytes,1,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ ArgTypes []*Type `protobuf:"bytes,2,rep,name=arg_types,json=argTypes,proto3" json:"arg_types,omitempty"`
+}
+
+func (x *Type_FunctionType) Reset() {
+ *x = Type_FunctionType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_FunctionType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_FunctionType) ProtoMessage() {}
+
+func (x *Type_FunctionType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_FunctionType.ProtoReflect.Descriptor instead.
+func (*Type_FunctionType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *Type_FunctionType) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Type_FunctionType) GetArgTypes() []*Type {
+ if x != nil {
+ return x.ArgTypes
+ }
+ return nil
+}
+
+type Type_AbstractType struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ ParameterTypes []*Type `protobuf:"bytes,2,rep,name=parameter_types,json=parameterTypes,proto3" json:"parameter_types,omitempty"`
+}
+
+func (x *Type_AbstractType) Reset() {
+ *x = Type_AbstractType{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Type_AbstractType) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Type_AbstractType) ProtoMessage() {}
+
+func (x *Type_AbstractType) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Type_AbstractType.ProtoReflect.Descriptor instead.
+func (*Type_AbstractType) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{1, 3}
+}
+
+func (x *Type_AbstractType) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Type_AbstractType) GetParameterTypes() []*Type {
+ if x != nil {
+ return x.ParameterTypes
+ }
+ return nil
+}
+
+type Decl_IdentDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type *Type `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value *Constant `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Doc string `protobuf:"bytes,3,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_IdentDecl) Reset() {
+ *x = Decl_IdentDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_IdentDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_IdentDecl) ProtoMessage() {}
+
+func (x *Decl_IdentDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_IdentDecl.ProtoReflect.Descriptor instead.
+func (*Decl_IdentDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *Decl_IdentDecl) GetType() *Type {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetValue() *Constant {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Decl_IdentDecl) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+type Decl_FunctionDecl struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Overloads []*Decl_FunctionDecl_Overload `protobuf:"bytes,1,rep,name=overloads,proto3" json:"overloads,omitempty"`
+}
+
+func (x *Decl_FunctionDecl) Reset() {
+ *x = Decl_FunctionDecl{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *Decl_FunctionDecl) GetOverloads() []*Decl_FunctionDecl_Overload {
+ if x != nil {
+ return x.Overloads
+ }
+ return nil
+}
+
+type Decl_FunctionDecl_Overload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ OverloadId string `protobuf:"bytes,1,opt,name=overload_id,json=overloadId,proto3" json:"overload_id,omitempty"`
+ Params []*Type `protobuf:"bytes,2,rep,name=params,proto3" json:"params,omitempty"`
+ TypeParams []string `protobuf:"bytes,3,rep,name=type_params,json=typeParams,proto3" json:"type_params,omitempty"`
+ ResultType *Type `protobuf:"bytes,4,opt,name=result_type,json=resultType,proto3" json:"result_type,omitempty"`
+ IsInstanceFunction bool `protobuf:"varint,5,opt,name=is_instance_function,json=isInstanceFunction,proto3" json:"is_instance_function,omitempty"`
+ Doc string `protobuf:"bytes,6,opt,name=doc,proto3" json:"doc,omitempty"`
+}
+
+func (x *Decl_FunctionDecl_Overload) Reset() {
+ *x = Decl_FunctionDecl_Overload{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Decl_FunctionDecl_Overload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Decl_FunctionDecl_Overload) ProtoMessage() {}
+
+func (x *Decl_FunctionDecl_Overload) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_checked_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Decl_FunctionDecl_Overload.ProtoReflect.Descriptor instead.
+func (*Decl_FunctionDecl_Overload) Descriptor() ([]byte, []int) {
+ return file_cel_expr_checked_proto_rawDescGZIP(), []int{2, 1, 0}
+}
+
+func (x *Decl_FunctionDecl_Overload) GetOverloadId() string {
+ if x != nil {
+ return x.OverloadId
+ }
+ return ""
+}
+
+func (x *Decl_FunctionDecl_Overload) GetParams() []*Type {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetTypeParams() []string {
+ if x != nil {
+ return x.TypeParams
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetResultType() *Type {
+ if x != nil {
+ return x.ResultType
+ }
+ return nil
+}
+
+func (x *Decl_FunctionDecl_Overload) GetIsInstanceFunction() bool {
+ if x != nil {
+ return x.IsInstanceFunction
+ }
+ return false
+}
+
+func (x *Decl_FunctionDecl_Overload) GetDoc() string {
+ if x != nil {
+ return x.Doc
+ }
+ return ""
+}
+
+var File_cel_expr_checked_proto protoreflect.FileDescriptor
+
+var file_cel_expr_checked_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e,
+ 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xba, 0x03, 0x0a, 0x0b, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64,
+ 0x45, 0x78, 0x70, 0x72, 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
+ 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d,
+ 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x4d, 0x61,
+ 0x70, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x72,
+ 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x65, 0x78, 0x70, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x65,
+ 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x54, 0x0a, 0x11, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x4d, 0x61, 0x70, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x0c, 0x54, 0x79, 0x70, 0x65, 0x4d, 0x61, 0x70,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0xe6, 0x09, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2a, 0x0a, 0x03, 0x64, 0x79,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48,
+ 0x00, 0x52, 0x03, 0x64, 0x79, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x48, 0x00, 0x52, 0x04, 0x6e, 0x75, 0x6c, 0x6c, 0x12, 0x3c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d,
+ 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x69,
+ 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0a, 0x77, 0x65, 0x6c, 0x6c, 0x5f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
+ 0x70, 0x65, 0x48, 0x00, 0x52, 0x09, 0x77, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x12,
+ 0x36, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x6c,
+ 0x69, 0x73, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x61, 0x70, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70,
+ 0x65, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x46,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x08, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52,
+ 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0a,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x12, 0x24, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0d, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x2e, 0x41, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x62, 0x73, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x37, 0x0a, 0x08, 0x4c, 0x69, 0x73, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x65, 0x6c, 0x65, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x54, 0x79, 0x70, 0x65,
+ 0x1a, 0x63, 0x0a, 0x07, 0x4d, 0x61, 0x70, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x08, 0x6b,
+ 0x65, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07, 0x6b,
+ 0x65, 0x79, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2d, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x6c, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2f, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75,
+ 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x61, 0x72, 0x67, 0x54, 0x79,
+ 0x70, 0x65, 0x73, 0x1a, 0x5b, 0x0a, 0x0c, 0x41, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, 0x54,
+ 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x65, 0x74, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0e, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x73,
+ 0x22, 0x73, 0x0a, 0x0d, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x1e, 0x0a, 0x1a, 0x50, 0x52, 0x49, 0x4d, 0x49, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x54,
+ 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x49,
+ 0x4e, 0x54, 0x36, 0x34, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34,
+ 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x59,
+ 0x54, 0x45, 0x53, 0x10, 0x06, 0x22, 0x56, 0x0a, 0x0d, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
+ 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x1b, 0x57, 0x45, 0x4c, 0x4c, 0x5f, 0x4b,
+ 0x4e, 0x4f, 0x57, 0x4e, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x59, 0x10, 0x01,
+ 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x53, 0x54, 0x41, 0x4d, 0x50, 0x10, 0x02, 0x12,
+ 0x0c, 0x0a, 0x08, 0x44, 0x55, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x42, 0x0b, 0x0a,
+ 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc2, 0x04, 0x0a, 0x04, 0x44,
+ 0x65, 0x63, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x6c,
+ 0x48, 0x00, 0x52, 0x05, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x63, 0x6c, 0x48, 0x00, 0x52, 0x08, 0x66, 0x75, 0x6e, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x6b, 0x0a, 0x09, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x44, 0x65, 0x63,
+ 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12,
+ 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64, 0x6f,
+ 0x63, 0x1a, 0xbe, 0x02, 0x0a, 0x0c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x12, 0x42, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
+ 0x63, 0x6c, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x09, 0x6f, 0x76, 0x65,
+ 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x1a, 0xe9, 0x01, 0x0a, 0x08, 0x4f, 0x76, 0x65, 0x72, 0x6c,
+ 0x6f, 0x61, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x26, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a,
+ 0x0b, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30,
+ 0x0a, 0x14, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x66, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x69, 0x73,
+ 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x10, 0x0a, 0x03, 0x64, 0x6f, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x64,
+ 0x6f, 0x63, 0x42, 0x0b, 0x0a, 0x09, 0x64, 0x65, 0x63, 0x6c, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22,
+ 0x6a, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x49,
+ 0x64, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73,
+ 0x74, 0x61, 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2c, 0x0a, 0x0c, 0x64,
+ 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x44, 0x65, 0x63,
+ 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_checked_proto_rawDescOnce sync.Once
+ file_cel_expr_checked_proto_rawDescData = file_cel_expr_checked_proto_rawDesc
+)
+
+func file_cel_expr_checked_proto_rawDescGZIP() []byte {
+ file_cel_expr_checked_proto_rawDescOnce.Do(func() {
+ file_cel_expr_checked_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_checked_proto_rawDescData)
+ })
+ return file_cel_expr_checked_proto_rawDescData
+}
+
+var file_cel_expr_checked_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_cel_expr_checked_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_cel_expr_checked_proto_goTypes = []interface{}{
+ (Type_PrimitiveType)(0), // 0: cel.expr.Type.PrimitiveType
+ (Type_WellKnownType)(0), // 1: cel.expr.Type.WellKnownType
+ (*CheckedExpr)(nil), // 2: cel.expr.CheckedExpr
+ (*Type)(nil), // 3: cel.expr.Type
+ (*Decl)(nil), // 4: cel.expr.Decl
+ (*Reference)(nil), // 5: cel.expr.Reference
+ nil, // 6: cel.expr.CheckedExpr.ReferenceMapEntry
+ nil, // 7: cel.expr.CheckedExpr.TypeMapEntry
+ (*Type_ListType)(nil), // 8: cel.expr.Type.ListType
+ (*Type_MapType)(nil), // 9: cel.expr.Type.MapType
+ (*Type_FunctionType)(nil), // 10: cel.expr.Type.FunctionType
+ (*Type_AbstractType)(nil), // 11: cel.expr.Type.AbstractType
+ (*Decl_IdentDecl)(nil), // 12: cel.expr.Decl.IdentDecl
+ (*Decl_FunctionDecl)(nil), // 13: cel.expr.Decl.FunctionDecl
+ (*Decl_FunctionDecl_Overload)(nil), // 14: cel.expr.Decl.FunctionDecl.Overload
+ (*SourceInfo)(nil), // 15: cel.expr.SourceInfo
+ (*Expr)(nil), // 16: cel.expr.Expr
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+ (structpb.NullValue)(0), // 18: google.protobuf.NullValue
+ (*Constant)(nil), // 19: cel.expr.Constant
+}
+var file_cel_expr_checked_proto_depIdxs = []int32{
+ 6, // 0: cel.expr.CheckedExpr.reference_map:type_name -> cel.expr.CheckedExpr.ReferenceMapEntry
+ 7, // 1: cel.expr.CheckedExpr.type_map:type_name -> cel.expr.CheckedExpr.TypeMapEntry
+ 15, // 2: cel.expr.CheckedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 16, // 3: cel.expr.CheckedExpr.expr:type_name -> cel.expr.Expr
+ 17, // 4: cel.expr.Type.dyn:type_name -> google.protobuf.Empty
+ 18, // 5: cel.expr.Type.null:type_name -> google.protobuf.NullValue
+ 0, // 6: cel.expr.Type.primitive:type_name -> cel.expr.Type.PrimitiveType
+ 0, // 7: cel.expr.Type.wrapper:type_name -> cel.expr.Type.PrimitiveType
+ 1, // 8: cel.expr.Type.well_known:type_name -> cel.expr.Type.WellKnownType
+ 8, // 9: cel.expr.Type.list_type:type_name -> cel.expr.Type.ListType
+ 9, // 10: cel.expr.Type.map_type:type_name -> cel.expr.Type.MapType
+ 10, // 11: cel.expr.Type.function:type_name -> cel.expr.Type.FunctionType
+ 3, // 12: cel.expr.Type.type:type_name -> cel.expr.Type
+ 17, // 13: cel.expr.Type.error:type_name -> google.protobuf.Empty
+ 11, // 14: cel.expr.Type.abstract_type:type_name -> cel.expr.Type.AbstractType
+ 12, // 15: cel.expr.Decl.ident:type_name -> cel.expr.Decl.IdentDecl
+ 13, // 16: cel.expr.Decl.function:type_name -> cel.expr.Decl.FunctionDecl
+ 19, // 17: cel.expr.Reference.value:type_name -> cel.expr.Constant
+ 5, // 18: cel.expr.CheckedExpr.ReferenceMapEntry.value:type_name -> cel.expr.Reference
+ 3, // 19: cel.expr.CheckedExpr.TypeMapEntry.value:type_name -> cel.expr.Type
+ 3, // 20: cel.expr.Type.ListType.elem_type:type_name -> cel.expr.Type
+ 3, // 21: cel.expr.Type.MapType.key_type:type_name -> cel.expr.Type
+ 3, // 22: cel.expr.Type.MapType.value_type:type_name -> cel.expr.Type
+ 3, // 23: cel.expr.Type.FunctionType.result_type:type_name -> cel.expr.Type
+ 3, // 24: cel.expr.Type.FunctionType.arg_types:type_name -> cel.expr.Type
+ 3, // 25: cel.expr.Type.AbstractType.parameter_types:type_name -> cel.expr.Type
+ 3, // 26: cel.expr.Decl.IdentDecl.type:type_name -> cel.expr.Type
+ 19, // 27: cel.expr.Decl.IdentDecl.value:type_name -> cel.expr.Constant
+ 14, // 28: cel.expr.Decl.FunctionDecl.overloads:type_name -> cel.expr.Decl.FunctionDecl.Overload
+ 3, // 29: cel.expr.Decl.FunctionDecl.Overload.params:type_name -> cel.expr.Type
+ 3, // 30: cel.expr.Decl.FunctionDecl.Overload.result_type:type_name -> cel.expr.Type
+ 31, // [31:31] is the sub-list for method output_type
+ 31, // [31:31] is the sub-list for method input_type
+ 31, // [31:31] is the sub-list for extension type_name
+ 31, // [31:31] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_checked_proto_init() }
+func file_cel_expr_checked_proto_init() {
+ if File_cel_expr_checked_proto != nil {
+ return
+ }
+ file_cel_expr_syntax_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_checked_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CheckedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Reference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_ListType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_MapType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_FunctionType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Type_AbstractType); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_IdentDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Decl_FunctionDecl_Overload); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_checked_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Type_Dyn)(nil),
+ (*Type_Null)(nil),
+ (*Type_Primitive)(nil),
+ (*Type_Wrapper)(nil),
+ (*Type_WellKnown)(nil),
+ (*Type_ListType_)(nil),
+ (*Type_MapType_)(nil),
+ (*Type_Function)(nil),
+ (*Type_MessageType)(nil),
+ (*Type_TypeParam)(nil),
+ (*Type_Type)(nil),
+ (*Type_Error)(nil),
+ (*Type_AbstractType_)(nil),
+ }
+ file_cel_expr_checked_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Decl_Ident)(nil),
+ (*Decl_Function)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_checked_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_checked_proto_goTypes,
+ DependencyIndexes: file_cel_expr_checked_proto_depIdxs,
+ EnumInfos: file_cel_expr_checked_proto_enumTypes,
+ MessageInfos: file_cel_expr_checked_proto_msgTypes,
+ }.Build()
+ File_cel_expr_checked_proto = out.File
+ file_cel_expr_checked_proto_rawDesc = nil
+ file_cel_expr_checked_proto_goTypes = nil
+ file_cel_expr_checked_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/cloudbuild.yaml b/vendor/cel.dev/expr/cloudbuild.yaml
new file mode 100644
index 000000000..e3e533a04
--- /dev/null
+++ b/vendor/cel.dev/expr/cloudbuild.yaml
@@ -0,0 +1,9 @@
+steps:
+- name: 'gcr.io/cloud-builders/bazel:7.3.2'
+ entrypoint: bazel
+ args: ['build', '...']
+ id: bazel-build
+ waitFor: ['-']
+timeout: 15m
+options:
+ machineType: 'N1_HIGHCPU_32'
diff --git a/vendor/cel.dev/expr/eval.pb.go b/vendor/cel.dev/expr/eval.pb.go
new file mode 100644
index 000000000..8f651f9cc
--- /dev/null
+++ b/vendor/cel.dev/expr/eval.pb.go
@@ -0,0 +1,490 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/eval.proto
+
+package expr
+
+import (
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type EvalState struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
+}
+
+func (x *EvalState) Reset() {
+ *x = EvalState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvalState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState) ProtoMessage() {}
+
+func (x *EvalState) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
+func (*EvalState) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *EvalState) GetValues() []*ExprValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *EvalState) GetResults() []*EvalState_Result {
+ if x != nil {
+ return x.Results
+ }
+ return nil
+}
+
+type ExprValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *ExprValue_Value
+ // *ExprValue_Error
+ // *ExprValue_Unknown
+ Kind isExprValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *ExprValue) Reset() {
+ *x = ExprValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExprValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExprValue) ProtoMessage() {}
+
+func (x *ExprValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
+func (*ExprValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *ExprValue) GetKind() isExprValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *ExprValue) GetValue() *Value {
+ if x, ok := x.GetKind().(*ExprValue_Value); ok {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *ExprValue) GetError() *ErrorSet {
+ if x, ok := x.GetKind().(*ExprValue_Error); ok {
+ return x.Error
+ }
+ return nil
+}
+
+func (x *ExprValue) GetUnknown() *UnknownSet {
+ if x, ok := x.GetKind().(*ExprValue_Unknown); ok {
+ return x.Unknown
+ }
+ return nil
+}
+
+type isExprValue_Kind interface {
+ isExprValue_Kind()
+}
+
+type ExprValue_Value struct {
+ Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
+}
+
+type ExprValue_Error struct {
+ Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
+}
+
+type ExprValue_Unknown struct {
+ Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
+}
+
+func (*ExprValue_Value) isExprValue_Kind() {}
+
+func (*ExprValue_Error) isExprValue_Kind() {}
+
+func (*ExprValue_Unknown) isExprValue_Kind() {}
+
+type ErrorSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *ErrorSet) Reset() {
+ *x = ErrorSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ErrorSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorSet) ProtoMessage() {}
+
+func (x *ErrorSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
+func (*ErrorSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ErrorSet) GetErrors() []*status.Status {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+type UnknownSet struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
+}
+
+func (x *UnknownSet) Reset() {
+ *x = UnknownSet{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnknownSet) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnknownSet) ProtoMessage() {}
+
+func (x *UnknownSet) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
+func (*UnknownSet) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UnknownSet) GetExprs() []int64 {
+ if x != nil {
+ return x.Exprs
+ }
+ return nil
+}
+
+type EvalState_Result struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
+ Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EvalState_Result) Reset() {
+ *x = EvalState_Result{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EvalState_Result) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EvalState_Result) ProtoMessage() {}
+
+func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_eval_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
+func (*EvalState_Result) Descriptor() ([]byte, []int) {
+ return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *EvalState_Result) GetExpr() int64 {
+ if x != nil {
+ return x.Expr
+ }
+ return 0
+}
+
+func (x *EvalState_Result) GetValue() int64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+var File_cel_expr_eval_proto protoreflect.FileDescriptor
+
+var file_cel_expr_eval_proto_rawDesc = []byte{
+ 0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
+ 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
+ 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2,
+ 0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73,
+ 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e,
+ 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a,
+ 0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70,
+ 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72,
+ 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64,
+ 0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e,
+ 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76,
+ 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_eval_proto_rawDescOnce sync.Once
+ file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
+)
+
+func file_cel_expr_eval_proto_rawDescGZIP() []byte {
+ file_cel_expr_eval_proto_rawDescOnce.Do(func() {
+ file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
+ })
+ return file_cel_expr_eval_proto_rawDescData
+}
+
+var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_eval_proto_goTypes = []interface{}{
+ (*EvalState)(nil), // 0: cel.expr.EvalState
+ (*ExprValue)(nil), // 1: cel.expr.ExprValue
+ (*ErrorSet)(nil), // 2: cel.expr.ErrorSet
+ (*UnknownSet)(nil), // 3: cel.expr.UnknownSet
+ (*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result
+ (*Value)(nil), // 5: cel.expr.Value
+ (*status.Status)(nil), // 6: google.rpc.Status
+}
+var file_cel_expr_eval_proto_depIdxs = []int32{
+ 1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
+ 4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
+ 5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
+ 2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
+ 3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
+ 6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_eval_proto_init() }
+func file_cel_expr_eval_proto_init() {
+ if File_cel_expr_eval_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvalState); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExprValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ErrorSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnknownSet); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EvalState_Result); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*ExprValue_Value)(nil),
+ (*ExprValue_Error)(nil),
+ (*ExprValue_Unknown)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_eval_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_eval_proto_goTypes,
+ DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
+ MessageInfos: file_cel_expr_eval_proto_msgTypes,
+ }.Build()
+ File_cel_expr_eval_proto = out.File
+ file_cel_expr_eval_proto_rawDesc = nil
+ file_cel_expr_eval_proto_goTypes = nil
+ file_cel_expr_eval_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/explain.pb.go b/vendor/cel.dev/expr/explain.pb.go
new file mode 100644
index 000000000..79fd5443b
--- /dev/null
+++ b/vendor/cel.dev/expr/explain.pb.go
@@ -0,0 +1,236 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/explain.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Deprecated: Do not use.
+type Explain struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
+}
+
+func (x *Explain) Reset() {
+ *x = Explain{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain) ProtoMessage() {}
+
+func (x *Explain) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
+func (*Explain) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Explain) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *Explain) GetExprSteps() []*Explain_ExprStep {
+ if x != nil {
+ return x.ExprSteps
+ }
+ return nil
+}
+
+type Explain_ExprStep struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
+}
+
+func (x *Explain_ExprStep) Reset() {
+ *x = Explain_ExprStep{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Explain_ExprStep) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Explain_ExprStep) ProtoMessage() {}
+
+func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_explain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
+func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
+ return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Explain_ExprStep) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (x *Explain_ExprStep) GetValueIndex() int32 {
+ if x != nil {
+ return x.ValueIndex
+ }
+ return 0
+}
+
+var File_cel_expr_explain_proto protoreflect.FileDescriptor
+
+var file_cel_expr_explain_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
+ 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
+ 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
+ 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
+ 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
+ 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_explain_proto_rawDescOnce sync.Once
+ file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
+)
+
+func file_cel_expr_explain_proto_rawDescGZIP() []byte {
+ file_cel_expr_explain_proto_rawDescOnce.Do(func() {
+ file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
+ })
+ return file_cel_expr_explain_proto_rawDescData
+}
+
+var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_cel_expr_explain_proto_goTypes = []interface{}{
+ (*Explain)(nil), // 0: cel.expr.Explain
+ (*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
+ (*Value)(nil), // 2: cel.expr.Value
+}
+var file_cel_expr_explain_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
+ 1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_explain_proto_init() }
+func file_cel_expr_explain_proto_init() {
+ if File_cel_expr_explain_proto != nil {
+ return
+ }
+ file_cel_expr_value_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Explain_ExprStep); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_explain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_explain_proto_goTypes,
+ DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
+ MessageInfos: file_cel_expr_explain_proto_msgTypes,
+ }.Build()
+ File_cel_expr_explain_proto = out.File
+ file_cel_expr_explain_proto_rawDesc = nil
+ file_cel_expr_explain_proto_goTypes = nil
+ file_cel_expr_explain_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/regen_go_proto.sh b/vendor/cel.dev/expr/regen_go_proto.sh
new file mode 100644
index 000000000..fdcbb3ce2
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+bazel build //proto/cel/expr/conformance/...
+files=($(bazel aquery 'kind(proto, //proto/cel/expr/conformance/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
+for src in ${files[@]};
+do
+ dst=$(echo $src | sed 's/\(.*\/cel.dev\/expr\/\(.*\)\)/\2/')
+ echo "copying $dst"
+ $(cp $src $dst)
+done
diff --git a/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
new file mode 100644
index 000000000..9a13479e4
--- /dev/null
+++ b/vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+bazel build //proto/cel/expr:all
+
+rm -vf ./*.pb.go
+
+files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
+for src in "${files[@]}";
+do
+ cp -v "${src}" ./
+done
diff --git a/vendor/cel.dev/expr/syntax.pb.go b/vendor/cel.dev/expr/syntax.pb.go
new file mode 100644
index 000000000..48a952872
--- /dev/null
+++ b/vendor/cel.dev/expr/syntax.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/syntax.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SourceInfo_Extension_Component int32
+
+const (
+ SourceInfo_Extension_COMPONENT_UNSPECIFIED SourceInfo_Extension_Component = 0
+ SourceInfo_Extension_COMPONENT_PARSER SourceInfo_Extension_Component = 1
+ SourceInfo_Extension_COMPONENT_TYPE_CHECKER SourceInfo_Extension_Component = 2
+ SourceInfo_Extension_COMPONENT_RUNTIME SourceInfo_Extension_Component = 3
+)
+
+// Enum value maps for SourceInfo_Extension_Component.
+var (
+ SourceInfo_Extension_Component_name = map[int32]string{
+ 0: "COMPONENT_UNSPECIFIED",
+ 1: "COMPONENT_PARSER",
+ 2: "COMPONENT_TYPE_CHECKER",
+ 3: "COMPONENT_RUNTIME",
+ }
+ SourceInfo_Extension_Component_value = map[string]int32{
+ "COMPONENT_UNSPECIFIED": 0,
+ "COMPONENT_PARSER": 1,
+ "COMPONENT_TYPE_CHECKER": 2,
+ "COMPONENT_RUNTIME": 3,
+ }
+)
+
+func (x SourceInfo_Extension_Component) Enum() *SourceInfo_Extension_Component {
+ p := new(SourceInfo_Extension_Component)
+ *p = x
+ return p
+}
+
+func (x SourceInfo_Extension_Component) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SourceInfo_Extension_Component) Descriptor() protoreflect.EnumDescriptor {
+ return file_cel_expr_syntax_proto_enumTypes[0].Descriptor()
+}
+
+func (SourceInfo_Extension_Component) Type() protoreflect.EnumType {
+ return &file_cel_expr_syntax_proto_enumTypes[0]
+}
+
+func (x SourceInfo_Extension_Component) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Component.Descriptor instead.
+func (SourceInfo_Extension_Component) EnumDescriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+type ParsedExpr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Expr *Expr `protobuf:"bytes,2,opt,name=expr,proto3" json:"expr,omitempty"`
+ SourceInfo *SourceInfo `protobuf:"bytes,3,opt,name=source_info,json=sourceInfo,proto3" json:"source_info,omitempty"`
+}
+
+func (x *ParsedExpr) Reset() {
+ *x = ParsedExpr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ParsedExpr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ParsedExpr) ProtoMessage() {}
+
+func (x *ParsedExpr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ParsedExpr.ProtoReflect.Descriptor instead.
+func (*ParsedExpr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ParsedExpr) GetExpr() *Expr {
+ if x != nil {
+ return x.Expr
+ }
+ return nil
+}
+
+func (x *ParsedExpr) GetSourceInfo() *SourceInfo {
+ if x != nil {
+ return x.SourceInfo
+ }
+ return nil
+}
+
+type Expr struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to ExprKind:
+ //
+ // *Expr_ConstExpr
+ // *Expr_IdentExpr
+ // *Expr_SelectExpr
+ // *Expr_CallExpr
+ // *Expr_ListExpr
+ // *Expr_StructExpr
+ // *Expr_ComprehensionExpr
+ ExprKind isExpr_ExprKind `protobuf_oneof:"expr_kind"`
+}
+
+func (x *Expr) Reset() {
+ *x = Expr{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr) ProtoMessage() {}
+
+func (x *Expr) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr.ProtoReflect.Descriptor instead.
+func (*Expr) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Expr) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr) GetExprKind() isExpr_ExprKind {
+ if m != nil {
+ return m.ExprKind
+ }
+ return nil
+}
+
+func (x *Expr) GetConstExpr() *Constant {
+ if x, ok := x.GetExprKind().(*Expr_ConstExpr); ok {
+ return x.ConstExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetIdentExpr() *Expr_Ident {
+ if x, ok := x.GetExprKind().(*Expr_IdentExpr); ok {
+ return x.IdentExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetSelectExpr() *Expr_Select {
+ if x, ok := x.GetExprKind().(*Expr_SelectExpr); ok {
+ return x.SelectExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetCallExpr() *Expr_Call {
+ if x, ok := x.GetExprKind().(*Expr_CallExpr); ok {
+ return x.CallExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetListExpr() *Expr_CreateList {
+ if x, ok := x.GetExprKind().(*Expr_ListExpr); ok {
+ return x.ListExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetStructExpr() *Expr_CreateStruct {
+ if x, ok := x.GetExprKind().(*Expr_StructExpr); ok {
+ return x.StructExpr
+ }
+ return nil
+}
+
+func (x *Expr) GetComprehensionExpr() *Expr_Comprehension {
+ if x, ok := x.GetExprKind().(*Expr_ComprehensionExpr); ok {
+ return x.ComprehensionExpr
+ }
+ return nil
+}
+
+type isExpr_ExprKind interface {
+ isExpr_ExprKind()
+}
+
+type Expr_ConstExpr struct {
+ ConstExpr *Constant `protobuf:"bytes,3,opt,name=const_expr,json=constExpr,proto3,oneof"`
+}
+
+type Expr_IdentExpr struct {
+ IdentExpr *Expr_Ident `protobuf:"bytes,4,opt,name=ident_expr,json=identExpr,proto3,oneof"`
+}
+
+type Expr_SelectExpr struct {
+ SelectExpr *Expr_Select `protobuf:"bytes,5,opt,name=select_expr,json=selectExpr,proto3,oneof"`
+}
+
+type Expr_CallExpr struct {
+ CallExpr *Expr_Call `protobuf:"bytes,6,opt,name=call_expr,json=callExpr,proto3,oneof"`
+}
+
+type Expr_ListExpr struct {
+ ListExpr *Expr_CreateList `protobuf:"bytes,7,opt,name=list_expr,json=listExpr,proto3,oneof"`
+}
+
+type Expr_StructExpr struct {
+ StructExpr *Expr_CreateStruct `protobuf:"bytes,8,opt,name=struct_expr,json=structExpr,proto3,oneof"`
+}
+
+type Expr_ComprehensionExpr struct {
+ ComprehensionExpr *Expr_Comprehension `protobuf:"bytes,9,opt,name=comprehension_expr,json=comprehensionExpr,proto3,oneof"`
+}
+
+func (*Expr_ConstExpr) isExpr_ExprKind() {}
+
+func (*Expr_IdentExpr) isExpr_ExprKind() {}
+
+func (*Expr_SelectExpr) isExpr_ExprKind() {}
+
+func (*Expr_CallExpr) isExpr_ExprKind() {}
+
+func (*Expr_ListExpr) isExpr_ExprKind() {}
+
+func (*Expr_StructExpr) isExpr_ExprKind() {}
+
+func (*Expr_ComprehensionExpr) isExpr_ExprKind() {}
+
+type Constant struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ConstantKind:
+ //
+ // *Constant_NullValue
+ // *Constant_BoolValue
+ // *Constant_Int64Value
+ // *Constant_Uint64Value
+ // *Constant_DoubleValue
+ // *Constant_StringValue
+ // *Constant_BytesValue
+ // *Constant_DurationValue
+ // *Constant_TimestampValue
+ ConstantKind isConstant_ConstantKind `protobuf_oneof:"constant_kind"`
+}
+
+func (x *Constant) Reset() {
+ *x = Constant{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Constant) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Constant) ProtoMessage() {}
+
+func (x *Constant) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Constant.ProtoReflect.Descriptor instead.
+func (*Constant) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *Constant) GetConstantKind() isConstant_ConstantKind {
+ if m != nil {
+ return m.ConstantKind
+ }
+ return nil
+}
+
+func (x *Constant) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetConstantKind().(*Constant_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Constant) GetBoolValue() bool {
+ if x, ok := x.GetConstantKind().(*Constant_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Constant) GetInt64Value() int64 {
+ if x, ok := x.GetConstantKind().(*Constant_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetUint64Value() uint64 {
+ if x, ok := x.GetConstantKind().(*Constant_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Constant) GetDoubleValue() float64 {
+ if x, ok := x.GetConstantKind().(*Constant_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Constant) GetStringValue() string {
+ if x, ok := x.GetConstantKind().(*Constant_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Constant) GetBytesValue() []byte {
+ if x, ok := x.GetConstantKind().(*Constant_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetDurationValue() *durationpb.Duration {
+ if x, ok := x.GetConstantKind().(*Constant_DurationValue); ok {
+ return x.DurationValue
+ }
+ return nil
+}
+
+// Deprecated: Do not use.
+func (x *Constant) GetTimestampValue() *timestamppb.Timestamp {
+ if x, ok := x.GetConstantKind().(*Constant_TimestampValue); ok {
+ return x.TimestampValue
+ }
+ return nil
+}
+
+type isConstant_ConstantKind interface {
+ isConstant_ConstantKind()
+}
+
+type Constant_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Constant_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Constant_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Constant_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Constant_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Constant_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Constant_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Constant_DurationValue struct {
+ // Deprecated: Do not use.
+ DurationValue *durationpb.Duration `protobuf:"bytes,8,opt,name=duration_value,json=durationValue,proto3,oneof"`
+}
+
+type Constant_TimestampValue struct {
+ // Deprecated: Do not use.
+ TimestampValue *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=timestamp_value,json=timestampValue,proto3,oneof"`
+}
+
+func (*Constant_NullValue) isConstant_ConstantKind() {}
+
+func (*Constant_BoolValue) isConstant_ConstantKind() {}
+
+func (*Constant_Int64Value) isConstant_ConstantKind() {}
+
+func (*Constant_Uint64Value) isConstant_ConstantKind() {}
+
+func (*Constant_DoubleValue) isConstant_ConstantKind() {}
+
+func (*Constant_StringValue) isConstant_ConstantKind() {}
+
+func (*Constant_BytesValue) isConstant_ConstantKind() {}
+
+func (*Constant_DurationValue) isConstant_ConstantKind() {}
+
+func (*Constant_TimestampValue) isConstant_ConstantKind() {}
+
+type SourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SyntaxVersion string `protobuf:"bytes,1,opt,name=syntax_version,json=syntaxVersion,proto3" json:"syntax_version,omitempty"`
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ LineOffsets []int32 `protobuf:"varint,3,rep,packed,name=line_offsets,json=lineOffsets,proto3" json:"line_offsets,omitempty"`
+ Positions map[int64]int32 `protobuf:"bytes,4,rep,name=positions,proto3" json:"positions,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
+ MacroCalls map[int64]*Expr `protobuf:"bytes,5,rep,name=macro_calls,json=macroCalls,proto3" json:"macro_calls,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Extensions []*SourceInfo_Extension `protobuf:"bytes,6,rep,name=extensions,proto3" json:"extensions,omitempty"`
+}
+
+// NOTE(review): machine-generated protobuf plumbing (protoc-gen-go) for the
+// SourceInfo message. Do not hand-edit; regenerate from cel/expr/syntax.proto.
+// Reset zeroes the message; under the protoimpl unsafe fast path it also
+// stores the message-type info so later reflection calls can find it.
+func (x *SourceInfo) Reset() {
+ *x = SourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+// String renders the message in the standard protobuf text format.
+func (x *SourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view of the message, lazily caching
+// the message-type info in the message state on first use.
+func (x *SourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo.ProtoReflect.Descriptor instead.
+func (*SourceInfo) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3}
+}
+
+// The Get* accessors below are nil-safe: they return the field value, or
+// the zero value when the receiver is nil.
+func (x *SourceInfo) GetSyntaxVersion() string {
+ if x != nil {
+ return x.SyntaxVersion
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *SourceInfo) GetLineOffsets() []int32 {
+ if x != nil {
+ return x.LineOffsets
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetPositions() map[int64]int32 {
+ if x != nil {
+ return x.Positions
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetMacroCalls() map[int64]*Expr {
+ if x != nil {
+ return x.MacroCalls
+ }
+ return nil
+}
+
+func (x *SourceInfo) GetExtensions() []*SourceInfo_Extension {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+// Expr_Ident: generated bindings (protoc-gen-go) for the Expr.Ident nested
+// message — an expression holding a single name. Do not hand-edit.
+type Expr_Ident struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *Expr_Ident) Reset() {
+ *x = Expr_Ident{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Ident) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Ident) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *Expr_Ident) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Ident.ProtoReflect.Descriptor instead.
+func (*Expr_Ident) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// GetName is a nil-safe accessor for the name field.
+func (x *Expr_Ident) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// Expr_Select: generated bindings (protoc-gen-go) for the Expr.Select nested
+// message — a field-selection expression (operand, field, and a test_only
+// flag). Do not hand-edit; regenerate from cel/expr/syntax.proto.
+type Expr_Select struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Operand *Expr `protobuf:"bytes,1,opt,name=operand,proto3" json:"operand,omitempty"`
+ Field string `protobuf:"bytes,2,opt,name=field,proto3" json:"field,omitempty"`
+ TestOnly bool `protobuf:"varint,3,opt,name=test_only,json=testOnly,proto3" json:"test_only,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *Expr_Select) Reset() {
+ *x = Expr_Select{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Select) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Select) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *Expr_Select) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Select.ProtoReflect.Descriptor instead.
+func (*Expr_Select) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 1}
+}
+
+// Nil-safe accessors: each returns the field, or the zero value on nil.
+func (x *Expr_Select) GetOperand() *Expr {
+ if x != nil {
+ return x.Operand
+ }
+ return nil
+}
+
+func (x *Expr_Select) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *Expr_Select) GetTestOnly() bool {
+ if x != nil {
+ return x.TestOnly
+ }
+ return false
+}
+
+// Expr_Call: generated bindings (protoc-gen-go) for the Expr.Call nested
+// message — a function/method call expression with optional target and an
+// argument list. Do not hand-edit; regenerate from cel/expr/syntax.proto.
+type Expr_Call struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Target *Expr `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
+ Function string `protobuf:"bytes,2,opt,name=function,proto3" json:"function,omitempty"`
+ Args []*Expr `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *Expr_Call) Reset() {
+ *x = Expr_Call{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Call) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Call) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *Expr_Call) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Call.ProtoReflect.Descriptor instead.
+func (*Expr_Call) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 2}
+}
+
+// Nil-safe accessors: each returns the field, or the zero value on nil.
+func (x *Expr_Call) GetTarget() *Expr {
+ if x != nil {
+ return x.Target
+ }
+ return nil
+}
+
+func (x *Expr_Call) GetFunction() string {
+ if x != nil {
+ return x.Function
+ }
+ return ""
+}
+
+func (x *Expr_Call) GetArgs() []*Expr {
+ if x != nil {
+ return x.Args
+ }
+ return nil
+}
+
+// Expr_CreateList: generated bindings (protoc-gen-go) for the
+// Expr.CreateList nested message — a list-construction expression with
+// element expressions and the indices marked optional. Do not hand-edit.
+type Expr_CreateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Elements []*Expr `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+ OptionalIndices []int32 `protobuf:"varint,2,rep,packed,name=optional_indices,json=optionalIndices,proto3" json:"optional_indices,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *Expr_CreateList) Reset() {
+ *x = Expr_CreateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateList) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *Expr_CreateList) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateList.ProtoReflect.Descriptor instead.
+func (*Expr_CreateList) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 3}
+}
+
+// Nil-safe accessors: each returns the field, or nil on a nil receiver.
+func (x *Expr_CreateList) GetElements() []*Expr {
+ if x != nil {
+ return x.Elements
+ }
+ return nil
+}
+
+func (x *Expr_CreateList) GetOptionalIndices() []int32 {
+ if x != nil {
+ return x.OptionalIndices
+ }
+ return nil
+}
+
+// Expr_CreateStruct: generated bindings (protoc-gen-go) for the
+// Expr.CreateStruct nested message — a struct/map construction expression
+// with a message name and a list of entries. Do not hand-edit.
+type Expr_CreateStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MessageName string `protobuf:"bytes,1,opt,name=message_name,json=messageName,proto3" json:"message_name,omitempty"`
+ Entries []*Expr_CreateStruct_Entry `protobuf:"bytes,2,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *Expr_CreateStruct) Reset() {
+ *x = Expr_CreateStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *Expr_CreateStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4}
+}
+
+// Nil-safe accessors: each returns the field, or the zero value on nil.
+func (x *Expr_CreateStruct) GetMessageName() string {
+ if x != nil {
+ return x.MessageName
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct) GetEntries() []*Expr_CreateStruct_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+// Expr_Comprehension: generated bindings (protoc-gen-go) for the
+// Expr.Comprehension nested message — a fold-style comprehension with an
+// iteration variable/range, accumulator variable/init, loop condition and
+// step, and a result expression. Do not hand-edit.
+type Expr_Comprehension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ IterVar string `protobuf:"bytes,1,opt,name=iter_var,json=iterVar,proto3" json:"iter_var,omitempty"`
+ IterRange *Expr `protobuf:"bytes,2,opt,name=iter_range,json=iterRange,proto3" json:"iter_range,omitempty"`
+ AccuVar string `protobuf:"bytes,3,opt,name=accu_var,json=accuVar,proto3" json:"accu_var,omitempty"`
+ AccuInit *Expr `protobuf:"bytes,4,opt,name=accu_init,json=accuInit,proto3" json:"accu_init,omitempty"`
+ LoopCondition *Expr `protobuf:"bytes,5,opt,name=loop_condition,json=loopCondition,proto3" json:"loop_condition,omitempty"`
+ LoopStep *Expr `protobuf:"bytes,6,opt,name=loop_step,json=loopStep,proto3" json:"loop_step,omitempty"`
+ Result *Expr `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *Expr_Comprehension) Reset() {
+ *x = Expr_Comprehension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_Comprehension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_Comprehension) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *Expr_Comprehension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_Comprehension.ProtoReflect.Descriptor instead.
+func (*Expr_Comprehension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 5}
+}
+
+// Nil-safe accessors: each returns the field, or the zero value on nil.
+func (x *Expr_Comprehension) GetIterVar() string {
+ if x != nil {
+ return x.IterVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetIterRange() *Expr {
+ if x != nil {
+ return x.IterRange
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetAccuVar() string {
+ if x != nil {
+ return x.AccuVar
+ }
+ return ""
+}
+
+func (x *Expr_Comprehension) GetAccuInit() *Expr {
+ if x != nil {
+ return x.AccuInit
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopCondition() *Expr {
+ if x != nil {
+ return x.LoopCondition
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetLoopStep() *Expr {
+ if x != nil {
+ return x.LoopStep
+ }
+ return nil
+}
+
+func (x *Expr_Comprehension) GetResult() *Expr {
+ if x != nil {
+ return x.Result
+ }
+ return nil
+}
+
+// Expr_CreateStruct_Entry: generated bindings (protoc-gen-go) for the
+// Expr.CreateStruct.Entry nested message. The key is a oneof (key_kind):
+// either a field name (FieldKey) or a map-key expression (MapKey); the
+// wrapper types for the oneof cases are declared below. Do not hand-edit.
+type Expr_CreateStruct_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
+ // Types that are assignable to KeyKind:
+ //
+ // *Expr_CreateStruct_Entry_FieldKey
+ // *Expr_CreateStruct_Entry_MapKey
+ KeyKind isExpr_CreateStruct_Entry_KeyKind `protobuf_oneof:"key_kind"`
+ Value *Expr `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
+ OptionalEntry bool `protobuf:"varint,5,opt,name=optional_entry,json=optionalEntry,proto3" json:"optional_entry,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *Expr_CreateStruct_Entry) Reset() {
+ *x = Expr_CreateStruct_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Expr_CreateStruct_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Expr_CreateStruct_Entry) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *Expr_CreateStruct_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Expr_CreateStruct_Entry.ProtoReflect.Descriptor instead.
+func (*Expr_CreateStruct_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{1, 4, 0}
+}
+
+// Nil-safe accessors; the oneof getters additionally type-assert the
+// currently-set key_kind wrapper and return the zero value on mismatch.
+func (x *Expr_CreateStruct_Entry) GetId() int64 {
+ if x != nil {
+ return x.Id
+ }
+ return 0
+}
+
+func (m *Expr_CreateStruct_Entry) GetKeyKind() isExpr_CreateStruct_Entry_KeyKind {
+ if m != nil {
+ return m.KeyKind
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetFieldKey() string {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_FieldKey); ok {
+ return x.FieldKey
+ }
+ return ""
+}
+
+func (x *Expr_CreateStruct_Entry) GetMapKey() *Expr {
+ if x, ok := x.GetKeyKind().(*Expr_CreateStruct_Entry_MapKey); ok {
+ return x.MapKey
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetValue() *Expr {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+func (x *Expr_CreateStruct_Entry) GetOptionalEntry() bool {
+ if x != nil {
+ return x.OptionalEntry
+ }
+ return false
+}
+
+// isExpr_CreateStruct_Entry_KeyKind is the sealed interface implemented by
+// the two key_kind oneof wrapper types below.
+type isExpr_CreateStruct_Entry_KeyKind interface {
+ isExpr_CreateStruct_Entry_KeyKind()
+}
+
+type Expr_CreateStruct_Entry_FieldKey struct {
+ FieldKey string `protobuf:"bytes,2,opt,name=field_key,json=fieldKey,proto3,oneof"`
+}
+
+type Expr_CreateStruct_Entry_MapKey struct {
+ MapKey *Expr `protobuf:"bytes,3,opt,name=map_key,json=mapKey,proto3,oneof"`
+}
+
+func (*Expr_CreateStruct_Entry_FieldKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+func (*Expr_CreateStruct_Entry_MapKey) isExpr_CreateStruct_Entry_KeyKind() {}
+
+// SourceInfo_Extension: generated bindings (protoc-gen-go) for the
+// SourceInfo.Extension nested message — an extension id, the components it
+// affects, and its version. Do not hand-edit.
+type SourceInfo_Extension struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ AffectedComponents []SourceInfo_Extension_Component `protobuf:"varint,2,rep,packed,name=affected_components,json=affectedComponents,proto3,enum=cel.expr.SourceInfo_Extension_Component" json:"affected_components,omitempty"`
+ Version *SourceInfo_Extension_Version `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *SourceInfo_Extension) Reset() {
+ *x = SourceInfo_Extension{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *SourceInfo_Extension) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2}
+}
+
+// Nil-safe accessors: each returns the field, or the zero value on nil.
+func (x *SourceInfo_Extension) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *SourceInfo_Extension) GetAffectedComponents() []SourceInfo_Extension_Component {
+ if x != nil {
+ return x.AffectedComponents
+ }
+ return nil
+}
+
+func (x *SourceInfo_Extension) GetVersion() *SourceInfo_Extension_Version {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+// SourceInfo_Extension_Version: generated bindings (protoc-gen-go) for the
+// SourceInfo.Extension.Version nested message — a major/minor version pair.
+// Do not hand-edit; regenerate from cel/expr/syntax.proto.
+type SourceInfo_Extension_Version struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Major int64 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+ Minor int64 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+}
+
+// Reset zeroes the message and (fast path) stores the message-type info.
+func (x *SourceInfo_Extension_Version) Reset() {
+ *x = SourceInfo_Extension_Version{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SourceInfo_Extension_Version) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SourceInfo_Extension_Version) ProtoMessage() {}
+
+// ProtoReflect returns the reflective view, lazily caching type info.
+func (x *SourceInfo_Extension_Version) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_syntax_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SourceInfo_Extension_Version.ProtoReflect.Descriptor instead.
+func (*SourceInfo_Extension_Version) Descriptor() ([]byte, []int) {
+ return file_cel_expr_syntax_proto_rawDescGZIP(), []int{3, 2, 0}
+}
+
+// Nil-safe accessors: each returns the field, or 0 on a nil receiver.
+func (x *SourceInfo_Extension_Version) GetMajor() int64 {
+ if x != nil {
+ return x.Major
+ }
+ return 0
+}
+
+func (x *SourceInfo_Extension_Version) GetMinor() int64 {
+ if x != nil {
+ return x.Minor
+ }
+ return 0
+}
+
+// File_cel_expr_syntax_proto is the protoreflect file descriptor for
+// cel/expr/syntax.proto; it is assigned elsewhere in this generated file.
+var File_cel_expr_syntax_proto protoreflect.FileDescriptor
+
+var file_cel_expr_syntax_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61,
+ 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x67, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x22,
+ 0x0a, 0x04, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63,
+ 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x04, 0x65, 0x78,
+ 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
+ 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xfd, 0x0a, 0x0a, 0x04, 0x45, 0x78,
+ 0x70, 0x72, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02,
+ 0x69, 0x64, 0x12, 0x33, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f,
+ 0x6e, 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x35, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x49, 0x64, 0x65, 0x6e,
+ 0x74, 0x48, 0x00, 0x52, 0x09, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38,
+ 0x0a, 0x0b, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x32, 0x0a, 0x09, 0x63, 0x61, 0x6c, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x61, 0x6c, 0x6c,
+ 0x48, 0x00, 0x52, 0x08, 0x63, 0x61, 0x6c, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x12, 0x38, 0x0a, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3e, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x63, 0x65,
+ 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x73, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4d, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65,
+ 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x09, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x11, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x45, 0x78, 0x70, 0x72, 0x1a, 0x1b, 0x0a, 0x05, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x1a, 0x65, 0x0a, 0x06, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x12, 0x28, 0x0a, 0x07,
+ 0x6f, 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x07, 0x6f,
+ 0x70, 0x65, 0x72, 0x61, 0x6e, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x09,
+ 0x74, 0x65, 0x73, 0x74, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x1a, 0x6e, 0x0a, 0x04, 0x43, 0x61, 0x6c,
+ 0x6c, 0x12, 0x26, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
+ 0x72, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6e,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x03, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x1a, 0x63, 0x0a, 0x0a, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x69, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0f, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x65, 0x73, 0x1a, 0xab,
+ 0x02, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45,
+ 0x78, 0x70, 0x72, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74,
+ 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a,
+ 0xba, 0x01, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x09, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08,
+ 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x5f,
+ 0x6b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
+ 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6d, 0x61, 0x70,
+ 0x4b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78,
+ 0x70, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0d, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x42, 0x0a, 0x0a, 0x08, 0x6b, 0x65, 0x79, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x1a, 0xad, 0x02, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x68, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x19,
+ 0x0a, 0x08, 0x69, 0x74, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x69, 0x74, 0x65, 0x72, 0x56, 0x61, 0x72, 0x12, 0x2d, 0x0a, 0x0a, 0x69, 0x74, 0x65,
+ 0x72, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x09, 0x69,
+ 0x74, 0x65, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x63, 0x63, 0x75,
+ 0x5f, 0x76, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x63, 0x63, 0x75,
+ 0x56, 0x61, 0x72, 0x12, 0x2b, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x75, 0x5f, 0x69, 0x6e, 0x69, 0x74,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70,
+ 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x61, 0x63, 0x63, 0x75, 0x49, 0x6e, 0x69, 0x74,
+ 0x12, 0x35, 0x0a, 0x0e, 0x6c, 0x6f, 0x6f, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x6c, 0x6f, 0x6f, 0x70, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x09, 0x6c, 0x6f, 0x6f, 0x70, 0x5f,
+ 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x08, 0x6c, 0x6f, 0x6f, 0x70,
+ 0x53, 0x74, 0x65, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x45, 0x78, 0x70, 0x72, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x0b, 0x0a, 0x09,
+ 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xc1, 0x03, 0x0a, 0x08, 0x43, 0x6f,
+ 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36,
+ 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52,
+ 0x0b, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62,
+ 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x0e, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x02, 0x18, 0x01,
+ 0x48, 0x00, 0x52, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x49, 0x0a, 0x0f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0e, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0xac, 0x06,
+ 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x25, 0x0a, 0x0e,
+ 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x56, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0b, 0x6c, 0x69, 0x6e, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x73, 0x12, 0x41, 0x0a, 0x09, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x0b, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x5f, 0x63,
+ 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x4d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x6d, 0x61, 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x3e, 0x0a, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x0e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4d, 0x0a, 0x0f, 0x4d, 0x61,
+ 0x63, 0x72, 0x6f, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xe0, 0x02, 0x0a, 0x09, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x59, 0x0a, 0x13, 0x61, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x52, 0x12,
+ 0x61, 0x66, 0x66, 0x65, 0x63, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e,
+ 0x74, 0x73, 0x12, 0x40, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x35, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05,
+ 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x22, 0x6f, 0x0a, 0x09, 0x43,
+ 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x15, 0x43, 0x4f, 0x4d, 0x50,
+ 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54,
+ 0x5f, 0x50, 0x41, 0x52, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d,
+ 0x50, 0x4f, 0x4e, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43,
+ 0x4b, 0x45, 0x52, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x4f, 0x4d, 0x50, 0x4f, 0x4e, 0x45,
+ 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x42, 0x2e, 0x0a, 0x0c,
+ 0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0b, 0x53, 0x79,
+ 0x6e, 0x74, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c,
+ 0x2e, 0x64, 0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_cel_expr_syntax_proto_rawDescOnce sync.Once
+ file_cel_expr_syntax_proto_rawDescData = file_cel_expr_syntax_proto_rawDesc
+)
+
+func file_cel_expr_syntax_proto_rawDescGZIP() []byte {
+ file_cel_expr_syntax_proto_rawDescOnce.Do(func() {
+ file_cel_expr_syntax_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_syntax_proto_rawDescData)
+ })
+ return file_cel_expr_syntax_proto_rawDescData
+}
+
+var file_cel_expr_syntax_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_cel_expr_syntax_proto_msgTypes = make([]protoimpl.MessageInfo, 15)
+var file_cel_expr_syntax_proto_goTypes = []interface{}{
+ (SourceInfo_Extension_Component)(0), // 0: cel.expr.SourceInfo.Extension.Component
+ (*ParsedExpr)(nil), // 1: cel.expr.ParsedExpr
+ (*Expr)(nil), // 2: cel.expr.Expr
+ (*Constant)(nil), // 3: cel.expr.Constant
+ (*SourceInfo)(nil), // 4: cel.expr.SourceInfo
+ (*Expr_Ident)(nil), // 5: cel.expr.Expr.Ident
+ (*Expr_Select)(nil), // 6: cel.expr.Expr.Select
+ (*Expr_Call)(nil), // 7: cel.expr.Expr.Call
+ (*Expr_CreateList)(nil), // 8: cel.expr.Expr.CreateList
+ (*Expr_CreateStruct)(nil), // 9: cel.expr.Expr.CreateStruct
+ (*Expr_Comprehension)(nil), // 10: cel.expr.Expr.Comprehension
+ (*Expr_CreateStruct_Entry)(nil), // 11: cel.expr.Expr.CreateStruct.Entry
+ nil, // 12: cel.expr.SourceInfo.PositionsEntry
+ nil, // 13: cel.expr.SourceInfo.MacroCallsEntry
+ (*SourceInfo_Extension)(nil), // 14: cel.expr.SourceInfo.Extension
+ (*SourceInfo_Extension_Version)(nil), // 15: cel.expr.SourceInfo.Extension.Version
+ (structpb.NullValue)(0), // 16: google.protobuf.NullValue
+ (*durationpb.Duration)(nil), // 17: google.protobuf.Duration
+ (*timestamppb.Timestamp)(nil), // 18: google.protobuf.Timestamp
+}
+var file_cel_expr_syntax_proto_depIdxs = []int32{
+ 2, // 0: cel.expr.ParsedExpr.expr:type_name -> cel.expr.Expr
+ 4, // 1: cel.expr.ParsedExpr.source_info:type_name -> cel.expr.SourceInfo
+ 3, // 2: cel.expr.Expr.const_expr:type_name -> cel.expr.Constant
+ 5, // 3: cel.expr.Expr.ident_expr:type_name -> cel.expr.Expr.Ident
+ 6, // 4: cel.expr.Expr.select_expr:type_name -> cel.expr.Expr.Select
+ 7, // 5: cel.expr.Expr.call_expr:type_name -> cel.expr.Expr.Call
+ 8, // 6: cel.expr.Expr.list_expr:type_name -> cel.expr.Expr.CreateList
+ 9, // 7: cel.expr.Expr.struct_expr:type_name -> cel.expr.Expr.CreateStruct
+ 10, // 8: cel.expr.Expr.comprehension_expr:type_name -> cel.expr.Expr.Comprehension
+ 16, // 9: cel.expr.Constant.null_value:type_name -> google.protobuf.NullValue
+ 17, // 10: cel.expr.Constant.duration_value:type_name -> google.protobuf.Duration
+ 18, // 11: cel.expr.Constant.timestamp_value:type_name -> google.protobuf.Timestamp
+ 12, // 12: cel.expr.SourceInfo.positions:type_name -> cel.expr.SourceInfo.PositionsEntry
+ 13, // 13: cel.expr.SourceInfo.macro_calls:type_name -> cel.expr.SourceInfo.MacroCallsEntry
+ 14, // 14: cel.expr.SourceInfo.extensions:type_name -> cel.expr.SourceInfo.Extension
+ 2, // 15: cel.expr.Expr.Select.operand:type_name -> cel.expr.Expr
+ 2, // 16: cel.expr.Expr.Call.target:type_name -> cel.expr.Expr
+ 2, // 17: cel.expr.Expr.Call.args:type_name -> cel.expr.Expr
+ 2, // 18: cel.expr.Expr.CreateList.elements:type_name -> cel.expr.Expr
+ 11, // 19: cel.expr.Expr.CreateStruct.entries:type_name -> cel.expr.Expr.CreateStruct.Entry
+ 2, // 20: cel.expr.Expr.Comprehension.iter_range:type_name -> cel.expr.Expr
+ 2, // 21: cel.expr.Expr.Comprehension.accu_init:type_name -> cel.expr.Expr
+ 2, // 22: cel.expr.Expr.Comprehension.loop_condition:type_name -> cel.expr.Expr
+ 2, // 23: cel.expr.Expr.Comprehension.loop_step:type_name -> cel.expr.Expr
+ 2, // 24: cel.expr.Expr.Comprehension.result:type_name -> cel.expr.Expr
+ 2, // 25: cel.expr.Expr.CreateStruct.Entry.map_key:type_name -> cel.expr.Expr
+ 2, // 26: cel.expr.Expr.CreateStruct.Entry.value:type_name -> cel.expr.Expr
+ 2, // 27: cel.expr.SourceInfo.MacroCallsEntry.value:type_name -> cel.expr.Expr
+ 0, // 28: cel.expr.SourceInfo.Extension.affected_components:type_name -> cel.expr.SourceInfo.Extension.Component
+ 15, // 29: cel.expr.SourceInfo.Extension.version:type_name -> cel.expr.SourceInfo.Extension.Version
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_syntax_proto_init() }
+func file_cel_expr_syntax_proto_init() {
+ if File_cel_expr_syntax_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_syntax_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ParsedExpr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Constant); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Ident); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Select); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Call); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_Comprehension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Expr_CreateStruct_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SourceInfo_Extension_Version); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_syntax_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Expr_ConstExpr)(nil),
+ (*Expr_IdentExpr)(nil),
+ (*Expr_SelectExpr)(nil),
+ (*Expr_CallExpr)(nil),
+ (*Expr_ListExpr)(nil),
+ (*Expr_StructExpr)(nil),
+ (*Expr_ComprehensionExpr)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[2].OneofWrappers = []interface{}{
+ (*Constant_NullValue)(nil),
+ (*Constant_BoolValue)(nil),
+ (*Constant_Int64Value)(nil),
+ (*Constant_Uint64Value)(nil),
+ (*Constant_DoubleValue)(nil),
+ (*Constant_StringValue)(nil),
+ (*Constant_BytesValue)(nil),
+ (*Constant_DurationValue)(nil),
+ (*Constant_TimestampValue)(nil),
+ }
+ file_cel_expr_syntax_proto_msgTypes[10].OneofWrappers = []interface{}{
+ (*Expr_CreateStruct_Entry_FieldKey)(nil),
+ (*Expr_CreateStruct_Entry_MapKey)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_syntax_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 15,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_syntax_proto_goTypes,
+ DependencyIndexes: file_cel_expr_syntax_proto_depIdxs,
+ EnumInfos: file_cel_expr_syntax_proto_enumTypes,
+ MessageInfos: file_cel_expr_syntax_proto_msgTypes,
+ }.Build()
+ File_cel_expr_syntax_proto = out.File
+ file_cel_expr_syntax_proto_rawDesc = nil
+ file_cel_expr_syntax_proto_goTypes = nil
+ file_cel_expr_syntax_proto_depIdxs = nil
+}
diff --git a/vendor/cel.dev/expr/value.pb.go b/vendor/cel.dev/expr/value.pb.go
new file mode 100644
index 000000000..e5e29228c
--- /dev/null
+++ b/vendor/cel.dev/expr/value.pb.go
@@ -0,0 +1,653 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.28.1
+// protoc v3.21.5
+// source: cel/expr/value.proto
+
+package expr
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Value struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Kind:
+ //
+ // *Value_NullValue
+ // *Value_BoolValue
+ // *Value_Int64Value
+ // *Value_Uint64Value
+ // *Value_DoubleValue
+ // *Value_StringValue
+ // *Value_BytesValue
+ // *Value_EnumValue
+ // *Value_ObjectValue
+ // *Value_MapValue
+ // *Value_ListValue
+ // *Value_TypeValue
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+}
+
+func (x *Value) Reset() {
+ *x = Value{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Value) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Value) ProtoMessage() {}
+
+func (x *Value) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Value.ProtoReflect.Descriptor instead.
+func (*Value) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Value) GetKind() isValue_Kind {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (x *Value) GetNullValue() structpb.NullValue {
+ if x, ok := x.GetKind().(*Value_NullValue); ok {
+ return x.NullValue
+ }
+ return structpb.NullValue(0)
+}
+
+func (x *Value) GetBoolValue() bool {
+ if x, ok := x.GetKind().(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *Value) GetInt64Value() int64 {
+ if x, ok := x.GetKind().(*Value_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *Value) GetUint64Value() uint64 {
+ if x, ok := x.GetKind().(*Value_Uint64Value); ok {
+ return x.Uint64Value
+ }
+ return 0
+}
+
+func (x *Value) GetDoubleValue() float64 {
+ if x, ok := x.GetKind().(*Value_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *Value) GetStringValue() string {
+ if x, ok := x.GetKind().(*Value_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *Value) GetBytesValue() []byte {
+ if x, ok := x.GetKind().(*Value_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+func (x *Value) GetEnumValue() *EnumValue {
+ if x, ok := x.GetKind().(*Value_EnumValue); ok {
+ return x.EnumValue
+ }
+ return nil
+}
+
+func (x *Value) GetObjectValue() *anypb.Any {
+ if x, ok := x.GetKind().(*Value_ObjectValue); ok {
+ return x.ObjectValue
+ }
+ return nil
+}
+
+func (x *Value) GetMapValue() *MapValue {
+ if x, ok := x.GetKind().(*Value_MapValue); ok {
+ return x.MapValue
+ }
+ return nil
+}
+
+func (x *Value) GetListValue() *ListValue {
+ if x, ok := x.GetKind().(*Value_ListValue); ok {
+ return x.ListValue
+ }
+ return nil
+}
+
+func (x *Value) GetTypeValue() string {
+ if x, ok := x.GetKind().(*Value_TypeValue); ok {
+ return x.TypeValue
+ }
+ return ""
+}
+
+type isValue_Kind interface {
+ isValue_Kind()
+}
+
+type Value_NullValue struct {
+ NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Value_Uint64Value struct {
+ Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
+}
+
+type Value_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+ StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+type Value_EnumValue struct {
+ EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
+}
+
+type Value_ObjectValue struct {
+ ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
+}
+
+type Value_MapValue struct {
+ MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+ ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+type Value_TypeValue struct {
+ TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_Int64Value) isValue_Kind() {}
+
+func (*Value_Uint64Value) isValue_Kind() {}
+
+func (*Value_DoubleValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BytesValue) isValue_Kind() {}
+
+func (*Value_EnumValue) isValue_Kind() {}
+
+func (*Value_ObjectValue) isValue_Kind() {}
+
+func (*Value_MapValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (*Value_TypeValue) isValue_Kind() {}
+
+type EnumValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *EnumValue) Reset() {
+ *x = EnumValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EnumValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumValue) ProtoMessage() {}
+
+func (x *EnumValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
+func (*EnumValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *EnumValue) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *EnumValue) GetValue() int32 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+type ListValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ListValue) Reset() {
+ *x = ListValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListValue) ProtoMessage() {}
+
+func (x *ListValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
+func (*ListValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListValue) GetValues() []*Value {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+type MapValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+}
+
+func (x *MapValue) Reset() {
+ *x = MapValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue) ProtoMessage() {}
+
+func (x *MapValue) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
+func (*MapValue) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *MapValue) GetEntries() []*MapValue_Entry {
+ if x != nil {
+ return x.Entries
+ }
+ return nil
+}
+
+type MapValue_Entry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *MapValue_Entry) Reset() {
+ *x = MapValue_Entry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MapValue_Entry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MapValue_Entry) ProtoMessage() {}
+
+func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
+ mi := &file_cel_expr_value_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
+func (*MapValue_Entry) Descriptor() ([]byte, []int) {
+ return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *MapValue_Entry) GetKey() *Value {
+ if x != nil {
+ return x.Key
+ }
+ return nil
+}
+
+func (x *MapValue_Entry) GetValue() *Value {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_cel_expr_value_proto protoreflect.FileDescriptor
+
+var file_cel_expr_value_proto_rawDesc = []byte{
+ 0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
+ 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
+ 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
+ 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
+ 0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
+ 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
+ 0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
+ 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
+ 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
+ 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
+ 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
+ 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
+ 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
+ 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_cel_expr_value_proto_rawDescOnce sync.Once
+ file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
+)
+
+func file_cel_expr_value_proto_rawDescGZIP() []byte {
+ file_cel_expr_value_proto_rawDescOnce.Do(func() {
+ file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
+ })
+ return file_cel_expr_value_proto_rawDescData
+}
+
+var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_cel_expr_value_proto_goTypes = []interface{}{
+ (*Value)(nil), // 0: cel.expr.Value
+ (*EnumValue)(nil), // 1: cel.expr.EnumValue
+ (*ListValue)(nil), // 2: cel.expr.ListValue
+ (*MapValue)(nil), // 3: cel.expr.MapValue
+ (*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
+ (structpb.NullValue)(0), // 5: google.protobuf.NullValue
+ (*anypb.Any)(nil), // 6: google.protobuf.Any
+}
+var file_cel_expr_value_proto_depIdxs = []int32{
+ 5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
+ 1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
+ 6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
+ 3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
+ 2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
+ 0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
+ 4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
+ 0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
+ 0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_cel_expr_value_proto_init() }
+func file_cel_expr_value_proto_init() {
+ if File_cel_expr_value_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Value); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EnumValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MapValue_Entry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Value_NullValue)(nil),
+ (*Value_BoolValue)(nil),
+ (*Value_Int64Value)(nil),
+ (*Value_Uint64Value)(nil),
+ (*Value_DoubleValue)(nil),
+ (*Value_StringValue)(nil),
+ (*Value_BytesValue)(nil),
+ (*Value_EnumValue)(nil),
+ (*Value_ObjectValue)(nil),
+ (*Value_MapValue)(nil),
+ (*Value_ListValue)(nil),
+ (*Value_TypeValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_cel_expr_value_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_cel_expr_value_proto_goTypes,
+ DependencyIndexes: file_cel_expr_value_proto_depIdxs,
+ MessageInfos: file_cel_expr_value_proto_msgTypes,
+ }.Build()
+ File_cel_expr_value_proto = out.File
+ file_cel_expr_value_proto_rawDesc = nil
+ file_cel_expr_value_proto_goTypes = nil
+ file_cel_expr_value_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/auth/CHANGES.md b/vendor/cloud.google.com/go/auth/CHANGES.md
index 73d8ea945..66131916e 100644
--- a/vendor/cloud.google.com/go/auth/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/CHANGES.md
@@ -1,5 +1,240 @@
# Changelog
+## [0.16.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.1...auth/v0.16.2) (2025-06-04)
+
+
+### Bug Fixes
+
+* **auth:** Add back DirectPath misconfiguration logging ([#11162](https://github.com/googleapis/google-cloud-go/issues/11162)) ([8d52da5](https://github.com/googleapis/google-cloud-go/commit/8d52da58da5a0ed77a0f6307d1b561bc045406a1))
+* **auth:** Remove s2a fallback option ([#12354](https://github.com/googleapis/google-cloud-go/issues/12354)) ([d5acc59](https://github.com/googleapis/google-cloud-go/commit/d5acc599cd775ddc404349e75906fa02e8ff133e))
+
+## [0.16.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.16.0...auth/v0.16.1) (2025-04-23)
+
+
+### Bug Fixes
+
+* **auth:** Clone detectopts before assigning TokenBindingType ([#11881](https://github.com/googleapis/google-cloud-go/issues/11881)) ([2167b02](https://github.com/googleapis/google-cloud-go/commit/2167b020fdc43b517c2b6ecca264a10e357ea035))
+
+## [0.16.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.15.0...auth/v0.16.0) (2025-04-14)
+
+
+### Features
+
+* **auth/credentials:** Return X.509 certificate chain as subject token ([#11948](https://github.com/googleapis/google-cloud-go/issues/11948)) ([d445a3f](https://github.com/googleapis/google-cloud-go/commit/d445a3f66272ffd5c39c4939af9bebad4582631c)), refs [#11757](https://github.com/googleapis/google-cloud-go/issues/11757)
+* **auth:** Configure DirectPath bound credentials from AllowedHardBoundTokens ([#11665](https://github.com/googleapis/google-cloud-go/issues/11665)) ([0fc40bc](https://github.com/googleapis/google-cloud-go/commit/0fc40bcf4e4673704df0973e9fa65957395d7bb4))
+
+
+### Bug Fixes
+
+* **auth:** Allow non-default SA credentials for DP ([#11828](https://github.com/googleapis/google-cloud-go/issues/11828)) ([3a996b4](https://github.com/googleapis/google-cloud-go/commit/3a996b4129e6d0a34dfda6671f535d5aefb26a82))
+* **auth:** Restore calling DialContext ([#11930](https://github.com/googleapis/google-cloud-go/issues/11930)) ([9ec9a29](https://github.com/googleapis/google-cloud-go/commit/9ec9a29494e93197edbaf45aba28984801e9770a)), refs [#11118](https://github.com/googleapis/google-cloud-go/issues/11118)
+
+## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19)
+
+
+### Features
+
+* **auth:** Add hard-bound token request to compute token provider. ([#11588](https://github.com/googleapis/google-cloud-go/issues/11588)) ([0e608bb](https://github.com/googleapis/google-cloud-go/commit/0e608bb5ac3d694c8ad36ca4340071d3a2c78699))
+
+## [0.14.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.0...auth/v0.14.1) (2025-01-24)
+
+
+### Documentation
+
+* **auth:** Add warning about externally-provided credentials ([#11462](https://github.com/googleapis/google-cloud-go/issues/11462)) ([49fb6ff](https://github.com/googleapis/google-cloud-go/commit/49fb6ff4d754895f82c9c4d502fc7547d3b5a941))
+
+## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08)
+
+
+### Features
+
+* **auth:** Add universe domain support to idtoken ([#11059](https://github.com/googleapis/google-cloud-go/issues/11059)) ([72add7e](https://github.com/googleapis/google-cloud-go/commit/72add7e9f8f455af695e8ef79212a4bd3122fb3a))
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+* **auth:** Fix copy of delegates in impersonate.NewIDTokenCredentials ([#11386](https://github.com/googleapis/google-cloud-go/issues/11386)) ([ff7ef8e](https://github.com/googleapis/google-cloud-go/commit/ff7ef8e7ade7171bce3e4f30ff10a2e9f6c27ca0)), refs [#11379](https://github.com/googleapis/google-cloud-go/issues/11379)
+* **auth:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13)
+
+
+### Features
+
+* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d))
+* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f))
+
+
+### Bug Fixes
+
+* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90))
+
+## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10)
+
+
+### Bug Fixes
+
+* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1))
+
+## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04)
+
+
+### Features
+
+* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005)
+
+
+### Bug Fixes
+
+* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188)
+
+## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21)
+
+
+### Features
+
+* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344))
+
+## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12)
+
+
+### Bug Fixes
+
+* **auth:** Restore use of grpc.Dial ([#11118](https://github.com/googleapis/google-cloud-go/issues/11118)) ([2456b94](https://github.com/googleapis/google-cloud-go/commit/2456b943b7b8aaabd4d8bfb7572c0f477ae0db45)), refs [#7556](https://github.com/googleapis/google-cloud-go/issues/7556)
+
+## [0.10.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.0...auth/v0.10.1) (2024-11-06)
+
+
+### Bug Fixes
+
+* **auth:** Restore Application Default Credentials support to idtoken ([#11083](https://github.com/googleapis/google-cloud-go/issues/11083)) ([8771f2e](https://github.com/googleapis/google-cloud-go/commit/8771f2ea9807ab822083808e0678392edff3b4f2))
+* **auth:** Skip impersonate universe domain check if empty ([#11086](https://github.com/googleapis/google-cloud-go/issues/11086)) ([87159c1](https://github.com/googleapis/google-cloud-go/commit/87159c1059d4a18d1367ce62746a838a94964ab6))
+
+## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30)
+
+
+### Features
+
+* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b))
+
+## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22)
+
+
+### Bug Fixes
+
+* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844)
+* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114))
+
+## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09)
+
+
+### Bug Fixes
+
+* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962)
+* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287))
+
+## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907)
+
+## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30)
+
+
+### Bug Fixes
+
+* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8))
+
+## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25)
+
+
+### Bug Fixes
+
+* **auth:** Restore support for GOOGLE_CLOUD_UNIVERSE_DOMAIN env ([#10915](https://github.com/googleapis/google-cloud-go/issues/10915)) ([94caaaa](https://github.com/googleapis/google-cloud-go/commit/94caaaa061362d0e00ef6214afcc8a0a3e7ebfb2))
+* **auth:** Skip directpath credentials overwrite when it's not on GCE ([#10833](https://github.com/googleapis/google-cloud-go/issues/10833)) ([7e5e8d1](https://github.com/googleapis/google-cloud-go/commit/7e5e8d10b761b0a6e43e19a028528db361bc07b1))
+* **auth:** Use new context for non-blocking token refresh ([#10919](https://github.com/googleapis/google-cloud-go/issues/10919)) ([cf7102d](https://github.com/googleapis/google-cloud-go/commit/cf7102d33a21be1e5a9d47a49456b3a57c43b350))
+
+## [0.9.4](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.3...auth/v0.9.4) (2024-09-11)
+
+
+### Bug Fixes
+
+* **auth:** Enable self-signed JWT for non-GDU universe domain ([#10831](https://github.com/googleapis/google-cloud-go/issues/10831)) ([f9869f7](https://github.com/googleapis/google-cloud-go/commit/f9869f7903cfd34d1b97c25d0dc5669d2c5138e6))
+
+## [0.9.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.2...auth/v0.9.3) (2024-09-03)
+
+
+### Bug Fixes
+
+* **auth:** Choose quota project envvar over file when both present ([#10807](https://github.com/googleapis/google-cloud-go/issues/10807)) ([2d8dd77](https://github.com/googleapis/google-cloud-go/commit/2d8dd7700eff92d4b95027be55e26e1e7aa79181)), refs [#10804](https://github.com/googleapis/google-cloud-go/issues/10804)
+
+## [0.9.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.1...auth/v0.9.2) (2024-08-30)
+
+
+### Bug Fixes
+
+* **auth:** Handle non-Transport DefaultTransport ([#10733](https://github.com/googleapis/google-cloud-go/issues/10733)) ([98d91dc](https://github.com/googleapis/google-cloud-go/commit/98d91dc8316b247498fab41ab35e57a0446fe556)), refs [#10742](https://github.com/googleapis/google-cloud-go/issues/10742)
+* **auth:** Make sure quota option takes precedence over env/file ([#10797](https://github.com/googleapis/google-cloud-go/issues/10797)) ([f1b050d](https://github.com/googleapis/google-cloud-go/commit/f1b050d56d804b245cab048c2980d32b0eaceb4e)), refs [#10795](https://github.com/googleapis/google-cloud-go/issues/10795)
+
+
+### Documentation
+
+* **auth:** Fix Go doc comment link ([#10751](https://github.com/googleapis/google-cloud-go/issues/10751)) ([015acfa](https://github.com/googleapis/google-cloud-go/commit/015acfab4d172650928bb1119bc2cd6307b9a437))
+
+## [0.9.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.0...auth/v0.9.1) (2024-08-22)
+
+
+### Bug Fixes
+
+* **auth:** Setting expireEarly to default when the value is 0 ([#10732](https://github.com/googleapis/google-cloud-go/issues/10732)) ([5e67869](https://github.com/googleapis/google-cloud-go/commit/5e67869a31e9e8ecb4eeebd2cfa11a761c3b1948))
+
+## [0.9.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.1...auth/v0.9.0) (2024-08-16)
+
+
+### Features
+
+* **auth:** Auth library can talk to S2A over mTLS ([#10634](https://github.com/googleapis/google-cloud-go/issues/10634)) ([5250a13](https://github.com/googleapis/google-cloud-go/commit/5250a13ec95b8d4eefbe0158f82857ff2189cb45))
+
+## [0.8.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.8.0...auth/v0.8.1) (2024-08-13)
+
+
+### Bug Fixes
+
+* **auth:** Make default client creation more lenient ([#10669](https://github.com/googleapis/google-cloud-go/issues/10669)) ([1afb9ee](https://github.com/googleapis/google-cloud-go/commit/1afb9ee1ee9de9810722800018133304a0ca34d1)), refs [#10638](https://github.com/googleapis/google-cloud-go/issues/10638)
+
+## [0.8.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.3...auth/v0.8.0) (2024-08-07)
+
+
+### Features
+
+* **auth:** Adds support for X509 workload identity federation ([#10373](https://github.com/googleapis/google-cloud-go/issues/10373)) ([5d07505](https://github.com/googleapis/google-cloud-go/commit/5d075056cbe27bb1da4072a26070c41f8999eb9b))
+
+## [0.7.3](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.2...auth/v0.7.3) (2024-08-01)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+* **auth:** Disable automatic universe domain check for MDS ([#10620](https://github.com/googleapis/google-cloud-go/issues/10620)) ([7cea5ed](https://github.com/googleapis/google-cloud-go/commit/7cea5edd5a0c1e6bca558696f5607879141910e8))
+* **auth:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [0.7.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.1...auth/v0.7.2) (2024-07-22)
+
+
+### Bug Fixes
+
+* **auth:** Use default client for universe metadata lookup ([#10551](https://github.com/googleapis/google-cloud-go/issues/10551)) ([d9046fd](https://github.com/googleapis/google-cloud-go/commit/d9046fdd1435d1ce48f374806c1def4cb5ac6cd3)), refs [#10544](https://github.com/googleapis/google-cloud-go/issues/10544)
+
+## [0.7.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.7.0...auth/v0.7.1) (2024-07-10)
+
+
+### Bug Fixes
+
+* **auth:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+
## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.6.1...auth/v0.7.0) (2024-07-09)
diff --git a/vendor/cloud.google.com/go/auth/README.md b/vendor/cloud.google.com/go/auth/README.md
index 36de276a0..6fe4f0763 100644
--- a/vendor/cloud.google.com/go/auth/README.md
+++ b/vendor/cloud.google.com/go/auth/README.md
@@ -1,4 +1,40 @@
-# auth
+# Google Auth Library for Go
-This module is currently EXPERIMENTAL and under active development. It is not
-yet intended to be used.
+[](https://pkg.go.dev/cloud.google.com/go/auth)
+
+## Install
+
+``` bash
+go get cloud.google.com/go/auth@latest
+```
+
+## Usage
+
+The most common way this library is used is transitively, by default, from any
+of our Go client libraries.
+
+### Notable use-cases
+
+- To create a credential directly please see examples in the
+ [credentials](https://pkg.go.dev/cloud.google.com/go/auth/credentials)
+ package.
+- To create a authenticated HTTP client please see examples in the
+ [httptransport](https://pkg.go.dev/cloud.google.com/go/auth/httptransport)
+ package.
+- To create a authenticated gRPC connection please see examples in the
+ [grpctransport](https://pkg.go.dev/cloud.google.com/go/auth/grpctransport)
+ package.
+- To create an ID token please see examples in the
+ [idtoken](https://pkg.go.dev/cloud.google.com/go/auth/credentials/idtoken)
+ package.
+
+## Contributing
+
+Contributions are welcome. Please, see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md)
+document for details.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/main/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
diff --git a/vendor/cloud.google.com/go/auth/auth.go b/vendor/cloud.google.com/go/auth/auth.go
index 58af93188..cd5e98868 100644
--- a/vendor/cloud.google.com/go/auth/auth.go
+++ b/vendor/cloud.google.com/go/auth/auth.go
@@ -12,6 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package auth provides utilities for managing Google Cloud credentials,
+// including functionality for creating, caching, and refreshing OAuth2 tokens.
+// It offers customizable options for different OAuth2 flows, such as 2-legged
+// (2LO) and 3-legged (3LO) OAuth, along with support for PKCE and automatic
+// token management.
package auth
import (
@@ -19,6 +24,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"net/url"
"strings"
@@ -27,6 +33,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/jwt"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -101,6 +108,20 @@ func (t *Token) IsValid() bool {
return t.isValidWithEarlyExpiry(defaultExpiryDelta)
}
+// MetadataString is a convenience method for accessing string values in the
+// token's metadata. Returns an empty string if the metadata is nil or the value
+// for the given key cannot be cast to a string.
+func (t *Token) MetadataString(k string) string {
+ if t.Metadata == nil {
+ return ""
+ }
+ s, ok := t.Metadata[k].(string)
+ if !ok {
+ return ""
+ }
+ return s
+}
+
func (t *Token) isValidWithEarlyExpiry(earlyExpiry time.Duration) bool {
if t.isEmpty() {
return false
@@ -116,7 +137,9 @@ func (t *Token) isEmpty() bool {
}
// Credentials holds Google credentials, including
-// [Application Default Credentials](https://developers.google.com/accounts/docs/application-default-credentials).
+// [Application Default Credentials].
+//
+// [Application Default Credentials]: https://developers.google.com/accounts/docs/application-default-credentials
type Credentials struct {
json []byte
projectID CredentialsPropertyProvider
@@ -206,9 +229,7 @@ type CredentialsOptions struct {
UniverseDomainProvider CredentialsPropertyProvider
}
-// NewCredentials returns new [Credentials] from the provided options. Most users
-// will want to build this object a function from the
-// [cloud.google.com/go/auth/credentials] package.
+// NewCredentials returns new [Credentials] from the provided options.
func NewCredentials(opts *CredentialsOptions) *Credentials {
creds := &Credentials{
TokenProvider: opts.TokenProvider,
@@ -221,8 +242,8 @@ func NewCredentials(opts *CredentialsOptions) *Credentials {
return creds
}
-// CachedTokenProviderOptions provided options for configuring a
-// CachedTokenProvider.
+// CachedTokenProviderOptions provides options for configuring a cached
+// [TokenProvider].
type CachedTokenProviderOptions struct {
// DisableAutoRefresh makes the TokenProvider always return the same token,
// even if it is expired. The default is false. Optional.
@@ -232,7 +253,7 @@ type CachedTokenProviderOptions struct {
// seconds. Optional.
ExpireEarly time.Duration
// DisableAsyncRefresh configures a synchronous workflow that refreshes
- // stale tokens while blocking. The default is false. Optional.
+ // tokens in a blocking manner. The default is false. Optional.
DisableAsyncRefresh bool
}
@@ -244,7 +265,7 @@ func (ctpo *CachedTokenProviderOptions) autoRefresh() bool {
}
func (ctpo *CachedTokenProviderOptions) expireEarly() time.Duration {
- if ctpo == nil {
+ if ctpo == nil || ctpo.ExpireEarly == 0 {
return defaultExpiryDelta
}
return ctpo.ExpireEarly
@@ -259,12 +280,7 @@ func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool {
// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
// by the underlying provider. By default it will refresh tokens asynchronously
-// (non-blocking mode) within a window that starts 3 minutes and 45 seconds
-// before they expire. The asynchronous (non-blocking) refresh can be changed to
-// a synchronous (blocking) refresh using the
-// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry
-// duration can be configured using the CachedTokenProviderOptions.ExpireEarly
-// option.
+// a few minutes before they expire.
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
if ctp, ok := tp.(*cachedTokenProvider); ok {
return ctp
@@ -307,7 +323,9 @@ func (c *cachedTokenProvider) tokenNonBlocking(ctx context.Context) (*Token, err
defer c.mu.Unlock()
return c.cachedToken, nil
case stale:
- c.tokenAsync(ctx)
+ // Call tokenAsync with a new Context because the user-provided context
+ // may have a short timeout incompatible with async token refresh.
+ c.tokenAsync(context.Background())
// Return the stale token immediately to not block customer requests to Cloud services.
c.mu.Lock()
defer c.mu.Unlock()
@@ -322,13 +340,14 @@ func (c *cachedTokenProvider) tokenState() tokenState {
c.mu.Lock()
defer c.mu.Unlock()
t := c.cachedToken
+ now := timeNow()
if t == nil || t.Value == "" {
return invalid
} else if t.Expiry.IsZero() {
return fresh
- } else if timeNow().After(t.Expiry.Round(0)) {
+ } else if now.After(t.Expiry.Round(0)) {
return invalid
- } else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) {
+ } else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) {
return stale
}
return fresh
@@ -473,13 +492,18 @@ type Options2LO struct {
// UseIDToken requests that the token returned be an ID token if one is
// returned from the server. Optional.
UseIDToken bool
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
}
func (o *Options2LO) client() *http.Client {
if o.Client != nil {
return o.Client
}
- return internal.CloneDefaultClient()
+ return internal.DefaultClient()
}
func (o *Options2LO) validate() error {
@@ -503,12 +527,13 @@ func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
if err := opts.validate(); err != nil {
return nil, err
}
- return tokenProvider2LO{opts: opts, Client: opts.client()}, nil
+ return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil
}
type tokenProvider2LO struct {
opts *Options2LO
Client *http.Client
+ logger *slog.Logger
}
func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
@@ -543,10 +568,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
resp, body, err := internal.DoRequest(tp.Client, req)
if err != nil {
return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
}
+ tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return nil, &Error{
Response: resp,
diff --git a/vendor/cloud.google.com/go/auth/credentials/compute.go b/vendor/cloud.google.com/go/auth/credentials/compute.go
index 6f70fa353..e4a8078f8 100644
--- a/vendor/cloud.google.com/go/auth/credentials/compute.go
+++ b/vendor/cloud.google.com/go/auth/credentials/compute.go
@@ -37,8 +37,12 @@ var (
// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that
// uses the metadata service to retrieve tokens.
-func computeTokenProvider(opts *DetectOptions) auth.TokenProvider {
- return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{
+func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider {
+ return auth.NewCachedTokenProvider(&computeProvider{
+ scopes: opts.Scopes,
+ client: client,
+ tokenBindingType: opts.TokenBindingType,
+ }, &auth.CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenRefresh,
DisableAsyncRefresh: opts.DisableAsyncRefresh,
})
@@ -46,7 +50,9 @@ func computeTokenProvider(opts *DetectOptions) auth.TokenProvider {
// computeProvider fetches tokens from the google cloud metadata service.
type computeProvider struct {
- scopes []string
+ scopes []string
+ client *metadata.Client
+ tokenBindingType TokenBindingType
}
type metadataTokenResp struct {
@@ -55,17 +61,27 @@ type metadataTokenResp struct {
TokenType string `json:"token_type"`
}
-func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) {
+func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) {
tokenURI, err := url.Parse(computeTokenURI)
if err != nil {
return nil, err
}
- if len(cs.scopes) > 0 {
+ hasScopes := len(cs.scopes) > 0
+ if hasScopes || cs.tokenBindingType != NoBinding {
v := url.Values{}
- v.Set("scopes", strings.Join(cs.scopes, ","))
+ if hasScopes {
+ v.Set("scopes", strings.Join(cs.scopes, ","))
+ }
+ switch cs.tokenBindingType {
+ case MTLSHardBinding:
+ v.Set("transport", "mtls")
+ v.Set("binding-enforcement", "on")
+ case ALTSHardBinding:
+ v.Set("transport", "alts")
+ }
tokenURI.RawQuery = v.Encode()
}
- tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String())
+ tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String())
if err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/detect.go b/vendor/cloud.google.com/go/auth/credentials/detect.go
index 2d9a73edf..d8f7d9614 100644
--- a/vendor/cloud.google.com/go/auth/credentials/detect.go
+++ b/vendor/cloud.google.com/go/auth/credentials/detect.go
@@ -19,6 +19,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"os"
"time"
@@ -27,6 +28,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/compute/metadata"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -49,6 +51,23 @@ var (
allowOnGCECheck = true
)
+// TokenBindingType specifies the type of binding used when requesting a token
+// whether to request a hard-bound token using mTLS or an instance identity
+// bound token using ALTS.
+type TokenBindingType int
+
+const (
+ // NoBinding specifies that requested tokens are not required to have a
+ // binding. This is the default option.
+ NoBinding TokenBindingType = iota
+ // MTLSHardBinding specifies that a hard-bound token should be requested
+ // using an mTLS with S2A channel.
+ MTLSHardBinding
+ // ALTSHardBinding specifies that an instance identity bound token should
+ // be requested using an ALTS channel.
+ ALTSHardBinding
+)
+
// OnGCE reports whether this process is running in Google Cloud.
func OnGCE() bool {
// TODO(codyoss): once all libs use this auth lib move metadata check here
@@ -96,12 +115,17 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
}
if OnGCE() {
+ metadataClient := metadata.NewWithOptions(&metadata.Options{
+ Logger: opts.logger(),
+ })
return auth.NewCredentials(&auth.CredentialsOptions{
- TokenProvider: computeTokenProvider(opts),
- ProjectIDProvider: auth.CredentialsPropertyFunc(func(context.Context) (string, error) {
- return metadata.ProjectID()
+ TokenProvider: computeTokenProvider(opts, metadataClient),
+ ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
+ return metadataClient.ProjectIDWithContext(ctx)
}),
- UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{},
+ UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{
+ MetadataClient: metadataClient,
+ },
}), nil
}
@@ -114,6 +138,10 @@ type DetectOptions struct {
// https://www.googleapis.com/auth/cloud-platform. Required if Audience is
// not provided.
Scopes []string
+ // TokenBindingType specifies the type of binding used when requesting a
+ // token whether to request a hard-bound token using mTLS or an instance
+ // identity bound token using ALTS. Optional.
+ TokenBindingType TokenBindingType
// Audience that credentials tokens should have. Only applicable for 2LO
// flows with service accounts. If specified, scopes should not be provided.
Audience string
@@ -142,10 +170,26 @@ type DetectOptions struct {
// CredentialsFile overrides detection logic and sources a credential file
// from the provided filepath. If provided, CredentialsJSON must not be.
// Optional.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
CredentialsFile string
// CredentialsJSON overrides detection logic and uses the JSON bytes as the
// source for the credential. If provided, CredentialsFile must not be.
// Optional.
+ //
+ // Important: If you accept a credential configuration (credential
+ // JSON/File/Stream) from an external source for authentication to Google
+ // Cloud Platform, you must validate it before providing it to any Google
+ // API or library. Providing an unvalidated credential configuration to
+ // Google APIs can compromise the security of your systems and data. For
+ // more information, refer to [Validate credential configurations from
+ // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
CredentialsJSON []byte
// UseSelfSignedJWT directs service account based credentials to create a
// self-signed JWT with the private key found in the file, skipping any
@@ -158,6 +202,11 @@ type DetectOptions struct {
// The default value is "googleapis.com". This option is ignored for
// authentication flows that do not support universe domain. Optional.
UniverseDomain string
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
}
func (o *DetectOptions) validate() error {
@@ -190,7 +239,11 @@ func (o *DetectOptions) client() *http.Client {
if o.Client != nil {
return o.Client
}
- return internal.CloneDefaultClient()
+ return internal.DefaultClient()
+}
+
+func (o *DetectOptions) logger() *slog.Logger {
+ return internallog.New(o.Logger)
}
func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
@@ -253,6 +306,7 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
AuthURL: c.AuthURI,
TokenURL: c.TokenURI,
Client: opts.client(),
+ Logger: opts.logger(),
EarlyTokenExpiry: opts.EarlyTokenRefresh,
AuthHandlerOpts: handleOpts,
// TODO(codyoss): refactor this out. We need to add in auto-detection
diff --git a/vendor/cloud.google.com/go/auth/credentials/filetypes.go b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
index fe9355738..e5243e6cf 100644
--- a/vendor/cloud.google.com/go/auth/credentials/filetypes.go
+++ b/vendor/cloud.google.com/go/auth/credentials/filetypes.go
@@ -33,7 +33,7 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
return nil, err
}
- var projectID, quotaProjectID, universeDomain string
+ var projectID, universeDomain string
var tp auth.TokenProvider
switch fileType {
case credsfile.ServiceAccountKey:
@@ -56,7 +56,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain
case credsfile.ExternalAccountKey:
f, err := credsfile.ParseExternalAccount(b)
@@ -67,7 +66,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
case credsfile.ExternalAccountAuthorizedUserKey:
f, err := credsfile.ParseExternalAccountAuthorizedUser(b)
@@ -78,7 +76,6 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
if err != nil {
return nil, err
}
- quotaProjectID = f.QuotaProjectID
universeDomain = f.UniverseDomain
case credsfile.ImpersonatedServiceAccountKey:
f, err := credsfile.ParseImpersonatedServiceAccount(b)
@@ -108,9 +105,9 @@ func fileCredentials(b []byte, opts *DetectOptions) (*auth.Credentials, error) {
TokenProvider: auth.NewCachedTokenProvider(tp, &auth.CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenRefresh,
}),
- JSON: b,
- ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
- QuotaProjectIDProvider: internalauth.StaticCredentialsProperty(quotaProjectID),
+ JSON: b,
+ ProjectIDProvider: internalauth.StaticCredentialsProperty(projectID),
+ // TODO(codyoss): only set quota project here if there was a user override
UniverseDomainProvider: internalauth.StaticCredentialsProperty(universeDomain),
}), nil
}
@@ -127,8 +124,14 @@ func resolveUniverseDomain(optsUniverseDomain, fileUniverseDomain string) string
}
func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
+ ud := resolveUniverseDomain(opts.UniverseDomain, f.UniverseDomain)
if opts.UseSelfSignedJWT {
return configureSelfSignedJWT(f, opts)
+ } else if ud != "" && ud != internalauth.DefaultUniverseDomain {
+ // For non-GDU universe domains, token exchange is impossible and services
+ // must support self-signed JWTs.
+ opts.UseSelfSignedJWT = true
+ return configureSelfSignedJWT(f, opts)
}
opts2LO := &auth.Options2LO{
Email: f.ClientEmail,
@@ -138,6 +141,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions)
TokenURL: f.TokenURL,
Subject: opts.Subject,
Client: opts.client(),
+ Logger: opts.logger(),
}
if opts2LO.TokenURL == "" {
opts2LO.TokenURL = jwtTokenURL
@@ -156,6 +160,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions)
EarlyTokenExpiry: opts.EarlyTokenRefresh,
RefreshToken: f.RefreshToken,
Client: opts.client(),
+ Logger: opts.logger(),
}
return auth.New3LOTokenProvider(opts3LO)
}
@@ -174,6 +179,8 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions
Scopes: opts.scopes(),
WorkforcePoolUserProject: f.WorkforcePoolUserProject,
Client: opts.client(),
+ Logger: opts.logger(),
+ IsDefaultClient: opts.Client == nil,
}
if f.ServiceAccountImpersonation != nil {
externalOpts.ServiceAccountImpersonationLifetimeSeconds = f.ServiceAccountImpersonation.TokenLifetimeSeconds
@@ -191,6 +198,7 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU
ClientSecret: f.ClientSecret,
Scopes: opts.scopes(),
Client: opts.client(),
+ Logger: opts.logger(),
}
return externalaccountuser.NewTokenProvider(externalOpts)
}
@@ -210,6 +218,7 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil
Tp: tp,
Delegates: f.Delegates,
Client: opts.client(),
+ Logger: opts.logger(),
})
}
@@ -217,5 +226,6 @@ func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectO
return gdch.NewTokenProvider(f, &gdch.Options{
STSAudience: opts.STSAudience,
Client: opts.client(),
+ Logger: opts.logger(),
})
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
index a34f6b06f..9ecd1f64b 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/aws_provider.go
@@ -23,6 +23,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"net/url"
"os"
@@ -32,6 +33,7 @@ import (
"time"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
var (
@@ -87,6 +89,7 @@ type awsSubjectProvider struct {
reqOpts *RequestOptions
Client *http.Client
+ logger *slog.Logger
}
func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
@@ -94,32 +97,30 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error)
if sp.RegionalCredVerificationURL == "" {
sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
}
- if sp.requestSigner == nil {
- headers := make(map[string]string)
- if sp.shouldUseMetadataServer() {
- awsSessionToken, err := sp.getAWSSessionToken(ctx)
- if err != nil {
- return "", err
- }
-
- if awsSessionToken != "" {
- headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
- }
- }
-
- awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ headers := make(map[string]string)
+ if sp.shouldUseMetadataServer() {
+ awsSessionToken, err := sp.getAWSSessionToken(ctx)
if err != nil {
return "", err
}
- if sp.region, err = sp.getRegion(ctx, headers); err != nil {
- return "", err
- }
- sp.requestSigner = &awsRequestSigner{
- RegionName: sp.region,
- AwsSecurityCredentials: awsSecurityCredentials,
+
+ if awsSessionToken != "" {
+ headers[awsIMDSv2SessionTokenHeader] = awsSessionToken
}
}
+ awsSecurityCredentials, err := sp.getSecurityCredentials(ctx, headers)
+ if err != nil {
+ return "", err
+ }
+ if sp.region, err = sp.getRegion(ctx, headers); err != nil {
+ return "", err
+ }
+ sp.requestSigner = &awsRequestSigner{
+ RegionName: sp.region,
+ AwsSecurityCredentials: awsSecurityCredentials,
+ }
+
// Generate the signed request to AWS STS GetCallerIdentity API.
// Use the required regional endpoint. Otherwise, the request will fail.
req, err := http.NewRequestWithContext(ctx, "POST", strings.Replace(sp.RegionalCredVerificationURL, "{region}", sp.region, 1), nil)
@@ -194,10 +195,12 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e
}
req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
+ sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
+ sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body)
}
@@ -227,10 +230,12 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]
for name, value := range headers {
req.Header.Add(name, value)
}
+ sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
+ sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body)
}
@@ -285,10 +290,12 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context
for name, value := range headers {
req.Header.Add(name, value)
}
+ sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return result, err
}
+ sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body)
}
@@ -310,10 +317,12 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m
req.Header.Add(name, value)
}
+ sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
+ sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
index b19c6edea..f4f49f175 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/externalaccount.go
@@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
+ "log/slog"
"net/http"
"regexp"
"strconv"
@@ -28,6 +29,7 @@ import (
"cloud.google.com/go/auth/credentials/internal/impersonate"
"cloud.google.com/go/auth/credentials/internal/stsexchange"
"cloud.google.com/go/auth/internal/credsfile"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -100,6 +102,15 @@ type Options struct {
AwsSecurityCredentialsProvider AwsSecurityCredentialsProvider
// Client for token request.
Client *http.Client
+ // IsDefaultClient marks whether the client passed in is a default client that can be overriden.
+ // This is important for X509 credentials which should create a new client if the default was used
+ // but should respect a client explicitly passed in by the user.
+ IsDefaultClient bool
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
}
// SubjectTokenProvider can be used to supply a subject token to exchange for a
@@ -181,6 +192,26 @@ func (o *Options) validate() error {
return nil
}
+// client returns the http client that should be used for the token exchange. If a non-default client
+// is provided, then the client configured in the options will always be returned. If a default client
+// is provided and the options are configured for X509 credentials, a new client will be created.
+func (o *Options) client() (*http.Client, error) {
+ // If a client was provided and no override certificate config location was provided, use the provided client.
+ if o.CredentialSource == nil || o.CredentialSource.Certificate == nil || (!o.IsDefaultClient && o.CredentialSource.Certificate.CertificateConfigLocation == "") {
+ return o.Client, nil
+ }
+
+ // If a new client should be created, validate and use the certificate source to create a new mTLS client.
+ cert := o.CredentialSource.Certificate
+ if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" {
+ return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true")
+ }
+ if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" {
+ return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true")
+ }
+ return createX509Client(cert.CertificateConfigLocation)
+}
+
// resolveTokenURL sets the default STS token endpoint with the configured
// universe domain.
func (o *Options) resolveTokenURL() {
@@ -200,15 +231,24 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
return nil, err
}
opts.resolveTokenURL()
+ logger := internallog.New(opts.Logger)
stp, err := newSubjectTokenProvider(opts)
if err != nil {
return nil, err
}
+
+ client, err := opts.client()
+ if err != nil {
+ return nil, err
+ }
+
tp := &tokenProvider{
- client: opts.Client,
+ client: client,
opts: opts,
stp: stp,
+ logger: logger,
}
+
if opts.ServiceAccountImpersonationURL == "" {
return auth.NewCachedTokenProvider(tp, nil), nil
}
@@ -218,11 +258,12 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
// needed for impersonation
tp.opts.Scopes = []string{"https://www.googleapis.com/auth/cloud-platform"}
imp, err := impersonate.NewTokenProvider(&impersonate.Options{
- Client: opts.Client,
+ Client: client,
URL: opts.ServiceAccountImpersonationURL,
Scopes: scopes,
Tp: auth.NewCachedTokenProvider(tp, nil),
TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
+ Logger: logger,
})
if err != nil {
return nil, err
@@ -238,6 +279,7 @@ type subjectTokenProvider interface {
// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
type tokenProvider struct {
client *http.Client
+ logger *slog.Logger
opts *Options
stp subjectTokenProvider
}
@@ -279,6 +321,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
Authentication: clientAuth,
Headers: header,
ExtraOpts: options,
+ Logger: tp.logger,
})
if err != nil {
return nil, err
@@ -299,12 +342,14 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
// subjectTokenProvider
func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
+ logger := internallog.New(o.Logger)
reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
if o.AwsSecurityCredentialsProvider != nil {
return &awsSubjectProvider{
securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
TargetResource: o.Audience,
reqOpts: reqOpts,
+ logger: logger,
}, nil
} else if o.SubjectTokenProvider != nil {
return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
@@ -321,6 +366,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
CredVerificationURL: o.CredentialSource.URL,
TargetResource: o.Audience,
Client: o.Client,
+ logger: logger,
}
if o.CredentialSource.IMDSv2SessionTokenURL != "" {
awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
@@ -331,7 +377,13 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
} else if o.CredentialSource.File != "" {
return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
} else if o.CredentialSource.URL != "" {
- return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil
+ return &urlSubjectProvider{
+ URL: o.CredentialSource.URL,
+ Headers: o.CredentialSource.Headers,
+ Format: o.CredentialSource.Format,
+ Client: o.Client,
+ Logger: logger,
+ }, nil
} else if o.CredentialSource.Executable != nil {
ec := o.CredentialSource.Executable
if ec.Command == "" {
@@ -353,6 +405,18 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
execProvider.opts = o
execProvider.env = runtimeEnvironment{}
return execProvider, nil
+ } else if o.CredentialSource.Certificate != nil {
+ cert := o.CredentialSource.Certificate
+ if !cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation == "" {
+ return nil, errors.New("credentials: \"certificate\" object must either specify a certificate_config_location or use_default_certificate_config should be true")
+ }
+ if cert.UseDefaultCertificateConfig && cert.CertificateConfigLocation != "" {
+ return nil, errors.New("credentials: \"certificate\" object cannot specify both a certificate_config_location and use_default_certificate_config=true")
+ }
+ return &x509Provider{
+ TrustChainPath: o.CredentialSource.Certificate.TrustChainPath,
+ ConfigFilePath: o.CredentialSource.Certificate.CertificateConfigLocation,
+ }, nil
}
return nil, errors.New("credentials: unable to parse credential source")
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
index e33d35a26..754ecf4fe 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/url_provider.go
@@ -19,10 +19,12 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -30,6 +32,7 @@ const (
fileTypeJSON = "json"
urlProviderType = "url"
programmaticProviderType = "programmatic"
+ x509ProviderType = "x509"
)
type urlSubjectProvider struct {
@@ -37,6 +40,7 @@ type urlSubjectProvider struct {
Headers map[string]string
Format *credsfile.Format
Client *http.Client
+ Logger *slog.Logger
}
func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
@@ -48,10 +52,12 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error)
for key, val := range sp.Headers {
req.Header.Add(key, val)
}
+ sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
}
+ sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return "", fmt.Errorf("credentials: status code %d: %s", c, body)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go
new file mode 100644
index 000000000..d86ca593c
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccount/x509_provider.go
@@ -0,0 +1,220 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package externalaccount
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/json"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io/fs"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth/internal/transport/cert"
+)
+
+// x509Provider implements the subjectTokenProvider type for x509 workload
+// identity credentials. This provider retrieves and formats a JSON array
+// containing the leaf certificate and trust chain (if provided) as
+// base64-encoded strings. This JSON array serves as the subject token for
+// mTLS authentication.
+type x509Provider struct {
+ // TrustChainPath is the path to the file containing the trust chain certificates.
+ // The file should contain one or more PEM-encoded certificates.
+ TrustChainPath string
+ // ConfigFilePath is the path to the configuration file containing the path
+ // to the leaf certificate file.
+ ConfigFilePath string
+}
+
+const pemCertificateHeader = "-----BEGIN CERTIFICATE-----"
+
+func (xp *x509Provider) providerType() string {
+ return x509ProviderType
+}
+
+// loadLeafCertificate loads and parses the leaf certificate from the specified
+// configuration file. It retrieves the certificate path from the config file,
+// reads the certificate file, and parses the certificate data.
+func loadLeafCertificate(configFilePath string) (*x509.Certificate, error) {
+ // Get the path to the certificate file from the configuration file.
+ path, err := cert.GetCertificatePath(configFilePath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get certificate path from config file: %w", err)
+ }
+ leafCertBytes, err := os.ReadFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read leaf certificate file: %w", err)
+ }
+ // Parse the certificate bytes.
+ return parseCertificate(leafCertBytes)
+}
+
+// encodeCert encodes a x509.Certificate to a base64 string.
+func encodeCert(cert *x509.Certificate) string {
+ // cert.Raw contains the raw DER-encoded certificate. Encode the raw certificate bytes to base64.
+ return base64.StdEncoding.EncodeToString(cert.Raw)
+}
+
+// parseCertificate parses a PEM-encoded certificate from the given byte slice.
+func parseCertificate(certData []byte) (*x509.Certificate, error) {
+ if len(certData) == 0 {
+ return nil, errors.New("invalid certificate data: empty input")
+ }
+ // Decode the PEM-encoded data.
+ block, _ := pem.Decode(certData)
+ if block == nil {
+ return nil, errors.New("invalid PEM-encoded certificate data: no PEM block found")
+ }
+ if block.Type != "CERTIFICATE" {
+ return nil, fmt.Errorf("invalid PEM-encoded certificate data: expected CERTIFICATE block type, got %s", block.Type)
+ }
+ // Parse the DER-encoded certificate.
+ certificate, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse certificate: %w", err)
+ }
+ return certificate, nil
+}
+
+// readTrustChain reads a file of PEM-encoded X.509 certificates and returns a slice of parsed certificates.
+// It splits the file content into PEM certificate blocks and parses each one.
+func readTrustChain(trustChainPath string) ([]*x509.Certificate, error) {
+ certificateTrustChain := []*x509.Certificate{}
+
+ // If no trust chain path is provided, return an empty slice.
+ if trustChainPath == "" {
+ return certificateTrustChain, nil
+ }
+
+ // Read the trust chain file.
+ trustChainData, err := os.ReadFile(trustChainPath)
+ if err != nil {
+ if errors.Is(err, fs.ErrNotExist) {
+ return nil, fmt.Errorf("trust chain file not found: %w", err)
+ }
+ return nil, fmt.Errorf("failed to read trust chain file: %w", err)
+ }
+
+ // Split the file content into PEM certificate blocks.
+ certBlocks := strings.Split(string(trustChainData), pemCertificateHeader)
+
+ // Iterate over each certificate block.
+ for _, certBlock := range certBlocks {
+ // Trim whitespace from the block.
+ certBlock = strings.TrimSpace(certBlock)
+
+ if certBlock != "" {
+ // Add the PEM header to the block.
+ certData := pemCertificateHeader + "\n" + certBlock
+
+ // Parse the certificate data.
+ cert, err := parseCertificate([]byte(certData))
+ if err != nil {
+ return nil, fmt.Errorf("error parsing certificate from trust chain file: %w", err)
+ }
+
+ // Append the certificate to the trust chain.
+ certificateTrustChain = append(certificateTrustChain, cert)
+ }
+ }
+
+ return certificateTrustChain, nil
+}
+
+// subjectToken retrieves the X.509 subject token. It loads the leaf
+// certificate and, if a trust chain path is configured, the trust chain
+// certificates. It then constructs a JSON array containing the base64-encoded
+// leaf certificate and each base64-encoded certificate in the trust chain.
+// The leaf certificate must be at the top of the trust chain file. This JSON
+// array is used as the subject token for mTLS authentication.
+func (xp *x509Provider) subjectToken(context.Context) (string, error) {
+ // Load the leaf certificate.
+ leafCert, err := loadLeafCertificate(xp.ConfigFilePath)
+ if err != nil {
+ return "", fmt.Errorf("failed to load leaf certificate: %w", err)
+ }
+
+ // Read the trust chain.
+ trustChain, err := readTrustChain(xp.TrustChainPath)
+ if err != nil {
+ return "", fmt.Errorf("failed to read trust chain: %w", err)
+ }
+
+ // Initialize the certificate chain with the leaf certificate.
+ certChain := []string{encodeCert(leafCert)}
+
+ // If there is a trust chain, add certificates to the certificate chain.
+ if len(trustChain) > 0 {
+ firstCert := encodeCert(trustChain[0])
+
+ // If the first certificate in the trust chain is not the same as the leaf certificate, add it to the chain.
+ if firstCert != certChain[0] {
+ certChain = append(certChain, firstCert)
+ }
+
+ // Iterate over the remaining certificates in the trust chain.
+ for i := 1; i < len(trustChain); i++ {
+ encoded := encodeCert(trustChain[i])
+
+ // Return an error if the current certificate is the same as the leaf certificate.
+ if encoded == certChain[0] {
+ return "", errors.New("the leaf certificate must be at the top of the trust chain file")
+ }
+
+ // Add the current certificate to the chain.
+ certChain = append(certChain, encoded)
+ }
+ }
+
+ // Convert the certificate chain to a JSON array of base64-encoded strings.
+ jsonChain, err := json.Marshal(certChain)
+ if err != nil {
+ return "", fmt.Errorf("failed to format certificate data: %w", err)
+ }
+
+ // Return the JSON-formatted certificate chain.
+ return string(jsonChain), nil
+
+}
+
+// createX509Client creates a new client that is configured with mTLS, using the
+// certificate configuration specified in the credential source.
+func createX509Client(certificateConfigLocation string) (*http.Client, error) {
+ certProvider, err := cert.NewWorkloadX509CertProvider(certificateConfigLocation)
+ if err != nil {
+ return nil, err
+ }
+ trans := http.DefaultTransport.(*http.Transport).Clone()
+
+ trans.TLSClientConfig = &tls.Config{
+ GetClientCertificate: certProvider,
+ }
+
+ // Create a client with default settings plus the X509 workload cert and key.
+ client := &http.Client{
+ Transport: trans,
+ Timeout: 30 * time.Second,
+ }
+
+ return client, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
index 0d7885479..ae39206e5 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/externalaccountuser/externalaccountuser.go
@@ -17,12 +17,14 @@ package externalaccountuser
import (
"context"
"errors"
+ "log/slog"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials/internal/stsexchange"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
// Options stores the configuration for fetching tokens with external authorized
@@ -51,6 +53,8 @@ type Options struct {
// Client for token request.
Client *http.Client
+ // Logger for logging.
+ Logger *slog.Logger
}
func (c *Options) validate() bool {
@@ -90,6 +94,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
RefreshToken: opts.RefreshToken,
Authentication: clientAuth,
Headers: headers,
+ Logger: internallog.New(tp.o.Logger),
})
if err != nil {
return nil, err
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
index 720045d3b..c2d320fdf 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/gdch/gdch.go
@@ -16,12 +16,13 @@ package gdch
import (
"context"
- "crypto/rsa"
+ "crypto"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"net/url"
"os"
@@ -32,6 +33,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/auth/internal/jwt"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -51,6 +53,7 @@ var (
type Options struct {
STSAudience string
Client *http.Client
+ Logger *slog.Logger
}
// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a
@@ -62,7 +65,7 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok
if o.STSAudience == "" {
return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows")
}
- pk, err := internal.ParseKey([]byte(f.PrivateKey))
+ signer, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, err
}
@@ -75,10 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok
serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name),
tokenURL: f.TokenURL,
aud: o.STSAudience,
- pk: pk,
+ signer: signer,
pkID: f.PrivateKeyID,
certPool: certPool,
client: o.Client,
+ logger: internallog.New(o.Logger),
}
return tp, nil
}
@@ -97,11 +101,12 @@ type gdchProvider struct {
serviceIdentity string
tokenURL string
aud string
- pk *rsa.PrivateKey
+ signer crypto.Signer
pkID string
certPool *x509.CertPool
client *http.Client
+ logger *slog.Logger
}
func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
@@ -120,7 +125,7 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
Type: jwt.HeaderType,
KeyID: string(g.pkID),
}
- payload, err := jwt.EncodeJWS(&h, &claims, g.pk)
+ payload, err := jwt.EncodeJWS(&h, &claims, g.signer)
if err != nil {
return nil, err
}
@@ -136,10 +141,12 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
resp, body, err := internal.DoRequest(g.client, req)
if err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
+ g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
return nil, &auth.Error{
Response: resp,
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
new file mode 100644
index 000000000..705462c16
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/idtoken.go
@@ -0,0 +1,105 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package impersonate
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+ "time"
+
+ "cloud.google.com/go/auth"
+ "cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
+)
+
+var (
+ universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+ iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN"
+)
+
+// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token].
+type IDTokenIAMOptions struct {
+ // Client is required.
+ Client *http.Client
+ // Logger is required.
+ Logger *slog.Logger
+ UniverseDomain auth.CredentialsPropertyProvider
+ ServiceAccountEmail string
+ GenerateIDTokenRequest
+}
+
+// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC.
+type GenerateIDTokenRequest struct {
+ Audience string `json:"audience"`
+ IncludeEmail bool `json:"includeEmail"`
+ // Delegates are the ordered, fully-qualified resource name for service
+ // accounts in a delegation chain. Each service account must be granted
+ // roles/iam.serviceAccountTokenCreator on the next service account in the
+ // chain. The delegates must have the following format:
+ // projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard
+ // character is required; replacing it with a project ID is invalid.
+ // Optional.
+ Delegates []string `json:"delegates,omitempty"`
+}
+
+// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC.
+type GenerateIDTokenResponse struct {
+ Token string `json:"token"`
+}
+
+// Token call IAM generateIdToken with the configuration provided in [IDTokenIAMOptions].
+func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) {
+ universeDomain, err := o.UniverseDomain.GetProperty(ctx)
+ if err != nil {
+ return nil, err
+ }
+ endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1)
+ url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail))
+
+ bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest)
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err)
+ }
+
+ req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes))
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to create request: %w", err)
+ }
+ req.Header.Set("Content-Type", "application/json")
+ o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes))
+ resp, body, err := internal.DoRequest(o.Client, req)
+ if err != nil {
+ return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err)
+ }
+ o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body))
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
+ }
+
+ var tokenResp GenerateIDTokenResponse
+ if err := json.Unmarshal(body, &tokenResp); err != nil {
+ return nil, fmt.Errorf("impersonate: unable to parse response: %w", err)
+ }
+ return &auth.Token{
+ Value: tokenResp.Token,
+ // Generated ID tokens are good for one hour.
+ Expiry: time.Now().Add(1 * time.Hour),
+ }, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
index ed53afa51..b3a99261f 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/impersonate/impersonate.go
@@ -20,11 +20,13 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -74,6 +76,11 @@ type Options struct {
// Client configures the underlying client used to make network requests
// when fetching tokens. Required.
Client *http.Client
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
}
func (o *Options) validate() error {
@@ -88,6 +95,7 @@ func (o *Options) validate() error {
// Token performs the exchange to get a temporary service account token to allow access to GCP.
func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
+ logger := internallog.New(o.Logger)
lifetime := defaultTokenLifetime
if o.TokenLifetimeSeconds != 0 {
lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
@@ -109,10 +117,12 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
if err := setAuthHeader(ctx, o.Tp, req); err != nil {
return nil, err
}
+ logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b))
resp, body, err := internal.DoRequest(o.Client, req)
if err != nil {
return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
}
+ logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
index 768a9dafc..e1d2b1503 100644
--- a/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
+++ b/vendor/cloud.google.com/go/auth/credentials/internal/stsexchange/sts_exchange.go
@@ -19,6 +19,7 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
+ "log/slog"
"net/http"
"net/url"
"strconv"
@@ -26,6 +27,7 @@ import (
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -40,6 +42,7 @@ const (
// Options stores the configuration for making an sts exchange request.
type Options struct {
Client *http.Client
+ Logger *slog.Logger
Endpoint string
Request *TokenRequest
Authentication ClientAuthentication
@@ -80,6 +83,7 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
opts.Authentication.InjectAuthentication(data, opts.Headers)
encodedData := data.Encode()
+ logger := internallog.New(opts.Logger)
req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
if err != nil {
@@ -93,10 +97,12 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo
}
req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
+ logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData)))
resp, body, err := internal.DoRequest(opts.Client, req)
if err != nil {
return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
}
+ logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
}
diff --git a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
index b62a8ae4d..8d335ccec 100644
--- a/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
+++ b/vendor/cloud.google.com/go/auth/credentials/selfsignedjwt.go
@@ -16,8 +16,10 @@ package credentials
import (
"context"
- "crypto/rsa"
+ "crypto"
+ "errors"
"fmt"
+ "log/slog"
"strings"
"time"
@@ -35,7 +37,10 @@ var (
// configureSelfSignedJWT uses the private key in the service account to create
// a JWT without making a network call.
func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions) (auth.TokenProvider, error) {
- pk, err := internal.ParseKey([]byte(f.PrivateKey))
+ if len(opts.scopes()) == 0 && opts.Audience == "" {
+ return nil, errors.New("credentials: both scopes and audience are empty")
+ }
+ signer, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, fmt.Errorf("credentials: could not parse key: %w", err)
}
@@ -43,8 +48,9 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions
email: f.ClientEmail,
audience: opts.Audience,
scopes: opts.scopes(),
- pk: pk,
+ signer: signer,
pkID: f.PrivateKeyID,
+ logger: opts.logger(),
}, nil
}
@@ -52,8 +58,9 @@ type selfSignedTokenProvider struct {
email string
audience string
scopes []string
- pk *rsa.PrivateKey
+ signer crypto.Signer
pkID string
+ logger *slog.Logger
}
func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
@@ -73,9 +80,10 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
Type: jwt.HeaderType,
KeyID: string(tp.pkID),
}
- msg, err := jwt.EncodeJWS(h, c, tp.pk)
+ tok, err := jwt.EncodeJWS(h, c, tp.signer)
if err != nil {
return nil, fmt.Errorf("credentials: could not encode JWT: %w", err)
}
- return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil
+ tp.logger.Debug("created self-signed JWT", "token", tok)
+ return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil
}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
index 8dbfa7ef7..69d6d0034 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/directpath.go
@@ -20,13 +20,18 @@ import (
"os"
"strconv"
"strings"
+ "time"
"cloud.google.com/go/auth"
- "cloud.google.com/go/compute/metadata"
+ "cloud.google.com/go/auth/credentials"
+ "cloud.google.com/go/auth/internal/compute"
+ "golang.org/x/time/rate"
"google.golang.org/grpc"
grpcgoogle "google.golang.org/grpc/credentials/google"
)
+var logRateLimiter = rate.Sometimes{Interval: 1 * time.Second}
+
func isDirectPathEnabled(endpoint string, opts *Options) bool {
if opts.InternalOptions != nil && !opts.InternalOptions.EnableDirectPath {
return false
@@ -55,7 +60,7 @@ func checkDirectPathEndPoint(endpoint string) bool {
return true
}
-func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool {
+func isTokenProviderComputeEngine(tp auth.TokenProvider) bool {
if tp == nil {
return false
}
@@ -66,15 +71,25 @@ func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, _ *Options) bool
if tok == nil {
return false
}
- if source, _ := tok.Metadata["auth.google.tokenSource"].(string); source != "compute-metadata" {
+ if tok.MetadataString("auth.google.tokenSource") != "compute-metadata" {
return false
}
- if acct, _ := tok.Metadata["auth.google.serviceAccount"].(string); acct != "default" {
+ if tok.MetadataString("auth.google.serviceAccount") != "default" {
return false
}
return true
}
+func isTokenProviderDirectPathCompatible(tp auth.TokenProvider, o *Options) bool {
+ if tp == nil {
+ return false
+ }
+ if o.InternalOptions != nil && o.InternalOptions.EnableNonDefaultSAForDirectPath {
+ return true
+ }
+ return isTokenProviderComputeEngine(tp)
+}
+
func isDirectPathXdsUsed(o *Options) bool {
// Method 1: Enable DirectPath xDS by env;
if b, _ := strconv.ParseBool(os.Getenv(enableDirectPathXdsEnvVar)); b {
@@ -87,14 +102,36 @@ func isDirectPathXdsUsed(o *Options) bool {
return false
}
+func isDirectPathBoundTokenEnabled(opts *InternalOptions) bool {
+ for _, ev := range opts.AllowHardBoundTokens {
+ if ev == "ALTS" {
+ return true
+ }
+ }
+ return false
+}
+
// configureDirectPath returns some dial options and an endpoint to use if the
// configuration allows the use of direct path. If it does not the provided
// grpcOpts and endpoint are returned.
-func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string) {
- if isDirectPathEnabled(endpoint, opts) && metadata.OnGCE() && isTokenProviderDirectPathCompatible(creds, opts) {
+func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint string, creds *auth.Credentials) ([]grpc.DialOption, string, error) {
+ logRateLimiter.Do(func() {
+ logDirectPathMisconfig(endpoint, creds, opts)
+ })
+ if isDirectPathEnabled(endpoint, opts) && compute.OnComputeEngine() && isTokenProviderDirectPathCompatible(creds, opts) {
// Overwrite all of the previously specific DialOptions, DirectPath uses its own set of credentials and certificates.
+ defaultCredetialsOptions := grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}
+ if isDirectPathBoundTokenEnabled(opts.InternalOptions) && isTokenProviderComputeEngine(creds) {
+ optsClone := opts.resolveDetectOptions()
+ optsClone.TokenBindingType = credentials.ALTSHardBinding
+ altsCreds, err := credentials.DetectDefault(optsClone)
+ if err != nil {
+ return nil, "", err
+ }
+ defaultCredetialsOptions.ALTSPerRPCCreds = &grpcCredentialsProvider{creds: altsCreds}
+ }
grpcOpts = []grpc.DialOption{
- grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(grpcgoogle.DefaultCredentialsOptions{PerRPCCreds: &grpcCredentialsProvider{creds: creds}}))}
+ grpc.WithCredentialsBundle(grpcgoogle.NewDefaultCredentialsWithOptions(defaultCredetialsOptions))}
if timeoutDialerOption != nil {
grpcOpts = append(grpcOpts, timeoutDialerOption)
}
@@ -119,5 +156,22 @@ func configureDirectPath(grpcOpts []grpc.DialOption, opts *Options, endpoint str
}
// TODO: add support for system parameters (quota project, request reason) via chained interceptor.
}
- return grpcOpts, endpoint
+ return grpcOpts, endpoint, nil
+}
+
+func logDirectPathMisconfig(endpoint string, creds *auth.Credentials, o *Options) {
+
+ // Case 1: does not enable DirectPath
+ if !isDirectPathEnabled(endpoint, o) {
+ o.logger().Warn("DirectPath is disabled. To enable, please set the EnableDirectPath option along with the EnableDirectPathXds option.")
+ } else {
+ // Case 2: credential is not correctly set
+ if !isTokenProviderDirectPathCompatible(creds, o) {
+ o.logger().Warn("DirectPath is disabled. Please make sure the token source is fetched from GCE metadata server and the default service account is used.")
+ }
+ // Case 3: not running on GCE
+ if !compute.OnComputeEngine() {
+ o.logger().Warn("DirectPath is disabled. DirectPath is only available in a GCE environment.")
+ }
+ }
}
diff --git a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
index 5c3bc66f9..834aef41c 100644
--- a/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
+++ b/vendor/cloud.google.com/go/auth/grpctransport/grpctransport.go
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package grpctransport provides functionality for managing gRPC client
+// connections to Google Cloud services.
package grpctransport
import (
@@ -19,16 +21,21 @@ import (
"crypto/tls"
"errors"
"fmt"
+ "log/slog"
"net/http"
+ "os"
+ "sync"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
- "go.opencensus.io/plugin/ocgrpc"
+ "github.com/googleapis/gax-go/v2/internallog"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"google.golang.org/grpc"
grpccreds "google.golang.org/grpc/credentials"
grpcinsecure "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/stats"
)
const (
@@ -38,7 +45,7 @@ const (
// Check env to decide if using google-c2p resolver for DirectPath traffic.
enableDirectPathXdsEnvVar = "GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS"
- quotaProjectHeaderKey = "X-Goog-User-Project"
+ quotaProjectHeaderKey = "X-goog-user-project"
)
var (
@@ -46,6 +53,27 @@ var (
timeoutDialerOption grpc.DialOption
)
+// otelStatsHandler is a singleton otelgrpc.clientHandler to be used across
+// all dial connections to avoid the memory leak documented in
+// https://github.com/open-telemetry/opentelemetry-go-contrib/issues/4226
+//
+// TODO: When this module depends on a version of otelgrpc containing the fix,
+// replace this singleton with inline usage for simplicity.
+// The fix should be in https://github.com/open-telemetry/opentelemetry-go/pull/5797.
+var (
+ initOtelStatsHandlerOnce sync.Once
+ otelStatsHandler stats.Handler
+)
+
+// otelGRPCStatsHandler returns singleton otelStatsHandler for reuse across all
+// dial connections.
+func otelGRPCStatsHandler() stats.Handler {
+ initOtelStatsHandlerOnce.Do(func() {
+ otelStatsHandler = otelgrpc.NewClientHandler()
+ })
+ return otelStatsHandler
+}
+
// ClientCertProvider is a function that returns a TLS client certificate to be
// used when opening TLS connections. It follows the same semantics as
// [crypto/tls.Config.GetClientCertificate].
@@ -90,6 +118,11 @@ type Options struct {
// APIKey specifies an API key to be used as the basis for authentication.
// If set DetectOpts are ignored.
APIKey string
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
// InternalOptions are NOT meant to be set directly by consumers of this
// package, they should only be set by generated client code.
@@ -105,6 +138,10 @@ func (o *Options) client() *http.Client {
return nil
}
+func (o *Options) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
func (o *Options) validate() error {
if o == nil {
return errors.New("grpctransport: opts required to be non-nil")
@@ -146,6 +183,9 @@ func (o *Options) resolveDetectOptions() *credentials.DetectOptions {
do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
do.TokenURL = credentials.GoogleMTLSTokenURL
}
+ if do.Logger == nil {
+ do.Logger = o.logger()
+ }
return do
}
@@ -164,6 +204,10 @@ type InternalOptions struct {
EnableDirectPathXds bool
// EnableJWTWithScope specifies if scope can be used with self-signed JWT.
EnableJWTWithScope bool
+ // AllowHardBoundTokens allows libraries to request a hard-bound token.
+ // Obtaining hard-bound tokens requires the connection to be established
+ // using either ALTS or mTLS with S2A.
+ AllowHardBoundTokens []string
// DefaultAudience specifies a default audience to be used as the audience
// field ("aud") for the JWT token authentication.
DefaultAudience string
@@ -214,6 +258,7 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
ClientCertProvider: opts.ClientCertProvider,
Client: opts.client(),
UniverseDomain: opts.UniverseDomain,
+ Logger: opts.logger(),
}
if io := opts.InternalOptions; io != nil {
tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
@@ -221,13 +266,13 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
tOpts.EnableDirectPath = io.EnableDirectPath
tOpts.EnableDirectPathXds = io.EnableDirectPathXds
}
- transportCreds, endpoint, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts)
+ transportCreds, err := transport.GetGRPCTransportCredsAndEndpoint(tOpts)
if err != nil {
return nil, err
}
if !secure {
- transportCreds = grpcinsecure.NewCredentials()
+ transportCreds.TransportCredentials = grpcinsecure.NewCredentials()
}
// Initialize gRPC dial options with transport-level security options.
@@ -256,8 +301,21 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
if opts.Credentials != nil {
creds = opts.Credentials
} else {
+ // This condition is only met for non-DirectPath clients because
+ // TransportTypeMTLSS2A is used only when InternalOptions.EnableDirectPath
+ // is false.
+ optsClone := opts.resolveDetectOptions()
+ if transportCreds.TransportType == transport.TransportTypeMTLSS2A {
+ // Check that the client allows requesting hard-bound token for the transport type mTLS using S2A.
+ for _, ev := range opts.InternalOptions.AllowHardBoundTokens {
+ if ev == "MTLS_S2A" {
+ optsClone.TokenBindingType = credentials.MTLSHardBinding
+ break
+ }
+ }
+ }
var err error
- creds, err = credentials.DetectDefault(opts.resolveDetectOptions())
+ creds, err = credentials.DetectDefault(optsClone)
if err != nil {
return nil, err
}
@@ -271,7 +329,10 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
if metadata == nil {
metadata = make(map[string]string, 1)
}
- metadata[quotaProjectHeaderKey] = qp
+ // Don't overwrite user specified quota
+ if _, ok := metadata[quotaProjectHeaderKey]; !ok {
+ metadata[quotaProjectHeaderKey] = qp
+ }
}
grpcOpts = append(grpcOpts,
grpc.WithPerRPCCredentials(&grpcCredentialsProvider{
@@ -280,18 +341,20 @@ func dial(ctx context.Context, secure bool, opts *Options) (*grpc.ClientConn, er
clientUniverseDomain: opts.UniverseDomain,
}),
)
-
// Attempt Direct Path
- grpcOpts, endpoint = configureDirectPath(grpcOpts, opts, endpoint, creds)
+ grpcOpts, transportCreds.Endpoint, err = configureDirectPath(grpcOpts, opts, transportCreds.Endpoint, creds)
+ if err != nil {
+ return nil, err
+ }
}
// Add tracing, but before the other options, so that clients can override the
// gRPC stats handler.
// This assumes that gRPC options are processed in order, left to right.
- grpcOpts = addOCStatsHandler(grpcOpts, opts)
+ grpcOpts = addOpenTelemetryStatsHandler(grpcOpts, opts)
grpcOpts = append(grpcOpts, opts.GRPCDialOpts...)
- return grpc.DialContext(ctx, endpoint, grpcOpts...)
+ return grpc.DialContext(ctx, transportCreds.Endpoint, grpcOpts...)
}
// grpcKeyProvider satisfies https://pkg.go.dev/google.golang.org/grpc/credentials#PerRPCCredentials.
@@ -325,29 +388,39 @@ type grpcCredentialsProvider struct {
clientUniverseDomain string
}
-// getClientUniverseDomain returns the default service domain for a given Cloud universe.
-// The default value is "googleapis.com". This is the universe domain
-// configured for the client, which will be compared to the universe domain
-// that is separately configured for the credentials.
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
func (c *grpcCredentialsProvider) getClientUniverseDomain() string {
- if c.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
+ if c.clientUniverseDomain != "" {
+ return c.clientUniverseDomain
}
- return c.clientUniverseDomain
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
+ }
+ return internal.DefaultUniverseDomain
}
func (c *grpcCredentialsProvider) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
- credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx)
- if err != nil {
- return nil, err
- }
- if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
- return nil, err
- }
token, err := c.creds.Token(ctx)
if err != nil {
return nil, err
}
+ if token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
+ credentialsUniverseDomain, err := c.creds.UniverseDomain(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if err := transport.ValidateUniverseDomain(c.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
+ return nil, err
+ }
+ }
if c.secure {
ri, _ := grpccreds.RequestInfoFromContext(ctx)
if err = grpccreds.CheckSecurityLevel(ri.AuthInfo, grpccreds.PrivacyAndIntegrity); err != nil {
@@ -376,9 +449,9 @@ func (c *grpcCredentialsProvider) RequireTransportSecurity() bool {
return c.secure
}
-func addOCStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
+func addOpenTelemetryStatsHandler(dialOpts []grpc.DialOption, opts *Options) []grpc.DialOption {
if opts.DisableTelemetry {
return dialOpts
}
- return append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
+ return append(dialOpts, grpc.WithStatsHandler(otelGRPCStatsHandler()))
}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
index 969c8d4d2..5758e85b5 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/httptransport.go
@@ -12,18 +12,22 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+// Package httptransport provides functionality for managing HTTP client
+// connections to Google Cloud services.
package httptransport
import (
"crypto/tls"
"errors"
"fmt"
+ "log/slog"
"net/http"
"cloud.google.com/go/auth"
detect "cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
+ "github.com/googleapis/gax-go/v2/internallog"
)
// ClientCertProvider is a function that returns a TLS client certificate to be
@@ -67,6 +71,11 @@ type Options struct {
// configured for the client, which will be compared to the universe domain
// that is separately configured for the credentials.
UniverseDomain string
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
// InternalOptions are NOT meant to be set directly by consumers of this
// package, they should only be set by generated client code.
@@ -99,6 +108,10 @@ func (o *Options) client() *http.Client {
return nil
}
+func (o *Options) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
func (o *Options) resolveDetectOptions() *detect.DetectOptions {
io := o.InternalOptions
// soft-clone these so we are not updating a ref the user holds and may reuse
@@ -123,6 +136,9 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions {
do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
do.TokenURL = detect.GoogleMTLSTokenURL
}
+ if do.Logger == nil {
+ do.Logger = o.logger()
+ }
return do
}
@@ -145,14 +161,21 @@ type InternalOptions struct {
// service.
DefaultScopes []string
// SkipValidation bypasses validation on Options. It should only be used
- // internally for clients that needs more control over their transport.
+ // internally for clients that need more control over their transport.
SkipValidation bool
+ // SkipUniverseDomainValidation skips the verification that the universe
+ // domain configured for the client matches the universe domain configured
+ // for the credentials. It should only be used internally for clients that
+ // need more control over their transport. The default is false.
+ SkipUniverseDomainValidation bool
}
// AddAuthorizationMiddleware adds a middleware to the provided client's
// transport that sets the Authorization header with the value produced by the
// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
// if client or creds is nil.
+//
+// This function does not support setting a universe domain value on the client.
func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
if client == nil || creds == nil {
return fmt.Errorf("httptransport: client and tp must not be nil")
@@ -171,7 +194,6 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er
client.Transport = &authTransport{
creds: creds,
base: base,
- // TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls.
}
return nil
}
@@ -189,6 +211,7 @@ func NewClient(opts *Options) (*http.Client, error) {
ClientCertProvider: opts.ClientCertProvider,
Client: opts.client(),
UniverseDomain: opts.UniverseDomain,
+ Logger: opts.logger(),
}
if io := opts.InternalOptions; io != nil {
tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate
diff --git a/vendor/cloud.google.com/go/auth/httptransport/trace.go b/vendor/cloud.google.com/go/auth/httptransport/trace.go
deleted file mode 100644
index 467c477c0..000000000
--- a/vendor/cloud.google.com/go/auth/httptransport/trace.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptransport
-
-import (
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "net/http"
- "strconv"
- "strings"
-
- "go.opencensus.io/trace"
- "go.opencensus.io/trace/propagation"
-)
-
-const (
- httpHeaderMaxSize = 200
- cloudTraceHeader = `X-Cloud-Trace-Context`
-)
-
-// asserts the httpFormat fulfills this foreign interface
-var _ propagation.HTTPFormat = (*httpFormat)(nil)
-
-// httpFormat implements propagation.httpFormat to propagate
-// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
-type httpFormat struct{}
-
-// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
-func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
- h := req.Header.Get(cloudTraceHeader)
- // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
- // Return if the header is empty or missing, or if the header is unreasonably
- // large, to avoid making unnecessary copies of a large string.
- if h == "" || len(h) > httpHeaderMaxSize {
- return trace.SpanContext{}, false
- }
-
- // Parse the trace id field.
- slash := strings.Index(h, `/`)
- if slash == -1 {
- return trace.SpanContext{}, false
- }
- tid, h := h[:slash], h[slash+1:]
-
- buf, err := hex.DecodeString(tid)
- if err != nil {
- return trace.SpanContext{}, false
- }
- copy(sc.TraceID[:], buf)
-
- // Parse the span id field.
- spanstr := h
- semicolon := strings.Index(h, `;`)
- if semicolon != -1 {
- spanstr, h = h[:semicolon], h[semicolon+1:]
- }
- sid, err := strconv.ParseUint(spanstr, 10, 64)
- if err != nil {
- return trace.SpanContext{}, false
- }
- binary.BigEndian.PutUint64(sc.SpanID[:], sid)
-
- // Parse the options field, options field is optional.
- if !strings.HasPrefix(h, "o=") {
- return sc, true
- }
- o, err := strconv.ParseUint(h[2:], 10, 32)
- if err != nil {
- return trace.SpanContext{}, false
- }
- sc.TraceOptions = trace.TraceOptions(o)
- return sc, true
-}
-
-// SpanContextToRequest modifies the given request to include a Cloud Trace header.
-func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
- sid := binary.BigEndian.Uint64(sc.SpanID[:])
- header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
- req.Header.Set(cloudTraceHeader, header)
-}
diff --git a/vendor/cloud.google.com/go/auth/httptransport/transport.go b/vendor/cloud.google.com/go/auth/httptransport/transport.go
index 94caeb00f..ee215b6dc 100644
--- a/vendor/cloud.google.com/go/auth/httptransport/transport.go
+++ b/vendor/cloud.google.com/go/auth/httptransport/transport.go
@@ -19,6 +19,7 @@ import (
"crypto/tls"
"net"
"net/http"
+ "os"
"time"
"cloud.google.com/go/auth"
@@ -26,12 +27,12 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"cloud.google.com/go/auth/internal/transport/cert"
- "go.opencensus.io/plugin/ochttp"
+ "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
)
const (
- quotaProjectHeaderKey = "X-Goog-User-Project"
+ quotaProjectHeaderKey = "X-goog-user-project"
)
func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, error) {
@@ -41,7 +42,7 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
headers: headers,
}
var trans http.RoundTripper = ht
- trans = addOCTransport(trans, opts)
+ trans = addOpenTelemetryTransport(trans, opts)
switch {
case opts.DisableAuthentication:
// Do nothing.
@@ -76,13 +77,21 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
if headers == nil {
headers = make(map[string][]string, 1)
}
- headers.Set(quotaProjectHeaderKey, qp)
+ // Don't overwrite user specified quota
+ if v := headers.Get(quotaProjectHeaderKey); v == "" {
+ headers.Set(quotaProjectHeaderKey, qp)
+ }
+ }
+ var skipUD bool
+ if iOpts := opts.InternalOptions; iOpts != nil {
+ skipUD = iOpts.SkipUniverseDomainValidation
}
creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
trans = &authTransport{
- base: trans,
- creds: creds,
- clientUniverseDomain: opts.UniverseDomain,
+ base: trans,
+ creds: creds,
+ clientUniverseDomain: opts.UniverseDomain,
+ skipUniverseDomainValidation: skipUD,
}
}
return trans, nil
@@ -94,7 +103,11 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
// http.DefaultTransport.
// If TLSCertificate is available, set TLSClientConfig as well.
func defaultBaseTransport(clientCertSource cert.Provider, dialTLSContext func(context.Context, string, string) (net.Conn, error)) http.RoundTripper {
- trans := http.DefaultTransport.(*http.Transport).Clone()
+ defaultTransport, ok := http.DefaultTransport.(*http.Transport)
+ if !ok {
+ defaultTransport = transport.BaseTransport()
+ }
+ trans := defaultTransport.Clone()
trans.MaxIdleConnsPerHost = 100
if clientCertSource != nil {
@@ -155,29 +168,37 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return rt.RoundTrip(&newReq)
}
-func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
+func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
if opts.DisableTelemetry {
return trans
}
- return &ochttp.Transport{
- Base: trans,
- Propagation: &httpFormat{},
- }
+ return otelhttp.NewTransport(trans)
}
type authTransport struct {
- creds *auth.Credentials
- base http.RoundTripper
- clientUniverseDomain string
+ creds *auth.Credentials
+ base http.RoundTripper
+ clientUniverseDomain string
+ skipUniverseDomainValidation bool
}
-// getClientUniverseDomain returns the universe domain configured for the client.
-// The default value is "googleapis.com".
+// getClientUniverseDomain returns the default service domain for a given Cloud
+// universe, with the following precedence:
+//
+// 1. A non-empty option.WithUniverseDomain or similar client option.
+// 2. A non-empty environment variable GOOGLE_CLOUD_UNIVERSE_DOMAIN.
+// 3. The default value "googleapis.com".
+//
+// This is the universe domain configured for the client, which will be compared
+// to the universe domain that is separately configured for the credentials.
func (t *authTransport) getClientUniverseDomain() string {
- if t.clientUniverseDomain == "" {
- return internal.DefaultUniverseDomain
+ if t.clientUniverseDomain != "" {
+ return t.clientUniverseDomain
+ }
+ if envUD := os.Getenv(internal.UniverseDomainEnvVar); envUD != "" {
+ return envUD
}
- return t.clientUniverseDomain
+ return internal.DefaultUniverseDomain
}
// RoundTrip authorizes and authenticates the request with an
@@ -193,17 +214,19 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
}
}()
}
- credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
- if err != nil {
- return nil, err
- }
- if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
- return nil, err
- }
token, err := t.creds.Token(req.Context())
if err != nil {
return nil, err
}
+ if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
+ credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
+ if err != nil {
+ return nil, err
+ }
+ if err := transport.ValidateUniverseDomain(t.getClientUniverseDomain(), credentialsUniverseDomain); err != nil {
+ return nil, err
+ }
+ }
req2 := req.Clone(req.Context())
SetAuthHeader(token, req2)
reqBodyClosed = true
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/compute.go b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
new file mode 100644
index 000000000..05c7e8bdd
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/compute.go
@@ -0,0 +1,65 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "log"
+ "runtime"
+ "strings"
+ "sync"
+)
+
+var (
+ vmOnGCEOnce sync.Once
+ vmOnGCE bool
+)
+
+// OnComputeEngine returns whether the client is running on GCE.
+//
+// This is a copy of the gRPC internal googlecloud.OnGCE() func at:
+// https://github.com/grpc/grpc-go/blob/master/internal/googlecloud/googlecloud.go
+// The functionality is similar to the metadata.OnGCE() func at:
+// https://github.com/googleapis/google-cloud-go/blob/main/compute/metadata/metadata.go
+// The difference is that OnComputeEngine() does not perform HTTP or DNS check on the metadata server.
+// In particular, OnComputeEngine() will return false on Serverless.
+func OnComputeEngine() bool {
+ vmOnGCEOnce.Do(func() {
+ mf, err := manufacturer()
+ if err != nil {
+ log.Printf("Failed to read manufacturer, vmOnGCE=false: %v", err)
+ return
+ }
+ vmOnGCE = isRunningOnGCE(mf, runtime.GOOS)
+ })
+ return vmOnGCE
+}
+
+// isRunningOnGCE checks whether the local system, without doing a network request, is
+// running on GCP.
+func isRunningOnGCE(manufacturer []byte, goos string) bool {
+ name := string(manufacturer)
+ switch goos {
+ case "linux":
+ name = strings.TrimSpace(name)
+ return name == "Google" || name == "Google Compute Engine"
+ case "windows":
+ name = strings.Replace(name, " ", "", -1)
+ name = strings.Replace(name, "\n", "", -1)
+ name = strings.Replace(name, "\r", "", -1)
+ return name == "Google"
+ default:
+ return false
+ }
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
new file mode 100644
index 000000000..af490bf4f
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer.go
@@ -0,0 +1,22 @@
+//go:build !(linux || windows)
+// +build !linux,!windows
+
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+func manufacturer() ([]byte, error) {
+ return nil, nil
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
new file mode 100644
index 000000000..d92178df8
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_linux.go
@@ -0,0 +1,23 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import "os"
+
+const linuxProductNameFile = "/sys/class/dmi/id/product_name"
+
+func manufacturer() ([]byte, error) {
+ return os.ReadFile(linuxProductNameFile)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
new file mode 100644
index 000000000..16be9df30
--- /dev/null
+++ b/vendor/cloud.google.com/go/auth/internal/compute/manufacturer_windows.go
@@ -0,0 +1,46 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compute
+
+import (
+ "errors"
+ "os/exec"
+ "regexp"
+ "strings"
+)
+
+const (
+ windowsCheckCommand = "powershell.exe"
+ windowsCheckCommandArgs = "Get-WmiObject -Class Win32_BIOS"
+ powershellOutputFilter = "Manufacturer"
+ windowsManufacturerRegex = ":(.*)"
+)
+
+func manufacturer() ([]byte, error) {
+ cmd := exec.Command(windowsCheckCommand, windowsCheckCommandArgs)
+ out, err := cmd.Output()
+ if err != nil {
+ return nil, err
+ }
+ for _, line := range strings.Split(strings.TrimSuffix(string(out), "\n"), "\n") {
+ if strings.HasPrefix(line, powershellOutputFilter) {
+ re := regexp.MustCompile(windowsManufacturerRegex)
+ name := re.FindString(line)
+ name = strings.TrimLeft(name, ":")
+ return []byte(name), nil
+ }
+ }
+ return nil, errors.New("cannot determine the machine's manufacturer")
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
index 69e30779f..606347304 100644
--- a/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
+++ b/vendor/cloud.google.com/go/auth/internal/credsfile/filetype.go
@@ -90,19 +90,20 @@ type ExternalAccountAuthorizedUserFile struct {
// CredentialSource stores the information necessary to retrieve the credentials for the STS exchange.
//
-// One field amongst File, URL, and Executable should be filled, depending on the kind of credential in question.
+// One field amongst File, URL, Certificate, and Executable should be filled, depending on the kind of credential in question.
// The EnvironmentID should start with AWS if being used for an AWS credential.
type CredentialSource struct {
- File string `json:"file"`
- URL string `json:"url"`
- Headers map[string]string `json:"headers"`
- Executable *ExecutableConfig `json:"executable,omitempty"`
- EnvironmentID string `json:"environment_id"`
- RegionURL string `json:"region_url"`
- RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
- CredVerificationURL string `json:"cred_verification_url"`
- IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
- Format *Format `json:"format,omitempty"`
+ File string `json:"file"`
+ URL string `json:"url"`
+ Headers map[string]string `json:"headers"`
+ Executable *ExecutableConfig `json:"executable,omitempty"`
+ Certificate *CertificateConfig `json:"certificate"`
+ EnvironmentID string `json:"environment_id"` // TODO: Make type for this
+ RegionURL string `json:"region_url"`
+ RegionalCredVerificationURL string `json:"regional_cred_verification_url"`
+ CredVerificationURL string `json:"cred_verification_url"`
+ IMDSv2SessionTokenURL string `json:"imdsv2_session_token_url"`
+ Format *Format `json:"format,omitempty"`
}
// Format describes the format of a [CredentialSource].
@@ -121,6 +122,14 @@ type ExecutableConfig struct {
OutputFile string `json:"output_file"`
}
+// CertificateConfig represents the options used to set up X509 based workload
+// [CredentialSource]
+type CertificateConfig struct {
+ UseDefaultCertificateConfig bool `json:"use_default_certificate_config"`
+ CertificateConfigLocation string `json:"certificate_config_location"`
+ TrustChainPath string `json:"trust_chain_path"`
+}
+
// ServiceAccountImpersonationInfo has impersonation configuration.
type ServiceAccountImpersonationInfo struct {
TokenLifetimeSeconds int `json:"token_lifetime_seconds"`
diff --git a/vendor/cloud.google.com/go/auth/internal/internal.go b/vendor/cloud.google.com/go/auth/internal/internal.go
index 8c328e2fb..6a8eab6eb 100644
--- a/vendor/cloud.google.com/go/auth/internal/internal.go
+++ b/vendor/cloud.google.com/go/auth/internal/internal.go
@@ -16,7 +16,7 @@ package internal
import (
"context"
- "crypto/rsa"
+ "crypto"
"crypto/x509"
"encoding/json"
"encoding/pem"
@@ -38,42 +38,61 @@ const (
// QuotaProjectEnvVar is the environment variable for setting the quota
// project.
QuotaProjectEnvVar = "GOOGLE_CLOUD_QUOTA_PROJECT"
- projectEnvVar = "GOOGLE_CLOUD_PROJECT"
- maxBodySize = 1 << 20
+ // UniverseDomainEnvVar is the environment variable for setting the default
+ // service domain for a given Cloud universe.
+ UniverseDomainEnvVar = "GOOGLE_CLOUD_UNIVERSE_DOMAIN"
+ projectEnvVar = "GOOGLE_CLOUD_PROJECT"
+ maxBodySize = 1 << 20
// DefaultUniverseDomain is the default value for universe domain.
// Universe domain is the default service domain for a given Cloud universe.
DefaultUniverseDomain = "googleapis.com"
)
-// CloneDefaultClient returns a [http.Client] with some good defaults.
-func CloneDefaultClient() *http.Client {
+type clonableTransport interface {
+ Clone() *http.Transport
+}
+
+// DefaultClient returns an [http.Client] with some defaults set. If
+// the current [http.DefaultTransport] is a [clonableTransport], as
+// is the case for an [*http.Transport], the clone will be used.
+// Otherwise the [http.DefaultTransport] is used directly.
+func DefaultClient() *http.Client {
+ if transport, ok := http.DefaultTransport.(clonableTransport); ok {
+ return &http.Client{
+ Transport: transport.Clone(),
+ Timeout: 30 * time.Second,
+ }
+ }
+
return &http.Client{
- Transport: http.DefaultTransport.(*http.Transport).Clone(),
+ Transport: http.DefaultTransport,
Timeout: 30 * time.Second,
}
}
// ParseKey converts the binary contents of a private key file
-// to an *rsa.PrivateKey. It detects whether the private key is in a
+// to an crypto.Signer. It detects whether the private key is in a
// PEM container or not. If so, it extracts the the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
-func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+func ParseKey(key []byte) (crypto.Signer, error) {
block, _ := pem.Decode(key)
if block != nil {
key = block.Bytes
}
- parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ var parsedKey crypto.PrivateKey
+ var err error
+ parsedKey, err = x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
}
}
- parsed, ok := parsedKey.(*rsa.PrivateKey)
+ parsed, ok := parsedKey.(crypto.Signer)
if !ok {
- return nil, errors.New("private key is invalid")
+ return nil, errors.New("private key is not a signer")
}
return parsed, nil
}
@@ -162,6 +181,7 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) {
// ComputeUniverseDomainProvider fetches the credentials universe domain from
// the google cloud metadata service.
type ComputeUniverseDomainProvider struct {
+ MetadataClient *metadata.Client
universeDomainOnce sync.Once
universeDomain string
universeDomainErr error
@@ -171,7 +191,7 @@ type ComputeUniverseDomainProvider struct {
// metadata service.
func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
c.universeDomainOnce.Do(func() {
- c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
+ c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient)
})
if c.universeDomainErr != nil {
return "", c.universeDomainErr
@@ -180,13 +200,14 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string
}
// httpGetMetadataUniverseDomain is a package var for unit test substitution.
-var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
- client := metadata.NewClient(&http.Client{Timeout: time.Second})
- return client.GetWithContext(ctx, "universe/universe_domain")
+var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) {
+ ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
+ defer cancel()
+ return client.GetWithContext(ctx, "universe/universe-domain")
}
-func getMetadataUniverseDomain(ctx context.Context) (string, error) {
- universeDomain, err := httpGetMetadataUniverseDomain(ctx)
+func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) {
+ universeDomain, err := httpGetMetadataUniverseDomain(ctx, client)
if err == nil {
return universeDomain, nil
}
@@ -196,3 +217,9 @@ func getMetadataUniverseDomain(ctx context.Context) (string, error) {
}
return "", err
}
+
+// FormatIAMServiceAccountResource sets a service account name in an IAM resource
+// name.
+func FormatIAMServiceAccountResource(name string) string {
+ return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
+}
diff --git a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
index dc28b3c3b..9bd55f510 100644
--- a/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
+++ b/vendor/cloud.google.com/go/auth/internal/jwt/jwt.go
@@ -111,7 +111,7 @@ func (c *Claims) encode() (string, error) {
}
// EncodeJWS encodes the data using the provided key as a JSON web signature.
-func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
+func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) {
head, err := header.encode()
if err != nil {
return "", err
@@ -123,7 +123,7 @@ func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
ss := fmt.Sprintf("%s.%s", head, claims)
h := sha256.New()
h.Write([]byte(ss))
- sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
+ sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256)
if err != nil {
return "", err
}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cba.go b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
index d94e0af08..14bca966e 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cba.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cba.go
@@ -17,7 +17,10 @@ package transport
import (
"context"
"crypto/tls"
+ "crypto/x509"
"errors"
+ "log"
+ "log/slog"
"net"
"net/http"
"net/url"
@@ -28,7 +31,6 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport/cert"
"github.com/google/s2a-go"
- "github.com/google/s2a-go/fallback"
"google.golang.org/grpc/credentials"
)
@@ -44,11 +46,19 @@ const (
googleAPIUseMTLSOld = "GOOGLE_API_USE_MTLS"
universeDomainPlaceholder = "UNIVERSE_DOMAIN"
+
+ mtlsMDSRoot = "/run/google-mds-mtls/root.crt"
+ mtlsMDSKey = "/run/google-mds-mtls/client.key"
)
-var (
- mdsMTLSAutoConfigSource mtlsConfigSource
- errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com")
+// Type represents the type of transport used.
+type Type int
+
+const (
+ // TransportTypeUnknown represents an unknown transport type and is the default option.
+ TransportTypeUnknown Type = iota
+ // TransportTypeMTLSS2A represents the mTLS transport type using S2A.
+ TransportTypeMTLSS2A
)
// Options is a struct that is duplicated information from the individual
@@ -56,13 +66,14 @@ var (
// fields on httptransport.Options and grpctransport.Options.
type Options struct {
Endpoint string
- DefaultMTLSEndpoint string
DefaultEndpointTemplate string
+ DefaultMTLSEndpoint string
ClientCertProvider cert.Provider
Client *http.Client
UniverseDomain string
EnableDirectPath bool
EnableDirectPathXds bool
+ Logger *slog.Logger
}
// getUniverseDomain returns the default service domain for a given Cloud
@@ -90,6 +101,16 @@ func (o *Options) defaultEndpoint() string {
return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
}
+// defaultMTLSEndpoint returns the DefaultMTLSEndpointTemplate merged with the
+// universe domain if the DefaultMTLSEndpointTemplate is set, otherwise returns an
+// empty string.
+func (o *Options) defaultMTLSEndpoint() string {
+ if o.DefaultMTLSEndpoint == "" {
+ return ""
+ }
+ return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1)
+}
+
// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
// default endpoint.
func (o *Options) mergedEndpoint() (string, error) {
@@ -108,39 +129,55 @@ func fixScheme(baseURL string) string {
return baseURL
}
+// GRPCTransportCredentials embeds interface TransportCredentials with additional data.
+type GRPCTransportCredentials struct {
+ credentials.TransportCredentials
+ Endpoint string
+ TransportType Type
+}
+
// GetGRPCTransportCredsAndEndpoint returns an instance of
// [google.golang.org/grpc/credentials.TransportCredentials], and the
-// corresponding endpoint to use for GRPC client.
-func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) {
+// corresponding endpoint and transport type to use for GRPC client.
+func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, error) {
config, err := getTransportConfig(opts)
if err != nil {
- return nil, "", err
+ return nil, err
}
defaultTransportCreds := credentials.NewTLS(&tls.Config{
GetClientCertificate: config.clientCertSource,
})
- if config.s2aAddress == "" {
- return defaultTransportCreds, config.endpoint, nil
- }
- var fallbackOpts *s2a.FallbackOptions
- // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
- if fallbackHandshake, err := fallback.DefaultFallbackClientHandshakeFunc(config.endpoint); err == nil {
- fallbackOpts = &s2a.FallbackOptions{
- FallbackClientHandshakeFunc: fallbackHandshake,
+ var s2aAddr string
+ var transportCredsForS2A credentials.TransportCredentials
+
+ if config.mtlsS2AAddress != "" {
+ s2aAddr = config.mtlsS2AAddress
+ transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
+ if err != nil {
+ log.Printf("Loading MTLS MDS credentials failed: %v", err)
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
+ }
}
+ } else if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
}
s2aTransportCreds, err := s2a.NewClientCreds(&s2a.ClientOptions{
- S2AAddress: config.s2aAddress,
- FallbackOpts: fallbackOpts,
+ S2AAddress: s2aAddr,
+ TransportCreds: transportCredsForS2A,
})
if err != nil {
// Use default if we cannot initialize S2A client transport credentials.
- return defaultTransportCreds, config.endpoint, nil
+ return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
}
- return s2aTransportCreds, config.s2aMTLSEndpoint, nil
+ return &GRPCTransportCredentials{s2aTransportCreds, config.s2aMTLSEndpoint, TransportTypeMTLSS2A}, nil
}
// GetHTTPTransportConfig returns a client certificate source and a function for
@@ -151,30 +188,58 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context,
return nil, nil, err
}
- if config.s2aAddress == "" {
- return config.clientCertSource, nil, nil
- }
-
- var fallbackOpts *s2a.FallbackOptions
- // In case of S2A failure, fall back to the endpoint that would've been used without S2A.
- if fallbackURL, err := url.Parse(config.endpoint); err == nil {
- if fallbackDialer, fallbackServerAddr, err := fallback.DefaultFallbackDialerAndAddress(fallbackURL.Hostname()); err == nil {
- fallbackOpts = &s2a.FallbackOptions{
- FallbackDialer: &s2a.FallbackDialer{
- Dialer: fallbackDialer,
- ServerAddr: fallbackServerAddr,
- },
+ var s2aAddr string
+ var transportCredsForS2A credentials.TransportCredentials
+
+ if config.mtlsS2AAddress != "" {
+ s2aAddr = config.mtlsS2AAddress
+ transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
+ if err != nil {
+ log.Printf("Loading MTLS MDS credentials failed: %v", err)
+ if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
}
}
+ } else if config.s2aAddress != "" {
+ s2aAddr = config.s2aAddress
+ } else {
+ return config.clientCertSource, nil, nil
}
dialTLSContextFunc := s2a.NewS2ADialTLSContextFunc(&s2a.ClientOptions{
- S2AAddress: config.s2aAddress,
- FallbackOpts: fallbackOpts,
+ S2AAddress: s2aAddr,
+ TransportCreds: transportCredsForS2A,
})
return nil, dialTLSContextFunc, nil
}
+func loadMTLSMDSTransportCreds(mtlsMDSRootFile, mtlsMDSKeyFile string) (credentials.TransportCredentials, error) {
+ rootPEM, err := os.ReadFile(mtlsMDSRootFile)
+ if err != nil {
+ return nil, err
+ }
+ caCertPool := x509.NewCertPool()
+ ok := caCertPool.AppendCertsFromPEM(rootPEM)
+ if !ok {
+ return nil, errors.New("failed to load MTLS MDS root certificate")
+ }
+ // The mTLS MDS credentials are formatted as the concatenation of a PEM-encoded certificate chain
+ // followed by a PEM-encoded private key. For this reason, the concatenation is passed in to the
+ // tls.X509KeyPair function as both the certificate chain and private key arguments.
+ cert, err := tls.LoadX509KeyPair(mtlsMDSKeyFile, mtlsMDSKeyFile)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig := tls.Config{
+ RootCAs: caCertPool,
+ Certificates: []tls.Certificate{cert},
+ MinVersion: tls.VersionTLS13,
+ }
+ return credentials.NewTLS(&tlsConfig), nil
+}
+
func getTransportConfig(opts *Options) (*transportConfig, error) {
clientCertSource, err := GetClientCertificateProvider(opts)
if err != nil {
@@ -192,21 +257,18 @@ func getTransportConfig(opts *Options) (*transportConfig, error) {
if !shouldUseS2A(clientCertSource, opts) {
return &defaultTransportConfig, nil
}
- if !opts.isUniverseDomainGDU() {
- return nil, errUniverseNotSupportedMTLS
- }
- s2aMTLSEndpoint := opts.DefaultMTLSEndpoint
-
- s2aAddress := GetS2AAddress()
- if s2aAddress == "" {
+ s2aAddress := GetS2AAddress(opts.Logger)
+ mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger)
+ if s2aAddress == "" && mtlsS2AAddress == "" {
return &defaultTransportConfig, nil
}
return &transportConfig{
clientCertSource: clientCertSource,
endpoint: endpoint,
s2aAddress: s2aAddress,
- s2aMTLSEndpoint: s2aMTLSEndpoint,
+ mtlsS2AAddress: mtlsS2AAddress,
+ s2aMTLSEndpoint: opts.defaultMTLSEndpoint(),
}, nil
}
@@ -241,8 +303,10 @@ type transportConfig struct {
clientCertSource cert.Provider
// The corresponding endpoint to use based on client certificate source.
endpoint string
- // The S2A address if it can be used, otherwise an empty string.
+ // The plaintext S2A address if it can be used, otherwise an empty string.
s2aAddress string
+ // The MTLS S2A address if it can be used, otherwise an empty string.
+ mtlsS2AAddress string
// The MTLS endpoint to use with S2A.
s2aMTLSEndpoint string
}
@@ -250,24 +314,23 @@ type transportConfig struct {
// getEndpoint returns the endpoint for the service, taking into account the
// user-provided endpoint override "settings.Endpoint".
//
-// If no endpoint override is specified, we will either return the default endpoint or
-// the default mTLS endpoint if a client certificate is available.
+// If no endpoint override is specified, we will either return the default
+// endpoint or the default mTLS endpoint if a client certificate is available.
//
-// You can override the default endpoint choice (mtls vs. regular) by setting the
-// GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
+// You can override the default endpoint choice (mTLS vs. regular) by setting
+// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
//
// If the endpoint override is an address (host:port) rather than full base
// URL (ex. https://...), then the user-provided address will be merged into
// the default endpoint. For example, WithEndpoint("myhost:8000") and
-// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz"
+// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return
+// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS
+// endpoint.
func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
if opts.Endpoint == "" {
mtlsMode := getMTLSMode()
if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
- if !opts.isUniverseDomainGDU() {
- return "", errUniverseNotSupportedMTLS
- }
- return opts.DefaultMTLSEndpoint, nil
+ return opts.defaultMTLSEndpoint(), nil
}
return opts.defaultEndpoint(), nil
}
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
index 366515916..6c954ae19 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/enterprise_cert.go
@@ -16,7 +16,6 @@ package cert
import (
"crypto/tls"
- "errors"
"github.com/googleapis/enterprise-certificate-proxy/client"
)
@@ -37,10 +36,9 @@ type ecpSource struct {
func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
key, err := client.Cred(configFilePath)
if err != nil {
- if errors.Is(err, client.ErrCredUnavailable) {
- return nil, errSourceUnavailable
- }
- return nil, err
+ // TODO(codyoss): once this is fixed upstream can handle this error a
+ // little better here. But be safe for now and assume unavailable.
+ return nil, errSourceUnavailable
}
return (&ecpSource{
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
index 3227aba28..738cb2161 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/secureconnect_cert.go
@@ -62,11 +62,11 @@ func NewSecureConnectProvider(configFilePath string) (Provider, error) {
file, err := os.ReadFile(configFilePath)
if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- // Config file missing means Secure Connect is not supported.
- return nil, errSourceUnavailable
- }
- return nil, err
+ // Config file missing means Secure Connect is not supported.
+ // There are non-os.ErrNotExist errors that may be returned.
+ // (e.g. if the home directory is /dev/null, *nix systems will
+ // return ENOTDIR instead of ENOENT)
+ return nil, errSourceUnavailable
}
var metadata secureConnectMetadata
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
index e8675bf82..b2a3be23c 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/cert/workload_cert.go
@@ -37,6 +37,36 @@ type certificateConfig struct {
CertConfigs certConfigs `json:"cert_configs"`
}
+// getconfigFilePath determines the path to the certificate configuration file.
+// It first checks for the presence of an environment variable that specifies
+// the file path. If the environment variable is not set, it falls back to
+// a default configuration file path.
+func getconfigFilePath() string {
+ envFilePath := util.GetConfigFilePathFromEnv()
+ if envFilePath != "" {
+ return envFilePath
+ }
+ return util.GetDefaultConfigFilePath()
+
+}
+
+// GetCertificatePath retrieves the certificate file path from the provided
+// configuration file. If the configFilePath is empty, it attempts to load
+// the configuration from a well-known gcloud location.
+// This function is exposed to allow other packages, such as the
+// externalaccount package, to retrieve the certificate path without needing
+// to load the entire certificate configuration.
+func GetCertificatePath(configFilePath string) (string, error) {
+ if configFilePath == "" {
+ configFilePath = getconfigFilePath()
+ }
+ certFile, _, err := getCertAndKeyFiles(configFilePath)
+ if err != nil {
+ return "", err
+ }
+ return certFile, nil
+}
+
// NewWorkloadX509CertProvider creates a certificate source
// that reads a certificate and private key file from the local file system.
// This is intended to be used for workload identity federation.
@@ -47,14 +77,8 @@ type certificateConfig struct {
// a well-known gcloud location.
func NewWorkloadX509CertProvider(configFilePath string) (Provider, error) {
if configFilePath == "" {
- envFilePath := util.GetConfigFilePathFromEnv()
- if envFilePath != "" {
- configFilePath = envFilePath
- } else {
- configFilePath = util.GetDefaultConfigFilePath()
- }
+ configFilePath = getconfigFilePath()
}
-
certFile, keyFile, err := getCertAndKeyFiles(configFilePath)
if err != nil {
return nil, err
@@ -82,10 +106,7 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo)
func getCertAndKeyFiles(configFilePath string) (string, string, error) {
jsonFile, err := os.Open(configFilePath)
if err != nil {
- if errors.Is(err, os.ErrNotExist) {
- return "", "", errSourceUnavailable
- }
- return "", "", err
+ return "", "", errSourceUnavailable
}
byteValue, err := io.ReadAll(jsonFile)
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
index 2ed532deb..a63309956 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/s2a.go
@@ -15,12 +15,14 @@
package transport
import (
+ "context"
"encoding/json"
+ "fmt"
"log"
+ "log/slog"
"os"
"strconv"
"sync"
- "time"
"cloud.google.com/go/auth/internal/transport/cert"
"cloud.google.com/go/compute/metadata"
@@ -31,41 +33,38 @@ const (
)
var (
- // The period an MTLS config can be reused before needing refresh.
- configExpiry = time.Hour
+ mtlsConfiguration *mtlsConfig
- // mdsMTLSAutoConfigSource is an instance of reuseMTLSConfigSource, with metadataMTLSAutoConfig as its config source.
mtlsOnce sync.Once
)
// GetS2AAddress returns the S2A address to be reached via plaintext connection.
// Returns empty string if not set or invalid.
-func GetS2AAddress() string {
- c, err := getMetadataMTLSAutoConfig().Config()
- if err != nil {
- return ""
- }
- if !c.Valid() {
+func GetS2AAddress(logger *slog.Logger) string {
+ getMetadataMTLSAutoConfig(logger)
+ if !mtlsConfiguration.valid() {
return ""
}
- return c.S2A.PlaintextAddress
+ return mtlsConfiguration.S2A.PlaintextAddress
}
-type mtlsConfigSource interface {
- Config() (*mtlsConfig, error)
+// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection.
+// Returns empty string if not set or invalid.
+func GetMTLSS2AAddress(logger *slog.Logger) string {
+ getMetadataMTLSAutoConfig(logger)
+ if !mtlsConfiguration.valid() {
+ return ""
+ }
+ return mtlsConfiguration.S2A.MTLSAddress
}
// mtlsConfig contains the configuration for establishing MTLS connections with Google APIs.
type mtlsConfig struct {
- S2A *s2aAddresses `json:"s2a"`
- Expiry time.Time
+ S2A *s2aAddresses `json:"s2a"`
}
-func (c *mtlsConfig) Valid() bool {
- return c != nil && c.S2A != nil && !c.expired()
-}
-func (c *mtlsConfig) expired() bool {
- return c.Expiry.Before(time.Now())
+func (c *mtlsConfig) valid() bool {
+ return c != nil && c.S2A != nil
}
// s2aAddresses contains the plaintext and/or MTLS S2A addresses.
@@ -76,80 +75,39 @@ type s2aAddresses struct {
MTLSAddress string `json:"mtls_address"`
}
-// getMetadataMTLSAutoConfig returns mdsMTLSAutoConfigSource, which is backed by config from MDS with auto-refresh.
-func getMetadataMTLSAutoConfig() mtlsConfigSource {
+func getMetadataMTLSAutoConfig(logger *slog.Logger) {
+ var err error
mtlsOnce.Do(func() {
- mdsMTLSAutoConfigSource = &reuseMTLSConfigSource{
- src: &metadataMTLSAutoConfig{},
+ mtlsConfiguration, err = queryConfig(logger)
+ if err != nil {
+ log.Printf("Getting MTLS config failed: %v", err)
}
})
- return mdsMTLSAutoConfigSource
-}
-
-// reuseMTLSConfigSource caches a valid version of mtlsConfig, and uses `src` to refresh upon config expiry.
-// It implements the mtlsConfigSource interface, so calling Config() on it returns an mtlsConfig.
-type reuseMTLSConfigSource struct {
- src mtlsConfigSource // src.Config() is called when config is expired
- mu sync.Mutex // mutex guards config
- config *mtlsConfig // cached config
-}
-
-func (cs *reuseMTLSConfigSource) Config() (*mtlsConfig, error) {
- cs.mu.Lock()
- defer cs.mu.Unlock()
-
- if cs.config.Valid() {
- return cs.config, nil
- }
- c, err := cs.src.Config()
- if err != nil {
- return nil, err
- }
- cs.config = c
- return c, nil
}
-// metadataMTLSAutoConfig is an implementation of the interface mtlsConfigSource
-// It has the logic to query MDS and return an mtlsConfig
-type metadataMTLSAutoConfig struct{}
-
-var httpGetMetadataMTLSConfig = func() (string, error) {
- return metadata.Get(configEndpointSuffix)
+var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) {
+ metadataClient := metadata.NewWithOptions(&metadata.Options{
+ Logger: logger,
+ })
+ return metadataClient.GetWithContext(context.Background(), configEndpointSuffix)
}
-func (cs *metadataMTLSAutoConfig) Config() (*mtlsConfig, error) {
- resp, err := httpGetMetadataMTLSConfig()
+func queryConfig(logger *slog.Logger) (*mtlsConfig, error) {
+ resp, err := httpGetMetadataMTLSConfig(logger)
if err != nil {
- log.Printf("querying MTLS config from MDS endpoint failed: %v", err)
- return defaultMTLSConfig(), nil
+ return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err)
}
var config mtlsConfig
err = json.Unmarshal([]byte(resp), &config)
if err != nil {
- log.Printf("unmarshalling MTLS config from MDS endpoint failed: %v", err)
- return defaultMTLSConfig(), nil
+ return nil, fmt.Errorf("unmarshalling MTLS config from MDS endpoint failed: %w", err)
}
-
if config.S2A == nil {
- log.Printf("returned MTLS config from MDS endpoint is invalid: %v", config)
- return defaultMTLSConfig(), nil
+ return nil, fmt.Errorf("returned MTLS config from MDS endpoint is invalid: %v", config)
}
-
- // set new expiry
- config.Expiry = time.Now().Add(configExpiry)
return &config, nil
}
-func defaultMTLSConfig() *mtlsConfig {
- return &mtlsConfig{
- S2A: &s2aAddresses{
- PlaintextAddress: "",
- MTLSAddress: "",
- },
- Expiry: time.Now().Add(configExpiry),
- }
-}
-
func shouldUseS2A(clientCertSource cert.Provider, opts *Options) bool {
// If client cert is found, use that over S2A.
if clientCertSource != nil {
diff --git a/vendor/cloud.google.com/go/auth/internal/transport/transport.go b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
index 718a6b171..5c8721efa 100644
--- a/vendor/cloud.google.com/go/auth/internal/transport/transport.go
+++ b/vendor/cloud.google.com/go/auth/internal/transport/transport.go
@@ -37,6 +37,7 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt
}
newDo := &credentials.DetectOptions{
// Simple types
+ TokenBindingType: oldDo.TokenBindingType,
Audience: oldDo.Audience,
Subject: oldDo.Subject,
EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
@@ -46,9 +47,10 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt
UseSelfSignedJWT: oldDo.UseSelfSignedJWT,
UniverseDomain: oldDo.UniverseDomain,
- // These fields are are pointer types that we just want to use exactly
- // as the user set, copy the ref
+ // These fields are pointer types that we just want to use exactly as
+ // the user set, copy the ref
Client: oldDo.Client,
+ Logger: oldDo.Logger,
AuthHandlerOptions: oldDo.AuthHandlerOptions,
}
@@ -81,12 +83,14 @@ func ValidateUniverseDomain(clientUniverseDomain, credentialsUniverseDomain stri
// DefaultHTTPClientWithTLS constructs an HTTPClient using the provided tlsConfig, to support mTLS.
func DefaultHTTPClientWithTLS(tlsConfig *tls.Config) *http.Client {
- trans := baseTransport()
+ trans := BaseTransport()
trans.TLSClientConfig = tlsConfig
return &http.Client{Transport: trans}
}
-func baseTransport() *http.Transport {
+// BaseTransport returns a default [http.Transport] which can be used if
+// [http.DefaultTransport] has been overwritten.
+func BaseTransport() *http.Transport {
return &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
index ff9747bed..42716752e 100644
--- a/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/CHANGES.md
@@ -1,5 +1,47 @@
# Changelog
+## [0.2.8](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.7...auth/oauth2adapt/v0.2.8) (2025-03-17)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd))
+
+## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161)
+
+## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52))
+
+## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.2...auth/oauth2adapt/v0.2.3) (2024-07-10)
+
+
+### Bug Fixes
+
+* **auth/oauth2adapt:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+
## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.1...auth/oauth2adapt/v0.2.2) (2024-04-23)
diff --git a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
index 9835ac571..9cc33e5ee 100644
--- a/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
+++ b/vendor/cloud.google.com/go/auth/oauth2adapt/oauth2adapt.go
@@ -26,6 +26,13 @@ import (
"golang.org/x/oauth2/google"
)
+const (
+ oauth2TokenSourceKey = "oauth2.google.tokenSource"
+ oauth2ServiceAccountKey = "oauth2.google.serviceAccount"
+ authTokenSourceKey = "auth.google.tokenSource"
+ authServiceAccountKey = "auth.google.serviceAccount"
+)
+
// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
// into a [cloud.google.com/go/auth.TokenProvider].
func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
@@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
}
return nil, err
}
+ // Preserve compute token metadata, for both types of tokens.
+ metadata := map[string]interface{}{}
+ if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok {
+ metadata[authTokenSourceKey] = val
+ metadata[oauth2TokenSourceKey] = val
+ }
+ if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok {
+ metadata[authServiceAccountKey] = val
+ metadata[oauth2ServiceAccountKey] = val
+ }
return &auth.Token{
- Value: tok.AccessToken,
- Type: tok.Type(),
- Expiry: tok.Expiry,
+ Value: tok.AccessToken,
+ Type: tok.Type(),
+ Expiry: tok.Expiry,
+ Metadata: metadata,
}, nil
}
@@ -76,11 +94,29 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
}
return nil, err
}
- return &oauth2.Token{
+ tok2 := &oauth2.Token{
AccessToken: tok.Value,
TokenType: tok.Type,
Expiry: tok.Expiry,
- }, nil
+ }
+ // Preserve token metadata.
+ m := tok.Metadata
+ if m != nil {
+ // Copy map to avoid concurrent map writes error (#11161).
+ metadata := make(map[string]interface{}, len(m)+2)
+ for k, v := range m {
+ metadata[k] = v
+ }
+ // Append compute token metadata in converted form.
+ if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" {
+ metadata[oauth2TokenSourceKey] = val
+ }
+ if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" {
+ metadata[oauth2ServiceAccountKey] = val
+ }
+ tok2 = tok2.WithExtra(metadata)
+ }
+ return tok2, nil
}
// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]
diff --git a/vendor/cloud.google.com/go/auth/threelegged.go b/vendor/cloud.google.com/go/auth/threelegged.go
index a8ce6cd8a..07804dc16 100644
--- a/vendor/cloud.google.com/go/auth/threelegged.go
+++ b/vendor/cloud.google.com/go/auth/threelegged.go
@@ -20,6 +20,7 @@ import (
"encoding/json"
"errors"
"fmt"
+ "log/slog"
"mime"
"net/http"
"net/url"
@@ -28,6 +29,7 @@ import (
"time"
"cloud.google.com/go/auth/internal"
+ "github.com/googleapis/gax-go/v2/internallog"
)
// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
@@ -69,6 +71,11 @@ type Options3LO struct {
// AuthHandlerOpts provides a set of options for doing a
// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
AuthHandlerOpts *AuthorizationHandlerOptions
+ // Logger is used for debug logging. If provided, logging will be enabled
+ // at the loggers configured level. By default logging is disabled unless
+ // enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
+ // logger will be used. Optional.
+ Logger *slog.Logger
}
func (o *Options3LO) validate() error {
@@ -96,6 +103,10 @@ func (o *Options3LO) validate() error {
return nil
}
+func (o *Options3LO) logger() *slog.Logger {
+ return internallog.New(o.Logger)
+}
+
// PKCEOptions holds parameters to support PKCE.
type PKCEOptions struct {
// Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
@@ -128,7 +139,7 @@ func (o *Options3LO) client() *http.Client {
if o.Client != nil {
return o.Client
}
- return internal.CloneDefaultClient()
+ return internal.DefaultClient()
}
// authCodeURL returns a URL that points to a OAuth2 consent page.
@@ -293,12 +304,15 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin
if o.AuthStyle == StyleInHeader {
req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
}
+ logger := o.logger()
+ logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
// Make request
resp, body, err := internal.DoRequest(o.client(), req)
if err != nil {
return nil, refreshToken, err
}
+ logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body))
failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299
tokError := &Error{
Response: resp,
diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
index 2cbb405de..1f848ce0b 100644
--- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
+++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md
@@ -1,5 +1,40 @@
# Changes
+## [0.7.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.6.0...compute/metadata/v0.7.0) (2025-05-13)
+
+
+### Features
+
+* **compute/metadata:** Allow canceling GCE detection ([#11786](https://github.com/googleapis/google-cloud-go/issues/11786)) ([78100fe](https://github.com/googleapis/google-cloud-go/commit/78100fe7e28cd30f1e10b47191ac3c9839663b64))
+
+## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13)
+
+
+### Features
+
+* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f))
+
+## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Close Response Body for failed request ([#10891](https://github.com/googleapis/google-cloud-go/issues/10891)) ([e91d45e](https://github.com/googleapis/google-cloud-go/commit/e91d45e4757a9e354114509ba9800085d9e0ff1f))
+
+## [0.5.1](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.0...compute/metadata/v0.5.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Check error chain for retryable error ([#10840](https://github.com/googleapis/google-cloud-go/issues/10840)) ([2bdedef](https://github.com/googleapis/google-cloud-go/commit/2bdedeff621b223d63cebc4355fcf83bc68412cd))
+
+## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10)
+
+
+### Features
+
+* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302))
+
## [0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01)
diff --git a/vendor/cloud.google.com/go/compute/metadata/log.go b/vendor/cloud.google.com/go/compute/metadata/log.go
new file mode 100644
index 000000000..8ec673b88
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/log.go
@@ -0,0 +1,149 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metadata
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "net/http"
+ "strings"
+)
+
+// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog
+// to avoid the dependency. The compute/metadata module is used by too many
+// non-client library modules that can't justify the dependency.
+
+// The handler returned if logging is not enabled.
+type noOpHandler struct{}
+
+func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool {
+ return false
+}
+
+func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error {
+ return nil
+}
+
+func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler {
+ return h
+}
+
+func (h noOpHandler) WithGroup(_ string) slog.Handler {
+ return h
+}
+
+// httpRequest returns a lazily evaluated [slog.LogValuer] for a
+// [http.Request] and the associated body.
+func httpRequest(req *http.Request, body []byte) slog.LogValuer {
+ return &request{
+ req: req,
+ payload: body,
+ }
+}
+
+type request struct {
+ req *http.Request
+ payload []byte
+}
+
+func (r *request) LogValue() slog.Value {
+ if r == nil || r.req == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("method", r.req.Method))
+ groupValueAttrs = append(groupValueAttrs, slog.String("url", r.req.URL.String()))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.req.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+// httpResponse returns a lazily evaluated [slog.LogValuer] for a
+// [http.Response] and the associated body.
+func httpResponse(resp *http.Response, body []byte) slog.LogValuer {
+ return &response{
+ resp: resp,
+ payload: body,
+ }
+}
+
+type response struct {
+ resp *http.Response
+ payload []byte
+}
+
+func (r *response) LogValue() slog.Value {
+ if r == nil {
+ return slog.Value{}
+ }
+ var groupValueAttrs []slog.Attr
+ groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode)))
+
+ var headerAttr []slog.Attr
+ for k, val := range r.resp.Header {
+ headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
+ }
+ if len(headerAttr) > 0 {
+ groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
+ }
+
+ if len(r.payload) > 0 {
+ if attr, ok := processPayload(r.payload); ok {
+ groupValueAttrs = append(groupValueAttrs, attr)
+ }
+ }
+ return slog.GroupValue(groupValueAttrs...)
+}
+
+func processPayload(payload []byte) (slog.Attr, bool) {
+ peekChar := payload[0]
+ if peekChar == '{' {
+ // JSON object
+ var m map[string]any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else if peekChar == '[' {
+ // JSON array
+ var m []any
+ if err := json.Unmarshal(payload, &m); err == nil {
+ return slog.Any("payload", m), true
+ }
+ } else {
+ // Everything else
+ buf := &bytes.Buffer{}
+ if err := json.Compact(buf, payload); err != nil {
+ // Write raw payload in case of an error
+ buf.Write(payload)
+ }
+ return slog.String("payload", buf.String()), true
+ }
+ return slog.Attr{}, false
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
index e686f24d1..322be8032 100644
--- a/vendor/cloud.google.com/go/compute/metadata/metadata.go
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -24,11 +24,11 @@ import (
"encoding/json"
"fmt"
"io"
+ "log/slog"
"net"
"net/http"
"net/url"
"os"
- "runtime"
"strings"
"sync"
"time"
@@ -61,7 +61,10 @@ var (
instID = &cachedValue{k: "instance/id", trim: true}
)
-var defaultClient = &Client{hc: newDefaultHTTPClient()}
+var defaultClient = &Client{
+ hc: newDefaultHTTPClient(),
+ logger: slog.New(noOpHandler{}),
+}
func newDefaultHTTPClient() *http.Client {
return &http.Client{
@@ -114,94 +117,18 @@ var (
// NOTE: True returned from `OnGCE` does not guarantee that the metadata server
// is accessible from this process and have all the metadata defined.
func OnGCE() bool {
- onGCEOnce.Do(initOnGCE)
- return onGCE
-}
-
-func initOnGCE() {
- onGCE = testOnGCE()
+ return OnGCEWithContext(context.Background())
}
-func testOnGCE() bool {
- // The user explicitly said they're on GCE, so trust them.
- if os.Getenv(metadataHostEnv) != "" {
- return true
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- resc := make(chan bool, 2)
-
- // Try two strategies in parallel.
- // See https://github.com/googleapis/google-cloud-go/issues/194
- go func() {
- req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
- req.Header.Set("User-Agent", userAgent)
- res, err := newDefaultHTTPClient().Do(req.WithContext(ctx))
- if err != nil {
- resc <- false
- return
- }
- defer res.Body.Close()
- resc <- res.Header.Get("Metadata-Flavor") == "Google"
- }()
-
- go func() {
- resolver := &net.Resolver{}
- addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
- if err != nil || len(addrs) == 0 {
- resc <- false
- return
- }
- resc <- strsContains(addrs, metadataIP)
- }()
-
- tryHarder := systemInfoSuggestsGCE()
- if tryHarder {
- res := <-resc
- if res {
- // The first strategy succeeded, so let's use it.
- return true
- }
- // Wait for either the DNS or metadata server probe to
- // contradict the other one and say we are running on
- // GCE. Give it a lot of time to do so, since the system
- // info already suggests we're running on a GCE BIOS.
- timer := time.NewTimer(5 * time.Second)
- defer timer.Stop()
- select {
- case res = <-resc:
- return res
- case <-timer.C:
- // Too slow. Who knows what this system is.
- return false
- }
- }
-
- // There's no hint from the system info that we're running on
- // GCE, so use the first probe's result as truth, whether it's
- // true or false. The goal here is to optimize for speed for
- // users who are NOT running on GCE. We can't assume that
- // either a DNS lookup or an HTTP request to a blackholed IP
- // address is fast. Worst case this should return when the
- // metaClient's Transport.ResponseHeaderTimeout or
- // Transport.Dial.Timeout fires (in two seconds).
- return <-resc
-}
-
-// systemInfoSuggestsGCE reports whether the local system (without
-// doing network requests) suggests that we're running on GCE. If this
-// returns true, testOnGCE tries a bit harder to reach its metadata
-// server.
-func systemInfoSuggestsGCE() bool {
- if runtime.GOOS != "linux" {
- // We don't have any non-Linux clues available, at least yet.
- return false
- }
- slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name")
- name := strings.TrimSpace(string(slurp))
- return name == "Google" || name == "Google Compute Engine"
+// OnGCEWithContext reports whether this process is running on Google Compute Platforms.
+// This function's return value is memoized for better performance.
+// NOTE: True returned from `OnGCEWithContext` does not guarantee that the metadata server
+// is accessible from this process and has all the metadata defined.
+func OnGCEWithContext(ctx context.Context) bool {
+ onGCEOnce.Do(func() {
+ onGCE = defaultClient.OnGCEWithContext(ctx)
+ })
+ return onGCE
}
// Subscribe calls Client.SubscribeWithContext on the default client.
@@ -423,17 +350,120 @@ func strsContains(ss []string, s string) bool {
// A Client provides metadata.
type Client struct {
- hc *http.Client
+ hc *http.Client
+ logger *slog.Logger
+}
+
+// Options for configuring a [Client].
+type Options struct {
+ // Client is the HTTP client used to make requests. Optional.
+ Client *http.Client
+ // Logger is used to log information about HTTP request and responses.
+ // If not provided, nothing will be logged. Optional.
+ Logger *slog.Logger
}
// NewClient returns a Client that can be used to fetch metadata.
// Returns the client that uses the specified http.Client for HTTP requests.
// If nil is specified, returns the default client.
func NewClient(c *http.Client) *Client {
- if c == nil {
+ return NewWithOptions(&Options{
+ Client: c,
+ })
+}
+
+// NewWithOptions returns a Client that is configured with the provided Options.
+func NewWithOptions(opts *Options) *Client {
+ if opts == nil {
return defaultClient
}
- return &Client{hc: c}
+ client := opts.Client
+ if client == nil {
+ client = newDefaultHTTPClient()
+ }
+ logger := opts.Logger
+ if logger == nil {
+ logger = slog.New(noOpHandler{})
+ }
+ return &Client{hc: client, logger: logger}
+}
+
+// NOTE: metadataRequestStrategy is assigned to a variable for test stubbing purposes.
+var metadataRequestStrategy = func(ctx context.Context, httpClient *http.Client, resc chan bool) {
+ req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
+ req.Header.Set("User-Agent", userAgent)
+ res, err := httpClient.Do(req.WithContext(ctx))
+ if err != nil {
+ resc <- false
+ return
+ }
+ defer res.Body.Close()
+ resc <- res.Header.Get("Metadata-Flavor") == "Google"
+}
+
+// NOTE: dnsRequestStrategy is assigned to a variable for test stubbing purposes.
+var dnsRequestStrategy = func(ctx context.Context, resc chan bool) {
+ resolver := &net.Resolver{}
+ addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
+ if err != nil || len(addrs) == 0 {
+ resc <- false
+ return
+ }
+ resc <- strsContains(addrs, metadataIP)
+}
+
+// OnGCEWithContext reports whether this process is running on Google Compute Platforms.
+// NOTE: True returned from `OnGCEWithContext` does not guarantee that the metadata server
+// is accessible from this process and has all the metadata defined.
+func (c *Client) OnGCEWithContext(ctx context.Context) bool {
+ // The user explicitly said they're on GCE, so trust them.
+ if os.Getenv(metadataHostEnv) != "" {
+ return true
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ resc := make(chan bool, 2)
+
+ // Try two strategies in parallel.
+ // See https://github.com/googleapis/google-cloud-go/issues/194
+ go metadataRequestStrategy(ctx, c.hc, resc)
+ go dnsRequestStrategy(ctx, resc)
+
+ tryHarder := systemInfoSuggestsGCE()
+ if tryHarder {
+ res := <-resc
+ if res {
+ // The first strategy succeeded, so let's use it.
+ return true
+ }
+
+ // Wait for either the DNS or metadata server probe to
+ // contradict the other one and say we are running on
+ // GCE. Give it a lot of time to do so, since the system
+ // info already suggests we're running on a GCE BIOS.
+ // Ensure cancellations from the calling context are respected.
+ waitContext, cancelWait := context.WithTimeout(ctx, 5*time.Second)
+ defer cancelWait()
+ select {
+ case res = <-resc:
+ return res
+ case <-waitContext.Done():
+ // Too slow. Who knows what this system is.
+ return false
+ }
+ }
+
+ // There's no hint from the system info that we're running on
+ // GCE, so use the first probe's result as truth, whether it's
+ // true or false. The goal here is to optimize for speed for
+ // users who are NOT running on GCE. We can't assume that
+ // either a DNS lookup or an HTTP request to a blackholed IP
+ // address is fast. Worst case this should return when the
+ // metaClient's Transport.ResponseHeaderTimeout or
+ // Transport.Dial.Timeout fires (in two seconds).
+ return <-resc
}
// getETag returns a value from the metadata service as well as the associated ETag.
@@ -463,14 +493,26 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
req.Header.Set("User-Agent", userAgent)
var res *http.Response
var reqErr error
+ var body []byte
retryer := newRetryer()
for {
+ c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil))
res, reqErr = c.hc.Do(req)
var code int
if res != nil {
code = res.StatusCode
+ body, err = io.ReadAll(res.Body)
+ if err != nil {
+ res.Body.Close()
+ return "", "", err
+ }
+ c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body))
+ res.Body.Close()
}
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
+ if res != nil && res.Body != nil {
+ res.Body.Close()
+ }
if err := sleep(ctx, delay); err != nil {
return "", "", err
}
@@ -481,18 +523,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
if reqErr != nil {
return "", "", reqErr
}
- defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
- all, err := io.ReadAll(res.Body)
- if err != nil {
- return "", "", err
- }
if res.StatusCode != 200 {
- return "", "", &Error{Code: res.StatusCode, Message: string(all)}
+ return "", "", &Error{Code: res.StatusCode, Message: string(body)}
}
- return string(all), res.Header.Get("Etag"), nil
+ return string(body), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
diff --git a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
index bb412f891..2e53f0123 100644
--- a/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
+++ b/vendor/cloud.google.com/go/compute/metadata/retry_linux.go
@@ -17,10 +17,15 @@
package metadata
-import "syscall"
+import (
+ "errors"
+ "syscall"
+)
func init() {
// Initialize syscallRetryable to return true on transient socket-level
// errors. These errors are specific to Linux.
- syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
+ syscallRetryable = func(err error) bool {
+ return errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.ECONNREFUSED)
+ }
}
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go
new file mode 100644
index 000000000..d57ae1b27
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go
@@ -0,0 +1,28 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !linux
+
+package metadata
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+//
+// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ // We don't currently have checks for other GOOS
+ return false
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go
new file mode 100644
index 000000000..17ba5a3a2
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go
@@ -0,0 +1,30 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package metadata
+
+import (
+ "os"
+ "strings"
+)
+
+// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ b, _ := os.ReadFile("/sys/class/dmi/id/product_name")
+
+ name := strings.TrimSpace(string(b))
+ return name == "Google" || name == "Google Compute Engine"
+}
diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
new file mode 100644
index 000000000..f57a5b14e
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go
@@ -0,0 +1,39 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package metadata
+
+import (
+ "strings"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+// NOTE: systemInfoSuggestsGCE is assigned to a variable for test stubbing purposes.
+var systemInfoSuggestsGCE = func() bool {
+ k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE)
+ if err != nil {
+ return false
+ }
+ defer k.Close()
+
+ s, _, err := k.GetStringValue("SystemProductName")
+ if err != nil {
+ return false
+ }
+ s = strings.TrimSpace(s)
+ return strings.HasPrefix(s, "Google")
+}
diff --git a/vendor/cloud.google.com/go/iam/CHANGES.md b/vendor/cloud.google.com/go/iam/CHANGES.md
index 5aab66312..7839f3b89 100644
--- a/vendor/cloud.google.com/go/iam/CHANGES.md
+++ b/vendor/cloud.google.com/go/iam/CHANGES.md
@@ -1,6 +1,107 @@
# Changes
+## [1.5.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.5.1...iam/v1.5.2) (2025-04-15)
+
+
+### Bug Fixes
+
+* **iam:** Update google.golang.org/api to 0.229.0 ([3319672](https://github.com/googleapis/google-cloud-go/commit/3319672f3dba84a7150772ccb5433e02dab7e201))
+
+## [1.5.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.5.0...iam/v1.5.1) (2025-04-15)
+
+
+### Documentation
+
+* **iam:** Formatting update for ListPolicyBindingsRequest ([dfdf404](https://github.com/googleapis/google-cloud-go/commit/dfdf404138728724aa6305c5c465ecc6fe5b1264))
+* **iam:** Minor doc update for ListPrincipalAccessBoundaryPoliciesResponse ([20f762c](https://github.com/googleapis/google-cloud-go/commit/20f762c528726a3f038d3e1f37e8a4952118badf))
+* **iam:** Minor doc update for ListPrincipalAccessBoundaryPoliciesResponse ([20f762c](https://github.com/googleapis/google-cloud-go/commit/20f762c528726a3f038d3e1f37e8a4952118badf))
+
+## [1.5.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.2...iam/v1.5.0) (2025-03-31)
+
+
+### Features
+
+* **iam:** New client(s) ([#11933](https://github.com/googleapis/google-cloud-go/issues/11933)) ([d5cb2e5](https://github.com/googleapis/google-cloud-go/commit/d5cb2e58334c6963cc46885f565fe3b19c52cb63))
+
+## [1.4.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.1...iam/v1.4.2) (2025-03-13)
+
+
+### Bug Fixes
+
+* **iam:** Update golang.org/x/net to 0.37.0 ([1144978](https://github.com/googleapis/google-cloud-go/commit/11449782c7fb4896bf8b8b9cde8e7441c84fb2fd))
+
+## [1.4.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.4.0...iam/v1.4.1) (2025-03-06)
+
+
+### Bug Fixes
+
+* **iam:** Fix out-of-sync version.go ([28f0030](https://github.com/googleapis/google-cloud-go/commit/28f00304ebb13abfd0da2f45b9b79de093cca1ec))
+
+## [1.4.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.1...iam/v1.4.0) (2025-02-12)
+
+
+### Features
+
+* **iam/admin:** Regenerate client ([#11570](https://github.com/googleapis/google-cloud-go/issues/11570)) ([eab87d7](https://github.com/googleapis/google-cloud-go/commit/eab87d73bea884c636ec88f03b9aa90102a2833f)), refs [#8219](https://github.com/googleapis/google-cloud-go/issues/8219)
+
+## [1.3.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.3.0...iam/v1.3.1) (2025-01-02)
+
+
+### Bug Fixes
+
+* **iam:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [1.3.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.2...iam/v1.3.0) (2024-12-04)
+
+
+### Features
+
+* **iam:** Add ResourcePolicyMember to google/iam/v1 ([8dedb87](https://github.com/googleapis/google-cloud-go/commit/8dedb878c070cc1e92d62bb9b32358425e3ceffb))
+
+## [1.2.2](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.1...iam/v1.2.2) (2024-10-23)
+
+
+### Bug Fixes
+
+* **iam:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+* **iam:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+
+## [1.2.1](https://github.com/googleapis/google-cloud-go/compare/iam/v1.2.0...iam/v1.2.1) (2024-09-12)
+
+
+### Bug Fixes
+
+* **iam:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+
+## [1.2.0](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.13...iam/v1.2.0) (2024-08-20)
+
+
+### Features
+
+* **iam:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
+
+## [1.1.13](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.12...iam/v1.1.13) (2024-08-08)
+
+
+### Bug Fixes
+
+* **iam:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
+
+## [1.1.12](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.11...iam/v1.1.12) (2024-07-24)
+
+
+### Bug Fixes
+
+* **iam:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+
+## [1.1.11](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.10...iam/v1.1.11) (2024-07-10)
+
+
+### Bug Fixes
+
+* **iam:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+
## [1.1.10](https://github.com/googleapis/google-cloud-go/compare/iam/v1.1.9...iam/v1.1.10) (2024-07-01)
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
index 619b4c4fa..2b57ae3b8 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/iam_policy.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/iam/v1/iam_policy.proto
@@ -65,11 +65,9 @@ type SetIamPolicyRequest struct {
func (x *SetIamPolicyRequest) Reset() {
*x = SetIamPolicyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SetIamPolicyRequest) String() string {
@@ -80,7 +78,7 @@ func (*SetIamPolicyRequest) ProtoMessage() {}
func (x *SetIamPolicyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -132,11 +130,9 @@ type GetIamPolicyRequest struct {
func (x *GetIamPolicyRequest) Reset() {
*x = GetIamPolicyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetIamPolicyRequest) String() string {
@@ -147,7 +143,7 @@ func (*GetIamPolicyRequest) ProtoMessage() {}
func (x *GetIamPolicyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -194,11 +190,9 @@ type TestIamPermissionsRequest struct {
func (x *TestIamPermissionsRequest) Reset() {
*x = TestIamPermissionsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *TestIamPermissionsRequest) String() string {
@@ -209,7 +203,7 @@ func (*TestIamPermissionsRequest) ProtoMessage() {}
func (x *TestIamPermissionsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -251,11 +245,9 @@ type TestIamPermissionsResponse struct {
func (x *TestIamPermissionsResponse) Reset() {
*x = TestIamPermissionsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *TestIamPermissionsResponse) String() string {
@@ -266,7 +258,7 @@ func (*TestIamPermissionsResponse) ProtoMessage() {}
func (x *TestIamPermissionsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_iam_policy_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -363,16 +355,15 @@ var file_google_iam_v1_iam_policy_proto_rawDesc = []byte{
0x65, 0x3d, 0x2a, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72,
0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1e, 0xca, 0x41, 0x1b, 0x69, 0x61, 0x6d,
0x2d, 0x6d, 0x65, 0x74, 0x61, 0x2d, 0x61, 0x70, 0x69, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7f, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x42, 0x7c, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x0e, 0x49,
0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69,
- 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xaa, 0x02, 0x13,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d,
- 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
- 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x33,
+ 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70, 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31,
+ 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c,
+ 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -421,56 +412,6 @@ func file_google_iam_v1_iam_policy_proto_init() {
}
file_google_iam_v1_options_proto_init()
file_google_iam_v1_policy_proto_init()
- if !protoimpl.UnsafeEnabled {
- file_google_iam_v1_iam_policy_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*SetIamPolicyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_iam_policy_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*GetIamPolicyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_iam_policy_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*TestIamPermissionsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_iam_policy_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*TestIamPermissionsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
index f1c1c084e..745de05ba 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/options.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/iam/v1/options.proto
@@ -64,11 +64,9 @@ type GetPolicyOptions struct {
func (x *GetPolicyOptions) Reset() {
*x = GetPolicyOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_options_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_options_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetPolicyOptions) String() string {
@@ -79,7 +77,7 @@ func (*GetPolicyOptions) ProtoMessage() {}
func (x *GetPolicyOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_options_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -152,20 +150,6 @@ func file_google_iam_v1_options_proto_init() {
if File_google_iam_v1_options_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_iam_v1_options_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*GetPolicyOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
index 4dda5d6d0..0eba15089 100644
--- a/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/policy.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.34.2
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/iam/v1/policy.proto
@@ -337,11 +337,9 @@ type Policy struct {
func (x *Policy) Reset() {
*x = Policy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Policy) String() string {
@@ -352,7 +350,7 @@ func (*Policy) ProtoMessage() {}
func (x *Policy) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -462,11 +460,9 @@ type Binding struct {
func (x *Binding) Reset() {
*x = Binding{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Binding) String() string {
@@ -477,7 +473,7 @@ func (*Binding) ProtoMessage() {}
func (x *Binding) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -579,11 +575,9 @@ type AuditConfig struct {
func (x *AuditConfig) Reset() {
*x = AuditConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuditConfig) String() string {
@@ -594,7 +588,7 @@ func (*AuditConfig) ProtoMessage() {}
func (x *AuditConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -658,11 +652,9 @@ type AuditLogConfig struct {
func (x *AuditLogConfig) Reset() {
*x = AuditLogConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuditLogConfig) String() string {
@@ -673,7 +665,7 @@ func (*AuditLogConfig) ProtoMessage() {}
func (x *AuditLogConfig) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -716,11 +708,9 @@ type PolicyDelta struct {
func (x *PolicyDelta) Reset() {
*x = PolicyDelta{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *PolicyDelta) String() string {
@@ -731,7 +721,7 @@ func (*PolicyDelta) ProtoMessage() {}
func (x *PolicyDelta) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -784,11 +774,9 @@ type BindingDelta struct {
func (x *BindingDelta) Reset() {
*x = BindingDelta{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BindingDelta) String() string {
@@ -799,7 +787,7 @@ func (*BindingDelta) ProtoMessage() {}
func (x *BindingDelta) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -869,11 +857,9 @@ type AuditConfigDelta struct {
func (x *AuditConfigDelta) Reset() {
*x = AuditConfigDelta{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_iam_v1_policy_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_iam_v1_policy_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *AuditConfigDelta) String() string {
@@ -884,7 +870,7 @@ func (*AuditConfigDelta) ProtoMessage() {}
func (x *AuditConfigDelta) ProtoReflect() protoreflect.Message {
mi := &file_google_iam_v1_policy_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1072,92 +1058,6 @@ func file_google_iam_v1_policy_proto_init() {
if File_google_iam_v1_policy_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_iam_v1_policy_proto_msgTypes[0].Exporter = func(v any, i int) any {
- switch v := v.(*Policy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[1].Exporter = func(v any, i int) any {
- switch v := v.(*Binding); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[2].Exporter = func(v any, i int) any {
- switch v := v.(*AuditConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[3].Exporter = func(v any, i int) any {
- switch v := v.(*AuditLogConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[4].Exporter = func(v any, i int) any {
- switch v := v.(*PolicyDelta); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[5].Exporter = func(v any, i int) any {
- switch v := v.(*BindingDelta); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_iam_v1_policy_proto_msgTypes[6].Exporter = func(v any, i int) any {
- switch v := v.(*AuditConfigDelta); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go
new file mode 100644
index 000000000..c3339e26c
--- /dev/null
+++ b/vendor/cloud.google.com/go/iam/apiv1/iampb/resource_policy_member.pb.go
@@ -0,0 +1,185 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/iam/v1/resource_policy_member.proto
+
+package iampb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Output-only policy member strings of a Google Cloud resource's built-in
+// identity.
+type ResourcePolicyMember struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // IAM policy binding member referring to a Google Cloud resource by
+ // user-assigned name (https://google.aip.dev/122). If a resource is deleted
+ // and recreated with the same name, the binding will be applicable to the new
+ // resource.
+ //
+ // Example:
+ // `principal://parametermanager.googleapis.com/projects/12345/name/locations/us-central1-a/parameters/my-parameter`
+ IamPolicyNamePrincipal string `protobuf:"bytes,1,opt,name=iam_policy_name_principal,json=iamPolicyNamePrincipal,proto3" json:"iam_policy_name_principal,omitempty"`
+ // IAM policy binding member referring to a Google Cloud resource by
+ // system-assigned unique identifier (https://google.aip.dev/148#uid). If a
+ // resource is deleted and recreated with the same name, the binding will not
+ // be applicable to the new resource
+ //
+ // Example:
+ // `principal://parametermanager.googleapis.com/projects/12345/uid/locations/us-central1-a/parameters/a918fed5`
+ IamPolicyUidPrincipal string `protobuf:"bytes,2,opt,name=iam_policy_uid_principal,json=iamPolicyUidPrincipal,proto3" json:"iam_policy_uid_principal,omitempty"`
+}
+
+func (x *ResourcePolicyMember) Reset() {
+ *x = ResourcePolicyMember{}
+ mi := &file_google_iam_v1_resource_policy_member_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ResourcePolicyMember) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourcePolicyMember) ProtoMessage() {}
+
+func (x *ResourcePolicyMember) ProtoReflect() protoreflect.Message {
+ mi := &file_google_iam_v1_resource_policy_member_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourcePolicyMember.ProtoReflect.Descriptor instead.
+func (*ResourcePolicyMember) Descriptor() ([]byte, []int) {
+ return file_google_iam_v1_resource_policy_member_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourcePolicyMember) GetIamPolicyNamePrincipal() string {
+ if x != nil {
+ return x.IamPolicyNamePrincipal
+ }
+ return ""
+}
+
+func (x *ResourcePolicyMember) GetIamPolicyUidPrincipal() string {
+ if x != nil {
+ return x.IamPolicyUidPrincipal
+ }
+ return ""
+}
+
+var File_google_iam_v1_resource_policy_member_proto protoreflect.FileDescriptor
+
+var file_google_iam_v1_resource_policy_member_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x69, 0x61, 0x6d, 0x2f, 0x76, 0x31, 0x2f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x5f,
+ 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x1a, 0x1f, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65,
+ 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a,
+ 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d,
+ 0x65, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x19, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70,
+ 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x69,
+ 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x69, 0x6e,
+ 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x3c, 0x0a, 0x18, 0x69, 0x61, 0x6d, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x5f, 0x75, 0x69, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61,
+ 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x15, 0x69, 0x61,
+ 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x55, 0x69, 0x64, 0x50, 0x72, 0x69, 0x6e, 0x63, 0x69,
+ 0x70, 0x61, 0x6c, 0x42, 0x87, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x42, 0x19, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x29, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x69, 0x61, 0x6d, 0x2f,
+ 0x61, 0x70, 0x69, 0x76, 0x31, 0x2f, 0x69, 0x61, 0x6d, 0x70, 0x62, 0x3b, 0x69, 0x61, 0x6d, 0x70,
+ 0x62, 0xaa, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x2e, 0x49, 0x61, 0x6d, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x13, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x49, 0x61, 0x6d, 0x5c, 0x56, 0x31, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_iam_v1_resource_policy_member_proto_rawDescOnce sync.Once
+ file_google_iam_v1_resource_policy_member_proto_rawDescData = file_google_iam_v1_resource_policy_member_proto_rawDesc
+)
+
+func file_google_iam_v1_resource_policy_member_proto_rawDescGZIP() []byte {
+ file_google_iam_v1_resource_policy_member_proto_rawDescOnce.Do(func() {
+ file_google_iam_v1_resource_policy_member_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_iam_v1_resource_policy_member_proto_rawDescData)
+ })
+ return file_google_iam_v1_resource_policy_member_proto_rawDescData
+}
+
+var file_google_iam_v1_resource_policy_member_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_iam_v1_resource_policy_member_proto_goTypes = []any{
+ (*ResourcePolicyMember)(nil), // 0: google.iam.v1.ResourcePolicyMember
+}
+var file_google_iam_v1_resource_policy_member_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_iam_v1_resource_policy_member_proto_init() }
+func file_google_iam_v1_resource_policy_member_proto_init() {
+ if File_google_iam_v1_resource_policy_member_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_iam_v1_resource_policy_member_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_iam_v1_resource_policy_member_proto_goTypes,
+ DependencyIndexes: file_google_iam_v1_resource_policy_member_proto_depIdxs,
+ MessageInfos: file_google_iam_v1_resource_policy_member_proto_msgTypes,
+ }.Build()
+ File_google_iam_v1_resource_policy_member_proto = out.File
+ file_google_iam_v1_resource_policy_member_proto_rawDesc = nil
+ file_google_iam_v1_resource_policy_member_proto_goTypes = nil
+ file_google_iam_v1_resource_policy_member_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
index a3e99df29..d72e82329 100644
--- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
+++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json
@@ -39,6 +39,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/ai/generativelanguage/apiv1alpha": {
+ "api_shortname": "generativelanguage",
+ "distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1alpha",
+ "description": "Generative Language API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/ai/latest/generativelanguage/apiv1alpha",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/ai/generativelanguage/apiv1beta": {
"api_shortname": "generativelanguage",
"distribution_name": "cloud.google.com/go/ai/generativelanguage/apiv1beta",
@@ -179,6 +189,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/apihub/apiv1": {
+ "api_shortname": "apihub",
+ "distribution_name": "cloud.google.com/go/apihub/apiv1",
+ "description": "API hub API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/apihub/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/apikeys/apiv2": {
"api_shortname": "apikeys",
"distribution_name": "cloud.google.com/go/apikeys/apiv2",
@@ -346,7 +366,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/backupdr/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/baremetalsolution/apiv2": {
@@ -526,7 +546,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/migration/apiv2",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/bigquery/migration/apiv2alpha": {
@@ -559,6 +579,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/bigquery/storage/apiv1alpha": {
+ "api_shortname": "bigquerystorage",
+ "distribution_name": "cloud.google.com/go/bigquery/storage/apiv1alpha",
+ "description": "BigQuery Storage API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigquery/latest/storage/apiv1alpha",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/bigquery/storage/apiv1beta1": {
"api_shortname": "bigquerystorage",
"distribution_name": "cloud.google.com/go/bigquery/storage/apiv1beta1",
@@ -589,6 +619,26 @@
"release_level": "stable",
"library_type": "GAPIC_MANUAL"
},
+ "cloud.google.com/go/bigtable/admin/apiv2": {
+ "api_shortname": "bigtableadmin",
+ "distribution_name": "cloud.google.com/go/bigtable/admin/apiv2",
+ "description": "Cloud Bigtable Admin API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest/admin/apiv2",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/bigtable/apiv2": {
+ "api_shortname": "bigtable",
+ "distribution_name": "cloud.google.com/go/bigtable/apiv2",
+ "description": "Cloud Bigtable API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/bigtable/latest/apiv2",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/billing/apiv1": {
"api_shortname": "cloudbilling",
"distribution_name": "cloud.google.com/go/billing/apiv1",
@@ -696,7 +746,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudcontrolspartner/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/cloudcontrolspartner/apiv1beta": {
@@ -736,6 +786,16 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/cloudquotas/apiv1beta": {
+ "api_shortname": "cloudquotas",
+ "distribution_name": "cloud.google.com/go/cloudquotas/apiv1beta",
+ "description": "Cloud Quotas API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/cloudquotas/latest/apiv1beta",
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
@@ -899,16 +959,6 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/dataform/apiv1alpha2": {
- "api_shortname": "dataform",
- "distribution_name": "cloud.google.com/go/dataform/apiv1alpha2",
- "description": "Dataform API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/dataform/latest/apiv1alpha2",
- "release_level": "preview",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/dataform/apiv1beta1": {
"api_shortname": "dataform",
"distribution_name": "cloud.google.com/go/dataform/apiv1beta1",
@@ -989,6 +1039,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/datastore/apiv1": {
+ "api_shortname": "datastore",
+ "distribution_name": "cloud.google.com/go/datastore/apiv1",
+ "description": "Cloud Datastore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/datastore/latest/apiv1",
+ "release_level": "stable",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/datastream/apiv1": {
"api_shortname": "datastream",
"distribution_name": "cloud.google.com/go/datastream/apiv1",
@@ -1166,7 +1226,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/edgenetwork/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/errorreporting": {
@@ -1229,6 +1289,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/financialservices/apiv1": {
+ "api_shortname": "financialservices",
+ "distribution_name": "cloud.google.com/go/financialservices/apiv1",
+ "description": "Financial Services API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/financialservices/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/firestore": {
"api_shortname": "firestore",
"distribution_name": "cloud.google.com/go/firestore",
@@ -1309,6 +1379,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/gkeconnect/gateway/apiv1": {
+ "api_shortname": "connectgateway",
+ "distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1",
+ "description": "Connect Gateway API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkeconnect/latest/gateway/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/gkeconnect/gateway/apiv1beta1": {
"api_shortname": "connectgateway",
"distribution_name": "cloud.google.com/go/gkeconnect/gateway/apiv1beta1",
@@ -1332,7 +1412,7 @@
"cloud.google.com/go/gkemulticloud/apiv1": {
"api_shortname": "gkemulticloud",
"distribution_name": "cloud.google.com/go/gkemulticloud/apiv1",
- "description": "Anthos Multi-Cloud API",
+ "description": "GKE Multi-Cloud API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gkemulticloud/latest/apiv1",
@@ -1342,7 +1422,7 @@
"cloud.google.com/go/gsuiteaddons/apiv1": {
"api_shortname": "gsuiteaddons",
"distribution_name": "cloud.google.com/go/gsuiteaddons/apiv1",
- "description": "Google Workspace Add-ons API",
+ "description": "Google Workspace add-ons API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/gsuiteaddons/latest/apiv1",
@@ -1532,7 +1612,7 @@
"cloud.google.com/go/managedkafka/apiv1": {
"api_shortname": "managedkafka",
"distribution_name": "cloud.google.com/go/managedkafka/apiv1",
- "description": "Apache Kafka for BigQuery API",
+ "description": "Managed Service for Apache Kafka API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/managedkafka/latest/apiv1",
@@ -1549,6 +1629,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/maps/areainsights/apiv1": {
+ "api_shortname": "areainsights",
+ "distribution_name": "cloud.google.com/go/maps/areainsights/apiv1",
+ "description": "Places Insights API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/maps/latest/areainsights/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/maps/fleetengine/apiv1": {
"api_shortname": "fleetengine",
"distribution_name": "cloud.google.com/go/maps/fleetengine/apiv1",
@@ -1639,6 +1729,26 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/memorystore/apiv1": {
+ "api_shortname": "memorystore",
+ "distribution_name": "cloud.google.com/go/memorystore/apiv1",
+ "description": "Memorystore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memorystore/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
+ "cloud.google.com/go/memorystore/apiv1beta": {
+ "api_shortname": "memorystore",
+ "distribution_name": "cloud.google.com/go/memorystore/apiv1beta",
+ "description": "Memorystore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/memorystore/latest/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/metastore/apiv1": {
"api_shortname": "metastore",
"distribution_name": "cloud.google.com/go/metastore/apiv1",
@@ -1679,6 +1789,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/modelarmor/apiv1": {
+ "api_shortname": "modelarmor",
+ "distribution_name": "cloud.google.com/go/modelarmor/apiv1",
+ "description": "Model Armor API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/modelarmor/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/monitoring/apiv3/v2": {
"api_shortname": "monitoring",
"distribution_name": "cloud.google.com/go/monitoring/apiv3/v2",
@@ -1686,7 +1806,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/monitoring/latest/apiv3/v2",
- "release_level": "stable",
+ "release_level": "preview",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/monitoring/dashboard/apiv1": {
@@ -1809,6 +1929,16 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/oracledatabase/apiv1": {
+ "api_shortname": "oracledatabase",
+ "distribution_name": "cloud.google.com/go/oracledatabase/apiv1",
+ "description": "Oracle Database@Google Cloud API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/oracledatabase/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/orchestration/airflow/service/apiv1": {
"api_shortname": "composer",
"distribution_name": "cloud.google.com/go/orchestration/airflow/service/apiv1",
@@ -1899,6 +2029,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/parallelstore/apiv1": {
+ "api_shortname": "parallelstore",
+ "distribution_name": "cloud.google.com/go/parallelstore/apiv1",
+ "description": "Parallelstore API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parallelstore/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/parallelstore/apiv1beta": {
"api_shortname": "parallelstore",
"distribution_name": "cloud.google.com/go/parallelstore/apiv1beta",
@@ -1909,6 +2049,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/parametermanager/apiv1": {
+ "api_shortname": "parametermanager",
+ "distribution_name": "cloud.google.com/go/parametermanager/apiv1",
+ "description": "Parameter Manager API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/parametermanager/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/phishingprotection/apiv1beta1": {
"api_shortname": "phishingprotection",
"distribution_name": "cloud.google.com/go/phishingprotection/apiv1beta1",
@@ -1959,6 +2109,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/privilegedaccessmanager/apiv1": {
+ "api_shortname": "privilegedaccessmanager",
+ "distribution_name": "cloud.google.com/go/privilegedaccessmanager/apiv1",
+ "description": "Privileged Access Manager API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/privilegedaccessmanager/latest/apiv1",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/profiler": {
"api_shortname": "cloudprofiler",
"distribution_name": "cloud.google.com/go/profiler",
@@ -2119,16 +2279,6 @@
"release_level": "stable",
"library_type": "GAPIC_AUTO"
},
- "cloud.google.com/go/resourcesettings/apiv1": {
- "api_shortname": "resourcesettings",
- "distribution_name": "cloud.google.com/go/resourcesettings/apiv1",
- "description": "Resource Settings API",
- "language": "go",
- "client_library_type": "generated",
- "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/resourcesettings/latest/apiv1",
- "release_level": "stable",
- "library_type": "GAPIC_AUTO"
- },
"cloud.google.com/go/retail/apiv2": {
"api_shortname": "retail",
"distribution_name": "cloud.google.com/go/retail/apiv2",
@@ -2226,7 +2376,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securesourcemanager/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/security/privateca/apiv1": {
@@ -2312,11 +2462,11 @@
"cloud.google.com/go/securitycentermanagement/apiv1": {
"api_shortname": "securitycentermanagement",
"distribution_name": "cloud.google.com/go/securitycentermanagement/apiv1",
- "description": "Security Center Management API",
+ "description": "Security Command Center Management API",
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/securitycentermanagement/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/securityposture/apiv1": {
@@ -2509,6 +2659,16 @@
"release_level": "preview",
"library_type": "GAPIC_AUTO"
},
+ "cloud.google.com/go/shopping/merchant/reviews/apiv1beta": {
+ "api_shortname": "merchantapi",
+ "distribution_name": "cloud.google.com/go/shopping/merchant/reviews/apiv1beta",
+ "description": "Merchant API",
+ "language": "go",
+ "client_library_type": "generated",
+ "client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/shopping/latest/merchant/reviews/apiv1beta",
+ "release_level": "preview",
+ "library_type": "GAPIC_AUTO"
+ },
"cloud.google.com/go/spanner": {
"api_shortname": "spanner",
"distribution_name": "cloud.google.com/go/spanner",
@@ -2606,7 +2766,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/storage/latest/control/apiv2",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/storage/internal/apiv2": {
@@ -2686,7 +2846,7 @@
"language": "go",
"client_library_type": "generated",
"client_documentation": "https://cloud.google.com/go/docs/reference/cloud.google.com/go/telcoautomation/latest/apiv1",
- "release_level": "preview",
+ "release_level": "stable",
"library_type": "GAPIC_AUTO"
},
"cloud.google.com/go/texttospeech/apiv1": {
diff --git a/vendor/cloud.google.com/go/internal/trace/trace.go b/vendor/cloud.google.com/go/internal/trace/trace.go
index e8daf800a..fcff2a7e4 100644
--- a/vendor/cloud.google.com/go/internal/trace/trace.go
+++ b/vendor/cloud.google.com/go/internal/trace/trace.go
@@ -18,143 +18,39 @@ import (
"context"
"errors"
"fmt"
- "os"
- "strings"
- "sync"
- "go.opencensus.io/trace"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- ottrace "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace"
"google.golang.org/api/googleapi"
- "google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
const (
- // Deprecated: The default experimental tracing support for OpenCensus is
- // now deprecated in the Google Cloud client libraries for Go.
- // TelemetryPlatformTracingOpenCensus is the value to which the environment
- // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
- // set to enable OpenCensus tracing.
- TelemetryPlatformTracingOpenCensus = "opencensus"
- // TelemetryPlatformTracingOpenTelemetry is the value to which the environment
- // variable GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING should be
- // set to enable OpenTelemetry tracing.
- TelemetryPlatformTracingOpenTelemetry = "opentelemetry"
- // TelemetryPlatformTracingVar is the name of the environment
- // variable that can be set to change the default tracing from OpenTelemetry
- // to OpenCensus.
- //
- // The default experimental tracing support for OpenCensus is now deprecated
- // in the Google Cloud client libraries for Go.
- TelemetryPlatformTracingVar = "GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING"
- // OpenTelemetryTracerName is the name given to the OpenTelemetry Tracer
- // when it is obtained from the OpenTelemetry TracerProvider.
OpenTelemetryTracerName = "cloud.google.com/go"
)
-var (
- // openCensusTracingEnabledMu guards access to openCensusTracingEnabled field
- openCensusTracingEnabledMu = sync.RWMutex{}
- // openCensusTracingEnabled is true if the environment variable
- // GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
- // case-insensitive value "opencensus".
- openCensusTracingEnabled bool = strings.EqualFold(strings.TrimSpace(
- os.Getenv(TelemetryPlatformTracingVar)), TelemetryPlatformTracingOpenCensus)
-)
-
-// SetOpenTelemetryTracingEnabledField programmatically sets the value provided
-// by GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING for the purpose of
-// unit testing. Do not invoke it directly. Intended for use only in unit tests.
-// Restore original value after each test.
-//
-// The default experimental tracing support for OpenCensus is now deprecated in
-// the Google Cloud client libraries for Go.
-func SetOpenTelemetryTracingEnabledField(enabled bool) {
- openCensusTracingEnabledMu.Lock()
- defer openCensusTracingEnabledMu.Unlock()
- openCensusTracingEnabled = !enabled
-}
-
-// Deprecated: The default experimental tracing support for OpenCensus is now
-// deprecated in the Google Cloud client libraries for Go.
-//
-// IsOpenCensusTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is set to the
-// case-insensitive value "opencensus".
-func IsOpenCensusTracingEnabled() bool {
- openCensusTracingEnabledMu.RLock()
- defer openCensusTracingEnabledMu.RUnlock()
- return openCensusTracingEnabled
-}
-
-// IsOpenTelemetryTracingEnabled returns true if the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING is NOT set to the
-// case-insensitive value "opencensus".
-func IsOpenTelemetryTracingEnabled() bool {
- return !IsOpenCensusTracingEnabled()
-}
-
-// StartSpan adds a span to the trace with the given name. If IsOpenCensusTracingEnabled
-// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
-// returns true, the span will be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opencensus" before loading the package to use OpenCensus tracing.
-// The default was OpenCensus until May 29, 2024, at which time the default was
-// changed to "opencensus". Explicitly setting the environment variable to
-// "opencensus" is required to continue using OpenCensus tracing.
+// StartSpan adds an OpenTelemetry span to the trace with the given name.
//
// The default experimental tracing support for OpenCensus is now deprecated in
// the Google Cloud client libraries for Go.
func StartSpan(ctx context.Context, name string) context.Context {
- if IsOpenTelemetryTracingEnabled() {
- ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
- } else {
- ctx, _ = trace.StartSpan(ctx, name)
- }
+ ctx, _ = otel.GetTracerProvider().Tracer(OpenTelemetryTracerName).Start(ctx, name)
return ctx
}
-// EndSpan ends a span with the given error. If IsOpenCensusTracingEnabled
-// returns true, the span will be an OpenCensus span. If IsOpenTelemetryTracingEnabled
-// returns true, the span will be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opencensus" before loading the package to use OpenCensus tracing.
-// The default was OpenCensus until May 29, 2024, at which time the default was
-// changed to "opencensus". Explicitly setting the environment variable to
-// "opencensus" is required to continue using OpenCensus tracing.
+// EndSpan ends an OpenTelemetry span with the given error.
//
// The default experimental tracing support for OpenCensus is now deprecated in
// the Google Cloud client libraries for Go.
func EndSpan(ctx context.Context, err error) {
- if IsOpenTelemetryTracingEnabled() {
- span := ottrace.SpanFromContext(ctx)
- if err != nil {
- span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err))
- span.RecordError(err)
- }
- span.End()
- } else {
- span := trace.FromContext(ctx)
- if err != nil {
- span.SetStatus(toStatus(err))
- }
- span.End()
- }
-}
-
-// toStatus converts an error to an equivalent OpenCensus status.
-func toStatus(err error) trace.Status {
- var err2 *googleapi.Error
- if ok := errors.As(err, &err2); ok {
- return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
- } else if s, ok := status.FromError(err); ok {
- return trace.Status{Code: int32(s.Code()), Message: s.Message()}
- } else {
- return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
+ span := trace.SpanFromContext(ctx)
+ if err != nil {
+ span.SetStatus(codes.Error, toOpenTelemetryStatusDescription(err))
+ span.RecordError(err)
}
+ span.End()
}
// toOpenTelemetryStatus converts an error to an equivalent OpenTelemetry status description.
@@ -169,87 +65,13 @@ func toOpenTelemetryStatusDescription(err error) string {
}
}
-// TODO(deklerk): switch to using OpenCensus function when it becomes available.
-// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
-func httpStatusCodeToOCCode(httpStatusCode int) int32 {
- switch httpStatusCode {
- case 200:
- return int32(code.Code_OK)
- case 499:
- return int32(code.Code_CANCELLED)
- case 500:
- return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
- case 400:
- return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
- case 504:
- return int32(code.Code_DEADLINE_EXCEEDED)
- case 404:
- return int32(code.Code_NOT_FOUND)
- case 409:
- return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
- case 403:
- return int32(code.Code_PERMISSION_DENIED)
- case 401:
- return int32(code.Code_UNAUTHENTICATED)
- case 429:
- return int32(code.Code_RESOURCE_EXHAUSTED)
- case 501:
- return int32(code.Code_UNIMPLEMENTED)
- case 503:
- return int32(code.Code_UNAVAILABLE)
- default:
- return int32(code.Code_UNKNOWN)
- }
-}
-
-// TracePrintf retrieves the current OpenCensus or OpenTelemetry span from context, then:
-// * calls Span.Annotatef if OpenCensus is enabled; or
-// * calls Span.AddEvent if OpenTelemetry is enabled.
-//
-// If IsOpenCensusTracingEnabled returns true, the expected span must be an
-// OpenCensus span. If IsOpenTelemetryTracingEnabled returns true, the expected
-// span must be an OpenTelemetry span. Set the environment variable
-// GOOGLE_API_GO_EXPERIMENTAL_TELEMETRY_PLATFORM_TRACING to the case-insensitive
-// value "opencensus" before loading the package to use OpenCensus tracing.
-// The default was OpenCensus until May 29, 2024, at which time the default was
-// changed to "opencensus". Explicitly setting the environment variable to
-// "opencensus" is required to continue using OpenCensus tracing.
-//
-// The default experimental tracing support for OpenCensus is now deprecated in
-// the Google Cloud client libraries for Go.
+// TracePrintf retrieves the current OpenTelemetry span from context, then calls
+// Span.AddEvent. The expected span must be an OpenTelemetry span. The default
+// experimental tracing support for OpenCensus is now deprecated in the Google
+// Cloud client libraries for Go.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
- if IsOpenTelemetryTracingEnabled() {
- attrs := otAttrs(attrMap)
- ottrace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), ottrace.WithAttributes(attrs...))
- } else {
- attrs := ocAttrs(attrMap)
- // TODO: (odeke-em): perhaps just pass around spans due to the cost
- // incurred from using trace.FromContext(ctx) yet we could avoid
- // throwing away the work done by ctx, span := trace.StartSpan.
- trace.FromContext(ctx).Annotatef(attrs, format, args...)
- }
-}
-
-// ocAttrs converts a generic map to OpenCensus attributes.
-func ocAttrs(attrMap map[string]interface{}) []trace.Attribute {
- var attrs []trace.Attribute
- for k, v := range attrMap {
- var a trace.Attribute
- switch v := v.(type) {
- case string:
- a = trace.StringAttribute(k, v)
- case bool:
- a = trace.BoolAttribute(k, v)
- case int:
- a = trace.Int64Attribute(k, int64(v))
- case int64:
- a = trace.Int64Attribute(k, v)
- default:
- a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
- }
- attrs = append(attrs, a)
- }
- return attrs
+ attrs := otAttrs(attrMap)
+ trace.SpanFromContext(ctx).AddEvent(fmt.Sprintf(format, args...), trace.WithAttributes(attrs...))
}
// otAttrs converts a generic map to OpenTelemetry attributes.
diff --git a/vendor/cloud.google.com/go/monitoring/LICENSE b/vendor/cloud.google.com/go/monitoring/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go
new file mode 100644
index 000000000..9a9408f19
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/alert_policy_client.go
@@ -0,0 +1,403 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newAlertPolicyClientHook clientHook
+
+// AlertPolicyCallOptions contains the retry settings for each method of AlertPolicyClient.
+type AlertPolicyCallOptions struct {
+ ListAlertPolicies []gax.CallOption
+ GetAlertPolicy []gax.CallOption
+ CreateAlertPolicy []gax.CallOption
+ DeleteAlertPolicy []gax.CallOption
+ UpdateAlertPolicy []gax.CallOption
+}
+
+func defaultAlertPolicyGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultAlertPolicyCallOptions() *AlertPolicyCallOptions {
+ return &AlertPolicyCallOptions{
+ ListAlertPolicies: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetAlertPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateAlertPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteAlertPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateAlertPolicy: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ }
+}
+
+// internalAlertPolicyClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalAlertPolicyClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListAlertPolicies(context.Context, *monitoringpb.ListAlertPoliciesRequest, ...gax.CallOption) *AlertPolicyIterator
+ GetAlertPolicy(context.Context, *monitoringpb.GetAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
+ CreateAlertPolicy(context.Context, *monitoringpb.CreateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
+ DeleteAlertPolicy(context.Context, *monitoringpb.DeleteAlertPolicyRequest, ...gax.CallOption) error
+ UpdateAlertPolicy(context.Context, *monitoringpb.UpdateAlertPolicyRequest, ...gax.CallOption) (*monitoringpb.AlertPolicy, error)
+}
+
+// AlertPolicyClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The AlertPolicyService API is used to manage (list, create, delete,
+// edit) alert policies in Cloud Monitoring. An alerting policy is
+// a description of the conditions under which some aspect of your
+// system is considered to be “unhealthy” and the ways to notify
+// people or services about this state. In addition to using this API, alert
+// policies can also be managed through
+// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/),
+// which can be reached by clicking the “Monitoring” tab in
+// Cloud console (at https://console.cloud.google.com/).
+type AlertPolicyClient struct {
+ // The internal transport-dependent client.
+ internalClient internalAlertPolicyClient
+
+ // The call options for this service.
+ CallOptions *AlertPolicyCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *AlertPolicyClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *AlertPolicyClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *AlertPolicyClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListAlertPolicies lists the existing alerting policies for the workspace.
+func (c *AlertPolicyClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
+ return c.internalClient.ListAlertPolicies(ctx, req, opts...)
+}
+
+// GetAlertPolicy gets a single alerting policy.
+func (c *AlertPolicyClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ return c.internalClient.GetAlertPolicy(ctx, req, opts...)
+}
+
+// CreateAlertPolicy creates a new alerting policy.
+//
+// Design your application to single-thread API calls that modify the state of
+// alerting policies in a single project. This includes calls to
+// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+func (c *AlertPolicyClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ return c.internalClient.CreateAlertPolicy(ctx, req, opts...)
+}
+
+// DeleteAlertPolicy deletes an alerting policy.
+//
+// Design your application to single-thread API calls that modify the state of
+// alerting policies in a single project. This includes calls to
+// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+func (c *AlertPolicyClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteAlertPolicy(ctx, req, opts...)
+}
+
+// UpdateAlertPolicy updates an alerting policy. You can either replace the entire policy with
+// a new one or replace only certain fields in the current alerting policy by
+// specifying the fields to be updated via updateMask. Returns the
+// updated alerting policy.
+//
+// Design your application to single-thread API calls that modify the state of
+// alerting policies in a single project. This includes calls to
+// CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+func (c *AlertPolicyClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ return c.internalClient.UpdateAlertPolicy(ctx, req, opts...)
+}
+
+// alertPolicyGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type alertPolicyGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing AlertPolicyClient
+ CallOptions **AlertPolicyCallOptions
+
+ // The gRPC API client.
+ alertPolicyClient monitoringpb.AlertPolicyServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewAlertPolicyClient creates a new alert policy service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The AlertPolicyService API is used to manage (list, create, delete,
+// edit) alert policies in Cloud Monitoring. An alerting policy is
+// a description of the conditions under which some aspect of your
+// system is considered to be “unhealthy” and the ways to notify
+// people or services about this state. In addition to using this API, alert
+// policies can also be managed through
+// Cloud Monitoring (at https://cloud.google.com/monitoring/docs/),
+// which can be reached by clicking the “Monitoring” tab in
+// Cloud console (at https://console.cloud.google.com/).
+func NewAlertPolicyClient(ctx context.Context, opts ...option.ClientOption) (*AlertPolicyClient, error) {
+ clientOpts := defaultAlertPolicyGRPCClientOptions()
+ if newAlertPolicyClientHook != nil {
+ hookOpts, err := newAlertPolicyClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := AlertPolicyClient{CallOptions: defaultAlertPolicyCallOptions()}
+
+ c := &alertPolicyGRPCClient{
+ connPool: connPool,
+ alertPolicyClient: monitoringpb.NewAlertPolicyServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *alertPolicyGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *alertPolicyGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *alertPolicyGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *alertPolicyGRPCClient) ListAlertPolicies(ctx context.Context, req *monitoringpb.ListAlertPoliciesRequest, opts ...gax.CallOption) *AlertPolicyIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListAlertPolicies[0:len((*c.CallOptions).ListAlertPolicies):len((*c.CallOptions).ListAlertPolicies)], opts...)
+ it := &AlertPolicyIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListAlertPoliciesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.AlertPolicy, string, error) {
+ resp := &monitoringpb.ListAlertPoliciesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.alertPolicyClient.ListAlertPolicies, req, settings.GRPC, c.logger, "ListAlertPolicies")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetAlertPolicies(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *alertPolicyGRPCClient) GetAlertPolicy(ctx context.Context, req *monitoringpb.GetAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetAlertPolicy[0:len((*c.CallOptions).GetAlertPolicy):len((*c.CallOptions).GetAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.alertPolicyClient.GetAlertPolicy, req, settings.GRPC, c.logger, "GetAlertPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *alertPolicyGRPCClient) CreateAlertPolicy(ctx context.Context, req *monitoringpb.CreateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateAlertPolicy[0:len((*c.CallOptions).CreateAlertPolicy):len((*c.CallOptions).CreateAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.alertPolicyClient.CreateAlertPolicy, req, settings.GRPC, c.logger, "CreateAlertPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *alertPolicyGRPCClient) DeleteAlertPolicy(ctx context.Context, req *monitoringpb.DeleteAlertPolicyRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteAlertPolicy[0:len((*c.CallOptions).DeleteAlertPolicy):len((*c.CallOptions).DeleteAlertPolicy)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.alertPolicyClient.DeleteAlertPolicy, req, settings.GRPC, c.logger, "DeleteAlertPolicy")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *alertPolicyGRPCClient) UpdateAlertPolicy(ctx context.Context, req *monitoringpb.UpdateAlertPolicyRequest, opts ...gax.CallOption) (*monitoringpb.AlertPolicy, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "alert_policy.name", url.QueryEscape(req.GetAlertPolicy().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateAlertPolicy[0:len((*c.CallOptions).UpdateAlertPolicy):len((*c.CallOptions).UpdateAlertPolicy)], opts...)
+ var resp *monitoringpb.AlertPolicy
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.alertPolicyClient.UpdateAlertPolicy, req, settings.GRPC, c.logger, "UpdateAlertPolicy")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go
new file mode 100644
index 000000000..8dc963458
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary.go
@@ -0,0 +1,682 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ "google.golang.org/api/iterator"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+// AlertPolicyIterator manages a stream of *monitoringpb.AlertPolicy.
+type AlertPolicyIterator struct {
+ items []*monitoringpb.AlertPolicy
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.AlertPolicy, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *AlertPolicyIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *AlertPolicyIterator) Next() (*monitoringpb.AlertPolicy, error) {
+ var item *monitoringpb.AlertPolicy
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *AlertPolicyIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *AlertPolicyIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// GroupIterator manages a stream of *monitoringpb.Group.
+type GroupIterator struct {
+ items []*monitoringpb.Group
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *GroupIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *GroupIterator) Next() (*monitoringpb.Group, error) {
+ var item *monitoringpb.Group
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *GroupIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *GroupIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor.
+type MetricDescriptorIterator struct {
+ items []*metricpb.MetricDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) {
+ var item *metricpb.MetricDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MetricDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MetricDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor.
+type MonitoredResourceDescriptorIterator struct {
+ items []*monitoredrespb.MonitoredResourceDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ var item *monitoredrespb.MonitoredResourceDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource.
+type MonitoredResourceIterator struct {
+ items []*monitoredrespb.MonitoredResource
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) {
+ var item *monitoredrespb.MonitoredResource
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *MonitoredResourceIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *MonitoredResourceIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// NotificationChannelDescriptorIterator manages a stream of *monitoringpb.NotificationChannelDescriptor.
+type NotificationChannelDescriptorIterator struct {
+ items []*monitoringpb.NotificationChannelDescriptor
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannelDescriptor, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *NotificationChannelDescriptorIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *NotificationChannelDescriptorIterator) Next() (*monitoringpb.NotificationChannelDescriptor, error) {
+ var item *monitoringpb.NotificationChannelDescriptor
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *NotificationChannelDescriptorIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *NotificationChannelDescriptorIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// NotificationChannelIterator manages a stream of *monitoringpb.NotificationChannel.
+type NotificationChannelIterator struct {
+ items []*monitoringpb.NotificationChannel
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.NotificationChannel, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *NotificationChannelIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *NotificationChannelIterator) Next() (*monitoringpb.NotificationChannel, error) {
+ var item *monitoringpb.NotificationChannel
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *NotificationChannelIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *NotificationChannelIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// ServiceIterator manages a stream of *monitoringpb.Service.
+type ServiceIterator struct {
+ items []*monitoringpb.Service
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Service, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *ServiceIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *ServiceIterator) Next() (*monitoringpb.Service, error) {
+ var item *monitoringpb.Service
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *ServiceIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *ServiceIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// ServiceLevelObjectiveIterator manages a stream of *monitoringpb.ServiceLevelObjective.
+type ServiceLevelObjectiveIterator struct {
+ items []*monitoringpb.ServiceLevelObjective
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.ServiceLevelObjective, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *ServiceLevelObjectiveIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *ServiceLevelObjectiveIterator) Next() (*monitoringpb.ServiceLevelObjective, error) {
+ var item *monitoringpb.ServiceLevelObjective
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *ServiceLevelObjectiveIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *ServiceLevelObjectiveIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// SnoozeIterator manages a stream of *monitoringpb.Snooze.
+type SnoozeIterator struct {
+ items []*monitoringpb.Snooze
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Snooze, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *SnoozeIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *SnoozeIterator) Next() (*monitoringpb.Snooze, error) {
+ var item *monitoringpb.Snooze
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *SnoozeIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *SnoozeIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// TimeSeriesDataIterator manages a stream of *monitoringpb.TimeSeriesData.
+type TimeSeriesDataIterator struct {
+ items []*monitoringpb.TimeSeriesData
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeriesData, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *TimeSeriesDataIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TimeSeriesDataIterator) Next() (*monitoringpb.TimeSeriesData, error) {
+ var item *monitoringpb.TimeSeriesData
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *TimeSeriesDataIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *TimeSeriesDataIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries.
+type TimeSeriesIterator struct {
+ items []*monitoringpb.TimeSeries
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) {
+ var item *monitoringpb.TimeSeries
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *TimeSeriesIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *TimeSeriesIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// UptimeCheckConfigIterator manages a stream of *monitoringpb.UptimeCheckConfig.
+type UptimeCheckConfigIterator struct {
+ items []*monitoringpb.UptimeCheckConfig
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckConfig, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *UptimeCheckConfigIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *UptimeCheckConfigIterator) Next() (*monitoringpb.UptimeCheckConfig, error) {
+ var item *monitoringpb.UptimeCheckConfig
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *UptimeCheckConfigIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *UptimeCheckConfigIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
+
+// UptimeCheckIpIterator manages a stream of *monitoringpb.UptimeCheckIp.
+type UptimeCheckIpIterator struct {
+ items []*monitoringpb.UptimeCheckIp
+ pageInfo *iterator.PageInfo
+ nextFunc func() error
+
+ // Response is the raw response for the current page.
+ // It must be cast to the RPC response type.
+ // Calling Next() or InternalFetch() updates this value.
+ Response interface{}
+
+ // InternalFetch is for use by the Google Cloud Libraries only.
+ // It is not part of the stable interface of this package.
+ //
+ // InternalFetch returns results from a single call to the underlying RPC.
+ // The number of results is no greater than pageSize.
+ // If there are no more results, nextPageToken is empty and err is nil.
+ InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.UptimeCheckIp, nextPageToken string, err error)
+}
+
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
+func (it *UptimeCheckIpIterator) PageInfo() *iterator.PageInfo {
+ return it.pageInfo
+}
+
+// Next returns the next result. Its second return value is iterator.Done if there are no more
+// results. Once Next returns Done, all subsequent calls will return Done.
+func (it *UptimeCheckIpIterator) Next() (*monitoringpb.UptimeCheckIp, error) {
+ var item *monitoringpb.UptimeCheckIp
+ if err := it.nextFunc(); err != nil {
+ return item, err
+ }
+ item = it.items[0]
+ it.items = it.items[1:]
+ return item, nil
+}
+
+func (it *UptimeCheckIpIterator) bufLen() int {
+ return len(it.items)
+}
+
+func (it *UptimeCheckIpIterator) takeBuf() interface{} {
+ b := it.items
+ it.items = nil
+ return b
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go
new file mode 100644
index 000000000..bf559553b
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/auxiliary_go123.go
@@ -0,0 +1,112 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package monitoring
+
+import (
+ "iter"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ "github.com/googleapis/gax-go/v2/iterator"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *AlertPolicyIterator) All() iter.Seq2[*monitoringpb.AlertPolicy, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *GroupIterator) All() iter.Seq2[*monitoringpb.Group, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *MetricDescriptorIterator) All() iter.Seq2[*metricpb.MetricDescriptor, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *MonitoredResourceDescriptorIterator) All() iter.Seq2[*monitoredrespb.MonitoredResourceDescriptor, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *MonitoredResourceIterator) All() iter.Seq2[*monitoredrespb.MonitoredResource, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *NotificationChannelDescriptorIterator) All() iter.Seq2[*monitoringpb.NotificationChannelDescriptor, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *NotificationChannelIterator) All() iter.Seq2[*monitoringpb.NotificationChannel, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ServiceIterator) All() iter.Seq2[*monitoringpb.Service, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ServiceLevelObjectiveIterator) All() iter.Seq2[*monitoringpb.ServiceLevelObjective, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *SnoozeIterator) All() iter.Seq2[*monitoringpb.Snooze, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *TimeSeriesDataIterator) All() iter.Seq2[*monitoringpb.TimeSeriesData, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *TimeSeriesIterator) All() iter.Seq2[*monitoringpb.TimeSeries, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *UptimeCheckConfigIterator) All() iter.Seq2[*monitoringpb.UptimeCheckConfig, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *UptimeCheckIpIterator) All() iter.Seq2[*monitoringpb.UptimeCheckIp, error] {
+ return iterator.RangeAdapter(it.Next)
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go
new file mode 100644
index 000000000..1d5136eda
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/doc.go
@@ -0,0 +1,85 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+// Package monitoring is an auto-generated package for the
+// Cloud Monitoring API.
+//
+// Manages your Cloud Monitoring data and configurations.
+//
+// NOTE: This package is in beta. It is not stable, and may be subject to changes.
+//
+// # General documentation
+//
+// For information that is relevant for all client libraries please reference
+// https://pkg.go.dev/cloud.google.com/go#pkg-overview. Some information on this
+// page includes:
+//
+// - [Authentication and Authorization]
+// - [Timeouts and Cancellation]
+// - [Testing against Client Libraries]
+// - [Debugging Client Libraries]
+// - [Inspecting errors]
+//
+// # Example usage
+//
+// To get started with this package, create a client.
+//
+// // go get cloud.google.com/go/monitoring/apiv3/v2@latest
+// ctx := context.Background()
+// // This snippet has been automatically generated and should be regarded as a code template only.
+// // It will require modifications to work:
+// // - It may require correct/in-range values for request initialization.
+// // - It may require specifying regional endpoints when creating the service client as shown in:
+// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
+// c, err := monitoring.NewAlertPolicyClient(ctx)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// defer c.Close()
+//
+// The client will use your default application credentials. Clients should be reused instead of created as needed.
+// The methods of Client are safe for concurrent use by multiple goroutines.
+// The returned client must be Closed when it is done being used.
+//
+// # Using the Client
+//
+// The following is an example of making an API call with the newly created client, mentioned above.
+//
+// req := &monitoringpb.CreateAlertPolicyRequest{
+// // TODO: Fill request struct fields.
+// // See https://pkg.go.dev/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb#CreateAlertPolicyRequest.
+// }
+// resp, err := c.CreateAlertPolicy(ctx, req)
+// if err != nil {
+// // TODO: Handle error.
+// }
+// // TODO: Use resp.
+// _ = resp
+//
+// # Use of Context
+//
+// The ctx passed to NewAlertPolicyClient is used for authentication requests and
+// for creating the underlying connection, but is not used for subsequent calls.
+// Individual methods on the client use the ctx given to them.
+//
+// To close the open connection, use the Close() method.
+//
+// [Authentication and Authorization]: https://pkg.go.dev/cloud.google.com/go#hdr-Authentication_and_Authorization
+// [Timeouts and Cancellation]: https://pkg.go.dev/cloud.google.com/go#hdr-Timeouts_and_Cancellation
+// [Testing against Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Testing
+// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
+// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
+package monitoring // import "cloud.google.com/go/monitoring/apiv3/v2"
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json
new file mode 100644
index 000000000..a33cb6fcf
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/gapic_metadata.json
@@ -0,0 +1,336 @@
+{
+ "schema": "1.0",
+ "comment": "This file maps proto services/RPCs to the corresponding library clients/methods.",
+ "language": "go",
+ "protoPackage": "google.monitoring.v3",
+ "libraryPackage": "cloud.google.com/go/monitoring/apiv3/v2",
+ "services": {
+ "AlertPolicyService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "AlertPolicyClient",
+ "rpcs": {
+ "CreateAlertPolicy": {
+ "methods": [
+ "CreateAlertPolicy"
+ ]
+ },
+ "DeleteAlertPolicy": {
+ "methods": [
+ "DeleteAlertPolicy"
+ ]
+ },
+ "GetAlertPolicy": {
+ "methods": [
+ "GetAlertPolicy"
+ ]
+ },
+ "ListAlertPolicies": {
+ "methods": [
+ "ListAlertPolicies"
+ ]
+ },
+ "UpdateAlertPolicy": {
+ "methods": [
+ "UpdateAlertPolicy"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "GroupService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "GroupClient",
+ "rpcs": {
+ "CreateGroup": {
+ "methods": [
+ "CreateGroup"
+ ]
+ },
+ "DeleteGroup": {
+ "methods": [
+ "DeleteGroup"
+ ]
+ },
+ "GetGroup": {
+ "methods": [
+ "GetGroup"
+ ]
+ },
+ "ListGroupMembers": {
+ "methods": [
+ "ListGroupMembers"
+ ]
+ },
+ "ListGroups": {
+ "methods": [
+ "ListGroups"
+ ]
+ },
+ "UpdateGroup": {
+ "methods": [
+ "UpdateGroup"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "MetricService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "MetricClient",
+ "rpcs": {
+ "CreateMetricDescriptor": {
+ "methods": [
+ "CreateMetricDescriptor"
+ ]
+ },
+ "CreateServiceTimeSeries": {
+ "methods": [
+ "CreateServiceTimeSeries"
+ ]
+ },
+ "CreateTimeSeries": {
+ "methods": [
+ "CreateTimeSeries"
+ ]
+ },
+ "DeleteMetricDescriptor": {
+ "methods": [
+ "DeleteMetricDescriptor"
+ ]
+ },
+ "GetMetricDescriptor": {
+ "methods": [
+ "GetMetricDescriptor"
+ ]
+ },
+ "GetMonitoredResourceDescriptor": {
+ "methods": [
+ "GetMonitoredResourceDescriptor"
+ ]
+ },
+ "ListMetricDescriptors": {
+ "methods": [
+ "ListMetricDescriptors"
+ ]
+ },
+ "ListMonitoredResourceDescriptors": {
+ "methods": [
+ "ListMonitoredResourceDescriptors"
+ ]
+ },
+ "ListTimeSeries": {
+ "methods": [
+ "ListTimeSeries"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "NotificationChannelService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "NotificationChannelClient",
+ "rpcs": {
+ "CreateNotificationChannel": {
+ "methods": [
+ "CreateNotificationChannel"
+ ]
+ },
+ "DeleteNotificationChannel": {
+ "methods": [
+ "DeleteNotificationChannel"
+ ]
+ },
+ "GetNotificationChannel": {
+ "methods": [
+ "GetNotificationChannel"
+ ]
+ },
+ "GetNotificationChannelDescriptor": {
+ "methods": [
+ "GetNotificationChannelDescriptor"
+ ]
+ },
+ "GetNotificationChannelVerificationCode": {
+ "methods": [
+ "GetNotificationChannelVerificationCode"
+ ]
+ },
+ "ListNotificationChannelDescriptors": {
+ "methods": [
+ "ListNotificationChannelDescriptors"
+ ]
+ },
+ "ListNotificationChannels": {
+ "methods": [
+ "ListNotificationChannels"
+ ]
+ },
+ "SendNotificationChannelVerificationCode": {
+ "methods": [
+ "SendNotificationChannelVerificationCode"
+ ]
+ },
+ "UpdateNotificationChannel": {
+ "methods": [
+ "UpdateNotificationChannel"
+ ]
+ },
+ "VerifyNotificationChannel": {
+ "methods": [
+ "VerifyNotificationChannel"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "QueryService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "QueryClient",
+ "rpcs": {
+ "QueryTimeSeries": {
+ "methods": [
+ "QueryTimeSeries"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "ServiceMonitoringService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "ServiceMonitoringClient",
+ "rpcs": {
+ "CreateService": {
+ "methods": [
+ "CreateService"
+ ]
+ },
+ "CreateServiceLevelObjective": {
+ "methods": [
+ "CreateServiceLevelObjective"
+ ]
+ },
+ "DeleteService": {
+ "methods": [
+ "DeleteService"
+ ]
+ },
+ "DeleteServiceLevelObjective": {
+ "methods": [
+ "DeleteServiceLevelObjective"
+ ]
+ },
+ "GetService": {
+ "methods": [
+ "GetService"
+ ]
+ },
+ "GetServiceLevelObjective": {
+ "methods": [
+ "GetServiceLevelObjective"
+ ]
+ },
+ "ListServiceLevelObjectives": {
+ "methods": [
+ "ListServiceLevelObjectives"
+ ]
+ },
+ "ListServices": {
+ "methods": [
+ "ListServices"
+ ]
+ },
+ "UpdateService": {
+ "methods": [
+ "UpdateService"
+ ]
+ },
+ "UpdateServiceLevelObjective": {
+ "methods": [
+ "UpdateServiceLevelObjective"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "SnoozeService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "SnoozeClient",
+ "rpcs": {
+ "CreateSnooze": {
+ "methods": [
+ "CreateSnooze"
+ ]
+ },
+ "GetSnooze": {
+ "methods": [
+ "GetSnooze"
+ ]
+ },
+ "ListSnoozes": {
+ "methods": [
+ "ListSnoozes"
+ ]
+ },
+ "UpdateSnooze": {
+ "methods": [
+ "UpdateSnooze"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "UptimeCheckService": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "UptimeCheckClient",
+ "rpcs": {
+ "CreateUptimeCheckConfig": {
+ "methods": [
+ "CreateUptimeCheckConfig"
+ ]
+ },
+ "DeleteUptimeCheckConfig": {
+ "methods": [
+ "DeleteUptimeCheckConfig"
+ ]
+ },
+ "GetUptimeCheckConfig": {
+ "methods": [
+ "GetUptimeCheckConfig"
+ ]
+ },
+ "ListUptimeCheckConfigs": {
+ "methods": [
+ "ListUptimeCheckConfigs"
+ ]
+ },
+ "ListUptimeCheckIps": {
+ "methods": [
+ "ListUptimeCheckIps"
+ ]
+ },
+ "UpdateUptimeCheckConfig": {
+ "methods": [
+ "UpdateUptimeCheckConfig"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go
new file mode 100644
index 000000000..a45e1aec2
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/group_client.go
@@ -0,0 +1,470 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newGroupClientHook clientHook
+
+// GroupCallOptions contains the retry settings for each method of GroupClient.
+type GroupCallOptions struct {
+ ListGroups []gax.CallOption
+ GetGroup []gax.CallOption
+ CreateGroup []gax.CallOption
+ UpdateGroup []gax.CallOption
+ DeleteGroup []gax.CallOption
+ ListGroupMembers []gax.CallOption
+}
+
+func defaultGroupGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultGroupCallOptions() *GroupCallOptions {
+ return &GroupCallOptions{
+ ListGroups: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetGroup: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateGroup: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ UpdateGroup: []gax.CallOption{
+ gax.WithTimeout(180000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ DeleteGroup: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListGroupMembers: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ }
+}
+
+// internalGroupClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalGroupClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListGroups(context.Context, *monitoringpb.ListGroupsRequest, ...gax.CallOption) *GroupIterator
+ GetGroup(context.Context, *monitoringpb.GetGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
+ CreateGroup(context.Context, *monitoringpb.CreateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
+ UpdateGroup(context.Context, *monitoringpb.UpdateGroupRequest, ...gax.CallOption) (*monitoringpb.Group, error)
+ DeleteGroup(context.Context, *monitoringpb.DeleteGroupRequest, ...gax.CallOption) error
+ ListGroupMembers(context.Context, *monitoringpb.ListGroupMembersRequest, ...gax.CallOption) *MonitoredResourceIterator
+}
+
+// GroupClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The Group API lets you inspect and manage your
+// groups (at #google.monitoring.v3.Group).
+//
+// A group is a named filter that is used to identify
+// a collection of monitored resources. Groups are typically used to
+// mirror the physical and/or logical topology of the environment.
+// Because group membership is computed dynamically, monitored
+// resources that are started in the future are automatically placed
+// in matching groups. By using a group to name monitored resources in,
+// for example, an alert policy, the target of that alert policy is
+// updated automatically as monitored resources are added and removed
+// from the infrastructure.
+type GroupClient struct {
+ // The internal transport-dependent client.
+ internalClient internalGroupClient
+
+ // The call options for this service.
+ CallOptions *GroupCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *GroupClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *GroupClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *GroupClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListGroups lists the existing groups.
+func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
+ return c.internalClient.ListGroups(ctx, req, opts...)
+}
+
+// GetGroup gets a single group.
+func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ return c.internalClient.GetGroup(ctx, req, opts...)
+}
+
+// CreateGroup creates a new group.
+func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ return c.internalClient.CreateGroup(ctx, req, opts...)
+}
+
+// UpdateGroup updates an existing group.
+// You can change any group attributes except name.
+func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ return c.internalClient.UpdateGroup(ctx, req, opts...)
+}
+
+// DeleteGroup deletes an existing group.
+func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteGroup(ctx, req, opts...)
+}
+
+// ListGroupMembers lists the monitored resources that are members of a group.
+func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
+ return c.internalClient.ListGroupMembers(ctx, req, opts...)
+}
+
+// groupGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type groupGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing GroupClient
+ CallOptions **GroupCallOptions
+
+ // The gRPC API client.
+ groupClient monitoringpb.GroupServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewGroupClient creates a new group service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The Group API lets you inspect and manage your
+// groups (at #google.monitoring.v3.Group).
+//
+// A group is a named filter that is used to identify
+// a collection of monitored resources. Groups are typically used to
+// mirror the physical and/or logical topology of the environment.
+// Because group membership is computed dynamically, monitored
+// resources that are started in the future are automatically placed
+// in matching groups. By using a group to name monitored resources in,
+// for example, an alert policy, the target of that alert policy is
+// updated automatically as monitored resources are added and removed
+// from the infrastructure.
+func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) {
+ clientOpts := defaultGroupGRPCClientOptions()
+ if newGroupClientHook != nil {
+ hookOpts, err := newGroupClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := GroupClient{CallOptions: defaultGroupCallOptions()}
+
+ c := &groupGRPCClient{
+ connPool: connPool,
+ groupClient: monitoringpb.NewGroupServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *groupGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *groupGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *groupGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *groupGRPCClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListGroups[0:len((*c.CallOptions).ListGroups):len((*c.CallOptions).ListGroups)], opts...)
+ it := &GroupIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListGroupsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) {
+ resp := &monitoringpb.ListGroupsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.ListGroups, req, settings.GRPC, c.logger, "ListGroups")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetGroup(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *groupGRPCClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetGroup[0:len((*c.CallOptions).GetGroup):len((*c.CallOptions).GetGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.GetGroup, req, settings.GRPC, c.logger, "GetGroup")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *groupGRPCClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateGroup[0:len((*c.CallOptions).CreateGroup):len((*c.CallOptions).CreateGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.CreateGroup, req, settings.GRPC, c.logger, "CreateGroup")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *groupGRPCClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "group.name", url.QueryEscape(req.GetGroup().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateGroup[0:len((*c.CallOptions).UpdateGroup):len((*c.CallOptions).UpdateGroup)], opts...)
+ var resp *monitoringpb.Group
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.UpdateGroup, req, settings.GRPC, c.logger, "UpdateGroup")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *groupGRPCClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteGroup[0:len((*c.CallOptions).DeleteGroup):len((*c.CallOptions).DeleteGroup)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.groupClient.DeleteGroup, req, settings.GRPC, c.logger, "DeleteGroup")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *groupGRPCClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListGroupMembers[0:len((*c.CallOptions).ListGroupMembers):len((*c.CallOptions).ListGroupMembers)], opts...)
+ it := &MonitoredResourceIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListGroupMembersRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) {
+ resp := &monitoringpb.ListGroupMembersResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.groupClient.ListGroupMembers, req, settings.GRPC, c.logger, "ListGroupMembers")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetMembers(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/helpers.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/helpers.go
new file mode 100644
index 000000000..6719cac86
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/helpers.go
@@ -0,0 +1,64 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/googleapis/gax-go/v2/internallog/grpclog"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+)
+
+const serviceName = "monitoring.googleapis.com"
+
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+var versionClient string
+
+func getVersionClient() string {
+ if versionClient == "" {
+ return "UNKNOWN"
+ }
+ return versionClient
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/monitoring",
+ "https://www.googleapis.com/auth/monitoring.read",
+ "https://www.googleapis.com/auth/monitoring.write",
+ }
+}
+
+func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
+ var zero O
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req))
+ resp, err := fn(ctx, req, opts...)
+ if err != nil {
+ return zero, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp))
+ return resp, err
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go
new file mode 100644
index 000000000..29eb4849d
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/metric_client.go
@@ -0,0 +1,582 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ metricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newMetricClientHook clientHook
+
+// MetricCallOptions contains the retry settings for each method of MetricClient.
+type MetricCallOptions struct {
+ ListMonitoredResourceDescriptors []gax.CallOption
+ GetMonitoredResourceDescriptor []gax.CallOption
+ ListMetricDescriptors []gax.CallOption
+ GetMetricDescriptor []gax.CallOption
+ CreateMetricDescriptor []gax.CallOption
+ DeleteMetricDescriptor []gax.CallOption
+ ListTimeSeries []gax.CallOption
+ CreateTimeSeries []gax.CallOption
+ CreateServiceTimeSeries []gax.CallOption
+}
+
+func defaultMetricGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultMetricCallOptions() *MetricCallOptions {
+ return &MetricCallOptions{
+ ListMonitoredResourceDescriptors: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetMonitoredResourceDescriptor: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListMetricDescriptors: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetMetricDescriptor: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateMetricDescriptor: []gax.CallOption{
+ gax.WithTimeout(12000 * time.Millisecond),
+ },
+ DeleteMetricDescriptor: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListTimeSeries: []gax.CallOption{
+ gax.WithTimeout(90000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateTimeSeries: []gax.CallOption{
+ gax.WithTimeout(12000 * time.Millisecond),
+ },
+ CreateServiceTimeSeries: []gax.CallOption{},
+ }
+}
+
+// internalMetricClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalMetricClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListMonitoredResourceDescriptors(context.Context, *monitoringpb.ListMonitoredResourceDescriptorsRequest, ...gax.CallOption) *MonitoredResourceDescriptorIterator
+ GetMonitoredResourceDescriptor(context.Context, *monitoringpb.GetMonitoredResourceDescriptorRequest, ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error)
+ ListMetricDescriptors(context.Context, *monitoringpb.ListMetricDescriptorsRequest, ...gax.CallOption) *MetricDescriptorIterator
+ GetMetricDescriptor(context.Context, *monitoringpb.GetMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error)
+ CreateMetricDescriptor(context.Context, *monitoringpb.CreateMetricDescriptorRequest, ...gax.CallOption) (*metricpb.MetricDescriptor, error)
+ DeleteMetricDescriptor(context.Context, *monitoringpb.DeleteMetricDescriptorRequest, ...gax.CallOption) error
+ ListTimeSeries(context.Context, *monitoringpb.ListTimeSeriesRequest, ...gax.CallOption) *TimeSeriesIterator
+ CreateTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error
+ CreateServiceTimeSeries(context.Context, *monitoringpb.CreateTimeSeriesRequest, ...gax.CallOption) error
+}
+
+// MetricClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// Manages metric descriptors, monitored resource descriptors, and
+// time series data.
+type MetricClient struct {
+ // The internal transport-dependent client.
+ internalClient internalMetricClient
+
+ // The call options for this service.
+ CallOptions *MetricCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *MetricClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *MetricClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *MetricClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter.
+func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+ return c.internalClient.ListMonitoredResourceDescriptors(ctx, req, opts...)
+}
+
+// GetMonitoredResourceDescriptor gets a single monitored resource descriptor.
+func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ return c.internalClient.GetMonitoredResourceDescriptor(ctx, req, opts...)
+}
+
+// ListMetricDescriptors lists metric descriptors that match a filter.
+func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
+ return c.internalClient.ListMetricDescriptors(ctx, req, opts...)
+}
+
+// GetMetricDescriptor gets a single metric descriptor.
+func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ return c.internalClient.GetMetricDescriptor(ctx, req, opts...)
+}
+
+// CreateMetricDescriptor creates a new metric descriptor.
+// The creation is executed asynchronously.
+// User-created metric descriptors define
+// custom metrics (at https://cloud.google.com/monitoring/custom-metrics).
+// The metric descriptor is updated if it already exists,
+// except that metric labels are never removed.
+func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ return c.internalClient.CreateMetricDescriptor(ctx, req, opts...)
+}
+
+// DeleteMetricDescriptor deletes a metric descriptor. Only user-created
+// custom metrics (at https://cloud.google.com/monitoring/custom-metrics) can be
+// deleted.
+func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteMetricDescriptor(ctx, req, opts...)
+}
+
+// ListTimeSeries lists time series that match a filter.
+func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
+ return c.internalClient.ListTimeSeries(ctx, req, opts...)
+}
+
+// CreateTimeSeries creates or adds data to one or more time series.
+// The response is empty if all time series in the request were written.
+// If any time series could not be written, a corresponding failure message is
+// included in the error response.
+// This method does not support
+// resource locations constraint of an organization
+// policy (at https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
+func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ return c.internalClient.CreateTimeSeries(ctx, req, opts...)
+}
+
+// CreateServiceTimeSeries creates or adds data to one or more service time series. A service time
+// series is a time series for a metric from a Google Cloud service. The
+// response is empty if all time series in the request were written. If any
+// time series could not be written, a corresponding failure message is
+// included in the error response. This endpoint rejects writes to
+// user-defined metrics.
+// This method is only for use by Google Cloud services. Use
+// projects.timeSeries.create
+// instead.
+func (c *MetricClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ return c.internalClient.CreateServiceTimeSeries(ctx, req, opts...)
+}
+
+// metricGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type metricGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing MetricClient
+ CallOptions **MetricCallOptions
+
+ // The gRPC API client.
+ metricClient monitoringpb.MetricServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewMetricClient creates a new metric service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// Manages metric descriptors, monitored resource descriptors, and
+// time series data.
+func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) {
+ clientOpts := defaultMetricGRPCClientOptions()
+ if newMetricClientHook != nil {
+ hookOpts, err := newMetricClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := MetricClient{CallOptions: defaultMetricCallOptions()}
+
+ c := &metricGRPCClient{
+ connPool: connPool,
+ metricClient: monitoringpb.NewMetricServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *metricGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *metricGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *metricGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *metricGRPCClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListMonitoredResourceDescriptors[0:len((*c.CallOptions).ListMonitoredResourceDescriptors):len((*c.CallOptions).ListMonitoredResourceDescriptors)], opts...)
+ it := &MonitoredResourceDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMonitoredResourceDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) {
+ resp := &monitoringpb.ListMonitoredResourceDescriptorsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.ListMonitoredResourceDescriptors, req, settings.GRPC, c.logger, "ListMonitoredResourceDescriptors")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetResourceDescriptors(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *metricGRPCClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetMonitoredResourceDescriptor[0:len((*c.CallOptions).GetMonitoredResourceDescriptor):len((*c.CallOptions).GetMonitoredResourceDescriptor)], opts...)
+ var resp *monitoredrespb.MonitoredResourceDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.GetMonitoredResourceDescriptor, req, settings.GRPC, c.logger, "GetMonitoredResourceDescriptor")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *metricGRPCClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListMetricDescriptors[0:len((*c.CallOptions).ListMetricDescriptors):len((*c.CallOptions).ListMetricDescriptors)], opts...)
+ it := &MetricDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListMetricDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) {
+ resp := &monitoringpb.ListMetricDescriptorsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.ListMetricDescriptors, req, settings.GRPC, c.logger, "ListMetricDescriptors")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetMetricDescriptors(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *metricGRPCClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetMetricDescriptor[0:len((*c.CallOptions).GetMetricDescriptor):len((*c.CallOptions).GetMetricDescriptor)], opts...)
+ var resp *metricpb.MetricDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.GetMetricDescriptor, req, settings.GRPC, c.logger, "GetMetricDescriptor")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *metricGRPCClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateMetricDescriptor[0:len((*c.CallOptions).CreateMetricDescriptor):len((*c.CallOptions).CreateMetricDescriptor)], opts...)
+ var resp *metricpb.MetricDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.CreateMetricDescriptor, req, settings.GRPC, c.logger, "CreateMetricDescriptor")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *metricGRPCClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteMetricDescriptor[0:len((*c.CallOptions).DeleteMetricDescriptor):len((*c.CallOptions).DeleteMetricDescriptor)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.metricClient.DeleteMetricDescriptor, req, settings.GRPC, c.logger, "DeleteMetricDescriptor")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *metricGRPCClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListTimeSeries[0:len((*c.CallOptions).ListTimeSeries):len((*c.CallOptions).ListTimeSeries)], opts...)
+ it := &TimeSeriesIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListTimeSeriesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) {
+ resp := &monitoringpb.ListTimeSeriesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.metricClient.ListTimeSeries, req, settings.GRPC, c.logger, "ListTimeSeries")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetTimeSeries(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *metricGRPCClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateTimeSeries[0:len((*c.CallOptions).CreateTimeSeries):len((*c.CallOptions).CreateTimeSeries)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.metricClient.CreateTimeSeries, req, settings.GRPC, c.logger, "CreateTimeSeries")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *metricGRPCClient) CreateServiceTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateServiceTimeSeries[0:len((*c.CallOptions).CreateServiceTimeSeries):len((*c.CallOptions).CreateServiceTimeSeries)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.metricClient.CreateServiceTimeSeries, req, settings.GRPC, c.logger, "CreateServiceTimeSeries")
+ return err
+ }, opts...)
+ return err
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
new file mode 100644
index 000000000..24ca1414b
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert.pb.go
@@ -0,0 +1,2894 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/alert.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ timeofday "google.golang.org/genproto/googleapis/type/timeofday"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Operators for combining conditions.
+type AlertPolicy_ConditionCombinerType int32
+
+const (
+ // An unspecified combiner.
+ AlertPolicy_COMBINE_UNSPECIFIED AlertPolicy_ConditionCombinerType = 0
+ // Combine conditions using the logical `AND` operator. An
+ // incident is created only if all the conditions are met
+ // simultaneously. This combiner is satisfied if all conditions are
+ // met, even if they are met on completely different resources.
+ AlertPolicy_AND AlertPolicy_ConditionCombinerType = 1
+ // Combine conditions using the logical `OR` operator. An incident
+ // is created if any of the listed conditions is met.
+ AlertPolicy_OR AlertPolicy_ConditionCombinerType = 2
+ // Combine conditions using logical `AND` operator, but unlike the regular
+ // `AND` option, an incident is created only if all conditions are met
+ // simultaneously on at least one resource.
+ AlertPolicy_AND_WITH_MATCHING_RESOURCE AlertPolicy_ConditionCombinerType = 3
+)
+
+// Enum value maps for AlertPolicy_ConditionCombinerType.
+var (
+ AlertPolicy_ConditionCombinerType_name = map[int32]string{
+ 0: "COMBINE_UNSPECIFIED",
+ 1: "AND",
+ 2: "OR",
+ 3: "AND_WITH_MATCHING_RESOURCE",
+ }
+ AlertPolicy_ConditionCombinerType_value = map[string]int32{
+ "COMBINE_UNSPECIFIED": 0,
+ "AND": 1,
+ "OR": 2,
+ "AND_WITH_MATCHING_RESOURCE": 3,
+ }
+)
+
+func (x AlertPolicy_ConditionCombinerType) Enum() *AlertPolicy_ConditionCombinerType {
+ p := new(AlertPolicy_ConditionCombinerType)
+ *p = x
+ return p
+}
+
+func (x AlertPolicy_ConditionCombinerType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AlertPolicy_ConditionCombinerType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_alert_proto_enumTypes[0].Descriptor()
+}
+
+func (AlertPolicy_ConditionCombinerType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_alert_proto_enumTypes[0]
+}
+
+func (x AlertPolicy_ConditionCombinerType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AlertPolicy_ConditionCombinerType.Descriptor instead.
+func (AlertPolicy_ConditionCombinerType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// An enumeration of possible severity level for an alerting policy.
+type AlertPolicy_Severity int32
+
+const (
+ // No severity is specified. This is the default value.
+ AlertPolicy_SEVERITY_UNSPECIFIED AlertPolicy_Severity = 0
+ // This is the highest severity level. Use this if the problem could
+ // cause significant damage or downtime.
+ AlertPolicy_CRITICAL AlertPolicy_Severity = 1
+ // This is the medium severity level. Use this if the problem could
+ // cause minor damage or downtime.
+ AlertPolicy_ERROR AlertPolicy_Severity = 2
+ // This is the lowest severity level. Use this if the problem is not causing
+ // any damage or downtime, but could potentially lead to a problem in the
+ // future.
+ AlertPolicy_WARNING AlertPolicy_Severity = 3
+)
+
+// Enum value maps for AlertPolicy_Severity.
+var (
+ AlertPolicy_Severity_name = map[int32]string{
+ 0: "SEVERITY_UNSPECIFIED",
+ 1: "CRITICAL",
+ 2: "ERROR",
+ 3: "WARNING",
+ }
+ AlertPolicy_Severity_value = map[string]int32{
+ "SEVERITY_UNSPECIFIED": 0,
+ "CRITICAL": 1,
+ "ERROR": 2,
+ "WARNING": 3,
+ }
+)
+
+func (x AlertPolicy_Severity) Enum() *AlertPolicy_Severity {
+ p := new(AlertPolicy_Severity)
+ *p = x
+ return p
+}
+
+func (x AlertPolicy_Severity) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AlertPolicy_Severity) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_alert_proto_enumTypes[1].Descriptor()
+}
+
+func (AlertPolicy_Severity) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_alert_proto_enumTypes[1]
+}
+
+func (x AlertPolicy_Severity) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AlertPolicy_Severity.Descriptor instead.
+func (AlertPolicy_Severity) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1}
+}
+
+// A condition control that determines how metric-threshold conditions
+// are evaluated when data stops arriving.
+// This control doesn't affect metric-absence policies.
+type AlertPolicy_Condition_EvaluationMissingData int32
+
+const (
+ // An unspecified evaluation missing data option. Equivalent to
+ // EVALUATION_MISSING_DATA_NO_OP.
+ AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED AlertPolicy_Condition_EvaluationMissingData = 0
+ // If there is no data to evaluate the condition, then evaluate the
+ // condition as false.
+ AlertPolicy_Condition_EVALUATION_MISSING_DATA_INACTIVE AlertPolicy_Condition_EvaluationMissingData = 1
+ // If there is no data to evaluate the condition, then evaluate the
+ // condition as true.
+ AlertPolicy_Condition_EVALUATION_MISSING_DATA_ACTIVE AlertPolicy_Condition_EvaluationMissingData = 2
+ // Do not evaluate the condition to any value if there is no data.
+ AlertPolicy_Condition_EVALUATION_MISSING_DATA_NO_OP AlertPolicy_Condition_EvaluationMissingData = 3
+)
+
+// Enum value maps for AlertPolicy_Condition_EvaluationMissingData.
+var (
+ AlertPolicy_Condition_EvaluationMissingData_name = map[int32]string{
+ 0: "EVALUATION_MISSING_DATA_UNSPECIFIED",
+ 1: "EVALUATION_MISSING_DATA_INACTIVE",
+ 2: "EVALUATION_MISSING_DATA_ACTIVE",
+ 3: "EVALUATION_MISSING_DATA_NO_OP",
+ }
+ AlertPolicy_Condition_EvaluationMissingData_value = map[string]int32{
+ "EVALUATION_MISSING_DATA_UNSPECIFIED": 0,
+ "EVALUATION_MISSING_DATA_INACTIVE": 1,
+ "EVALUATION_MISSING_DATA_ACTIVE": 2,
+ "EVALUATION_MISSING_DATA_NO_OP": 3,
+ }
+)
+
+func (x AlertPolicy_Condition_EvaluationMissingData) Enum() *AlertPolicy_Condition_EvaluationMissingData {
+ p := new(AlertPolicy_Condition_EvaluationMissingData)
+ *p = x
+ return p
+}
+
+func (x AlertPolicy_Condition_EvaluationMissingData) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AlertPolicy_Condition_EvaluationMissingData) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_alert_proto_enumTypes[2].Descriptor()
+}
+
+func (AlertPolicy_Condition_EvaluationMissingData) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_alert_proto_enumTypes[2]
+}
+
+func (x AlertPolicy_Condition_EvaluationMissingData) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_EvaluationMissingData.Descriptor instead.
+func (AlertPolicy_Condition_EvaluationMissingData) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0}
+}
+
+// Control when notifications will be sent out.
+type AlertPolicy_AlertStrategy_NotificationPrompt int32
+
+const (
+ // No strategy specified. Treated as error.
+ AlertPolicy_AlertStrategy_NOTIFICATION_PROMPT_UNSPECIFIED AlertPolicy_AlertStrategy_NotificationPrompt = 0
+ // Notify when an incident is opened.
+ AlertPolicy_AlertStrategy_OPENED AlertPolicy_AlertStrategy_NotificationPrompt = 1
+ // Notify when an incident is closed.
+ AlertPolicy_AlertStrategy_CLOSED AlertPolicy_AlertStrategy_NotificationPrompt = 3
+)
+
+// Enum value maps for AlertPolicy_AlertStrategy_NotificationPrompt.
+var (
+ AlertPolicy_AlertStrategy_NotificationPrompt_name = map[int32]string{
+ 0: "NOTIFICATION_PROMPT_UNSPECIFIED",
+ 1: "OPENED",
+ 3: "CLOSED",
+ }
+ AlertPolicy_AlertStrategy_NotificationPrompt_value = map[string]int32{
+ "NOTIFICATION_PROMPT_UNSPECIFIED": 0,
+ "OPENED": 1,
+ "CLOSED": 3,
+ }
+)
+
+func (x AlertPolicy_AlertStrategy_NotificationPrompt) Enum() *AlertPolicy_AlertStrategy_NotificationPrompt {
+ p := new(AlertPolicy_AlertStrategy_NotificationPrompt)
+ *p = x
+ return p
+}
+
+func (x AlertPolicy_AlertStrategy_NotificationPrompt) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (AlertPolicy_AlertStrategy_NotificationPrompt) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_alert_proto_enumTypes[3].Descriptor()
+}
+
+func (AlertPolicy_AlertStrategy_NotificationPrompt) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_alert_proto_enumTypes[3]
+}
+
+func (x AlertPolicy_AlertStrategy_NotificationPrompt) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy_NotificationPrompt.Descriptor instead.
+func (AlertPolicy_AlertStrategy_NotificationPrompt) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+// A description of the conditions under which some aspect of your system is
+// considered to be "unhealthy" and the ways to notify people or services about
+// this state. For an overview of alerting policies, see
+// [Introduction to Alerting](https://cloud.google.com/monitoring/alerts/).
+type AlertPolicy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. Required if the policy exists. The resource name for this
+ // policy. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
+ //
+ // `[ALERT_POLICY_ID]` is assigned by Cloud Monitoring when the policy
+ // is created. When calling the
+ // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
+ // method, do not include the `name` field in the alerting policy passed as
+ // part of the request.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A short name or phrase used to identify the policy in dashboards,
+ // notifications, and incidents. To avoid confusion, don't use the same
+ // display name for multiple policies in the same project. The name is
+ // limited to 512 Unicode characters.
+ //
+ // The convention for the display_name of a PrometheusQueryLanguageCondition
+ // is "{rule group name}/{alert name}", where the {rule group name} and
+ // {alert name} should be taken from the corresponding Prometheus
+ // configuration file. This convention is not enforced.
+ // In any case the display_name is not a unique key of the AlertPolicy.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Documentation that is included with notifications and incidents related to
+ // this policy. Best practice is for the documentation to include information
+ // to help responders understand, mitigate, escalate, and correct the
+ // underlying problems detected by the alerting policy. Notification channels
+ // that have limited capacity might not show this documentation.
+ Documentation *AlertPolicy_Documentation `protobuf:"bytes,13,opt,name=documentation,proto3" json:"documentation,omitempty"`
+ // User-supplied key/value data to be used for organizing and
+ // identifying the `AlertPolicy` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ //
+ // Note that Prometheus {alert name} is a
+ // [valid Prometheus label
+ // names](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels),
+ // whereas Prometheus {rule group} is an unrestricted UTF-8 string.
+ // This means that they cannot be stored as-is in user labels, because
+ // they may contain characters that are not allowed in user-label values.
+ UserLabels map[string]string `protobuf:"bytes,16,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // A list of conditions for the policy. The conditions are combined by AND or
+ // OR according to the `combiner` field. If the combined conditions evaluate
+ // to true, then an incident is created. A policy can have from one to six
+ // conditions.
+ // If `condition_time_series_query_language` is present, it must be the only
+ // `condition`.
+ // If `condition_monitoring_query_language` is present, it must be the only
+ // `condition`.
+ Conditions []*AlertPolicy_Condition `protobuf:"bytes,12,rep,name=conditions,proto3" json:"conditions,omitempty"`
+ // How to combine the results of multiple conditions to determine if an
+ // incident should be opened.
+ // If `condition_time_series_query_language` is present, this must be
+ // `COMBINE_UNSPECIFIED`.
+ Combiner AlertPolicy_ConditionCombinerType `protobuf:"varint,6,opt,name=combiner,proto3,enum=google.monitoring.v3.AlertPolicy_ConditionCombinerType" json:"combiner,omitempty"`
+ // Whether or not the policy is enabled. On write, the default interpretation
+ // if unset is that the policy is enabled. On read, clients should not make
+ // any assumption about the state if it has not been populated. The
+ // field should always be populated on List and Get operations, unless
+ // a field projection has been specified that strips it out.
+ Enabled *wrapperspb.BoolValue `protobuf:"bytes,17,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Read-only description of how the alerting policy is invalid. This field is
+ // only set when the alerting policy is invalid. An invalid alerting policy
+ // will not generate incidents.
+ Validity *status.Status `protobuf:"bytes,18,opt,name=validity,proto3" json:"validity,omitempty"`
+ // Identifies the notification channels to which notifications should be sent
+ // when incidents are opened or closed or when new violations occur on
+ // an already opened incident. Each element of this array corresponds to
+ // the `name` field in each of the
+ // [`NotificationChannel`][google.monitoring.v3.NotificationChannel]
+ // objects that are returned from the [`ListNotificationChannels`]
+ // [google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // method. The format of the entries in this field is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ NotificationChannels []string `protobuf:"bytes,14,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"`
+ // A read-only record of the creation of the alerting policy. If provided
+ // in a call to create or update, this field will be ignored.
+ CreationRecord *MutationRecord `protobuf:"bytes,10,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"`
+ // A read-only record of the most recent change to the alerting policy. If
+ // provided in a call to create or update, this field will be ignored.
+ MutationRecord *MutationRecord `protobuf:"bytes,11,opt,name=mutation_record,json=mutationRecord,proto3" json:"mutation_record,omitempty"`
+ // Control over how this alerting policy's notification channels are notified.
+ AlertStrategy *AlertPolicy_AlertStrategy `protobuf:"bytes,21,opt,name=alert_strategy,json=alertStrategy,proto3" json:"alert_strategy,omitempty"`
+ // Optional. The severity of an alerting policy indicates how important
+ // incidents generated by that policy are. The severity level will be
+ // displayed on the Incident detail page and in notifications.
+ Severity AlertPolicy_Severity `protobuf:"varint,22,opt,name=severity,proto3,enum=google.monitoring.v3.AlertPolicy_Severity" json:"severity,omitempty"`
+}
+
+func (x *AlertPolicy) Reset() {
+ *x = AlertPolicy{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy) ProtoMessage() {}
+
+func (x *AlertPolicy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy.ProtoReflect.Descriptor instead.
+func (*AlertPolicy) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AlertPolicy) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AlertPolicy) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *AlertPolicy) GetDocumentation() *AlertPolicy_Documentation {
+ if x != nil {
+ return x.Documentation
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetConditions() []*AlertPolicy_Condition {
+ if x != nil {
+ return x.Conditions
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetCombiner() AlertPolicy_ConditionCombinerType {
+ if x != nil {
+ return x.Combiner
+ }
+ return AlertPolicy_COMBINE_UNSPECIFIED
+}
+
+func (x *AlertPolicy) GetEnabled() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.Enabled
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetValidity() *status.Status {
+ if x != nil {
+ return x.Validity
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetNotificationChannels() []string {
+ if x != nil {
+ return x.NotificationChannels
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetCreationRecord() *MutationRecord {
+ if x != nil {
+ return x.CreationRecord
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetMutationRecord() *MutationRecord {
+ if x != nil {
+ return x.MutationRecord
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetAlertStrategy() *AlertPolicy_AlertStrategy {
+ if x != nil {
+ return x.AlertStrategy
+ }
+ return nil
+}
+
+func (x *AlertPolicy) GetSeverity() AlertPolicy_Severity {
+ if x != nil {
+ return x.Severity
+ }
+ return AlertPolicy_SEVERITY_UNSPECIFIED
+}
+
+// Documentation that is included in the notifications and incidents
+// pertaining to this policy.
+type AlertPolicy_Documentation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The body of the documentation, interpreted according to `mime_type`.
+ // The content may not exceed 8,192 Unicode characters and may not exceed
+ // more than 10,240 bytes when encoded in UTF-8 format, whichever is
+ // smaller. This text can be [templatized by using
+ // variables](https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars).
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ // The format of the `content` field. Presently, only the value
+ // `"text/markdown"` is supported. See
+ // [Markdown](https://en.wikipedia.org/wiki/Markdown) for more information.
+ MimeType string `protobuf:"bytes,2,opt,name=mime_type,json=mimeType,proto3" json:"mime_type,omitempty"`
+ // Optional. The subject line of the notification. The subject line may not
+ // exceed 10,240 bytes. In notifications generated by this policy, the
+ // contents of the subject line after variable expansion will be truncated
+ // to 255 bytes or shorter at the latest UTF-8 character boundary. The
+ // 255-byte limit is recommended by [this
+ // thread](https://stackoverflow.com/questions/1592291/what-is-the-email-subject-length-limit).
+ // It is both the limit imposed by some third-party ticketing products and
+ // it is common to define textual fields in databases as VARCHAR(255).
+ //
+ // The contents of the subject line can be [templatized by using
+ // variables](https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars).
+ // If this field is missing or empty, a default subject line will be
+ // generated.
+ Subject string `protobuf:"bytes,3,opt,name=subject,proto3" json:"subject,omitempty"`
+ // Optional. Links to content such as playbooks, repositories, and other
+ // resources. This field can contain up to 3 entries.
+ Links []*AlertPolicy_Documentation_Link `protobuf:"bytes,4,rep,name=links,proto3" json:"links,omitempty"`
+}
+
+func (x *AlertPolicy_Documentation) Reset() {
+ *x = AlertPolicy_Documentation{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Documentation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Documentation) ProtoMessage() {}
+
+func (x *AlertPolicy_Documentation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Documentation.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Documentation) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *AlertPolicy_Documentation) GetContent() string {
+ if x != nil {
+ return x.Content
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Documentation) GetMimeType() string {
+ if x != nil {
+ return x.MimeType
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Documentation) GetSubject() string {
+ if x != nil {
+ return x.Subject
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Documentation) GetLinks() []*AlertPolicy_Documentation_Link {
+ if x != nil {
+ return x.Links
+ }
+ return nil
+}
+
+// A condition is a true/false test that determines when an alerting policy
+// should open an incident. If a condition evaluates to true, it signifies
+// that something is wrong.
+type AlertPolicy_Condition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required if the condition exists. The unique resource name for this
+ // condition. Its format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]/conditions/[CONDITION_ID]
+ //
+ // `[CONDITION_ID]` is assigned by Cloud Monitoring when the
+ // condition is created as part of a new or updated alerting policy.
+ //
+ // When calling the
+ // [alertPolicies.create][google.monitoring.v3.AlertPolicyService.CreateAlertPolicy]
+ // method, do not include the `name` field in the conditions of the
+ // requested alerting policy. Cloud Monitoring creates the
+ // condition identifiers and includes them in the new policy.
+ //
+ // When calling the
+ // [alertPolicies.update][google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy]
+ // method to update a policy, including a condition `name` causes the
+ // existing condition to be updated. Conditions without names are added to
+ // the updated policy. Existing conditions are deleted if they are not
+ // updated.
+ //
+ // Best practice is to preserve `[CONDITION_ID]` if you make only small
+ // changes, such as those to condition thresholds, durations, or trigger
+ // values. Otherwise, treat the change as a new condition and let the
+ // existing condition be deleted.
+ Name string `protobuf:"bytes,12,opt,name=name,proto3" json:"name,omitempty"`
+ // A short name or phrase used to identify the condition in dashboards,
+ // notifications, and incidents. To avoid confusion, don't use the same
+ // display name for multiple conditions in the same policy.
+ DisplayName string `protobuf:"bytes,6,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // Only one of the following condition types will be specified.
+ //
+ // Types that are assignable to Condition:
+ //
+ // *AlertPolicy_Condition_ConditionThreshold
+ // *AlertPolicy_Condition_ConditionAbsent
+ // *AlertPolicy_Condition_ConditionMatchedLog
+ // *AlertPolicy_Condition_ConditionMonitoringQueryLanguage
+ // *AlertPolicy_Condition_ConditionPrometheusQueryLanguage
+ // *AlertPolicy_Condition_ConditionSql
+ Condition isAlertPolicy_Condition_Condition `protobuf_oneof:"condition"`
+}
+
+func (x *AlertPolicy_Condition) Reset() {
+ *x = AlertPolicy_Condition{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *AlertPolicy_Condition) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition) GetCondition() isAlertPolicy_Condition_Condition {
+ if m != nil {
+ return m.Condition
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionThreshold() *AlertPolicy_Condition_MetricThreshold {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionThreshold); ok {
+ return x.ConditionThreshold
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionAbsent() *AlertPolicy_Condition_MetricAbsence {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionAbsent); ok {
+ return x.ConditionAbsent
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionMatchedLog() *AlertPolicy_Condition_LogMatch {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMatchedLog); ok {
+ return x.ConditionMatchedLog
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionMonitoringQueryLanguage() *AlertPolicy_Condition_MonitoringQueryLanguageCondition {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionMonitoringQueryLanguage); ok {
+ return x.ConditionMonitoringQueryLanguage
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionPrometheusQueryLanguage() *AlertPolicy_Condition_PrometheusQueryLanguageCondition {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionPrometheusQueryLanguage); ok {
+ return x.ConditionPrometheusQueryLanguage
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition) GetConditionSql() *AlertPolicy_Condition_SqlCondition {
+ if x, ok := x.GetCondition().(*AlertPolicy_Condition_ConditionSql); ok {
+ return x.ConditionSql
+ }
+ return nil
+}
+
+type isAlertPolicy_Condition_Condition interface {
+ isAlertPolicy_Condition_Condition()
+}
+
+type AlertPolicy_Condition_ConditionThreshold struct {
+ // A condition that compares a time series against a threshold.
+ ConditionThreshold *AlertPolicy_Condition_MetricThreshold `protobuf:"bytes,1,opt,name=condition_threshold,json=conditionThreshold,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionAbsent struct {
+ // A condition that checks that a time series continues to
+ // receive new data points.
+ ConditionAbsent *AlertPolicy_Condition_MetricAbsence `protobuf:"bytes,2,opt,name=condition_absent,json=conditionAbsent,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionMatchedLog struct {
+ // A condition that checks for log messages matching given constraints. If
+ // set, no other conditions can be present.
+ ConditionMatchedLog *AlertPolicy_Condition_LogMatch `protobuf:"bytes,20,opt,name=condition_matched_log,json=conditionMatchedLog,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionMonitoringQueryLanguage struct {
+ // A condition that uses the Monitoring Query Language to define
+ // alerts.
+ ConditionMonitoringQueryLanguage *AlertPolicy_Condition_MonitoringQueryLanguageCondition `protobuf:"bytes,19,opt,name=condition_monitoring_query_language,json=conditionMonitoringQueryLanguage,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionPrometheusQueryLanguage struct {
+ // A condition that uses the Prometheus query language to define alerts.
+ ConditionPrometheusQueryLanguage *AlertPolicy_Condition_PrometheusQueryLanguageCondition `protobuf:"bytes,21,opt,name=condition_prometheus_query_language,json=conditionPrometheusQueryLanguage,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_ConditionSql struct {
+ // A condition that periodically evaluates a SQL query result.
+ ConditionSql *AlertPolicy_Condition_SqlCondition `protobuf:"bytes,22,opt,name=condition_sql,json=conditionSql,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_ConditionThreshold) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionAbsent) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionMatchedLog) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage) isAlertPolicy_Condition_Condition() {}
+
+func (*AlertPolicy_Condition_ConditionSql) isAlertPolicy_Condition_Condition() {}
+
+// Control over how the notification channels in `notification_channels`
+// are notified when this alert fires.
+type AlertPolicy_AlertStrategy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required for log-based alerting policies, i.e. policies with a `LogMatch`
+ // condition.
+ //
+ // This limit is not implemented for alerting policies that do not have
+ // a LogMatch condition.
+ NotificationRateLimit *AlertPolicy_AlertStrategy_NotificationRateLimit `protobuf:"bytes,1,opt,name=notification_rate_limit,json=notificationRateLimit,proto3" json:"notification_rate_limit,omitempty"`
+ // For log-based alert policies, the notification prompts is always
+ // [OPENED]. For non log-based alert policies, the notification prompts can
+ // be [OPENED] or [OPENED, CLOSED].
+ NotificationPrompts []AlertPolicy_AlertStrategy_NotificationPrompt `protobuf:"varint,2,rep,packed,name=notification_prompts,json=notificationPrompts,proto3,enum=google.monitoring.v3.AlertPolicy_AlertStrategy_NotificationPrompt" json:"notification_prompts,omitempty"`
+ // If an alerting policy that was active has no data for this long, any open
+ // incidents will close
+ AutoClose *durationpb.Duration `protobuf:"bytes,3,opt,name=auto_close,json=autoClose,proto3" json:"auto_close,omitempty"`
+ // Control how notifications will be sent out, on a per-channel basis.
+ NotificationChannelStrategy []*AlertPolicy_AlertStrategy_NotificationChannelStrategy `protobuf:"bytes,4,rep,name=notification_channel_strategy,json=notificationChannelStrategy,proto3" json:"notification_channel_strategy,omitempty"`
+}
+
+func (x *AlertPolicy_AlertStrategy) Reset() {
+ *x = AlertPolicy_AlertStrategy{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_AlertStrategy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_AlertStrategy) ProtoMessage() {}
+
+func (x *AlertPolicy_AlertStrategy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_AlertStrategy) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *AlertPolicy_AlertStrategy) GetNotificationRateLimit() *AlertPolicy_AlertStrategy_NotificationRateLimit {
+ if x != nil {
+ return x.NotificationRateLimit
+ }
+ return nil
+}
+
+func (x *AlertPolicy_AlertStrategy) GetNotificationPrompts() []AlertPolicy_AlertStrategy_NotificationPrompt {
+ if x != nil {
+ return x.NotificationPrompts
+ }
+ return nil
+}
+
+func (x *AlertPolicy_AlertStrategy) GetAutoClose() *durationpb.Duration {
+ if x != nil {
+ return x.AutoClose
+ }
+ return nil
+}
+
+func (x *AlertPolicy_AlertStrategy) GetNotificationChannelStrategy() []*AlertPolicy_AlertStrategy_NotificationChannelStrategy {
+ if x != nil {
+ return x.NotificationChannelStrategy
+ }
+ return nil
+}
+
+// Links to content such as playbooks, repositories, and other resources.
+type AlertPolicy_Documentation_Link struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A short display name for the link. The display name must not be empty
+ // or exceed 63 characters. Example: "playbook".
+ DisplayName string `protobuf:"bytes,1,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The url of a webpage.
+ // A url can be templatized by using variables
+ // in the path or the query parameters. The total length of a URL should
+ // not exceed 2083 characters before and after variable expansion.
+ // Example: "https://my_domain.com/playbook?name=${resource.name}"
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *AlertPolicy_Documentation_Link) Reset() {
+ *x = AlertPolicy_Documentation_Link{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Documentation_Link) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Documentation_Link) ProtoMessage() {}
+
+func (x *AlertPolicy_Documentation_Link) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Documentation_Link.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Documentation_Link) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 0, 0}
+}
+
+func (x *AlertPolicy_Documentation_Link) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Documentation_Link) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+// Specifies how many time series must fail a predicate to trigger a
+// condition. If not specified, then a `{count: 1}` trigger is used.
+type AlertPolicy_Condition_Trigger struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A type of trigger.
+ //
+ // Types that are assignable to Type:
+ //
+ // *AlertPolicy_Condition_Trigger_Count
+ // *AlertPolicy_Condition_Trigger_Percent
+ Type isAlertPolicy_Condition_Trigger_Type `protobuf_oneof:"type"`
+}
+
+func (x *AlertPolicy_Condition_Trigger) Reset() {
+ *x = AlertPolicy_Condition_Trigger{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_Trigger) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_Trigger) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_Trigger) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_Trigger.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_Trigger) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 0}
+}
+
+func (m *AlertPolicy_Condition_Trigger) GetType() isAlertPolicy_Condition_Trigger_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_Trigger) GetCount() int32 {
+ if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Count); ok {
+ return x.Count
+ }
+ return 0
+}
+
+func (x *AlertPolicy_Condition_Trigger) GetPercent() float64 {
+ if x, ok := x.GetType().(*AlertPolicy_Condition_Trigger_Percent); ok {
+ return x.Percent
+ }
+ return 0
+}
+
+type isAlertPolicy_Condition_Trigger_Type interface {
+ isAlertPolicy_Condition_Trigger_Type()
+}
+
+type AlertPolicy_Condition_Trigger_Count struct {
+ // The absolute number of time series that must fail
+ // the predicate for the condition to be triggered.
+ Count int32 `protobuf:"varint,1,opt,name=count,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_Trigger_Percent struct {
+ // The percentage of time series that must fail the
+ // predicate for the condition to be triggered.
+ Percent float64 `protobuf:"fixed64,2,opt,name=percent,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_Trigger_Count) isAlertPolicy_Condition_Trigger_Type() {}
+
+func (*AlertPolicy_Condition_Trigger_Percent) isAlertPolicy_Condition_Trigger_Type() {}
+
+// A condition type that compares a collection of time series
+// against a threshold.
+type AlertPolicy_Condition_MetricThreshold struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. A
+ // [filter](https://cloud.google.com/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`ListTimeSeries`
+ // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list)
+ // (that call is useful to verify the time series that will be retrieved /
+ // processed). The filter must specify the metric type and the resource
+ // type. Optionally, it can specify resource labels and metric labels.
+ // This field must not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the one in the [`ListTimeSeries`
+ // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this
+ // field.
+ Aggregations []*Aggregation `protobuf:"bytes,8,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
+ // A [filter](https://cloud.google.com/monitoring/api/v3/filters) that
+ // identifies a time series that should be used as the denominator of a
+ // ratio that will be compared with the threshold. If a
+ // `denominator_filter` is specified, the time series specified by the
+ // `filter` field will be used as the numerator.
+ //
+ // The filter must specify the metric type and optionally may contain
+ // restrictions on resource type, resource labels, and metric labels.
+ // This field may not exceed 2048 Unicode characters in length.
+ DenominatorFilter string `protobuf:"bytes,9,opt,name=denominator_filter,json=denominatorFilter,proto3" json:"denominator_filter,omitempty"`
+ // Specifies the alignment of data points in individual time series
+ // selected by `denominatorFilter` as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources).
+ //
+ // When computing ratios, the `aggregations` and
+ // `denominator_aggregations` fields must use the same alignment period
+ // and produce time series that have the same periodicity and labels.
+ DenominatorAggregations []*Aggregation `protobuf:"bytes,10,rep,name=denominator_aggregations,json=denominatorAggregations,proto3" json:"denominator_aggregations,omitempty"`
+ // When this field is present, the `MetricThreshold` condition forecasts
+ // whether the time series is predicted to violate the threshold within
+ // the `forecast_horizon`. When this field is not set, the
+ // `MetricThreshold` tests the current value of the timeseries against the
+ // threshold.
+ ForecastOptions *AlertPolicy_Condition_MetricThreshold_ForecastOptions `protobuf:"bytes,12,opt,name=forecast_options,json=forecastOptions,proto3" json:"forecast_options,omitempty"`
+ // The comparison to apply between the time series (indicated by `filter`
+ // and `aggregation`) and the threshold (indicated by `threshold_value`).
+ // The comparison is applied on each time series, with the time series
+ // on the left-hand side and the threshold on the right-hand side.
+ //
+ // Only `COMPARISON_LT` and `COMPARISON_GT` are supported currently.
+ Comparison ComparisonType `protobuf:"varint,4,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"`
+ // A value against which to compare the time series.
+ ThresholdValue float64 `protobuf:"fixed64,5,opt,name=threshold_value,json=thresholdValue,proto3" json:"threshold_value,omitempty"`
+ // The amount of time that a time series must violate the
+ // threshold to be considered failing. Currently, only values
+ // that are a multiple of a minute--e.g., 0, 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. When choosing a duration, it is useful to
+ // keep in mind the frequency of the underlying time series data
+ // (which may also be affected by any alignments specified in the
+ // `aggregations` field); a good duration is long enough so that a single
+ // outlier does not generate spurious alerts, but short enough that
+ // unhealthy states are detected and alerted on quickly.
+ Duration *durationpb.Duration `protobuf:"bytes,6,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`,
+ // or by the ratio, if `denominator_filter` and `denominator_aggregations`
+ // are specified.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,7,opt,name=trigger,proto3" json:"trigger,omitempty"`
+ // A condition control that determines how metric-threshold conditions
+ // are evaluated when data stops arriving. To use this control, the value
+ // of the `duration` field must be greater than or equal to 60 seconds.
+ EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,11,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) Reset() {
+ *x = AlertPolicy_Condition_MetricThreshold{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_MetricThreshold) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_MetricThreshold) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_MetricThreshold.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_MetricThreshold) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1}
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetAggregations() []*Aggregation {
+ if x != nil {
+ return x.Aggregations
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorFilter() string {
+ if x != nil {
+ return x.DenominatorFilter
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetDenominatorAggregations() []*Aggregation {
+ if x != nil {
+ return x.DenominatorAggregations
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetForecastOptions() *AlertPolicy_Condition_MetricThreshold_ForecastOptions {
+ if x != nil {
+ return x.ForecastOptions
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetComparison() ComparisonType {
+ if x != nil {
+ return x.Comparison
+ }
+ return ComparisonType_COMPARISON_UNSPECIFIED
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetThresholdValue() float64 {
+ if x != nil {
+ return x.ThresholdValue
+ }
+ return 0
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetDuration() *durationpb.Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if x != nil {
+ return x.Trigger
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData {
+ if x != nil {
+ return x.EvaluationMissingData
+ }
+ return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED
+}
+
+// A condition type that checks that monitored resources
+// are reporting data. The configuration defines a metric and
+// a set of monitored resources. The predicate is considered in violation
+// when a time series for the specified metric of a monitored
+// resource does not include any data in the specified `duration`.
+type AlertPolicy_Condition_MetricAbsence struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. A
+ // [filter](https://cloud.google.com/monitoring/api/v3/filters) that
+ // identifies which time series should be compared with the threshold.
+ //
+ // The filter is similar to the one that is specified in the
+ // [`ListTimeSeries`
+ // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list)
+ // (that call is useful to verify the time series that will be retrieved /
+ // processed). The filter must specify the metric type and the resource
+ // type. Optionally, it can specify resource labels and metric labels.
+ // This field must not exceed 2048 Unicode characters in length.
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series together (such as
+ // when aggregating multiple streams on each resource to a single
+ // stream for each resource or when aggregating streams across all
+ // members of a group of resources). Multiple aggregations
+ // are applied in the order specified.
+ //
+ // This field is similar to the one in the [`ListTimeSeries`
+ // request](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.timeSeries/list).
+ // It is advisable to use the `ListTimeSeries` method when debugging this
+ // field.
+ Aggregations []*Aggregation `protobuf:"bytes,5,rep,name=aggregations,proto3" json:"aggregations,omitempty"`
+ // The amount of time that a time series must fail to report new
+ // data to be considered failing. The minimum value of this field
+ // is 120 seconds. Larger values that are a multiple of a
+ // minute--for example, 240 or 300 seconds--are supported.
+ // If an invalid value is given, an
+ // error will be returned. The `Duration.nanos` field is
+ // ignored.
+ Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) Reset() {
+ *x = AlertPolicy_Condition_MetricAbsence{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_MetricAbsence) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_MetricAbsence) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_MetricAbsence.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_MetricAbsence) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 2}
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) GetAggregations() []*Aggregation {
+ if x != nil {
+ return x.Aggregations
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) GetDuration() *durationpb.Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MetricAbsence) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if x != nil {
+ return x.Trigger
+ }
+ return nil
+}
+
+// A condition type that checks whether a log message in the [scoping
+// project](https://cloud.google.com/monitoring/api/v3#project_name)
+// satisfies the given filter. Logs from other projects in the metrics
+// scope are not evaluated.
+type AlertPolicy_Condition_LogMatch struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. A logs-based filter. See [Advanced Logs
+ // Queries](https://cloud.google.com/logging/docs/view/advanced-queries)
+ // for how this filter should be constructed.
+ Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. A map from a label key to an extractor expression, which is
+ // used to extract the value for this label key. Each entry in this map is
+ // a specification for how data should be extracted from log entries that
+ // match `filter`. Each combination of extracted values is treated as a
+ // separate rule for the purposes of triggering notifications. Label keys
+ // and corresponding values can be used in notifications generated by this
+ // condition.
+ //
+ // Please see [the documentation on logs-based metric
+ // `valueExtractor`s](https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics#LogMetric.FIELDS.value_extractor)
+ // for syntax and examples.
+ LabelExtractors map[string]string `protobuf:"bytes,2,rep,name=label_extractors,json=labelExtractors,proto3" json:"label_extractors,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *AlertPolicy_Condition_LogMatch) Reset() {
+ *x = AlertPolicy_Condition_LogMatch{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_LogMatch) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_LogMatch) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_LogMatch) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_LogMatch.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_LogMatch) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 3}
+}
+
+func (x *AlertPolicy_Condition_LogMatch) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_LogMatch) GetLabelExtractors() map[string]string {
+ if x != nil {
+ return x.LabelExtractors
+ }
+ return nil
+}
+
+// A condition type that allows alerting policies to be defined using
+// [Monitoring Query Language](https://cloud.google.com/monitoring/mql).
+type AlertPolicy_Condition_MonitoringQueryLanguageCondition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // [Monitoring Query Language](https://cloud.google.com/monitoring/mql)
+ // query that outputs a boolean stream.
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ // The amount of time that a time series must violate the
+ // threshold to be considered failing. Currently, only values
+ // that are a multiple of a minute--e.g., 0, 60, 120, or 300
+ // seconds--are supported. If an invalid value is given, an
+ // error will be returned. When choosing a duration, it is useful to
+ // keep in mind the frequency of the underlying time series data
+ // (which may also be affected by any alignments specified in the
+ // `aggregations` field); a good duration is long enough so that a single
+ // outlier does not generate spurious alerts, but short enough that
+ // unhealthy states are detected and alerted on quickly.
+ Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+ // The number/percent of time series for which the comparison must hold
+ // in order for the condition to trigger. If unspecified, then the
+ // condition will trigger if the comparison is true for any of the
+ // time series that have been identified by `filter` and `aggregations`,
+ // or by the ratio, if `denominator_filter` and `denominator_aggregations`
+ // are specified.
+ Trigger *AlertPolicy_Condition_Trigger `protobuf:"bytes,3,opt,name=trigger,proto3" json:"trigger,omitempty"`
+ // A condition control that determines how metric-threshold conditions
+ // are evaluated when data stops arriving.
+ EvaluationMissingData AlertPolicy_Condition_EvaluationMissingData `protobuf:"varint,4,opt,name=evaluation_missing_data,json=evaluationMissingData,proto3,enum=google.monitoring.v3.AlertPolicy_Condition_EvaluationMissingData" json:"evaluation_missing_data,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) Reset() {
+ *x = AlertPolicy_Condition_MonitoringQueryLanguageCondition{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_MonitoringQueryLanguageCondition.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_MonitoringQueryLanguageCondition) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 4}
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetDuration() *durationpb.Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetTrigger() *AlertPolicy_Condition_Trigger {
+ if x != nil {
+ return x.Trigger
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_MonitoringQueryLanguageCondition) GetEvaluationMissingData() AlertPolicy_Condition_EvaluationMissingData {
+ if x != nil {
+ return x.EvaluationMissingData
+ }
+ return AlertPolicy_Condition_EVALUATION_MISSING_DATA_UNSPECIFIED
+}
+
+// A condition type that allows alerting policies to be defined using
+// [Prometheus Query Language
+// (PromQL)](https://prometheus.io/docs/prometheus/latest/querying/basics/).
+//
+// The PrometheusQueryLanguageCondition message contains information
+// from a Prometheus alerting rule and its associated rule group.
+//
+// A Prometheus alerting rule is described
+// [here](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/).
+// The semantics of a Prometheus alerting rule is described
+// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule).
+//
+// A Prometheus rule group is described
+// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/).
+// The semantics of a Prometheus rule group is described
+// [here](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/#rule_group).
+//
+// Because Cloud Alerting has no representation of a Prometheus rule
+// group resource, we must embed the information of the parent rule
+// group inside each of the conditions that refer to it. We must also
+// update the contents of all Prometheus alerts in case the information
+// of their rule group changes.
+//
+// The PrometheusQueryLanguageCondition protocol buffer combines the
+// information of the corresponding rule group and alerting rule.
+// The structure of the PrometheusQueryLanguageCondition protocol buffer
+// does NOT mimic the structure of the Prometheus rule group and alerting
+// rule YAML declarations. The PrometheusQueryLanguageCondition protocol
+// buffer may change in the future to support future rule group and/or
+// alerting rule features. There are no new such features at the present
+// time (2023-06-26).
+type AlertPolicy_Condition_PrometheusQueryLanguageCondition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The PromQL expression to evaluate. Every evaluation cycle
+ // this expression is evaluated at the current time, and all resultant
+ // time series become pending/firing alerts. This field must not be empty.
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ // Optional. Alerts are considered firing once their PromQL expression was
+ // evaluated to be "true" for this long.
+ // Alerts whose PromQL expression was not evaluated to be "true" for
+ // long enough are considered pending.
+ // Must be a non-negative duration or missing.
+ // This field is optional. Its default value is zero.
+ Duration *durationpb.Duration `protobuf:"bytes,2,opt,name=duration,proto3" json:"duration,omitempty"`
+ // Optional. How often this rule should be evaluated.
+ // Must be a positive multiple of 30 seconds or missing.
+ // This field is optional. Its default value is 30 seconds.
+ // If this PrometheusQueryLanguageCondition was generated from a
+ // Prometheus alerting rule, then this value should be taken from the
+ // enclosing rule group.
+ EvaluationInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=evaluation_interval,json=evaluationInterval,proto3" json:"evaluation_interval,omitempty"`
+ // Optional. Labels to add to or overwrite in the PromQL query result.
+ // Label names [must be
+ // valid](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+ // Label values can be [templatized by using
+ // variables](https://cloud.google.com/monitoring/alerts/doc-variables#doc-vars).
+ // The only available variable names are the names of the labels in the
+ // PromQL result, including "__name__" and "value". "labels" may be empty.
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Optional. The rule group name of this alert in the corresponding
+ // Prometheus configuration file.
+ //
+ // Some external tools may require this field to be populated correctly
+ // in order to refer to the original Prometheus configuration file.
+ // The rule group name and the alert name are necessary to update the
+ // relevant AlertPolicies in case the definition of the rule group changes
+ // in the future.
+ //
+ // This field is optional. If this field is not empty, then it must
+ // contain a valid UTF-8 string.
+ // This field may not exceed 2048 Unicode characters in length.
+ RuleGroup string `protobuf:"bytes,5,opt,name=rule_group,json=ruleGroup,proto3" json:"rule_group,omitempty"`
+ // Optional. The alerting rule name of this alert in the corresponding
+ // Prometheus configuration file.
+ //
+ // Some external tools may require this field to be populated correctly
+ // in order to refer to the original Prometheus configuration file.
+ // The rule group name and the alert name are necessary to update the
+ // relevant AlertPolicies in case the definition of the rule group changes
+ // in the future.
+ //
+ // This field is optional. If this field is not empty, then it must be a
+ // [valid Prometheus label
+ // name](https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
+ // This field may not exceed 2048 Unicode characters in length.
+ AlertRule string `protobuf:"bytes,6,opt,name=alert_rule,json=alertRule,proto3" json:"alert_rule,omitempty"`
+ // Optional. Whether to disable metric existence validation for this
+ // condition.
+ //
+ // This allows alerting policies to be defined on metrics that do not yet
+ // exist, improving advanced customer workflows such as configuring
+ // alerting policies using Terraform.
+ //
+ // Users with the `monitoring.alertPolicyViewer` role are able to see the
+ // name of the non-existent metric in the alerting policy condition.
+ DisableMetricValidation bool `protobuf:"varint,7,opt,name=disable_metric_validation,json=disableMetricValidation,proto3" json:"disable_metric_validation,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) Reset() {
+ *x = AlertPolicy_Condition_PrometheusQueryLanguageCondition{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_PrometheusQueryLanguageCondition.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_PrometheusQueryLanguageCondition) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 5}
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetDuration() *durationpb.Duration {
+ if x != nil {
+ return x.Duration
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetEvaluationInterval() *durationpb.Duration {
+ if x != nil {
+ return x.EvaluationInterval
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetRuleGroup() string {
+ if x != nil {
+ return x.RuleGroup
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetAlertRule() string {
+ if x != nil {
+ return x.AlertRule
+ }
+ return ""
+}
+
+func (x *AlertPolicy_Condition_PrometheusQueryLanguageCondition) GetDisableMetricValidation() bool {
+ if x != nil {
+ return x.DisableMetricValidation
+ }
+ return false
+}
+
+// A condition that allows alerting policies to be defined using GoogleSQL.
+// SQL conditions examine a sliding window of logs using GoogleSQL.
+// Alert policies with SQL conditions may incur additional billing.
+type AlertPolicy_Condition_SqlCondition struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The Log Analytics SQL query to run, as a string. The query
+ // must conform to the required shape. Specifically, the query must not
+ // try to filter the input by time. A filter will automatically be
+ // applied to filter the input so that the query receives all rows
+ // received since the last time the query was run.
+ //
+ // For example, the following query extracts all log entries containing an
+ // HTTP request:
+ //
+ // SELECT
+ // timestamp, log_name, severity, http_request, resource, labels
+ // FROM
+ // my-project.global._Default._AllLogs
+ // WHERE
+ // http_request IS NOT NULL
+ Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"`
+ // The schedule indicates how often the query should be run.
+ //
+ // Types that are assignable to Schedule:
+ //
+ // *AlertPolicy_Condition_SqlCondition_Minutes_
+ // *AlertPolicy_Condition_SqlCondition_Hourly_
+ // *AlertPolicy_Condition_SqlCondition_Daily_
+ Schedule isAlertPolicy_Condition_SqlCondition_Schedule `protobuf_oneof:"schedule"`
+ // The test to be run against the SQL result set.
+ //
+ // Types that are assignable to Evaluate:
+ //
+ // *AlertPolicy_Condition_SqlCondition_RowCountTest_
+ // *AlertPolicy_Condition_SqlCondition_BooleanTest_
+ Evaluate isAlertPolicy_Condition_SqlCondition_Evaluate `protobuf_oneof:"evaluate"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (m *AlertPolicy_Condition_SqlCondition) GetSchedule() isAlertPolicy_Condition_SqlCondition_Schedule {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetMinutes() *AlertPolicy_Condition_SqlCondition_Minutes {
+ if x, ok := x.GetSchedule().(*AlertPolicy_Condition_SqlCondition_Minutes_); ok {
+ return x.Minutes
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetHourly() *AlertPolicy_Condition_SqlCondition_Hourly {
+ if x, ok := x.GetSchedule().(*AlertPolicy_Condition_SqlCondition_Hourly_); ok {
+ return x.Hourly
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetDaily() *AlertPolicy_Condition_SqlCondition_Daily {
+ if x, ok := x.GetSchedule().(*AlertPolicy_Condition_SqlCondition_Daily_); ok {
+ return x.Daily
+ }
+ return nil
+}
+
+func (m *AlertPolicy_Condition_SqlCondition) GetEvaluate() isAlertPolicy_Condition_SqlCondition_Evaluate {
+ if m != nil {
+ return m.Evaluate
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetRowCountTest() *AlertPolicy_Condition_SqlCondition_RowCountTest {
+ if x, ok := x.GetEvaluate().(*AlertPolicy_Condition_SqlCondition_RowCountTest_); ok {
+ return x.RowCountTest
+ }
+ return nil
+}
+
+func (x *AlertPolicy_Condition_SqlCondition) GetBooleanTest() *AlertPolicy_Condition_SqlCondition_BooleanTest {
+ if x, ok := x.GetEvaluate().(*AlertPolicy_Condition_SqlCondition_BooleanTest_); ok {
+ return x.BooleanTest
+ }
+ return nil
+}
+
+type isAlertPolicy_Condition_SqlCondition_Schedule interface {
+ isAlertPolicy_Condition_SqlCondition_Schedule()
+}
+
+type AlertPolicy_Condition_SqlCondition_Minutes_ struct {
+ // Schedule the query to execute every so many minutes.
+ Minutes *AlertPolicy_Condition_SqlCondition_Minutes `protobuf:"bytes,2,opt,name=minutes,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_SqlCondition_Hourly_ struct {
+ // Schedule the query to execute every so many hours.
+ Hourly *AlertPolicy_Condition_SqlCondition_Hourly `protobuf:"bytes,3,opt,name=hourly,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_SqlCondition_Daily_ struct {
+ // Schedule the query to execute every so many days.
+ Daily *AlertPolicy_Condition_SqlCondition_Daily `protobuf:"bytes,4,opt,name=daily,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_SqlCondition_Minutes_) isAlertPolicy_Condition_SqlCondition_Schedule() {}
+
+func (*AlertPolicy_Condition_SqlCondition_Hourly_) isAlertPolicy_Condition_SqlCondition_Schedule() {}
+
+func (*AlertPolicy_Condition_SqlCondition_Daily_) isAlertPolicy_Condition_SqlCondition_Schedule() {}
+
+type isAlertPolicy_Condition_SqlCondition_Evaluate interface {
+ isAlertPolicy_Condition_SqlCondition_Evaluate()
+}
+
+type AlertPolicy_Condition_SqlCondition_RowCountTest_ struct {
+ // Test the row count against a threshold.
+ RowCountTest *AlertPolicy_Condition_SqlCondition_RowCountTest `protobuf:"bytes,5,opt,name=row_count_test,json=rowCountTest,proto3,oneof"`
+}
+
+type AlertPolicy_Condition_SqlCondition_BooleanTest_ struct {
+ // Test the boolean value in the indicated column.
+ BooleanTest *AlertPolicy_Condition_SqlCondition_BooleanTest `protobuf:"bytes,6,opt,name=boolean_test,json=booleanTest,proto3,oneof"`
+}
+
+func (*AlertPolicy_Condition_SqlCondition_RowCountTest_) isAlertPolicy_Condition_SqlCondition_Evaluate() {
+}
+
+func (*AlertPolicy_Condition_SqlCondition_BooleanTest_) isAlertPolicy_Condition_SqlCondition_Evaluate() {
+}
+
+// Options used when forecasting the time series and testing
+// the predicted value against the threshold.
+type AlertPolicy_Condition_MetricThreshold_ForecastOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The length of time into the future to forecast whether a
+ // time series will violate the threshold. If the predicted value is
+ // found to violate the threshold, and the violation is observed in all
+ // forecasts made for the configured `duration`, then the time series is
+ // considered to be failing.
+ // The forecast horizon can range from 1 hour to 60 hours.
+ ForecastHorizon *durationpb.Duration `protobuf:"bytes,1,opt,name=forecast_horizon,json=forecastHorizon,proto3" json:"forecast_horizon,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) Reset() {
+ *x = AlertPolicy_Condition_MetricThreshold_ForecastOptions{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_MetricThreshold_ForecastOptions.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_MetricThreshold_ForecastOptions) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 1, 0}
+}
+
+func (x *AlertPolicy_Condition_MetricThreshold_ForecastOptions) GetForecastHorizon() *durationpb.Duration {
+ if x != nil {
+ return x.ForecastHorizon
+ }
+ return nil
+}
+
+// Used to schedule the query to run every so many minutes.
+type AlertPolicy_Condition_SqlCondition_Minutes struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Number of minutes between runs. The interval must be
+ // greater than or equal to 5 minutes and less than or equal to 1440
+ // minutes.
+ Periodicity int32 `protobuf:"varint,1,opt,name=periodicity,proto3" json:"periodicity,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Minutes) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_Minutes{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Minutes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_Minutes) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_Minutes) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_Minutes.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_Minutes) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 0}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Minutes) GetPeriodicity() int32 {
+ if x != nil {
+ return x.Periodicity
+ }
+ return 0
+}
+
+// Used to schedule the query to run every so many hours.
+type AlertPolicy_Condition_SqlCondition_Hourly struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The number of hours between runs. Must be greater than or
+ // equal to 1 hour and less than or equal to 48 hours.
+ Periodicity int32 `protobuf:"varint,1,opt,name=periodicity,proto3" json:"periodicity,omitempty"`
+ // Optional. The number of minutes after the hour (in UTC) to run the
+ // query. Must be greater than or equal to 0 minutes and less than or
+ // equal to 59 minutes. If left unspecified, then an arbitrary offset
+ // is used.
+ MinuteOffset *int32 `protobuf:"varint,2,opt,name=minute_offset,json=minuteOffset,proto3,oneof" json:"minute_offset,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_Hourly{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_Hourly) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_Hourly.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_Hourly) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 1}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) GetPeriodicity() int32 {
+ if x != nil {
+ return x.Periodicity
+ }
+ return 0
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Hourly) GetMinuteOffset() int32 {
+ if x != nil && x.MinuteOffset != nil {
+ return *x.MinuteOffset
+ }
+ return 0
+}
+
+// Used to schedule the query to run every so many days.
+type AlertPolicy_Condition_SqlCondition_Daily struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The number of days between runs. Must be greater than or
+ // equal to 1 day and less than or equal to 31 days.
+ Periodicity int32 `protobuf:"varint,1,opt,name=periodicity,proto3" json:"periodicity,omitempty"`
+ // Optional. The time of day (in UTC) at which the query should run. If
+ // left unspecified, the server picks an arbitrary time of day and runs
+ // the query at the same time each day.
+ ExecutionTime *timeofday.TimeOfDay `protobuf:"bytes,2,opt,name=execution_time,json=executionTime,proto3" json:"execution_time,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_Daily{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_Daily) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[18]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_Daily.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_Daily) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 2}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) GetPeriodicity() int32 {
+ if x != nil {
+ return x.Periodicity
+ }
+ return 0
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_Daily) GetExecutionTime() *timeofday.TimeOfDay {
+ if x != nil {
+ return x.ExecutionTime
+ }
+ return nil
+}
+
+// A test that checks if the number of rows in the result set
+// violates some threshold.
+type AlertPolicy_Condition_SqlCondition_RowCountTest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The comparison to apply between the number of rows returned
+ // by the query and the threshold.
+ Comparison ComparisonType `protobuf:"varint,1,opt,name=comparison,proto3,enum=google.monitoring.v3.ComparisonType" json:"comparison,omitempty"`
+ // Required. The value against which to compare the row count.
+ Threshold int64 `protobuf:"varint,2,opt,name=threshold,proto3" json:"threshold,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_RowCountTest{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_RowCountTest) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[19]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_RowCountTest.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_RowCountTest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 3}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) GetComparison() ComparisonType {
+ if x != nil {
+ return x.Comparison
+ }
+ return ComparisonType_COMPARISON_UNSPECIFIED
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_RowCountTest) GetThreshold() int64 {
+ if x != nil {
+ return x.Threshold
+ }
+ return 0
+}
+
+// A test that uses an alerting result in a boolean column produced by
+// the SQL query.
+type AlertPolicy_Condition_SqlCondition_BooleanTest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the column containing the boolean value. If the
+ // value in a row is NULL, that row is ignored.
+ Column string `protobuf:"bytes,1,opt,name=column,proto3" json:"column,omitempty"`
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_BooleanTest) Reset() {
+ *x = AlertPolicy_Condition_SqlCondition_BooleanTest{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_BooleanTest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_Condition_SqlCondition_BooleanTest) ProtoMessage() {}
+
+func (x *AlertPolicy_Condition_SqlCondition_BooleanTest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[20]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_Condition_SqlCondition_BooleanTest.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_Condition_SqlCondition_BooleanTest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 1, 6, 4}
+}
+
+func (x *AlertPolicy_Condition_SqlCondition_BooleanTest) GetColumn() string {
+ if x != nil {
+ return x.Column
+ }
+ return ""
+}
+
+// Control over the rate of notifications sent to this alerting policy's
+// notification channels.
+type AlertPolicy_AlertStrategy_NotificationRateLimit struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Not more than one notification per `period`.
+ Period *durationpb.Duration `protobuf:"bytes,1,opt,name=period,proto3" json:"period,omitempty"`
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) Reset() {
+ *x = AlertPolicy_AlertStrategy_NotificationRateLimit{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoMessage() {}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[21]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy_NotificationRateLimit.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_AlertStrategy_NotificationRateLimit) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationRateLimit) GetPeriod() *durationpb.Duration {
+ if x != nil {
+ return x.Period
+ }
+ return nil
+}
+
+// Control over how the notification channels in `notification_channels`
+// are notified when this alert fires, on a per-channel basis.
+type AlertPolicy_AlertStrategy_NotificationChannelStrategy struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The full REST resource name for the notification channels that these
+ // settings apply to. Each of these correspond to the name field in one
+ // of the NotificationChannel objects referenced in the
+ // notification_channels field of this AlertPolicy.
+ // The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ NotificationChannelNames []string `protobuf:"bytes,1,rep,name=notification_channel_names,json=notificationChannelNames,proto3" json:"notification_channel_names,omitempty"`
+ // The frequency at which to send reminder notifications for open
+ // incidents.
+ RenotifyInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=renotify_interval,json=renotifyInterval,proto3" json:"renotify_interval,omitempty"`
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) Reset() {
+ *x = AlertPolicy_AlertStrategy_NotificationChannelStrategy{}
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoMessage() {}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_proto_msgTypes[22]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AlertPolicy_AlertStrategy_NotificationChannelStrategy.ProtoReflect.Descriptor instead.
+func (*AlertPolicy_AlertStrategy_NotificationChannelStrategy) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_proto_rawDescGZIP(), []int{0, 2, 1}
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetNotificationChannelNames() []string {
+ if x != nil {
+ return x.NotificationChannelNames
+ }
+ return nil
+}
+
+func (x *AlertPolicy_AlertStrategy_NotificationChannelStrategy) GetRenotifyInterval() *durationpb.Duration {
+ if x != nil {
+ return x.RenotifyInterval
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_alert_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_alert_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x66,
+ 0x64, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x35, 0x0a, 0x0b, 0x41, 0x6c,
+ 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61,
+ 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x55, 0x0a, 0x0d, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e,
+ 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x64,
+ 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x0b,
+ 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x12, 0x4b, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x53, 0x0a,
+ 0x08, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62,
+ 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e,
+ 0x65, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x11, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x08, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x69, 0x74, 0x79, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x08,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x69, 0x74, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x4d, 0x0a,
+ 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63, 0x72,
+ 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4d, 0x0a, 0x0f,
+ 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18,
+ 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x75, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x6d, 0x75, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x56, 0x0a, 0x0e, 0x61,
+ 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x15, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61,
+ 0x74, 0x65, 0x67, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74,
+ 0x65, 0x67, 0x79, 0x12, 0x4b, 0x0a, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18,
+ 0x16, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74,
+ 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x73, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79,
+ 0x1a, 0xf3, 0x01, 0x0a, 0x0d, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6d, 0x69, 0x6d, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x08, 0x6d, 0x69, 0x6d, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x75, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x4f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d,
+ 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3b, 0x0a, 0x04, 0x4c, 0x69, 0x6e,
+ 0x6b, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x1a, 0xa5, 0x23, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70,
+ 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x6e, 0x0a, 0x13, 0x63,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f,
+ 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65,
+ 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x48, 0x00, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x66, 0x0a, 0x10, 0x63,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x62, 0x73, 0x65, 0x6e, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73, 0x65, 0x6e, 0x63, 0x65,
+ 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x62, 0x73,
+ 0x65, 0x6e, 0x74, 0x12, 0x6a, 0x0a, 0x15, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x14, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x48, 0x00, 0x52, 0x13, 0x63, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x12,
+ 0x9d, 0x01, 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
+ 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12,
+ 0x9d, 0x01, 0x0a, 0x23, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x6c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
+ 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x20, 0x63,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12,
+ 0x5f, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x71, 0x6c,
+ 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c,
+ 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x71, 0x6c,
+ 0x1a, 0x45, 0x0a, 0x07, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x05, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x05, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x07, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x42,
+ 0x06, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0xc8, 0x06, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1b, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72,
+ 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x2d, 0x0a, 0x12, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 0x65, 0x6e,
+ 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5c,
+ 0x0a, 0x18, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x67,
+ 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x17, 0x64, 0x65, 0x6e, 0x6f, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72,
+ 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x76, 0x0a, 0x10,
+ 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c,
+ 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68,
+ 0x6f, 0x6c, 0x64, 0x2e, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a,
+ 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x68,
+ 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72,
+ 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72,
+ 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61,
+ 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f,
+ 0x64, 0x61, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65,
+ 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67,
+ 0x44, 0x61, 0x74, 0x61, 0x1a, 0x5c, 0x0a, 0x0f, 0x46, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x66, 0x6f, 0x72, 0x65, 0x63,
+ 0x61, 0x73, 0x74, 0x5f, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x0f, 0x66, 0x6f, 0x72, 0x65, 0x63, 0x61, 0x73, 0x74, 0x48, 0x6f, 0x72, 0x69, 0x7a,
+ 0x6f, 0x6e, 0x1a, 0xf9, 0x01, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x41, 0x62, 0x73,
+ 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x12, 0x45, 0x0a, 0x0c, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41,
+ 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x61, 0x67, 0x67, 0x72,
+ 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x72,
+ 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x1a, 0xe1,
+ 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1b, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x10, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x4c, 0x6f, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78,
+ 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x42,
+ 0x0a, 0x14, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x6f, 0x72,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x1a, 0xb9, 0x02, 0x0a, 0x20, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x35, 0x0a,
+ 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67,
+ 0x67, 0x65, 0x72, 0x12, 0x79, 0x0a, 0x17, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x41, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73,
+ 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x52, 0x15, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x85,
+ 0x04, 0x0a, 0x20, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x3a,
+ 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4f, 0x0a, 0x13, 0x65, 0x76,
+ 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x75, 0x0a, 0x06, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x58, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x51, 0x75, 0x65, 0x72, 0x79, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x72, 0x75, 0x6c,
+ 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x22, 0x0a, 0x0a, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f,
+ 0x72, 0x75, 0x6c, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x09, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x19, 0x64, 0x69,
+ 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x39, 0x0a, 0x0b, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xee, 0x07, 0x0a, 0x0c, 0x53, 0x71, 0x6c, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65,
+ 0x72, 0x79, 0x12, 0x5c, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x69,
+ 0x6e, 0x75, 0x74, 0x65, 0x73, 0x48, 0x00, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73,
+ 0x12, 0x59, 0x0a, 0x06, 0x68, 0x6f, 0x75, 0x72, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71,
+ 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x48, 0x6f, 0x75, 0x72, 0x6c,
+ 0x79, 0x48, 0x00, 0x52, 0x06, 0x68, 0x6f, 0x75, 0x72, 0x6c, 0x79, 0x12, 0x56, 0x0a, 0x05, 0x64,
+ 0x61, 0x69, 0x6c, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x48, 0x00, 0x52, 0x05, 0x64, 0x61,
+ 0x69, 0x6c, 0x79, 0x12, 0x6d, 0x0a, 0x0e, 0x72, 0x6f, 0x77, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x5f, 0x74, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x6f, 0x77, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x65,
+ 0x73, 0x74, 0x48, 0x01, 0x52, 0x0c, 0x72, 0x6f, 0x77, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x65,
+ 0x73, 0x74, 0x12, 0x69, 0x0a, 0x0c, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x74, 0x65,
+ 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x43, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x71, 0x6c, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x48, 0x01,
+ 0x52, 0x0b, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x54, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x0a,
+ 0x07, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x69,
+ 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79, 0x1a,
+ 0x70, 0x0a, 0x06, 0x48, 0x6f, 0x75, 0x72, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65, 0x72,
+ 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79,
+ 0x12, 0x2d, 0x0a, 0x0d, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x00, 0x52, 0x0c,
+ 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x88, 0x01, 0x01, 0x42,
+ 0x10, 0x0a, 0x0e, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x1a, 0x72, 0x0a, 0x05, 0x44, 0x61, 0x69, 0x6c, 0x79, 0x12, 0x25, 0x0a, 0x0b, 0x70, 0x65,
+ 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x69, 0x63, 0x69, 0x74,
+ 0x79, 0x12, 0x42, 0x0a, 0x0e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x4f, 0x66, 0x44, 0x61,
+ 0x79, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0d, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f,
+ 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x1a, 0x7c, 0x0a, 0x0c, 0x52, 0x6f, 0x77, 0x43, 0x6f, 0x75, 0x6e,
+ 0x74, 0x54, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69,
+ 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e,
+ 0x12, 0x21, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68,
+ 0x6f, 0x6c, 0x64, 0x1a, 0x2a, 0x0a, 0x0b, 0x42, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x54, 0x65,
+ 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42,
+ 0x0a, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x65,
+ 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x22, 0xad, 0x01, 0x0a, 0x15, 0x45, 0x76, 0x61, 0x6c,
+ 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74,
+ 0x61, 0x12, 0x27, 0x0a, 0x23, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
+ 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x55, 0x4e, 0x53,
+ 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x24, 0x0a, 0x20, 0x45, 0x56,
+ 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47,
+ 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x49, 0x4e, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01,
+ 0x12, 0x22, 0x0a, 0x1e, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
+ 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x41, 0x43, 0x54, 0x49,
+ 0x56, 0x45, 0x10, 0x02, 0x12, 0x21, 0x0a, 0x1d, 0x45, 0x56, 0x41, 0x4c, 0x55, 0x41, 0x54, 0x49,
+ 0x4f, 0x4e, 0x5f, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4e, 0x47, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f,
+ 0x4e, 0x4f, 0x5f, 0x4f, 0x50, 0x10, 0x03, 0x3a, 0x97, 0x02, 0xea, 0x41, 0x93, 0x02, 0x0a, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x46,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73,
+ 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64,
+ 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69,
+ 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72,
+ 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f,
+ 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x2f, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x7b, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x12, 0x01,
+ 0x2a, 0x42, 0x0b, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x96,
+ 0x06, 0x0a, 0x0d, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79,
+ 0x12, 0x7d, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65,
+ 0x67, 0x79, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x15, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12,
+ 0x75, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x70, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x42, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x70,
+ 0x74, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
+ 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x63,
+ 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x43, 0x6c, 0x6f, 0x73, 0x65,
+ 0x12, 0x8f, 0x01, 0x0a, 0x1d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65,
+ 0x67, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72,
+ 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x1b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65,
+ 0x67, 0x79, 0x1a, 0x4a, 0x0a, 0x15, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x70,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xa3,
+ 0x01, 0x0a, 0x1b, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x3c,
+ 0x0a, 0x1a, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x18, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x11,
+ 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x76, 0x61, 0x6c, 0x22, 0x51, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x6d, 0x70, 0x74, 0x12, 0x23, 0x0a, 0x1f, 0x4e, 0x4f,
+ 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x4d, 0x50,
+ 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+ 0x0a, 0x0a, 0x06, 0x4f, 0x50, 0x45, 0x4e, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43,
+ 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x03, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x61, 0x0a, 0x15, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x17, 0x0a, 0x13, 0x43, 0x4f, 0x4d, 0x42, 0x49, 0x4e, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45,
+ 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4e, 0x44, 0x10,
+ 0x01, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x41, 0x4e, 0x44,
+ 0x5f, 0x57, 0x49, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x49, 0x4e, 0x47, 0x5f, 0x52,
+ 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x03, 0x22, 0x4a, 0x0a, 0x08, 0x53, 0x65, 0x76,
+ 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x56, 0x45, 0x52, 0x49, 0x54,
+ 0x59, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+ 0x0c, 0x0a, 0x08, 0x43, 0x52, 0x49, 0x54, 0x49, 0x43, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x09, 0x0a,
+ 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e,
+ 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0xc9, 0x01, 0xea, 0x41, 0xc5, 0x01, 0x0a, 0x25, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x7d, 0x12, 0x39, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73,
+ 0x2f, 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12,
+ 0x2d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72,
+ 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f,
+ 0x7b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x7d, 0x12, 0x01,
+ 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69,
+ 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa,
+ 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_google_monitoring_v3_alert_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_alert_proto_rawDescData = file_google_monitoring_v3_alert_proto_rawDesc
+)
+
+func file_google_monitoring_v3_alert_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_alert_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_alert_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_alert_proto_rawDescData
+}
+
+var file_google_monitoring_v3_alert_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_google_monitoring_v3_alert_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
+var file_google_monitoring_v3_alert_proto_goTypes = []any{
+ (AlertPolicy_ConditionCombinerType)(0), // 0: google.monitoring.v3.AlertPolicy.ConditionCombinerType
+ (AlertPolicy_Severity)(0), // 1: google.monitoring.v3.AlertPolicy.Severity
+ (AlertPolicy_Condition_EvaluationMissingData)(0), // 2: google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData
+ (AlertPolicy_AlertStrategy_NotificationPrompt)(0), // 3: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationPrompt
+ (*AlertPolicy)(nil), // 4: google.monitoring.v3.AlertPolicy
+ (*AlertPolicy_Documentation)(nil), // 5: google.monitoring.v3.AlertPolicy.Documentation
+ (*AlertPolicy_Condition)(nil), // 6: google.monitoring.v3.AlertPolicy.Condition
+ (*AlertPolicy_AlertStrategy)(nil), // 7: google.monitoring.v3.AlertPolicy.AlertStrategy
+ nil, // 8: google.monitoring.v3.AlertPolicy.UserLabelsEntry
+ (*AlertPolicy_Documentation_Link)(nil), // 9: google.monitoring.v3.AlertPolicy.Documentation.Link
+ (*AlertPolicy_Condition_Trigger)(nil), // 10: google.monitoring.v3.AlertPolicy.Condition.Trigger
+ (*AlertPolicy_Condition_MetricThreshold)(nil), // 11: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold
+ (*AlertPolicy_Condition_MetricAbsence)(nil), // 12: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence
+ (*AlertPolicy_Condition_LogMatch)(nil), // 13: google.monitoring.v3.AlertPolicy.Condition.LogMatch
+ (*AlertPolicy_Condition_MonitoringQueryLanguageCondition)(nil), // 14: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition
+ (*AlertPolicy_Condition_PrometheusQueryLanguageCondition)(nil), // 15: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition
+ (*AlertPolicy_Condition_SqlCondition)(nil), // 16: google.monitoring.v3.AlertPolicy.Condition.SqlCondition
+ (*AlertPolicy_Condition_MetricThreshold_ForecastOptions)(nil), // 17: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions
+ nil, // 18: google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry
+ nil, // 19: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry
+ (*AlertPolicy_Condition_SqlCondition_Minutes)(nil), // 20: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Minutes
+ (*AlertPolicy_Condition_SqlCondition_Hourly)(nil), // 21: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Hourly
+ (*AlertPolicy_Condition_SqlCondition_Daily)(nil), // 22: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Daily
+ (*AlertPolicy_Condition_SqlCondition_RowCountTest)(nil), // 23: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.RowCountTest
+ (*AlertPolicy_Condition_SqlCondition_BooleanTest)(nil), // 24: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.BooleanTest
+ (*AlertPolicy_AlertStrategy_NotificationRateLimit)(nil), // 25: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit
+ (*AlertPolicy_AlertStrategy_NotificationChannelStrategy)(nil), // 26: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy
+ (*wrapperspb.BoolValue)(nil), // 27: google.protobuf.BoolValue
+ (*status.Status)(nil), // 28: google.rpc.Status
+ (*MutationRecord)(nil), // 29: google.monitoring.v3.MutationRecord
+ (*durationpb.Duration)(nil), // 30: google.protobuf.Duration
+ (*Aggregation)(nil), // 31: google.monitoring.v3.Aggregation
+ (ComparisonType)(0), // 32: google.monitoring.v3.ComparisonType
+ (*timeofday.TimeOfDay)(nil), // 33: google.type.TimeOfDay
+}
+var file_google_monitoring_v3_alert_proto_depIdxs = []int32{
+ 5, // 0: google.monitoring.v3.AlertPolicy.documentation:type_name -> google.monitoring.v3.AlertPolicy.Documentation
+ 8, // 1: google.monitoring.v3.AlertPolicy.user_labels:type_name -> google.monitoring.v3.AlertPolicy.UserLabelsEntry
+ 6, // 2: google.monitoring.v3.AlertPolicy.conditions:type_name -> google.monitoring.v3.AlertPolicy.Condition
+ 0, // 3: google.monitoring.v3.AlertPolicy.combiner:type_name -> google.monitoring.v3.AlertPolicy.ConditionCombinerType
+ 27, // 4: google.monitoring.v3.AlertPolicy.enabled:type_name -> google.protobuf.BoolValue
+ 28, // 5: google.monitoring.v3.AlertPolicy.validity:type_name -> google.rpc.Status
+ 29, // 6: google.monitoring.v3.AlertPolicy.creation_record:type_name -> google.monitoring.v3.MutationRecord
+ 29, // 7: google.monitoring.v3.AlertPolicy.mutation_record:type_name -> google.monitoring.v3.MutationRecord
+ 7, // 8: google.monitoring.v3.AlertPolicy.alert_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy
+ 1, // 9: google.monitoring.v3.AlertPolicy.severity:type_name -> google.monitoring.v3.AlertPolicy.Severity
+ 9, // 10: google.monitoring.v3.AlertPolicy.Documentation.links:type_name -> google.monitoring.v3.AlertPolicy.Documentation.Link
+ 11, // 11: google.monitoring.v3.AlertPolicy.Condition.condition_threshold:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold
+ 12, // 12: google.monitoring.v3.AlertPolicy.Condition.condition_absent:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricAbsence
+ 13, // 13: google.monitoring.v3.AlertPolicy.Condition.condition_matched_log:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch
+ 14, // 14: google.monitoring.v3.AlertPolicy.Condition.condition_monitoring_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition
+ 15, // 15: google.monitoring.v3.AlertPolicy.Condition.condition_prometheus_query_language:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition
+ 16, // 16: google.monitoring.v3.AlertPolicy.Condition.condition_sql:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition
+ 25, // 17: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_rate_limit:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit
+ 3, // 18: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_prompts:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationPrompt
+ 30, // 19: google.monitoring.v3.AlertPolicy.AlertStrategy.auto_close:type_name -> google.protobuf.Duration
+ 26, // 20: google.monitoring.v3.AlertPolicy.AlertStrategy.notification_channel_strategy:type_name -> google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy
+ 31, // 21: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.aggregations:type_name -> google.monitoring.v3.Aggregation
+ 31, // 22: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.denominator_aggregations:type_name -> google.monitoring.v3.Aggregation
+ 17, // 23: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.forecast_options:type_name -> google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions
+ 32, // 24: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.comparison:type_name -> google.monitoring.v3.ComparisonType
+ 30, // 25: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.duration:type_name -> google.protobuf.Duration
+ 10, // 26: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger
+ 2, // 27: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData
+ 31, // 28: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.aggregations:type_name -> google.monitoring.v3.Aggregation
+ 30, // 29: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.duration:type_name -> google.protobuf.Duration
+ 10, // 30: google.monitoring.v3.AlertPolicy.Condition.MetricAbsence.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger
+ 18, // 31: google.monitoring.v3.AlertPolicy.Condition.LogMatch.label_extractors:type_name -> google.monitoring.v3.AlertPolicy.Condition.LogMatch.LabelExtractorsEntry
+ 30, // 32: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.duration:type_name -> google.protobuf.Duration
+ 10, // 33: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.trigger:type_name -> google.monitoring.v3.AlertPolicy.Condition.Trigger
+ 2, // 34: google.monitoring.v3.AlertPolicy.Condition.MonitoringQueryLanguageCondition.evaluation_missing_data:type_name -> google.monitoring.v3.AlertPolicy.Condition.EvaluationMissingData
+ 30, // 35: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.duration:type_name -> google.protobuf.Duration
+ 30, // 36: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.evaluation_interval:type_name -> google.protobuf.Duration
+ 19, // 37: google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.labels:type_name -> google.monitoring.v3.AlertPolicy.Condition.PrometheusQueryLanguageCondition.LabelsEntry
+ 20, // 38: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.minutes:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Minutes
+ 21, // 39: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.hourly:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Hourly
+ 22, // 40: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.daily:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Daily
+ 23, // 41: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.row_count_test:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.RowCountTest
+ 24, // 42: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.boolean_test:type_name -> google.monitoring.v3.AlertPolicy.Condition.SqlCondition.BooleanTest
+ 30, // 43: google.monitoring.v3.AlertPolicy.Condition.MetricThreshold.ForecastOptions.forecast_horizon:type_name -> google.protobuf.Duration
+ 33, // 44: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.Daily.execution_time:type_name -> google.type.TimeOfDay
+ 32, // 45: google.monitoring.v3.AlertPolicy.Condition.SqlCondition.RowCountTest.comparison:type_name -> google.monitoring.v3.ComparisonType
+ 30, // 46: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationRateLimit.period:type_name -> google.protobuf.Duration
+ 30, // 47: google.monitoring.v3.AlertPolicy.AlertStrategy.NotificationChannelStrategy.renotify_interval:type_name -> google.protobuf.Duration
+ 48, // [48:48] is the sub-list for method output_type
+ 48, // [48:48] is the sub-list for method input_type
+ 48, // [48:48] is the sub-list for extension type_name
+ 48, // [48:48] is the sub-list for extension extendee
+ 0, // [0:48] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_alert_proto_init() }
+func file_google_monitoring_v3_alert_proto_init() {
+ if File_google_monitoring_v3_alert_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_mutation_record_proto_init()
+ file_google_monitoring_v3_alert_proto_msgTypes[2].OneofWrappers = []any{
+ (*AlertPolicy_Condition_ConditionThreshold)(nil),
+ (*AlertPolicy_Condition_ConditionAbsent)(nil),
+ (*AlertPolicy_Condition_ConditionMatchedLog)(nil),
+ (*AlertPolicy_Condition_ConditionMonitoringQueryLanguage)(nil),
+ (*AlertPolicy_Condition_ConditionPrometheusQueryLanguage)(nil),
+ (*AlertPolicy_Condition_ConditionSql)(nil),
+ }
+ file_google_monitoring_v3_alert_proto_msgTypes[6].OneofWrappers = []any{
+ (*AlertPolicy_Condition_Trigger_Count)(nil),
+ (*AlertPolicy_Condition_Trigger_Percent)(nil),
+ }
+ file_google_monitoring_v3_alert_proto_msgTypes[12].OneofWrappers = []any{
+ (*AlertPolicy_Condition_SqlCondition_Minutes_)(nil),
+ (*AlertPolicy_Condition_SqlCondition_Hourly_)(nil),
+ (*AlertPolicy_Condition_SqlCondition_Daily_)(nil),
+ (*AlertPolicy_Condition_SqlCondition_RowCountTest_)(nil),
+ (*AlertPolicy_Condition_SqlCondition_BooleanTest_)(nil),
+ }
+ file_google_monitoring_v3_alert_proto_msgTypes[17].OneofWrappers = []any{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_alert_proto_rawDesc,
+ NumEnums: 4,
+ NumMessages: 23,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_alert_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_alert_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_alert_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_alert_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_alert_proto = out.File
+ file_google_monitoring_v3_alert_proto_rawDesc = nil
+ file_google_monitoring_v3_alert_proto_goTypes = nil
+ file_google_monitoring_v3_alert_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go
new file mode 100644
index 000000000..ba0c4f65f
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/alert_service.pb.go
@@ -0,0 +1,961 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/alert_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The protocol for the `CreateAlertPolicy` request.
+type CreateAlertPolicyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+ // to create the alerting policy. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // Note that this field names the parent container in which the alerting
+ // policy will be written, not the name of the created policy. |name| must be
+ // a host project of a Metrics Scope, otherwise INVALID_ARGUMENT error will
+ // return. The alerting policy that is returned will have a name that contains
+ // a normalized representation of this name as a prefix but adds a suffix of
+ // the form `/alertPolicies/[ALERT_POLICY_ID]`, identifying the policy in the
+ // container.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The requested alerting policy. You should omit the `name` field
+ // in this policy. The name will be returned in the new policy, including a
+ // new `[ALERT_POLICY_ID]` value.
+ AlertPolicy *AlertPolicy `protobuf:"bytes,2,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
+}
+
+func (x *CreateAlertPolicyRequest) Reset() {
+ *x = CreateAlertPolicyRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateAlertPolicyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateAlertPolicyRequest) ProtoMessage() {}
+
+func (x *CreateAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*CreateAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateAlertPolicyRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+ if x != nil {
+ return x.AlertPolicy
+ }
+ return nil
+}
+
+// The protocol for the `GetAlertPolicy` request.
+type GetAlertPolicyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The alerting policy to retrieve. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetAlertPolicyRequest) Reset() {
+ *x = GetAlertPolicyRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetAlertPolicyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetAlertPolicyRequest) ProtoMessage() {}
+
+func (x *GetAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*GetAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *GetAlertPolicyRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The protocol for the `ListAlertPolicies` request.
+type ListAlertPoliciesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
+ // alert policies are to be listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // Note that this field names the parent container in which the alerting
+ // policies to be listed are stored. To retrieve a single alerting policy
+ // by name, use the
+ // [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy]
+ // operation, instead.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. If provided, this field specifies the criteria that must be met
+ // by alert policies to be included in the response.
+ //
+ // For more details, see [sorting and
+ // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. A comma-separated list of fields by which to sort the result.
+ // Supports the same set of field references as the `filter` field. Entries
+ // can be prefixed with a minus sign to sort by the field in descending order.
+ //
+ // For more details, see [sorting and
+ // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
+ OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // Optional. The maximum number of results to return in a single response.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. If this field is not empty then it must contain the
+ // `nextPageToken` value returned by a previous call to this method. Using
+ // this field causes the method to return more results from the previous
+ // method call.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListAlertPoliciesRequest) Reset() {
+ *x = ListAlertPoliciesRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListAlertPoliciesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListAlertPoliciesRequest) ProtoMessage() {}
+
+func (x *ListAlertPoliciesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListAlertPoliciesRequest.ProtoReflect.Descriptor instead.
+func (*ListAlertPoliciesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListAlertPoliciesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListAlertPoliciesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListAlertPoliciesRequest) GetOrderBy() string {
+ if x != nil {
+ return x.OrderBy
+ }
+ return ""
+}
+
+func (x *ListAlertPoliciesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListAlertPoliciesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListAlertPolicies` response.
+type ListAlertPoliciesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The returned alert policies.
+ AlertPolicies []*AlertPolicy `protobuf:"bytes,3,rep,name=alert_policies,json=alertPolicies,proto3" json:"alert_policies,omitempty"`
+ // If there might be more results than were returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of alert policies in all pages. This number is only an
+ // estimate, and may change in subsequent pages. https://aip.dev/158
+ TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+}
+
+func (x *ListAlertPoliciesResponse) Reset() {
+ *x = ListAlertPoliciesResponse{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListAlertPoliciesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListAlertPoliciesResponse) ProtoMessage() {}
+
+func (x *ListAlertPoliciesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListAlertPoliciesResponse.ProtoReflect.Descriptor instead.
+func (*ListAlertPoliciesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListAlertPoliciesResponse) GetAlertPolicies() []*AlertPolicy {
+ if x != nil {
+ return x.AlertPolicies
+ }
+ return nil
+}
+
+func (x *ListAlertPoliciesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListAlertPoliciesResponse) GetTotalSize() int32 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+// The protocol for the `UpdateAlertPolicy` request.
+type UpdateAlertPolicyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. A list of alerting policy field names. If this field is not
+ // empty, each listed field in the existing alerting policy is set to the
+ // value of the corresponding field in the supplied policy (`alert_policy`),
+ // or to the field's default value if the field is not in the supplied
+ // alerting policy. Fields not listed retain their previous value.
+ //
+ // Examples of valid field masks include `display_name`, `documentation`,
+ // `documentation.content`, `documentation.mime_type`, `user_labels`,
+ // `user_label.nameofkey`, `enabled`, `conditions`, `combiner`, etc.
+ //
+ // If this field is empty, then the supplied alerting policy replaces the
+ // existing policy. It is the same as deleting the existing policy and
+ // adding the supplied policy, except for the following:
+ //
+ // - The new policy will have the same `[ALERT_POLICY_ID]` as the former
+ // policy. This gives you continuity with the former policy in your
+ // notifications and incidents.
+ // - Conditions in the new policy will keep their former `[CONDITION_ID]` if
+ // the supplied condition includes the `name` field with that
+ // `[CONDITION_ID]`. If the supplied condition omits the `name` field,
+ // then a new `[CONDITION_ID]` is created.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. The updated alerting policy or the updated values for the
+ // fields listed in `update_mask`.
+ // If `update_mask` is not empty, any fields in this policy that are
+ // not in `update_mask` are ignored.
+ AlertPolicy *AlertPolicy `protobuf:"bytes,3,opt,name=alert_policy,json=alertPolicy,proto3" json:"alert_policy,omitempty"`
+}
+
+func (x *UpdateAlertPolicyRequest) Reset() {
+ *x = UpdateAlertPolicyRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateAlertPolicyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateAlertPolicyRequest) ProtoMessage() {}
+
+func (x *UpdateAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*UpdateAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateAlertPolicyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+func (x *UpdateAlertPolicyRequest) GetAlertPolicy() *AlertPolicy {
+ if x != nil {
+ return x.AlertPolicy
+ }
+ return nil
+}
+
+// The protocol for the `DeleteAlertPolicy` request.
+type DeleteAlertPolicyRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The alerting policy to delete. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID]
+ //
+ // For more information, see [AlertPolicy][google.monitoring.v3.AlertPolicy].
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteAlertPolicyRequest) Reset() {
+ *x = DeleteAlertPolicyRequest{}
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteAlertPolicyRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteAlertPolicyRequest) ProtoMessage() {}
+
+func (x *DeleteAlertPolicyRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_alert_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteAlertPolicyRequest.ProtoReflect.Descriptor instead.
+func (*DeleteAlertPolicyRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_alert_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteAlertPolicyRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_alert_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_alert_service_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa8, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x49, 0x0a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22,
+ 0x5a, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xe0, 0x01, 0x0a, 0x18,
+ 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x12, 0x25,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e, 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65,
+ 0x72, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xac,
+ 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x48, 0x0a, 0x0e,
+ 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0xa7, 0x01,
+ 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x49, 0x0a, 0x0c,
+ 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50,
+ 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x61, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x5d, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x2d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0x9e, 0x08, 0x0a, 0x12, 0x41, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xa8, 0x01,
+ 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x69, 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x96, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x12, 0xb5, 0x01, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41,
+ 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x4d, 0xda, 0x41, 0x11, 0x6e,
+ 0x61, 0x6d, 0x65, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x22, 0x23, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x6c, 0x65, 0x72,
+ 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x11, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
+ 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x34, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65,
+ 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xcb, 0x01,
+ 0x0a, 0x11, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x63, 0xda, 0x41, 0x18, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x61, 0x6c, 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x3a, 0x0c, 0x61, 0x6c, 0x65, 0x72, 0x74,
+ 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x32, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x61, 0x6c,
+ 0x65, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x6c, 0x65, 0x72, 0x74,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41,
+ 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74,
+ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74,
+ 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f,
+ 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x42, 0x11, 0x41, 0x6c, 0x65, 0x72, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f,
+ 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a,
+ 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_alert_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_alert_service_proto_rawDescData = file_google_monitoring_v3_alert_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_alert_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_alert_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_alert_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_alert_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_alert_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_alert_service_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_google_monitoring_v3_alert_service_proto_goTypes = []any{
+ (*CreateAlertPolicyRequest)(nil), // 0: google.monitoring.v3.CreateAlertPolicyRequest
+ (*GetAlertPolicyRequest)(nil), // 1: google.monitoring.v3.GetAlertPolicyRequest
+ (*ListAlertPoliciesRequest)(nil), // 2: google.monitoring.v3.ListAlertPoliciesRequest
+ (*ListAlertPoliciesResponse)(nil), // 3: google.monitoring.v3.ListAlertPoliciesResponse
+ (*UpdateAlertPolicyRequest)(nil), // 4: google.monitoring.v3.UpdateAlertPolicyRequest
+ (*DeleteAlertPolicyRequest)(nil), // 5: google.monitoring.v3.DeleteAlertPolicyRequest
+ (*AlertPolicy)(nil), // 6: google.monitoring.v3.AlertPolicy
+ (*fieldmaskpb.FieldMask)(nil), // 7: google.protobuf.FieldMask
+ (*emptypb.Empty)(nil), // 8: google.protobuf.Empty
+}
+var file_google_monitoring_v3_alert_service_proto_depIdxs = []int32{
+ 6, // 0: google.monitoring.v3.CreateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy
+ 6, // 1: google.monitoring.v3.ListAlertPoliciesResponse.alert_policies:type_name -> google.monitoring.v3.AlertPolicy
+ 7, // 2: google.monitoring.v3.UpdateAlertPolicyRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 6, // 3: google.monitoring.v3.UpdateAlertPolicyRequest.alert_policy:type_name -> google.monitoring.v3.AlertPolicy
+ 2, // 4: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:input_type -> google.monitoring.v3.ListAlertPoliciesRequest
+ 1, // 5: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:input_type -> google.monitoring.v3.GetAlertPolicyRequest
+ 0, // 6: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:input_type -> google.monitoring.v3.CreateAlertPolicyRequest
+ 5, // 7: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:input_type -> google.monitoring.v3.DeleteAlertPolicyRequest
+ 4, // 8: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:input_type -> google.monitoring.v3.UpdateAlertPolicyRequest
+ 3, // 9: google.monitoring.v3.AlertPolicyService.ListAlertPolicies:output_type -> google.monitoring.v3.ListAlertPoliciesResponse
+ 6, // 10: google.monitoring.v3.AlertPolicyService.GetAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
+ 6, // 11: google.monitoring.v3.AlertPolicyService.CreateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
+ 8, // 12: google.monitoring.v3.AlertPolicyService.DeleteAlertPolicy:output_type -> google.protobuf.Empty
+ 6, // 13: google.monitoring.v3.AlertPolicyService.UpdateAlertPolicy:output_type -> google.monitoring.v3.AlertPolicy
+ 9, // [9:14] is the sub-list for method output_type
+ 4, // [4:9] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_alert_service_proto_init() }
+func file_google_monitoring_v3_alert_service_proto_init() {
+ if File_google_monitoring_v3_alert_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_alert_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_alert_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_alert_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_alert_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_alert_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_alert_service_proto = out.File
+ file_google_monitoring_v3_alert_service_proto_rawDesc = nil
+ file_google_monitoring_v3_alert_service_proto_goTypes = nil
+ file_google_monitoring_v3_alert_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// AlertPolicyServiceClient is the client API for AlertPolicyService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AlertPolicyServiceClient interface {
+ // Lists the existing alerting policies for the workspace.
+ ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error)
+ // Gets a single alerting policy.
+ GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+ // Creates a new alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+ // Deletes an alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Updates an alerting policy. You can either replace the entire policy with
+ // a new one or replace only certain fields in the current alerting policy by
+ // specifying the fields to be updated via `updateMask`. Returns the
+ // updated alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error)
+}
+
+type alertPolicyServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewAlertPolicyServiceClient(cc grpc.ClientConnInterface) AlertPolicyServiceClient {
+ return &alertPolicyServiceClient{cc}
+}
+
+func (c *alertPolicyServiceClient) ListAlertPolicies(ctx context.Context, in *ListAlertPoliciesRequest, opts ...grpc.CallOption) (*ListAlertPoliciesResponse, error) {
+ out := new(ListAlertPoliciesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) GetAlertPolicy(ctx context.Context, in *GetAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) CreateAlertPolicy(ctx context.Context, in *CreateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) DeleteAlertPolicy(ctx context.Context, in *DeleteAlertPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *alertPolicyServiceClient) UpdateAlertPolicy(ctx context.Context, in *UpdateAlertPolicyRequest, opts ...grpc.CallOption) (*AlertPolicy, error) {
+ out := new(AlertPolicy)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// AlertPolicyServiceServer is the server API for AlertPolicyService service.
+type AlertPolicyServiceServer interface {
+ // Lists the existing alerting policies for the workspace.
+ ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error)
+ // Gets a single alerting policy.
+ GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error)
+ // Creates a new alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error)
+ // Deletes an alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error)
+ // Updates an alerting policy. You can either replace the entire policy with
+ // a new one or replace only certain fields in the current alerting policy by
+ // specifying the fields to be updated via `updateMask`. Returns the
+ // updated alerting policy.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // alerting policies in a single project. This includes calls to
+ // CreateAlertPolicy, DeleteAlertPolicy and UpdateAlertPolicy.
+ UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error)
+}
+
+// UnimplementedAlertPolicyServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedAlertPolicyServiceServer struct {
+}
+
+func (*UnimplementedAlertPolicyServiceServer) ListAlertPolicies(context.Context, *ListAlertPoliciesRequest) (*ListAlertPoliciesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListAlertPolicies not implemented")
+}
+func (*UnimplementedAlertPolicyServiceServer) GetAlertPolicy(context.Context, *GetAlertPolicyRequest) (*AlertPolicy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetAlertPolicy not implemented")
+}
+func (*UnimplementedAlertPolicyServiceServer) CreateAlertPolicy(context.Context, *CreateAlertPolicyRequest) (*AlertPolicy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateAlertPolicy not implemented")
+}
+func (*UnimplementedAlertPolicyServiceServer) DeleteAlertPolicy(context.Context, *DeleteAlertPolicyRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteAlertPolicy not implemented")
+}
+func (*UnimplementedAlertPolicyServiceServer) UpdateAlertPolicy(context.Context, *UpdateAlertPolicyRequest) (*AlertPolicy, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateAlertPolicy not implemented")
+}
+
+func RegisterAlertPolicyServiceServer(s *grpc.Server, srv AlertPolicyServiceServer) {
+ s.RegisterService(&_AlertPolicyService_serviceDesc, srv)
+}
+
+func _AlertPolicyService_ListAlertPolicies_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListAlertPoliciesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/ListAlertPolicies",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).ListAlertPolicies(ctx, req.(*ListAlertPoliciesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_GetAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/GetAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).GetAlertPolicy(ctx, req.(*GetAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_CreateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/CreateAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).CreateAlertPolicy(ctx, req.(*CreateAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_DeleteAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/DeleteAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).DeleteAlertPolicy(ctx, req.(*DeleteAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _AlertPolicyService_UpdateAlertPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateAlertPolicyRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.AlertPolicyService/UpdateAlertPolicy",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AlertPolicyServiceServer).UpdateAlertPolicy(ctx, req.(*UpdateAlertPolicyRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _AlertPolicyService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.AlertPolicyService",
+ HandlerType: (*AlertPolicyServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListAlertPolicies",
+ Handler: _AlertPolicyService_ListAlertPolicies_Handler,
+ },
+ {
+ MethodName: "GetAlertPolicy",
+ Handler: _AlertPolicyService_GetAlertPolicy_Handler,
+ },
+ {
+ MethodName: "CreateAlertPolicy",
+ Handler: _AlertPolicyService_CreateAlertPolicy_Handler,
+ },
+ {
+ MethodName: "DeleteAlertPolicy",
+ Handler: _AlertPolicyService_DeleteAlertPolicy_Handler,
+ },
+ {
+ MethodName: "UpdateAlertPolicy",
+ Handler: _AlertPolicyService_UpdateAlertPolicy_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/alert_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go
new file mode 100644
index 000000000..81b8c8f5e
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/common.pb.go
@@ -0,0 +1,1121 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/common.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ distribution "google.golang.org/genproto/googleapis/api/distribution"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Specifies an ordering relationship on two arguments, called `left` and
+// `right`.
+type ComparisonType int32
+
+const (
+ // No ordering relationship is specified.
+ ComparisonType_COMPARISON_UNSPECIFIED ComparisonType = 0
+ // True if the left argument is greater than the right argument.
+ ComparisonType_COMPARISON_GT ComparisonType = 1
+ // True if the left argument is greater than or equal to the right argument.
+ ComparisonType_COMPARISON_GE ComparisonType = 2
+ // True if the left argument is less than the right argument.
+ ComparisonType_COMPARISON_LT ComparisonType = 3
+ // True if the left argument is less than or equal to the right argument.
+ ComparisonType_COMPARISON_LE ComparisonType = 4
+ // True if the left argument is equal to the right argument.
+ ComparisonType_COMPARISON_EQ ComparisonType = 5
+ // True if the left argument is not equal to the right argument.
+ ComparisonType_COMPARISON_NE ComparisonType = 6
+)
+
+// Enum value maps for ComparisonType.
+var (
+ ComparisonType_name = map[int32]string{
+ 0: "COMPARISON_UNSPECIFIED",
+ 1: "COMPARISON_GT",
+ 2: "COMPARISON_GE",
+ 3: "COMPARISON_LT",
+ 4: "COMPARISON_LE",
+ 5: "COMPARISON_EQ",
+ 6: "COMPARISON_NE",
+ }
+ ComparisonType_value = map[string]int32{
+ "COMPARISON_UNSPECIFIED": 0,
+ "COMPARISON_GT": 1,
+ "COMPARISON_GE": 2,
+ "COMPARISON_LT": 3,
+ "COMPARISON_LE": 4,
+ "COMPARISON_EQ": 5,
+ "COMPARISON_NE": 6,
+ }
+)
+
+func (x ComparisonType) Enum() *ComparisonType {
+ p := new(ComparisonType)
+ *p = x
+ return p
+}
+
+func (x ComparisonType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ComparisonType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_common_proto_enumTypes[0].Descriptor()
+}
+
+func (ComparisonType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_common_proto_enumTypes[0]
+}
+
+func (x ComparisonType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ComparisonType.Descriptor instead.
+func (ComparisonType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0}
+}
+
+// The tier of service for a Metrics Scope. Please see the
+// [service tiers
+// documentation](https://cloud.google.com/monitoring/workspaces/tiers) for more
+// details.
+//
+// Deprecated: Marked as deprecated in google/monitoring/v3/common.proto.
+type ServiceTier int32
+
+const (
+ // An invalid sentinel value, used to indicate that a tier has not
+ // been provided explicitly.
+ ServiceTier_SERVICE_TIER_UNSPECIFIED ServiceTier = 0
+ // The Cloud Monitoring Basic tier, a free tier of service that provides basic
+ // features, a moderate allotment of logs, and access to built-in metrics.
+ // A number of features are not available in this tier. For more details,
+ // see [the service tiers
+ // documentation](https://cloud.google.com/monitoring/workspaces/tiers).
+ ServiceTier_SERVICE_TIER_BASIC ServiceTier = 1
+ // The Cloud Monitoring Premium tier, a higher, more expensive tier of service
+ // that provides access to all Cloud Monitoring features, lets you use Cloud
+ // Monitoring with AWS accounts, and has a larger allotments for logs and
+ // metrics. For more details, see [the service tiers
+ // documentation](https://cloud.google.com/monitoring/workspaces/tiers).
+ ServiceTier_SERVICE_TIER_PREMIUM ServiceTier = 2
+)
+
+// Enum value maps for ServiceTier.
+var (
+ ServiceTier_name = map[int32]string{
+ 0: "SERVICE_TIER_UNSPECIFIED",
+ 1: "SERVICE_TIER_BASIC",
+ 2: "SERVICE_TIER_PREMIUM",
+ }
+ ServiceTier_value = map[string]int32{
+ "SERVICE_TIER_UNSPECIFIED": 0,
+ "SERVICE_TIER_BASIC": 1,
+ "SERVICE_TIER_PREMIUM": 2,
+ }
+)
+
+func (x ServiceTier) Enum() *ServiceTier {
+ p := new(ServiceTier)
+ *p = x
+ return p
+}
+
+func (x ServiceTier) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ServiceTier) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_common_proto_enumTypes[1].Descriptor()
+}
+
+func (ServiceTier) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_common_proto_enumTypes[1]
+}
+
+func (x ServiceTier) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ServiceTier.Descriptor instead.
+func (ServiceTier) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1}
+}
+
+// The `Aligner` specifies the operation that will be applied to the data
+// points in each alignment period in a time series. Except for
+// `ALIGN_NONE`, which specifies that no operation be applied, each alignment
+// operation replaces the set of data values in each alignment period with
+// a single value: the result of applying the operation to the data values.
+// An aligned time series has a single data value at the end of each
+// `alignment_period`.
+//
+// An alignment operation can change the data type of the values, too. For
+// example, if you apply a counting operation to boolean values, the data
+// `value_type` in the original time series is `BOOLEAN`, but the `value_type`
+// in the aligned result is `INT64`.
+type Aggregation_Aligner int32
+
+const (
+ // No alignment. Raw data is returned. Not valid if cross-series reduction
+ // is requested. The `value_type` of the result is the same as the
+ // `value_type` of the input.
+ Aggregation_ALIGN_NONE Aggregation_Aligner = 0
+ // Align and convert to
+ // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA].
+ // The output is `delta = y1 - y0`.
+ //
+ // This alignment is valid for
+ // [CUMULATIVE][google.api.MetricDescriptor.MetricKind.CUMULATIVE] and
+ // `DELTA` metrics. If the selected alignment period results in periods
+ // with no data, then the aligned value for such a period is created by
+ // interpolation. The `value_type` of the aligned result is the same as
+ // the `value_type` of the input.
+ Aggregation_ALIGN_DELTA Aggregation_Aligner = 1
+ // Align and convert to a rate. The result is computed as
+ // `rate = (y1 - y0)/(t1 - t0)`, or "delta over time".
+ // Think of this aligner as providing the slope of the line that passes
+ // through the value at the start and at the end of the `alignment_period`.
+ //
+ // This aligner is valid for `CUMULATIVE`
+ // and `DELTA` metrics with numeric values. If the selected alignment
+ // period results in periods with no data, then the aligned value for
+ // such a period is created by interpolation. The output is a `GAUGE`
+ // metric with `value_type` `DOUBLE`.
+ //
+ // If, by "rate", you mean "percentage change", see the
+ // `ALIGN_PERCENT_CHANGE` aligner instead.
+ Aggregation_ALIGN_RATE Aggregation_Aligner = 2
+ // Align by interpolating between adjacent points around the alignment
+ // period boundary. This aligner is valid for `GAUGE` metrics with
+ // numeric values. The `value_type` of the aligned result is the same as the
+ // `value_type` of the input.
+ Aggregation_ALIGN_INTERPOLATE Aggregation_Aligner = 3
+ // Align by moving the most recent data point before the end of the
+ // alignment period to the boundary at the end of the alignment
+ // period. This aligner is valid for `GAUGE` metrics. The `value_type` of
+ // the aligned result is the same as the `value_type` of the input.
+ Aggregation_ALIGN_NEXT_OLDER Aggregation_Aligner = 4
+ // Align the time series by returning the minimum value in each alignment
+ // period. This aligner is valid for `GAUGE` and `DELTA` metrics with
+ // numeric values. The `value_type` of the aligned result is the same as
+ // the `value_type` of the input.
+ Aggregation_ALIGN_MIN Aggregation_Aligner = 10
+ // Align the time series by returning the maximum value in each alignment
+ // period. This aligner is valid for `GAUGE` and `DELTA` metrics with
+ // numeric values. The `value_type` of the aligned result is the same as
+ // the `value_type` of the input.
+ Aggregation_ALIGN_MAX Aggregation_Aligner = 11
+ // Align the time series by returning the mean value in each alignment
+ // period. This aligner is valid for `GAUGE` and `DELTA` metrics with
+ // numeric values. The `value_type` of the aligned result is `DOUBLE`.
+ Aggregation_ALIGN_MEAN Aggregation_Aligner = 12
+ // Align the time series by returning the number of values in each alignment
+ // period. This aligner is valid for `GAUGE` and `DELTA` metrics with
+ // numeric or Boolean values. The `value_type` of the aligned result is
+ // `INT64`.
+ Aggregation_ALIGN_COUNT Aggregation_Aligner = 13
+ // Align the time series by returning the sum of the values in each
+ // alignment period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with numeric and distribution values. The `value_type` of the
+ // aligned result is the same as the `value_type` of the input.
+ Aggregation_ALIGN_SUM Aggregation_Aligner = 14
+ // Align the time series by returning the standard deviation of the values
+ // in each alignment period. This aligner is valid for `GAUGE` and
+ // `DELTA` metrics with numeric values. The `value_type` of the output is
+ // `DOUBLE`.
+ Aggregation_ALIGN_STDDEV Aggregation_Aligner = 15
+ // Align the time series by returning the number of `True` values in
+ // each alignment period. This aligner is valid for `GAUGE` metrics with
+ // Boolean values. The `value_type` of the output is `INT64`.
+ Aggregation_ALIGN_COUNT_TRUE Aggregation_Aligner = 16
+ // Align the time series by returning the number of `False` values in
+ // each alignment period. This aligner is valid for `GAUGE` metrics with
+ // Boolean values. The `value_type` of the output is `INT64`.
+ Aggregation_ALIGN_COUNT_FALSE Aggregation_Aligner = 24
+ // Align the time series by returning the ratio of the number of `True`
+ // values to the total number of values in each alignment period. This
+ // aligner is valid for `GAUGE` metrics with Boolean values. The output
+ // value is in the range [0.0, 1.0] and has `value_type` `DOUBLE`.
+ Aggregation_ALIGN_FRACTION_TRUE Aggregation_Aligner = 17
+ // Align the time series by using [percentile
+ // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting
+ // data point in each alignment period is the 99th percentile of all data
+ // points in the period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with distribution values. The output is a `GAUGE` metric with
+ // `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENTILE_99 Aggregation_Aligner = 18
+ // Align the time series by using [percentile
+ // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting
+ // data point in each alignment period is the 95th percentile of all data
+ // points in the period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with distribution values. The output is a `GAUGE` metric with
+ // `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENTILE_95 Aggregation_Aligner = 19
+ // Align the time series by using [percentile
+ // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting
+ // data point in each alignment period is the 50th percentile of all data
+ // points in the period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with distribution values. The output is a `GAUGE` metric with
+ // `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENTILE_50 Aggregation_Aligner = 20
+ // Align the time series by using [percentile
+ // aggregation](https://en.wikipedia.org/wiki/Percentile). The resulting
+ // data point in each alignment period is the 5th percentile of all data
+ // points in the period. This aligner is valid for `GAUGE` and `DELTA`
+ // metrics with distribution values. The output is a `GAUGE` metric with
+ // `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENTILE_05 Aggregation_Aligner = 21
+ // Align and convert to a percentage change. This aligner is valid for
+ // `GAUGE` and `DELTA` metrics with numeric values. This alignment returns
+ // `((current - previous)/previous) * 100`, where the value of `previous` is
+ // determined based on the `alignment_period`.
+ //
+ // If the values of `current` and `previous` are both 0, then the returned
+ // value is 0. If only `previous` is 0, the returned value is infinity.
+ //
+ // A 10-minute moving mean is computed at each point of the alignment period
+ // prior to the above calculation to smooth the metric and prevent false
+ // positives from very short-lived spikes. The moving mean is only
+ // applicable for data whose values are `>= 0`. Any values `< 0` are
+ // treated as a missing datapoint, and are ignored. While `DELTA`
+ // metrics are accepted by this alignment, special care should be taken that
+ // the values for the metric will always be positive. The output is a
+ // `GAUGE` metric with `value_type` `DOUBLE`.
+ Aggregation_ALIGN_PERCENT_CHANGE Aggregation_Aligner = 23
+)
+
+// Enum value maps for Aggregation_Aligner.
+var (
+ Aggregation_Aligner_name = map[int32]string{
+ 0: "ALIGN_NONE",
+ 1: "ALIGN_DELTA",
+ 2: "ALIGN_RATE",
+ 3: "ALIGN_INTERPOLATE",
+ 4: "ALIGN_NEXT_OLDER",
+ 10: "ALIGN_MIN",
+ 11: "ALIGN_MAX",
+ 12: "ALIGN_MEAN",
+ 13: "ALIGN_COUNT",
+ 14: "ALIGN_SUM",
+ 15: "ALIGN_STDDEV",
+ 16: "ALIGN_COUNT_TRUE",
+ 24: "ALIGN_COUNT_FALSE",
+ 17: "ALIGN_FRACTION_TRUE",
+ 18: "ALIGN_PERCENTILE_99",
+ 19: "ALIGN_PERCENTILE_95",
+ 20: "ALIGN_PERCENTILE_50",
+ 21: "ALIGN_PERCENTILE_05",
+ 23: "ALIGN_PERCENT_CHANGE",
+ }
+ Aggregation_Aligner_value = map[string]int32{
+ "ALIGN_NONE": 0,
+ "ALIGN_DELTA": 1,
+ "ALIGN_RATE": 2,
+ "ALIGN_INTERPOLATE": 3,
+ "ALIGN_NEXT_OLDER": 4,
+ "ALIGN_MIN": 10,
+ "ALIGN_MAX": 11,
+ "ALIGN_MEAN": 12,
+ "ALIGN_COUNT": 13,
+ "ALIGN_SUM": 14,
+ "ALIGN_STDDEV": 15,
+ "ALIGN_COUNT_TRUE": 16,
+ "ALIGN_COUNT_FALSE": 24,
+ "ALIGN_FRACTION_TRUE": 17,
+ "ALIGN_PERCENTILE_99": 18,
+ "ALIGN_PERCENTILE_95": 19,
+ "ALIGN_PERCENTILE_50": 20,
+ "ALIGN_PERCENTILE_05": 21,
+ "ALIGN_PERCENT_CHANGE": 23,
+ }
+)
+
+func (x Aggregation_Aligner) Enum() *Aggregation_Aligner {
+ p := new(Aggregation_Aligner)
+ *p = x
+ return p
+}
+
+func (x Aggregation_Aligner) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Aggregation_Aligner) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_common_proto_enumTypes[2].Descriptor()
+}
+
+func (Aggregation_Aligner) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_common_proto_enumTypes[2]
+}
+
+func (x Aggregation_Aligner) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Aggregation_Aligner.Descriptor instead.
+func (Aggregation_Aligner) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// A Reducer operation describes how to aggregate data points from multiple
+// time series into a single time series, where the value of each data point
+// in the resulting series is a function of all the already aligned values in
+// the input time series.
+type Aggregation_Reducer int32
+
+const (
+ // No cross-time series reduction. The output of the `Aligner` is
+ // returned.
+ Aggregation_REDUCE_NONE Aggregation_Reducer = 0
+ // Reduce by computing the mean value across time series for each
+ // alignment period. This reducer is valid for
+ // [DELTA][google.api.MetricDescriptor.MetricKind.DELTA] and
+ // [GAUGE][google.api.MetricDescriptor.MetricKind.GAUGE] metrics with
+ // numeric or distribution values. The `value_type` of the output is
+ // [DOUBLE][google.api.MetricDescriptor.ValueType.DOUBLE].
+ Aggregation_REDUCE_MEAN Aggregation_Reducer = 1
+ // Reduce by computing the minimum value across time series for each
+ // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics
+ // with numeric values. The `value_type` of the output is the same as the
+ // `value_type` of the input.
+ Aggregation_REDUCE_MIN Aggregation_Reducer = 2
+ // Reduce by computing the maximum value across time series for each
+ // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics
+ // with numeric values. The `value_type` of the output is the same as the
+ // `value_type` of the input.
+ Aggregation_REDUCE_MAX Aggregation_Reducer = 3
+ // Reduce by computing the sum across time series for each
+ // alignment period. This reducer is valid for `DELTA` and `GAUGE` metrics
+ // with numeric and distribution values. The `value_type` of the output is
+ // the same as the `value_type` of the input.
+ Aggregation_REDUCE_SUM Aggregation_Reducer = 4
+ // Reduce by computing the standard deviation across time series
+ // for each alignment period. This reducer is valid for `DELTA` and
+ // `GAUGE` metrics with numeric or distribution values. The `value_type`
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_STDDEV Aggregation_Reducer = 5
+ // Reduce by computing the number of data points across time series
+ // for each alignment period. This reducer is valid for `DELTA` and
+ // `GAUGE` metrics of numeric, Boolean, distribution, and string
+ // `value_type`. The `value_type` of the output is `INT64`.
+ Aggregation_REDUCE_COUNT Aggregation_Reducer = 6
+ // Reduce by computing the number of `True`-valued data points across time
+ // series for each alignment period. This reducer is valid for `DELTA` and
+ // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output
+ // is `INT64`.
+ Aggregation_REDUCE_COUNT_TRUE Aggregation_Reducer = 7
+ // Reduce by computing the number of `False`-valued data points across time
+ // series for each alignment period. This reducer is valid for `DELTA` and
+ // `GAUGE` metrics of Boolean `value_type`. The `value_type` of the output
+ // is `INT64`.
+ Aggregation_REDUCE_COUNT_FALSE Aggregation_Reducer = 15
+ // Reduce by computing the ratio of the number of `True`-valued data points
+ // to the total number of data points for each alignment period. This
+ // reducer is valid for `DELTA` and `GAUGE` metrics of Boolean `value_type`.
+ // The output value is in the range [0.0, 1.0] and has `value_type`
+ // `DOUBLE`.
+ Aggregation_REDUCE_FRACTION_TRUE Aggregation_Reducer = 8
+ // Reduce by computing the [99th
+ // percentile](https://en.wikipedia.org/wiki/Percentile) of data points
+ // across time series for each alignment period. This reducer is valid for
+ // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_PERCENTILE_99 Aggregation_Reducer = 9
+ // Reduce by computing the [95th
+ // percentile](https://en.wikipedia.org/wiki/Percentile) of data points
+ // across time series for each alignment period. This reducer is valid for
+ // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_PERCENTILE_95 Aggregation_Reducer = 10
+ // Reduce by computing the [50th
+ // percentile](https://en.wikipedia.org/wiki/Percentile) of data points
+ // across time series for each alignment period. This reducer is valid for
+ // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_PERCENTILE_50 Aggregation_Reducer = 11
+ // Reduce by computing the [5th
+ // percentile](https://en.wikipedia.org/wiki/Percentile) of data points
+ // across time series for each alignment period. This reducer is valid for
+ // `GAUGE` and `DELTA` metrics of numeric and distribution type. The value
+ // of the output is `DOUBLE`.
+ Aggregation_REDUCE_PERCENTILE_05 Aggregation_Reducer = 12
+)
+
+// Enum value maps for Aggregation_Reducer.
+var (
+ Aggregation_Reducer_name = map[int32]string{
+ 0: "REDUCE_NONE",
+ 1: "REDUCE_MEAN",
+ 2: "REDUCE_MIN",
+ 3: "REDUCE_MAX",
+ 4: "REDUCE_SUM",
+ 5: "REDUCE_STDDEV",
+ 6: "REDUCE_COUNT",
+ 7: "REDUCE_COUNT_TRUE",
+ 15: "REDUCE_COUNT_FALSE",
+ 8: "REDUCE_FRACTION_TRUE",
+ 9: "REDUCE_PERCENTILE_99",
+ 10: "REDUCE_PERCENTILE_95",
+ 11: "REDUCE_PERCENTILE_50",
+ 12: "REDUCE_PERCENTILE_05",
+ }
+ Aggregation_Reducer_value = map[string]int32{
+ "REDUCE_NONE": 0,
+ "REDUCE_MEAN": 1,
+ "REDUCE_MIN": 2,
+ "REDUCE_MAX": 3,
+ "REDUCE_SUM": 4,
+ "REDUCE_STDDEV": 5,
+ "REDUCE_COUNT": 6,
+ "REDUCE_COUNT_TRUE": 7,
+ "REDUCE_COUNT_FALSE": 15,
+ "REDUCE_FRACTION_TRUE": 8,
+ "REDUCE_PERCENTILE_99": 9,
+ "REDUCE_PERCENTILE_95": 10,
+ "REDUCE_PERCENTILE_50": 11,
+ "REDUCE_PERCENTILE_05": 12,
+ }
+)
+
+func (x Aggregation_Reducer) Enum() *Aggregation_Reducer {
+ p := new(Aggregation_Reducer)
+ *p = x
+ return p
+}
+
+func (x Aggregation_Reducer) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Aggregation_Reducer) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_common_proto_enumTypes[3].Descriptor()
+}
+
+func (Aggregation_Reducer) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_common_proto_enumTypes[3]
+}
+
+func (x Aggregation_Reducer) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Aggregation_Reducer.Descriptor instead.
+func (Aggregation_Reducer) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2, 1}
+}
+
+// A single strongly-typed value.
+type TypedValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The typed value field.
+ //
+ // Types that are assignable to Value:
+ //
+ // *TypedValue_BoolValue
+ // *TypedValue_Int64Value
+ // *TypedValue_DoubleValue
+ // *TypedValue_StringValue
+ // *TypedValue_DistributionValue
+ Value isTypedValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *TypedValue) Reset() {
+ *x = TypedValue{}
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TypedValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedValue) ProtoMessage() {}
+
+func (x *TypedValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedValue.ProtoReflect.Descriptor instead.
+func (*TypedValue) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *TypedValue) GetValue() isTypedValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *TypedValue) GetBoolValue() bool {
+ if x, ok := x.GetValue().(*TypedValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *TypedValue) GetInt64Value() int64 {
+ if x, ok := x.GetValue().(*TypedValue_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *TypedValue) GetDoubleValue() float64 {
+ if x, ok := x.GetValue().(*TypedValue_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *TypedValue) GetStringValue() string {
+ if x, ok := x.GetValue().(*TypedValue_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *TypedValue) GetDistributionValue() *distribution.Distribution {
+ if x, ok := x.GetValue().(*TypedValue_DistributionValue); ok {
+ return x.DistributionValue
+ }
+ return nil
+}
+
+type isTypedValue_Value interface {
+ isTypedValue_Value()
+}
+
+type TypedValue_BoolValue struct {
+ // A Boolean value: `true` or `false`.
+ BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type TypedValue_Int64Value struct {
+ // A 64-bit integer. Its range is approximately ±9.2x1018.
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type TypedValue_DoubleValue struct {
+ // A 64-bit double-precision floating-point number. Its magnitude
+ // is approximately ±10±300 and it has 16
+ // significant digits of precision.
+ DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type TypedValue_StringValue struct {
+ // A variable-length string value.
+ StringValue string `protobuf:"bytes,4,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type TypedValue_DistributionValue struct {
+ // A distribution value.
+ DistributionValue *distribution.Distribution `protobuf:"bytes,5,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
+}
+
+func (*TypedValue_BoolValue) isTypedValue_Value() {}
+
+func (*TypedValue_Int64Value) isTypedValue_Value() {}
+
+func (*TypedValue_DoubleValue) isTypedValue_Value() {}
+
+func (*TypedValue_StringValue) isTypedValue_Value() {}
+
+func (*TypedValue_DistributionValue) isTypedValue_Value() {}
+
+// Describes a time interval:
+//
+// - Reads: A half-open time interval. It includes the end time but
+// excludes the start time: `(startTime, endTime]`. The start time
+// must be specified, must be earlier than the end time, and should be
+// no older than the data retention period for the metric.
+// - Writes: A closed time interval. It extends from the start time to the end
+// time,
+// and includes both: `[startTime, endTime]`. Valid time intervals
+// depend on the
+// [`MetricKind`](https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.metricDescriptors#MetricKind)
+// of the metric value. The end time must not be earlier than the start
+// time, and the end time must not be more than 25 hours in the past or more
+// than five minutes in the future.
+// - For `GAUGE` metrics, the `startTime` value is technically optional; if
+// no value is specified, the start time defaults to the value of the
+// end time, and the interval represents a single point in time. If both
+// start and end times are specified, they must be identical. Such an
+// interval is valid only for `GAUGE` metrics, which are point-in-time
+// measurements. The end time of a new interval must be at least a
+// millisecond after the end time of the previous interval.
+// - For `DELTA` metrics, the start time and end time must specify a
+// non-zero interval, with subsequent points specifying contiguous and
+// non-overlapping intervals. For `DELTA` metrics, the start time of
+// the next interval must be at least a millisecond after the end time
+// of the previous interval.
+// - For `CUMULATIVE` metrics, the start time and end time must specify a
+// non-zero interval, with subsequent points specifying the same
+// start time and increasing end times, until an event resets the
+// cumulative value to zero and sets a new start time for the following
+// points. The new start time must be at least a millisecond after the
+// end time of the previous interval.
+// - The start time of a new interval must be at least a millisecond after
+// the
+// end time of the previous interval because intervals are closed. If the
+// start time of a new interval is the same as the end time of the
+// previous interval, then data written at the new start time could
+// overwrite data written at the previous end time.
+type TimeInterval struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The end of the time interval.
+ EndTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // Optional. The beginning of the time interval. The default value
+ // for the start time is the end time. The start time must not be
+ // later than the end time.
+ StartTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+}
+
+func (x *TimeInterval) Reset() {
+ *x = TimeInterval{}
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeInterval) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeInterval) ProtoMessage() {}
+
+func (x *TimeInterval) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeInterval.ProtoReflect.Descriptor instead.
+func (*TimeInterval) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *TimeInterval) GetEndTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.EndTime
+ }
+ return nil
+}
+
+func (x *TimeInterval) GetStartTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.StartTime
+ }
+ return nil
+}
+
+// Describes how to combine multiple time series to provide a different view of
+// the data. Aggregation of time series is done in two steps. First, each time
+// series in the set is _aligned_ to the same time interval boundaries, then the
+// set of time series is optionally _reduced_ in number.
+//
+// Alignment consists of applying the `per_series_aligner` operation
+// to each time series after its data has been divided into regular
+// `alignment_period` time intervals. This process takes _all_ of the data
+// points in an alignment period, applies a mathematical transformation such as
+// averaging, minimum, maximum, delta, etc., and converts them into a single
+// data point per period.
+//
+// Reduction is when the aligned and transformed time series can optionally be
+// combined, reducing the number of time series through similar mathematical
+// transformations. Reduction involves applying a `cross_series_reducer` to
+// all the time series, optionally sorting the time series into subsets with
+// `group_by_fields`, and applying the reducer to each subset.
+//
+// The raw time series data can contain a huge amount of information from
+// multiple sources. Alignment and reduction transforms this mass of data into
+// a more manageable and representative collection of data, for example "the
+// 95% latency across the average of all tasks in a cluster". This
+// representative data can be more easily graphed and comprehended, and the
+// individual time series data is still available for later drilldown. For more
+// details, see [Filtering and
+// aggregation](https://cloud.google.com/monitoring/api/v3/aggregation).
+type Aggregation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The `alignment_period` specifies a time interval, in seconds, that is used
+ // to divide the data in all the
+ // [time series][google.monitoring.v3.TimeSeries] into consistent blocks of
+ // time. This will be done before the per-series aligner can be applied to
+ // the data.
+ //
+ // The value must be at least 60 seconds. If a per-series
+ // aligner other than `ALIGN_NONE` is specified, this field is required or an
+ // error is returned. If no per-series aligner is specified, or the aligner
+ // `ALIGN_NONE` is specified, then this field is ignored.
+ //
+ // The maximum value of the `alignment_period` is 104 weeks (2 years) for
+ // charts, and 90,000 seconds (25 hours) for alerting policies.
+ AlignmentPeriod *durationpb.Duration `protobuf:"bytes,1,opt,name=alignment_period,json=alignmentPeriod,proto3" json:"alignment_period,omitempty"`
+ // An `Aligner` describes how to bring the data points in a single
+ // time series into temporal alignment. Except for `ALIGN_NONE`, all
+ // alignments cause all the data points in an `alignment_period` to be
+ // mathematically grouped together, resulting in a single data point for
+ // each `alignment_period` with end timestamp at the end of the period.
+ //
+ // Not all alignment operations may be applied to all time series. The valid
+ // choices depend on the `metric_kind` and `value_type` of the original time
+ // series. Alignment can change the `metric_kind` or the `value_type` of
+ // the time series.
+ //
+ // Time series data must be aligned in order to perform cross-time
+ // series reduction. If `cross_series_reducer` is specified, then
+ // `per_series_aligner` must be specified and not equal to `ALIGN_NONE`
+ // and `alignment_period` must be specified; otherwise, an error is
+ // returned.
+ PerSeriesAligner Aggregation_Aligner `protobuf:"varint,2,opt,name=per_series_aligner,json=perSeriesAligner,proto3,enum=google.monitoring.v3.Aggregation_Aligner" json:"per_series_aligner,omitempty"`
+ // The reduction operation to be used to combine time series into a single
+ // time series, where the value of each data point in the resulting series is
+ // a function of all the already aligned values in the input time series.
+ //
+ // Not all reducer operations can be applied to all time series. The valid
+ // choices depend on the `metric_kind` and the `value_type` of the original
+ // time series. Reduction can yield a time series with a different
+ // `metric_kind` or `value_type` than the input time series.
+ //
+ // Time series data must first be aligned (see `per_series_aligner`) in order
+ // to perform cross-time series reduction. If `cross_series_reducer` is
+ // specified, then `per_series_aligner` must be specified, and must not be
+ // `ALIGN_NONE`. An `alignment_period` must also be specified; otherwise, an
+ // error is returned.
+ CrossSeriesReducer Aggregation_Reducer `protobuf:"varint,4,opt,name=cross_series_reducer,json=crossSeriesReducer,proto3,enum=google.monitoring.v3.Aggregation_Reducer" json:"cross_series_reducer,omitempty"`
+ // The set of fields to preserve when `cross_series_reducer` is
+ // specified. The `group_by_fields` determine how the time series are
+ // partitioned into subsets prior to applying the aggregation
+ // operation. Each subset contains time series that have the same
+ // value for each of the grouping fields. Each individual time
+ // series is a member of exactly one subset. The
+ // `cross_series_reducer` is applied to each subset of time series.
+ // It is not possible to reduce across different resource types, so
+ // this field implicitly contains `resource.type`. Fields not
+ // specified in `group_by_fields` are aggregated away. If
+ // `group_by_fields` is not specified and all the time series have
+ // the same resource type, then the time series are aggregated into
+ // a single output time series. If `cross_series_reducer` is not
+ // defined, this field is ignored.
+ GroupByFields []string `protobuf:"bytes,5,rep,name=group_by_fields,json=groupByFields,proto3" json:"group_by_fields,omitempty"`
+}
+
+func (x *Aggregation) Reset() {
+ *x = Aggregation{}
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Aggregation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Aggregation) ProtoMessage() {}
+
+func (x *Aggregation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_common_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Aggregation.ProtoReflect.Descriptor instead.
+func (*Aggregation) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Aggregation) GetAlignmentPeriod() *durationpb.Duration {
+ if x != nil {
+ return x.AlignmentPeriod
+ }
+ return nil
+}
+
+func (x *Aggregation) GetPerSeriesAligner() Aggregation_Aligner {
+ if x != nil {
+ return x.PerSeriesAligner
+ }
+ return Aggregation_ALIGN_NONE
+}
+
+func (x *Aggregation) GetCrossSeriesReducer() Aggregation_Reducer {
+ if x != nil {
+ return x.CrossSeriesReducer
+ }
+ return Aggregation_REDUCE_NONE
+}
+
+func (x *Aggregation) GetGroupByFields() []string {
+ if x != nil {
+ return x.GroupByFields
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_common_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_common_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x0a, 0x54, 0x79,
+ 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c,
+ 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09,
+ 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74,
+ 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
+ 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c,
+ 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x49, 0x0a, 0x12, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69,
+ 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x11,
+ 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x80, 0x01, 0x0a, 0x0c, 0x54,
+ 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x35, 0x0a, 0x08, 0x65,
+ 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xf3, 0x07,
+ 0x0a, 0x0b, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a,
+ 0x10, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x50, 0x65, 0x72,
+ 0x69, 0x6f, 0x64, 0x12, 0x57, 0x0a, 0x12, 0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x2e, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x70, 0x65, 0x72, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x14,
+ 0x63, 0x72, 0x6f, 0x73, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x64,
+ 0x75, 0x63, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65,
+ 0x64, 0x75, 0x63, 0x65, 0x72, 0x52, 0x12, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x53, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x5f, 0x62, 0x79, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0d, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x79, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x73, 0x22, 0x8b, 0x03, 0x0a, 0x07, 0x41, 0x6c, 0x69, 0x67, 0x6e, 0x65, 0x72, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a,
+ 0x0b, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x44, 0x45, 0x4c, 0x54, 0x41, 0x10, 0x01, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x52, 0x41, 0x54, 0x45, 0x10, 0x02, 0x12, 0x15,
+ 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x50, 0x4f, 0x4c,
+ 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4e,
+ 0x45, 0x58, 0x54, 0x5f, 0x4f, 0x4c, 0x44, 0x45, 0x52, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x41,
+ 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x0a, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c,
+ 0x49, 0x47, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x0b, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x4c, 0x49,
+ 0x47, 0x4e, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x0c, 0x12, 0x0f, 0x0a, 0x0b, 0x41, 0x4c, 0x49,
+ 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c,
+ 0x49, 0x47, 0x4e, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x10, 0x0a, 0x0c, 0x41, 0x4c, 0x49,
+ 0x47, 0x4e, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x0f, 0x12, 0x14, 0x0a, 0x10, 0x41,
+ 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10,
+ 0x10, 0x12, 0x15, 0x0a, 0x11, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54,
+ 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x18, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47,
+ 0x4e, 0x5f, 0x46, 0x52, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10,
+ 0x11, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45,
+ 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x39, 0x10, 0x12, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c,
+ 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39,
+ 0x35, 0x10, 0x13, 0x12, 0x17, 0x0a, 0x13, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52,
+ 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x14, 0x12, 0x17, 0x0a, 0x13,
+ 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45,
+ 0x5f, 0x30, 0x35, 0x10, 0x15, 0x12, 0x18, 0x0a, 0x14, 0x41, 0x4c, 0x49, 0x47, 0x4e, 0x5f, 0x50,
+ 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x17, 0x22,
+ 0xb1, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x64, 0x75, 0x63, 0x65, 0x72, 0x12, 0x0f, 0x0a, 0x0b, 0x52,
+ 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b,
+ 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x45, 0x41, 0x4e, 0x10, 0x01, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0x03, 0x12, 0x0e, 0x0a,
+ 0x0a, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x55, 0x4d, 0x10, 0x04, 0x12, 0x11, 0x0a,
+ 0x0d, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x53, 0x54, 0x44, 0x44, 0x45, 0x56, 0x10, 0x05,
+ 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54,
+ 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55,
+ 0x4e, 0x54, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x44,
+ 0x55, 0x43, 0x45, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10,
+ 0x0f, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x46, 0x52, 0x41, 0x43,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x52, 0x55, 0x45, 0x10, 0x08, 0x12, 0x18, 0x0a, 0x14, 0x52,
+ 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45,
+ 0x5f, 0x39, 0x39, 0x10, 0x09, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f,
+ 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x39, 0x35, 0x10, 0x0a, 0x12,
+ 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44, 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e,
+ 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x35, 0x30, 0x10, 0x0b, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, 0x44,
+ 0x55, 0x43, 0x45, 0x5f, 0x50, 0x45, 0x52, 0x43, 0x45, 0x4e, 0x54, 0x49, 0x4c, 0x45, 0x5f, 0x30,
+ 0x35, 0x10, 0x0c, 0x2a, 0x9e, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73,
+ 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52,
+ 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e,
+ 0x5f, 0x47, 0x54, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49,
+ 0x53, 0x4f, 0x4e, 0x5f, 0x47, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50,
+ 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x54, 0x10, 0x03, 0x12, 0x11, 0x0a, 0x0d, 0x43,
+ 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x10, 0x04, 0x12, 0x11,
+ 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f, 0x45, 0x51, 0x10,
+ 0x05, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x4f, 0x4d, 0x50, 0x41, 0x52, 0x49, 0x53, 0x4f, 0x4e, 0x5f,
+ 0x4e, 0x45, 0x10, 0x06, 0x2a, 0x61, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54,
+ 0x69, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54,
+ 0x49, 0x45, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45,
+ 0x52, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x18, 0x0a, 0x14, 0x53, 0x45, 0x52,
+ 0x56, 0x49, 0x43, 0x45, 0x5f, 0x54, 0x49, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x45, 0x4d, 0x49, 0x55,
+ 0x4d, 0x10, 0x02, 0x1a, 0x02, 0x18, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xa2, 0x02, 0x04, 0x47, 0x4d, 0x4f, 0x4e, 0xaa, 0x02, 0x1a,
+ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_common_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_common_proto_rawDescData = file_google_monitoring_v3_common_proto_rawDesc
+)
+
+func file_google_monitoring_v3_common_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_common_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_common_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_common_proto_rawDescData
+}
+
+var file_google_monitoring_v3_common_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_google_monitoring_v3_common_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_google_monitoring_v3_common_proto_goTypes = []any{
+ (ComparisonType)(0), // 0: google.monitoring.v3.ComparisonType
+ (ServiceTier)(0), // 1: google.monitoring.v3.ServiceTier
+ (Aggregation_Aligner)(0), // 2: google.monitoring.v3.Aggregation.Aligner
+ (Aggregation_Reducer)(0), // 3: google.monitoring.v3.Aggregation.Reducer
+ (*TypedValue)(nil), // 4: google.monitoring.v3.TypedValue
+ (*TimeInterval)(nil), // 5: google.monitoring.v3.TimeInterval
+ (*Aggregation)(nil), // 6: google.monitoring.v3.Aggregation
+ (*distribution.Distribution)(nil), // 7: google.api.Distribution
+ (*timestamppb.Timestamp)(nil), // 8: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 9: google.protobuf.Duration
+}
+var file_google_monitoring_v3_common_proto_depIdxs = []int32{
+ 7, // 0: google.monitoring.v3.TypedValue.distribution_value:type_name -> google.api.Distribution
+ 8, // 1: google.monitoring.v3.TimeInterval.end_time:type_name -> google.protobuf.Timestamp
+ 8, // 2: google.monitoring.v3.TimeInterval.start_time:type_name -> google.protobuf.Timestamp
+ 9, // 3: google.monitoring.v3.Aggregation.alignment_period:type_name -> google.protobuf.Duration
+ 2, // 4: google.monitoring.v3.Aggregation.per_series_aligner:type_name -> google.monitoring.v3.Aggregation.Aligner
+ 3, // 5: google.monitoring.v3.Aggregation.cross_series_reducer:type_name -> google.monitoring.v3.Aggregation.Reducer
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_common_proto_init() }
+func file_google_monitoring_v3_common_proto_init() {
+ if File_google_monitoring_v3_common_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_msgTypes[0].OneofWrappers = []any{
+ (*TypedValue_BoolValue)(nil),
+ (*TypedValue_Int64Value)(nil),
+ (*TypedValue_DoubleValue)(nil),
+ (*TypedValue_StringValue)(nil),
+ (*TypedValue_DistributionValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_common_proto_rawDesc,
+ NumEnums: 4,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_common_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_common_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_common_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_common_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_common_proto = out.File
+ file_google_monitoring_v3_common_proto_rawDesc = nil
+ file_google_monitoring_v3_common_proto_goTypes = nil
+ file_google_monitoring_v3_common_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go
new file mode 100644
index 000000000..0c3ac5a1c
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/dropped_labels.pb.go
@@ -0,0 +1,181 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/dropped_labels.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A set of (label, value) pairs that were removed from a Distribution
+// time series during aggregation and then added as an attachment to a
+// Distribution.Exemplar.
+//
+// The full label set for the exemplars is constructed by using the dropped
+// pairs in combination with the label values that remain on the aggregated
+// Distribution time series. The constructed full label set can be used to
+// identify the specific entity, such as the instance or job, which might be
+// contributing to a long-tail. However, with dropped labels, the storage
+// requirements are reduced because only the aggregated distribution values for
+// a large group of time series are stored.
+//
+// Note that there are no guarantees on ordering of the labels from
+// exemplar-to-exemplar and from distribution-to-distribution in the same
+// stream, and there may be duplicates. It is up to clients to resolve any
+// ambiguities.
+type DroppedLabels struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Map from label to its value, for all labels dropped in any aggregation.
+ Label map[string]string `protobuf:"bytes,1,rep,name=label,proto3" json:"label,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *DroppedLabels) Reset() {
+ *x = DroppedLabels{}
+ mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DroppedLabels) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DroppedLabels) ProtoMessage() {}
+
+func (x *DroppedLabels) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_dropped_labels_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DroppedLabels.ProtoReflect.Descriptor instead.
+func (*DroppedLabels) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *DroppedLabels) GetLabel() map[string]string {
+ if x != nil {
+ return x.Label
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_dropped_labels_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_dropped_labels_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x22, 0x8f, 0x01, 0x0a, 0x0d, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62,
+ 0x65, 0x6c, 0x73, 0x12, 0x44, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65,
+ 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x1a, 0x38, 0x0a, 0x0a, 0x4c, 0x61, 0x62,
+ 0x65, 0x6c, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x42, 0x12, 0x44, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a,
+ 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_dropped_labels_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_dropped_labels_proto_rawDescData = file_google_monitoring_v3_dropped_labels_proto_rawDesc
+)
+
+func file_google_monitoring_v3_dropped_labels_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_dropped_labels_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_dropped_labels_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_dropped_labels_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_dropped_labels_proto_rawDescData
+}
+
+var file_google_monitoring_v3_dropped_labels_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_monitoring_v3_dropped_labels_proto_goTypes = []any{
+ (*DroppedLabels)(nil), // 0: google.monitoring.v3.DroppedLabels
+ nil, // 1: google.monitoring.v3.DroppedLabels.LabelEntry
+}
+var file_google_monitoring_v3_dropped_labels_proto_depIdxs = []int32{
+ 1, // 0: google.monitoring.v3.DroppedLabels.label:type_name -> google.monitoring.v3.DroppedLabels.LabelEntry
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_dropped_labels_proto_init() }
+func file_google_monitoring_v3_dropped_labels_proto_init() {
+ if File_google_monitoring_v3_dropped_labels_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_dropped_labels_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_dropped_labels_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_dropped_labels_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_dropped_labels_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_dropped_labels_proto = out.File
+ file_google_monitoring_v3_dropped_labels_proto_rawDesc = nil
+ file_google_monitoring_v3_dropped_labels_proto_goTypes = nil
+ file_google_monitoring_v3_dropped_labels_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go
new file mode 100644
index 000000000..c35046ac7
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group.pb.go
@@ -0,0 +1,249 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/group.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The description of a dynamic collection of monitored resources. Each group
+// has a filter that is matched against monitored resources and their associated
+// metadata. If a group's filter matches an available monitored resource, then
+// that resource is a member of that group. Groups can contain any number of
+// monitored resources, and each monitored resource can be a member of any
+// number of groups.
+//
+// Groups can be nested in parent-child hierarchies. The `parentName` field
+// identifies an optional parent for each group. If a group has a parent, then
+// the only monitored resources available to be matched by the group's filter
+// are the resources contained in the parent group. In other words, a group
+// contains the monitored resources that match its filter and the filters of all
+// the group's ancestors. A group without a parent can contain any monitored
+// resource.
+//
+// For example, consider an infrastructure running a set of instances with two
+// user-defined tags: `"environment"` and `"role"`. A parent group has a filter,
+// `environment="production"`. A child of that parent group has a filter,
+// `role="transcoder"`. The parent group contains all instances in the
+// production environment, regardless of their roles. The child group contains
+// instances that have the transcoder role *and* are in the production
+// environment.
+//
+// The monitored resources contained in a group can change at any moment,
+// depending on what resources exist and what filters are associated with the
+// group and its ancestors.
+type Group struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The name of this group. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // When creating a group, this field is ignored and a new name is created
+ // consisting of the project specified in the call to `CreateGroup`
+ // and a unique `[GROUP_ID]` that is generated automatically.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A user-assigned name for this group, used only for display purposes.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The name of the group's parent, if it has one. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // For groups with no parent, `parent_name` is the empty string, `""`.
+ ParentName string `protobuf:"bytes,3,opt,name=parent_name,json=parentName,proto3" json:"parent_name,omitempty"`
+ // The filter used to determine which monitored resources belong to this
+ // group.
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // If true, the members of this group are considered to be a cluster.
+ // The system can perform additional analysis on groups that are clusters.
+ IsCluster bool `protobuf:"varint,6,opt,name=is_cluster,json=isCluster,proto3" json:"is_cluster,omitempty"`
+}
+
+func (x *Group) Reset() {
+ *x = Group{}
+ mi := &file_google_monitoring_v3_group_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Group) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Group) ProtoMessage() {}
+
+func (x *Group) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Group.ProtoReflect.Descriptor instead.
+func (*Group) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Group) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Group) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *Group) GetParentName() string {
+ if x != nil {
+ return x.ParentName
+ }
+ return ""
+}
+
+func (x *Group) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *Group) GetIsCluster() bool {
+ if x != nil {
+ return x.IsCluster
+ }
+ return false
+}
+
+var File_google_monitoring_v3_group_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_group_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x12, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1d, 0x0a,
+ 0x0a, 0x69, 0x73, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x09, 0x69, 0x73, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x99, 0x01, 0xea,
+ 0x41, 0x95, 0x01, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f,
+ 0x7b, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x2b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67, 0x72,
+ 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x1f, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66,
+ 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x7b, 0x67,
+ 0x72, 0x6f, 0x75, 0x70, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0xc5, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33,
+ 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_group_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_group_proto_rawDescData = file_google_monitoring_v3_group_proto_rawDesc
+)
+
+func file_google_monitoring_v3_group_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_group_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_group_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_group_proto_rawDescData
+}
+
+var file_google_monitoring_v3_group_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_monitoring_v3_group_proto_goTypes = []any{
+ (*Group)(nil), // 0: google.monitoring.v3.Group
+}
+var file_google_monitoring_v3_group_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_group_proto_init() }
+func file_google_monitoring_v3_group_proto_init() {
+ if File_google_monitoring_v3_group_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_group_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_group_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_group_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_group_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_group_proto = out.File
+ file_google_monitoring_v3_group_proto_rawDesc = nil
+ file_google_monitoring_v3_group_proto_goTypes = nil
+ file_google_monitoring_v3_group_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go
new file mode 100644
index 000000000..fbdf9ef54
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/group_service.pb.go
@@ -0,0 +1,1205 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/group_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `ListGroup` request.
+type ListGroupsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
+ // groups are to be listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional filter consisting of a single group name. The filters limit
+ // the groups returned based on their parent-child relationship with the
+ // specified group. If no filter is specified, all groups are returned.
+ //
+ // Types that are assignable to Filter:
+ //
+ // *ListGroupsRequest_ChildrenOfGroup
+ // *ListGroupsRequest_AncestorsOfGroup
+ // *ListGroupsRequest_DescendantsOfGroup
+ Filter isListGroupsRequest_Filter `protobuf_oneof:"filter"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,5,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `next_page_token` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,6,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListGroupsRequest) Reset() {
+ *x = ListGroupsRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListGroupsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListGroupsRequest) ProtoMessage() {}
+
+func (x *ListGroupsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListGroupsRequest.ProtoReflect.Descriptor instead.
+func (*ListGroupsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ListGroupsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *ListGroupsRequest) GetFilter() isListGroupsRequest_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (x *ListGroupsRequest) GetChildrenOfGroup() string {
+ if x, ok := x.GetFilter().(*ListGroupsRequest_ChildrenOfGroup); ok {
+ return x.ChildrenOfGroup
+ }
+ return ""
+}
+
+func (x *ListGroupsRequest) GetAncestorsOfGroup() string {
+ if x, ok := x.GetFilter().(*ListGroupsRequest_AncestorsOfGroup); ok {
+ return x.AncestorsOfGroup
+ }
+ return ""
+}
+
+func (x *ListGroupsRequest) GetDescendantsOfGroup() string {
+ if x, ok := x.GetFilter().(*ListGroupsRequest_DescendantsOfGroup); ok {
+ return x.DescendantsOfGroup
+ }
+ return ""
+}
+
+func (x *ListGroupsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListGroupsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+type isListGroupsRequest_Filter interface {
+ isListGroupsRequest_Filter()
+}
+
+type ListGroupsRequest_ChildrenOfGroup struct {
+ // A group name. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // Returns groups whose `parent_name` field contains the group
+ // name. If no groups have this parent, the results are empty.
+ ChildrenOfGroup string `protobuf:"bytes,2,opt,name=children_of_group,json=childrenOfGroup,proto3,oneof"`
+}
+
+type ListGroupsRequest_AncestorsOfGroup struct {
+ // A group name. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // Returns groups that are ancestors of the specified group.
+ // The groups are returned in order, starting with the immediate parent and
+ // ending with the most distant ancestor. If the specified group has no
+ // immediate parent, the results are empty.
+ AncestorsOfGroup string `protobuf:"bytes,3,opt,name=ancestors_of_group,json=ancestorsOfGroup,proto3,oneof"`
+}
+
+type ListGroupsRequest_DescendantsOfGroup struct {
+ // A group name. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ //
+ // Returns the descendants of the specified group. This is a superset of
+ // the results returned by the `children_of_group` filter, and includes
+ // children-of-children, and so forth.
+ DescendantsOfGroup string `protobuf:"bytes,4,opt,name=descendants_of_group,json=descendantsOfGroup,proto3,oneof"`
+}
+
+func (*ListGroupsRequest_ChildrenOfGroup) isListGroupsRequest_Filter() {}
+
+func (*ListGroupsRequest_AncestorsOfGroup) isListGroupsRequest_Filter() {}
+
+func (*ListGroupsRequest_DescendantsOfGroup) isListGroupsRequest_Filter() {}
+
+// The `ListGroups` response.
+type ListGroupsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The groups that match the specified filters.
+ Group []*Group `protobuf:"bytes,1,rep,name=group,proto3" json:"group,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListGroupsResponse) Reset() {
+ *x = ListGroupsResponse{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListGroupsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListGroupsResponse) ProtoMessage() {}
+
+func (x *ListGroupsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListGroupsResponse.ProtoReflect.Descriptor instead.
+func (*ListGroupsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListGroupsResponse) GetGroup() []*Group {
+ if x != nil {
+ return x.Group
+ }
+ return nil
+}
+
+func (x *ListGroupsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `GetGroup` request.
+type GetGroupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The group to retrieve. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetGroupRequest) Reset() {
+ *x = GetGroupRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetGroupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetGroupRequest) ProtoMessage() {}
+
+func (x *GetGroupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetGroupRequest.ProtoReflect.Descriptor instead.
+func (*GetGroupRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GetGroupRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `CreateGroup` request.
+type CreateGroupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+ // to create the group. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. A group definition. It is an error to define the `name` field
+ // because the system assigns the name.
+ Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
+ // If true, validate this request but do not create the group.
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+}
+
+func (x *CreateGroupRequest) Reset() {
+ *x = CreateGroupRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateGroupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateGroupRequest) ProtoMessage() {}
+
+func (x *CreateGroupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateGroupRequest.ProtoReflect.Descriptor instead.
+func (*CreateGroupRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *CreateGroupRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateGroupRequest) GetGroup() *Group {
+ if x != nil {
+ return x.Group
+ }
+ return nil
+}
+
+func (x *CreateGroupRequest) GetValidateOnly() bool {
+ if x != nil {
+ return x.ValidateOnly
+ }
+ return false
+}
+
+// The `UpdateGroup` request.
+type UpdateGroupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The new definition of the group. All fields of the existing
+ // group, excepting `name`, are replaced with the corresponding fields of this
+ // group.
+ Group *Group `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"`
+ // If true, validate this request but do not update the existing group.
+ ValidateOnly bool `protobuf:"varint,3,opt,name=validate_only,json=validateOnly,proto3" json:"validate_only,omitempty"`
+}
+
+func (x *UpdateGroupRequest) Reset() {
+ *x = UpdateGroupRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateGroupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateGroupRequest) ProtoMessage() {}
+
+func (x *UpdateGroupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateGroupRequest.ProtoReflect.Descriptor instead.
+func (*UpdateGroupRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateGroupRequest) GetGroup() *Group {
+ if x != nil {
+ return x.Group
+ }
+ return nil
+}
+
+func (x *UpdateGroupRequest) GetValidateOnly() bool {
+ if x != nil {
+ return x.ValidateOnly
+ }
+ return false
+}
+
+// The `DeleteGroup` request. The default behavior is to be able to delete a
+// single group without any descendants.
+type DeleteGroupRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The group to delete. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // If this field is true, then the request means to delete a group with all
+ // its descendants. Otherwise, the request means to delete a group only when
+ // it has no descendants. The default value is false.
+ Recursive bool `protobuf:"varint,4,opt,name=recursive,proto3" json:"recursive,omitempty"`
+}
+
+func (x *DeleteGroupRequest) Reset() {
+ *x = DeleteGroupRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteGroupRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteGroupRequest) ProtoMessage() {}
+
+func (x *DeleteGroupRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteGroupRequest.ProtoReflect.Descriptor instead.
+func (*DeleteGroupRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteGroupRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *DeleteGroupRequest) GetRecursive() bool {
+ if x != nil {
+ return x.Recursive
+ }
+ return false
+}
+
+// The `ListGroupMembers` request.
+type ListGroupMembersRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The group whose members are listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]
+ Name string `protobuf:"bytes,7,opt,name=name,proto3" json:"name,omitempty"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `next_page_token` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // An optional [list
+ // filter](https://cloud.google.com/monitoring/api/learn_more#filtering)
+ // describing the members to be returned. The filter may reference the type,
+ // labels, and metadata of monitored resources that comprise the group. For
+ // example, to return only resources representing Compute Engine VM instances,
+ // use this filter:
+ //
+ // `resource.type = "gce_instance"`
+ Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
+ // An optional time interval for which results should be returned. Only
+ // members that were part of the group during the specified interval are
+ // included in the response. If no interval is provided then the group
+ // membership over the last minute is returned.
+ Interval *TimeInterval `protobuf:"bytes,6,opt,name=interval,proto3" json:"interval,omitempty"`
+}
+
+func (x *ListGroupMembersRequest) Reset() {
+ *x = ListGroupMembersRequest{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListGroupMembersRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListGroupMembersRequest) ProtoMessage() {}
+
+func (x *ListGroupMembersRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListGroupMembersRequest.ProtoReflect.Descriptor instead.
+func (*ListGroupMembersRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListGroupMembersRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListGroupMembersRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListGroupMembersRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListGroupMembersRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListGroupMembersRequest) GetInterval() *TimeInterval {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+// The `ListGroupMembers` response.
+type ListGroupMembersResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A set of monitored resources in the group.
+ Members []*monitoredres.MonitoredResource `protobuf:"bytes,1,rep,name=members,proto3" json:"members,omitempty"`
+ // If there are more results than have been returned, then this field is
+ // set to a non-empty value. To see the additional results, use that value as
+ // `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of elements matching this request.
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+}
+
+func (x *ListGroupMembersResponse) Reset() {
+ *x = ListGroupMembersResponse{}
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListGroupMembersResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListGroupMembersResponse) ProtoMessage() {}
+
+func (x *ListGroupMembersResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_group_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListGroupMembersResponse.ProtoReflect.Descriptor instead.
+func (*ListGroupMembersResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_group_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ListGroupMembersResponse) GetMembers() []*monitoredres.MonitoredResource {
+ if x != nil {
+ return x.Members
+ }
+ return nil
+}
+
+func (x *ListGroupMembersResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListGroupMembersResponse) GetTotalSize() int32 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+var File_google_monitoring_v3_group_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_group_service_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76,
+ 0x33, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65,
+ 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x03, 0x0a, 0x11, 0x4c,
+ 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x52, 0x0a,
+ 0x11, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00,
+ 0x52, 0x0f, 0x63, 0x68, 0x69, 0x6c, 0x64, 0x72, 0x65, 0x6e, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x12, 0x54, 0x0a, 0x12, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x6f,
+ 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa,
+ 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72,
+ 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x10, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x73,
+ 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x58, 0x0a, 0x14, 0x64, 0x65, 0x73, 0x63, 0x65,
+ 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x24, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x12, 0x64,
+ 0x65, 0x73, 0x63, 0x65, 0x6e, 0x64, 0x61, 0x6e, 0x74, 0x73, 0x4f, 0x66, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x08, 0x0a,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a,
+ 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70,
+ 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x4e, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f,
+ 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xae, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x21, 0x12, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x67,
+ 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x71, 0x0a, 0x12, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x36, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x23, 0x0a, 0x0d, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x22, 0x6f, 0x0a, 0x12,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a, 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x1c, 0x0a, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x09, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0xea, 0x01,
+ 0x0a, 0x17, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65,
+ 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x27, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x21, 0x0a,
+ 0x1f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x47, 0x72, 0x6f, 0x75, 0x70,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x08, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c,
+ 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x9a, 0x01, 0x0a, 0x18, 0x4c,
+ 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73,
+ 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50,
+ 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61,
+ 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f,
+ 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x32, 0x98, 0x08, 0x0a, 0x0c, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73,
+ 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0xda, 0x41, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x7d, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x47, 0x72,
+ 0x6f, 0x75, 0x70, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x47, 0x72,
+ 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x2d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x12, 0x1e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f,
+ 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x38, 0xda,
+ 0x41, 0x0a, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x25, 0x3a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x1c, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x3b,
+ 0xda, 0x41, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x3a, 0x05,
+ 0x67, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x24, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x67, 0x72, 0x6f, 0x75,
+ 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0b, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2d, 0xda, 0x41,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x2a, 0x1e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x10,
+ 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73,
+ 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75,
+ 0x70, 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70,
+ 0x4d, 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
+ 0x35, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26,
+ 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d,
+ 0x65, 0x6d, 0x62, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
+ 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70,
+ 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f,
+ 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75,
+ 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65,
+ 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42,
+ 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56,
+ 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56,
+ 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_group_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_group_service_proto_rawDescData = file_google_monitoring_v3_group_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_group_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_group_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_group_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_group_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_group_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_group_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_google_monitoring_v3_group_service_proto_goTypes = []any{
+ (*ListGroupsRequest)(nil), // 0: google.monitoring.v3.ListGroupsRequest
+ (*ListGroupsResponse)(nil), // 1: google.monitoring.v3.ListGroupsResponse
+ (*GetGroupRequest)(nil), // 2: google.monitoring.v3.GetGroupRequest
+ (*CreateGroupRequest)(nil), // 3: google.monitoring.v3.CreateGroupRequest
+ (*UpdateGroupRequest)(nil), // 4: google.monitoring.v3.UpdateGroupRequest
+ (*DeleteGroupRequest)(nil), // 5: google.monitoring.v3.DeleteGroupRequest
+ (*ListGroupMembersRequest)(nil), // 6: google.monitoring.v3.ListGroupMembersRequest
+ (*ListGroupMembersResponse)(nil), // 7: google.monitoring.v3.ListGroupMembersResponse
+ (*Group)(nil), // 8: google.monitoring.v3.Group
+ (*TimeInterval)(nil), // 9: google.monitoring.v3.TimeInterval
+ (*monitoredres.MonitoredResource)(nil), // 10: google.api.MonitoredResource
+ (*emptypb.Empty)(nil), // 11: google.protobuf.Empty
+}
+var file_google_monitoring_v3_group_service_proto_depIdxs = []int32{
+ 8, // 0: google.monitoring.v3.ListGroupsResponse.group:type_name -> google.monitoring.v3.Group
+ 8, // 1: google.monitoring.v3.CreateGroupRequest.group:type_name -> google.monitoring.v3.Group
+ 8, // 2: google.monitoring.v3.UpdateGroupRequest.group:type_name -> google.monitoring.v3.Group
+ 9, // 3: google.monitoring.v3.ListGroupMembersRequest.interval:type_name -> google.monitoring.v3.TimeInterval
+ 10, // 4: google.monitoring.v3.ListGroupMembersResponse.members:type_name -> google.api.MonitoredResource
+ 0, // 5: google.monitoring.v3.GroupService.ListGroups:input_type -> google.monitoring.v3.ListGroupsRequest
+ 2, // 6: google.monitoring.v3.GroupService.GetGroup:input_type -> google.monitoring.v3.GetGroupRequest
+ 3, // 7: google.monitoring.v3.GroupService.CreateGroup:input_type -> google.monitoring.v3.CreateGroupRequest
+ 4, // 8: google.monitoring.v3.GroupService.UpdateGroup:input_type -> google.monitoring.v3.UpdateGroupRequest
+ 5, // 9: google.monitoring.v3.GroupService.DeleteGroup:input_type -> google.monitoring.v3.DeleteGroupRequest
+ 6, // 10: google.monitoring.v3.GroupService.ListGroupMembers:input_type -> google.monitoring.v3.ListGroupMembersRequest
+ 1, // 11: google.monitoring.v3.GroupService.ListGroups:output_type -> google.monitoring.v3.ListGroupsResponse
+ 8, // 12: google.monitoring.v3.GroupService.GetGroup:output_type -> google.monitoring.v3.Group
+ 8, // 13: google.monitoring.v3.GroupService.CreateGroup:output_type -> google.monitoring.v3.Group
+ 8, // 14: google.monitoring.v3.GroupService.UpdateGroup:output_type -> google.monitoring.v3.Group
+ 11, // 15: google.monitoring.v3.GroupService.DeleteGroup:output_type -> google.protobuf.Empty
+ 7, // 16: google.monitoring.v3.GroupService.ListGroupMembers:output_type -> google.monitoring.v3.ListGroupMembersResponse
+ 11, // [11:17] is the sub-list for method output_type
+ 5, // [5:11] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_group_service_proto_init() }
+func file_google_monitoring_v3_group_service_proto_init() {
+ if File_google_monitoring_v3_group_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_group_proto_init()
+ file_google_monitoring_v3_group_service_proto_msgTypes[0].OneofWrappers = []any{
+ (*ListGroupsRequest_ChildrenOfGroup)(nil),
+ (*ListGroupsRequest_AncestorsOfGroup)(nil),
+ (*ListGroupsRequest_DescendantsOfGroup)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_group_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_group_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_group_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_group_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_group_service_proto = out.File
+ file_google_monitoring_v3_group_service_proto_rawDesc = nil
+ file_google_monitoring_v3_group_service_proto_goTypes = nil
+ file_google_monitoring_v3_group_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// GroupServiceClient is the client API for GroupService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type GroupServiceClient interface {
+ // Lists the existing groups.
+ ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error)
+ // Gets a single group.
+ GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Creates a new group.
+ CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Updates an existing group.
+ // You can change any group attributes except `name`.
+ UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error)
+ // Deletes an existing group.
+ DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Lists the monitored resources that are members of a group.
+ ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error)
+}
+
+type groupServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewGroupServiceClient(cc grpc.ClientConnInterface) GroupServiceClient {
+ return &groupServiceClient{cc}
+}
+
+func (c *groupServiceClient) ListGroups(ctx context.Context, in *ListGroupsRequest, opts ...grpc.CallOption) (*ListGroupsResponse, error) {
+ out := new(ListGroupsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroups", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) GetGroup(ctx context.Context, in *GetGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/GetGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) CreateGroup(ctx context.Context, in *CreateGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/CreateGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) UpdateGroup(ctx context.Context, in *UpdateGroupRequest, opts ...grpc.CallOption) (*Group, error) {
+ out := new(Group)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/UpdateGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) DeleteGroup(ctx context.Context, in *DeleteGroupRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/DeleteGroup", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *groupServiceClient) ListGroupMembers(ctx context.Context, in *ListGroupMembersRequest, opts ...grpc.CallOption) (*ListGroupMembersResponse, error) {
+ out := new(ListGroupMembersResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.GroupService/ListGroupMembers", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// GroupServiceServer is the server API for GroupService service.
+type GroupServiceServer interface {
+ // Lists the existing groups.
+ ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error)
+ // Gets a single group.
+ GetGroup(context.Context, *GetGroupRequest) (*Group, error)
+ // Creates a new group.
+ CreateGroup(context.Context, *CreateGroupRequest) (*Group, error)
+ // Updates an existing group.
+ // You can change any group attributes except `name`.
+ UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error)
+ // Deletes an existing group.
+ DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error)
+ // Lists the monitored resources that are members of a group.
+ ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error)
+}
+
+// UnimplementedGroupServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedGroupServiceServer struct {
+}
+
+func (*UnimplementedGroupServiceServer) ListGroups(context.Context, *ListGroupsRequest) (*ListGroupsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListGroups not implemented")
+}
+func (*UnimplementedGroupServiceServer) GetGroup(context.Context, *GetGroupRequest) (*Group, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetGroup not implemented")
+}
+func (*UnimplementedGroupServiceServer) CreateGroup(context.Context, *CreateGroupRequest) (*Group, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateGroup not implemented")
+}
+func (*UnimplementedGroupServiceServer) UpdateGroup(context.Context, *UpdateGroupRequest) (*Group, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateGroup not implemented")
+}
+func (*UnimplementedGroupServiceServer) DeleteGroup(context.Context, *DeleteGroupRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteGroup not implemented")
+}
+func (*UnimplementedGroupServiceServer) ListGroupMembers(context.Context, *ListGroupMembersRequest) (*ListGroupMembersResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListGroupMembers not implemented")
+}
+
+func RegisterGroupServiceServer(s *grpc.Server, srv GroupServiceServer) {
+ s.RegisterService(&_GroupService_serviceDesc, srv)
+}
+
+func _GroupService_ListGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListGroupsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).ListGroups(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/ListGroups",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).ListGroups(ctx, req.(*ListGroupsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_GetGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).GetGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/GetGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).GetGroup(ctx, req.(*GetGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_CreateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).CreateGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/CreateGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).CreateGroup(ctx, req.(*CreateGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_UpdateGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).UpdateGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/UpdateGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).UpdateGroup(ctx, req.(*UpdateGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_DeleteGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteGroupRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).DeleteGroup(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/DeleteGroup",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).DeleteGroup(ctx, req.(*DeleteGroupRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _GroupService_ListGroupMembers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListGroupMembersRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(GroupServiceServer).ListGroupMembers(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.GroupService/ListGroupMembers",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(GroupServiceServer).ListGroupMembers(ctx, req.(*ListGroupMembersRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _GroupService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.GroupService",
+ HandlerType: (*GroupServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListGroups",
+ Handler: _GroupService_ListGroups_Handler,
+ },
+ {
+ MethodName: "GetGroup",
+ Handler: _GroupService_GetGroup_Handler,
+ },
+ {
+ MethodName: "CreateGroup",
+ Handler: _GroupService_CreateGroup_Handler,
+ },
+ {
+ MethodName: "UpdateGroup",
+ Handler: _GroupService_UpdateGroup_Handler,
+ },
+ {
+ MethodName: "DeleteGroup",
+ Handler: _GroupService_DeleteGroup_Handler,
+ },
+ {
+ MethodName: "ListGroupMembers",
+ Handler: _GroupService_ListGroupMembers_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/group_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go
new file mode 100644
index 000000000..ae7eea5b6
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric.pb.go
@@ -0,0 +1,1067 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/metric.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ label "google.golang.org/genproto/googleapis/api/label"
+ metric "google.golang.org/genproto/googleapis/api/metric"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A single data point in a time series.
+type Point struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The time interval to which the data point applies. For `GAUGE` metrics,
+ // the start time is optional, but if it is supplied, it must equal the
+ // end time. For `DELTA` metrics, the start
+ // and end time should specify a non-zero interval, with subsequent points
+ // specifying contiguous and non-overlapping intervals. For `CUMULATIVE`
+ // metrics, the start and end time should specify a non-zero interval, with
+ // subsequent points specifying the same start time and increasing end times,
+ // until an event resets the cumulative value to zero and sets a new start
+ // time for the following points.
+ Interval *TimeInterval `protobuf:"bytes,1,opt,name=interval,proto3" json:"interval,omitempty"`
+ // The value of the data point.
+ Value *TypedValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *Point) Reset() {
+ *x = Point{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Point) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Point) ProtoMessage() {}
+
+func (x *Point) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Point.ProtoReflect.Descriptor instead.
+func (*Point) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Point) GetInterval() *TimeInterval {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *Point) GetValue() *TypedValue {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// A collection of data points that describes the time-varying values
+// of a metric. A time series is identified by a combination of a
+// fully-specified monitored resource and a fully-specified metric.
+// This type is used for both listing and creating time series.
+type TimeSeries struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The associated metric. A fully-specified metric used to identify the time
+ // series.
+ Metric *metric.Metric `protobuf:"bytes,1,opt,name=metric,proto3" json:"metric,omitempty"`
+ // The associated monitored resource. Custom metrics can use only certain
+ // monitored resource types in their time series data. For more information,
+ // see [Monitored resources for custom
+ // metrics](https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources).
+ Resource *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Output only. The associated monitored resource metadata. When reading a
+ // time series, this field will include metadata labels that are explicitly
+ // named in the reduction. When creating a time series, this field is ignored.
+ Metadata *monitoredres.MonitoredResourceMetadata `protobuf:"bytes,7,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // The metric kind of the time series. When listing time series, this metric
+ // kind might be different from the metric kind of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the metric kind of the associated metric. If the associated
+ // metric's descriptor must be auto-created, then this field specifies the
+ // metric kind of the new descriptor and must be either `GAUGE` (the default)
+ // or `CUMULATIVE`.
+ MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
+ // The value type of the time series. When listing time series, this value
+ // type might be different from the value type of the associated metric if
+ // this time series is an alignment or reduction of other time series.
+ //
+ // When creating a time series, this field is optional. If present, it must be
+ // the same as the type of the data in the `points` field.
+ ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,4,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
+ // The data points of this time series. When listing time series, points are
+ // returned in reverse time order.
+ //
+ // When creating a time series, this field must contain exactly one point and
+ // the point's type must be the same as the value type of the associated
+ // metric. If the associated metric's descriptor must be auto-created, then
+ // the value type of the descriptor is determined by the point's type, which
+ // must be `BOOL`, `INT64`, `DOUBLE`, or `DISTRIBUTION`.
+ Points []*Point `protobuf:"bytes,5,rep,name=points,proto3" json:"points,omitempty"`
+ // The units in which the metric value is reported. It is only applicable
+ // if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`. The `unit`
+ // defines the representation of the stored metric values. This field can only
+ // be changed through CreateTimeSeries when it is empty.
+ Unit string `protobuf:"bytes,8,opt,name=unit,proto3" json:"unit,omitempty"`
+ // Input only. A detailed description of the time series that will be
+ // associated with the
+ // [google.api.MetricDescriptor][google.api.MetricDescriptor] for the metric.
+ // Once set, this field cannot be changed through CreateTimeSeries.
+ Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *TimeSeries) Reset() {
+ *x = TimeSeries{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeries) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeries) ProtoMessage() {}
+
+func (x *TimeSeries) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeries.ProtoReflect.Descriptor instead.
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *TimeSeries) GetMetric() *metric.Metric {
+ if x != nil {
+ return x.Metric
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetResource() *monitoredres.MonitoredResource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetMetadata() *monitoredres.MonitoredResourceMetadata {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetMetricKind() metric.MetricDescriptor_MetricKind {
+ if x != nil {
+ return x.MetricKind
+ }
+ return metric.MetricDescriptor_MetricKind(0)
+}
+
+func (x *TimeSeries) GetValueType() metric.MetricDescriptor_ValueType {
+ if x != nil {
+ return x.ValueType
+ }
+ return metric.MetricDescriptor_ValueType(0)
+}
+
+func (x *TimeSeries) GetPoints() []*Point {
+ if x != nil {
+ return x.Points
+ }
+ return nil
+}
+
+func (x *TimeSeries) GetUnit() string {
+ if x != nil {
+ return x.Unit
+ }
+ return ""
+}
+
+func (x *TimeSeries) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+// A descriptor for the labels and points in a time series.
+type TimeSeriesDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Descriptors for the labels.
+ LabelDescriptors []*label.LabelDescriptor `protobuf:"bytes,1,rep,name=label_descriptors,json=labelDescriptors,proto3" json:"label_descriptors,omitempty"`
+ // Descriptors for the point data value columns.
+ PointDescriptors []*TimeSeriesDescriptor_ValueDescriptor `protobuf:"bytes,5,rep,name=point_descriptors,json=pointDescriptors,proto3" json:"point_descriptors,omitempty"`
+}
+
+func (x *TimeSeriesDescriptor) Reset() {
+ *x = TimeSeriesDescriptor{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesDescriptor) ProtoMessage() {}
+
+func (x *TimeSeriesDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesDescriptor.ProtoReflect.Descriptor instead.
+func (*TimeSeriesDescriptor) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *TimeSeriesDescriptor) GetLabelDescriptors() []*label.LabelDescriptor {
+ if x != nil {
+ return x.LabelDescriptors
+ }
+ return nil
+}
+
+func (x *TimeSeriesDescriptor) GetPointDescriptors() []*TimeSeriesDescriptor_ValueDescriptor {
+ if x != nil {
+ return x.PointDescriptors
+ }
+ return nil
+}
+
+// Represents the values of a time series associated with a
+// TimeSeriesDescriptor.
+type TimeSeriesData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The values of the labels in the time series identifier, given in the same
+ // order as the `label_descriptors` field of the TimeSeriesDescriptor
+ // associated with this object. Each value must have a value of the type
+ // given in the corresponding entry of `label_descriptors`.
+ LabelValues []*LabelValue `protobuf:"bytes,1,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
+ // The points in the time series.
+ PointData []*TimeSeriesData_PointData `protobuf:"bytes,2,rep,name=point_data,json=pointData,proto3" json:"point_data,omitempty"`
+}
+
+func (x *TimeSeriesData) Reset() {
+ *x = TimeSeriesData{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesData) ProtoMessage() {}
+
+func (x *TimeSeriesData) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesData.ProtoReflect.Descriptor instead.
+func (*TimeSeriesData) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *TimeSeriesData) GetLabelValues() []*LabelValue {
+ if x != nil {
+ return x.LabelValues
+ }
+ return nil
+}
+
+func (x *TimeSeriesData) GetPointData() []*TimeSeriesData_PointData {
+ if x != nil {
+ return x.PointData
+ }
+ return nil
+}
+
+// A label value.
+type LabelValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The label value can be a bool, int64, or string.
+ //
+ // Types that are assignable to Value:
+ //
+ // *LabelValue_BoolValue
+ // *LabelValue_Int64Value
+ // *LabelValue_StringValue
+ Value isLabelValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *LabelValue) Reset() {
+ *x = LabelValue{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *LabelValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelValue) ProtoMessage() {}
+
+func (x *LabelValue) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelValue.ProtoReflect.Descriptor instead.
+func (*LabelValue) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{4}
+}
+
+func (m *LabelValue) GetValue() isLabelValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *LabelValue) GetBoolValue() bool {
+ if x, ok := x.GetValue().(*LabelValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *LabelValue) GetInt64Value() int64 {
+ if x, ok := x.GetValue().(*LabelValue_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (x *LabelValue) GetStringValue() string {
+ if x, ok := x.GetValue().(*LabelValue_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+type isLabelValue_Value interface {
+ isLabelValue_Value()
+}
+
+type LabelValue_BoolValue struct {
+ // A bool label value.
+ BoolValue bool `protobuf:"varint,1,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type LabelValue_Int64Value struct {
+ // An int64 label value.
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type LabelValue_StringValue struct {
+ // A string label value.
+ StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+func (*LabelValue_BoolValue) isLabelValue_Value() {}
+
+func (*LabelValue_Int64Value) isLabelValue_Value() {}
+
+func (*LabelValue_StringValue) isLabelValue_Value() {}
+
+// An error associated with a query in the time series query language format.
+type QueryError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The location of the time series query language text that this error applies
+ // to.
+ Locator *TextLocator `protobuf:"bytes,1,opt,name=locator,proto3" json:"locator,omitempty"`
+ // The error message.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *QueryError) Reset() {
+ *x = QueryError{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryError) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryError) ProtoMessage() {}
+
+func (x *QueryError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryError.ProtoReflect.Descriptor instead.
+func (*QueryError) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *QueryError) GetLocator() *TextLocator {
+ if x != nil {
+ return x.Locator
+ }
+ return nil
+}
+
+func (x *QueryError) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+// A locator for text. Indicates a particular part of the text of a request or
+// of an object referenced in the request.
+//
+// For example, suppose the request field `text` contains:
+//
+// text: "The quick brown fox jumps over the lazy dog."
+//
+// Then the locator:
+//
+// source: "text"
+// start_position {
+// line: 1
+// column: 17
+// }
+// end_position {
+// line: 1
+// column: 19
+// }
+//
+// refers to the part of the text: "fox".
+type TextLocator struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The source of the text. The source may be a field in the request, in which
+ // case its format is the format of the
+ // google.rpc.BadRequest.FieldViolation.field field in
+ // https://cloud.google.com/apis/design/errors#error_details. It may also be
+ // be a source other than the request field (e.g. a macro definition
+ // referenced in the text of the query), in which case this is the name of
+ // the source (e.g. the macro name).
+ Source string `protobuf:"bytes,1,opt,name=source,proto3" json:"source,omitempty"`
+ // The position of the first byte within the text.
+ StartPosition *TextLocator_Position `protobuf:"bytes,2,opt,name=start_position,json=startPosition,proto3" json:"start_position,omitempty"`
+ // The position of the last byte within the text.
+ EndPosition *TextLocator_Position `protobuf:"bytes,3,opt,name=end_position,json=endPosition,proto3" json:"end_position,omitempty"`
+ // If `source`, `start_position`, and `end_position` describe a call on
+ // some object (e.g. a macro in the time series query language text) and a
+ // location is to be designated in that object's text, `nested_locator`
+ // identifies the location within that object.
+ NestedLocator *TextLocator `protobuf:"bytes,4,opt,name=nested_locator,json=nestedLocator,proto3" json:"nested_locator,omitempty"`
+ // When `nested_locator` is set, this field gives the reason for the nesting.
+ // Usually, the reason is a macro invocation. In that case, the macro name
+ // (including the leading '@') signals the location of the macro call
+ // in the text and a macro argument name (including the leading '$') signals
+ // the location of the macro argument inside the macro body that got
+ // substituted away.
+ NestingReason string `protobuf:"bytes,5,opt,name=nesting_reason,json=nestingReason,proto3" json:"nesting_reason,omitempty"`
+}
+
+func (x *TextLocator) Reset() {
+ *x = TextLocator{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TextLocator) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TextLocator) ProtoMessage() {}
+
+func (x *TextLocator) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TextLocator.ProtoReflect.Descriptor instead.
+func (*TextLocator) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *TextLocator) GetSource() string {
+ if x != nil {
+ return x.Source
+ }
+ return ""
+}
+
+func (x *TextLocator) GetStartPosition() *TextLocator_Position {
+ if x != nil {
+ return x.StartPosition
+ }
+ return nil
+}
+
+func (x *TextLocator) GetEndPosition() *TextLocator_Position {
+ if x != nil {
+ return x.EndPosition
+ }
+ return nil
+}
+
+func (x *TextLocator) GetNestedLocator() *TextLocator {
+ if x != nil {
+ return x.NestedLocator
+ }
+ return nil
+}
+
+func (x *TextLocator) GetNestingReason() string {
+ if x != nil {
+ return x.NestingReason
+ }
+ return ""
+}
+
+// A descriptor for the value columns in a data point.
+type TimeSeriesDescriptor_ValueDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The value key.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value type.
+ ValueType metric.MetricDescriptor_ValueType `protobuf:"varint,2,opt,name=value_type,json=valueType,proto3,enum=google.api.MetricDescriptor_ValueType" json:"value_type,omitempty"`
+ // The value stream kind.
+ MetricKind metric.MetricDescriptor_MetricKind `protobuf:"varint,3,opt,name=metric_kind,json=metricKind,proto3,enum=google.api.MetricDescriptor_MetricKind" json:"metric_kind,omitempty"`
+ // The unit in which `time_series` point values are reported. `unit`
+ // follows the UCUM format for units as seen in
+ // https://unitsofmeasure.org/ucum.html.
+ // `unit` is only valid if `value_type` is INTEGER, DOUBLE, DISTRIBUTION.
+ Unit string `protobuf:"bytes,4,opt,name=unit,proto3" json:"unit,omitempty"`
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) Reset() {
+ *x = TimeSeriesDescriptor_ValueDescriptor{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesDescriptor_ValueDescriptor) ProtoMessage() {}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesDescriptor_ValueDescriptor.ProtoReflect.Descriptor instead.
+func (*TimeSeriesDescriptor_ValueDescriptor) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) GetValueType() metric.MetricDescriptor_ValueType {
+ if x != nil {
+ return x.ValueType
+ }
+ return metric.MetricDescriptor_ValueType(0)
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) GetMetricKind() metric.MetricDescriptor_MetricKind {
+ if x != nil {
+ return x.MetricKind
+ }
+ return metric.MetricDescriptor_MetricKind(0)
+}
+
+func (x *TimeSeriesDescriptor_ValueDescriptor) GetUnit() string {
+ if x != nil {
+ return x.Unit
+ }
+ return ""
+}
+
+// A point's value columns and time interval. Each point has one or more
+// point values corresponding to the entries in `point_descriptors` field in
+// the TimeSeriesDescriptor associated with this object.
+type TimeSeriesData_PointData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The values that make up the point.
+ Values []*TypedValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ // The time interval associated with the point.
+ TimeInterval *TimeInterval `protobuf:"bytes,2,opt,name=time_interval,json=timeInterval,proto3" json:"time_interval,omitempty"`
+}
+
+func (x *TimeSeriesData_PointData) Reset() {
+ *x = TimeSeriesData_PointData{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesData_PointData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesData_PointData) ProtoMessage() {}
+
+func (x *TimeSeriesData_PointData) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesData_PointData.ProtoReflect.Descriptor instead.
+func (*TimeSeriesData_PointData) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *TimeSeriesData_PointData) GetValues() []*TypedValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+func (x *TimeSeriesData_PointData) GetTimeInterval() *TimeInterval {
+ if x != nil {
+ return x.TimeInterval
+ }
+ return nil
+}
+
+// The position of a byte within the text.
+type TextLocator_Position struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The line, starting with 1, where the byte is positioned.
+ Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"`
+ // The column within the line, starting with 1, where the byte is
+ // positioned. This is a byte index even though the text is UTF-8.
+ Column int32 `protobuf:"varint,2,opt,name=column,proto3" json:"column,omitempty"`
+}
+
+func (x *TextLocator_Position) Reset() {
+ *x = TextLocator_Position{}
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TextLocator_Position) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TextLocator_Position) ProtoMessage() {}
+
+func (x *TextLocator_Position) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TextLocator_Position.ProtoReflect.Descriptor instead.
+func (*TextLocator_Position) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *TextLocator_Position) GetLine() int32 {
+ if x != nil {
+ return x.Line
+ }
+ return 0
+}
+
+func (x *TextLocator_Position) GetColumn() int32 {
+ if x != nil {
+ return x.Column
+ }
+ return 0
+}
+
+var File_google_monitoring_v3_metric_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_metric_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64,
+ 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x22, 0x7f, 0x0a, 0x05, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x3e, 0x0a, 0x08, 0x69,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x36, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x22, 0xb2, 0x03, 0x0a, 0x0a, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x12, 0x2a, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x39,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52,
+ 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
+ 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x48, 0x0a, 0x0b,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x33, 0x0a,
+ 0x06, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x06, 0x70, 0x6f, 0x69, 0x6e,
+ 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x03, 0x0a, 0x14, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x12, 0x48, 0x0a, 0x11, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x67, 0x0a, 0x11, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73,
+ 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x52, 0x10, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x73, 0x1a, 0xc8, 0x01, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x45, 0x0a, 0x0a, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70,
+ 0x65, 0x12, 0x48, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6b, 0x69, 0x6e, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x52,
+ 0x0a, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75,
+ 0x6e, 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22,
+ 0xb5, 0x02, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x43, 0x0a, 0x0c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x62, 0x65,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x4d, 0x0a, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74,
+ 0x61, 0x2e, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x09, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x1a, 0x8e, 0x01, 0x0a, 0x09, 0x50, 0x6f, 0x69, 0x6e, 0x74,
+ 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65,
+ 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x47,
+ 0x0a, 0x0d, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x49,
+ 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x7e, 0x0a, 0x0a, 0x4c, 0x61, 0x62, 0x65, 0x6c,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f,
+ 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69,
+ 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72,
+ 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48,
+ 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x63, 0x0a, 0x0a, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65,
+ 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x6f, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xf0, 0x02, 0x0a,
+ 0x0b, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x51, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e,
+ 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d, 0x0a, 0x0c, 0x65, 0x6e, 0x64, 0x5f, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72,
+ 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x64, 0x50, 0x6f,
+ 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64,
+ 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x65, 0x78, 0x74, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x52, 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72,
+ 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x72, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x67, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x1a, 0x36, 0x0a, 0x08, 0x50, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x63, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x42,
+ 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76,
+ 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70,
+ 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02,
+ 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_metric_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_metric_proto_rawDescData = file_google_monitoring_v3_metric_proto_rawDesc
+)
+
+func file_google_monitoring_v3_metric_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_metric_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_metric_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_metric_proto_rawDescData
+}
+
+var file_google_monitoring_v3_metric_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
+var file_google_monitoring_v3_metric_proto_goTypes = []any{
+ (*Point)(nil), // 0: google.monitoring.v3.Point
+ (*TimeSeries)(nil), // 1: google.monitoring.v3.TimeSeries
+ (*TimeSeriesDescriptor)(nil), // 2: google.monitoring.v3.TimeSeriesDescriptor
+ (*TimeSeriesData)(nil), // 3: google.monitoring.v3.TimeSeriesData
+ (*LabelValue)(nil), // 4: google.monitoring.v3.LabelValue
+ (*QueryError)(nil), // 5: google.monitoring.v3.QueryError
+ (*TextLocator)(nil), // 6: google.monitoring.v3.TextLocator
+ (*TimeSeriesDescriptor_ValueDescriptor)(nil), // 7: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor
+ (*TimeSeriesData_PointData)(nil), // 8: google.monitoring.v3.TimeSeriesData.PointData
+ (*TextLocator_Position)(nil), // 9: google.monitoring.v3.TextLocator.Position
+ (*TimeInterval)(nil), // 10: google.monitoring.v3.TimeInterval
+ (*TypedValue)(nil), // 11: google.monitoring.v3.TypedValue
+ (*metric.Metric)(nil), // 12: google.api.Metric
+ (*monitoredres.MonitoredResource)(nil), // 13: google.api.MonitoredResource
+ (*monitoredres.MonitoredResourceMetadata)(nil), // 14: google.api.MonitoredResourceMetadata
+ (metric.MetricDescriptor_MetricKind)(0), // 15: google.api.MetricDescriptor.MetricKind
+ (metric.MetricDescriptor_ValueType)(0), // 16: google.api.MetricDescriptor.ValueType
+ (*label.LabelDescriptor)(nil), // 17: google.api.LabelDescriptor
+}
+var file_google_monitoring_v3_metric_proto_depIdxs = []int32{
+ 10, // 0: google.monitoring.v3.Point.interval:type_name -> google.monitoring.v3.TimeInterval
+ 11, // 1: google.monitoring.v3.Point.value:type_name -> google.monitoring.v3.TypedValue
+ 12, // 2: google.monitoring.v3.TimeSeries.metric:type_name -> google.api.Metric
+ 13, // 3: google.monitoring.v3.TimeSeries.resource:type_name -> google.api.MonitoredResource
+ 14, // 4: google.monitoring.v3.TimeSeries.metadata:type_name -> google.api.MonitoredResourceMetadata
+ 15, // 5: google.monitoring.v3.TimeSeries.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
+ 16, // 6: google.monitoring.v3.TimeSeries.value_type:type_name -> google.api.MetricDescriptor.ValueType
+ 0, // 7: google.monitoring.v3.TimeSeries.points:type_name -> google.monitoring.v3.Point
+ 17, // 8: google.monitoring.v3.TimeSeriesDescriptor.label_descriptors:type_name -> google.api.LabelDescriptor
+ 7, // 9: google.monitoring.v3.TimeSeriesDescriptor.point_descriptors:type_name -> google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor
+ 4, // 10: google.monitoring.v3.TimeSeriesData.label_values:type_name -> google.monitoring.v3.LabelValue
+ 8, // 11: google.monitoring.v3.TimeSeriesData.point_data:type_name -> google.monitoring.v3.TimeSeriesData.PointData
+ 6, // 12: google.monitoring.v3.QueryError.locator:type_name -> google.monitoring.v3.TextLocator
+ 9, // 13: google.monitoring.v3.TextLocator.start_position:type_name -> google.monitoring.v3.TextLocator.Position
+ 9, // 14: google.monitoring.v3.TextLocator.end_position:type_name -> google.monitoring.v3.TextLocator.Position
+ 6, // 15: google.monitoring.v3.TextLocator.nested_locator:type_name -> google.monitoring.v3.TextLocator
+ 16, // 16: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.value_type:type_name -> google.api.MetricDescriptor.ValueType
+ 15, // 17: google.monitoring.v3.TimeSeriesDescriptor.ValueDescriptor.metric_kind:type_name -> google.api.MetricDescriptor.MetricKind
+ 11, // 18: google.monitoring.v3.TimeSeriesData.PointData.values:type_name -> google.monitoring.v3.TypedValue
+ 10, // 19: google.monitoring.v3.TimeSeriesData.PointData.time_interval:type_name -> google.monitoring.v3.TimeInterval
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_metric_proto_init() }
+func file_google_monitoring_v3_metric_proto_init() {
+ if File_google_monitoring_v3_metric_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_metric_proto_msgTypes[4].OneofWrappers = []any{
+ (*LabelValue_BoolValue)(nil),
+ (*LabelValue_Int64Value)(nil),
+ (*LabelValue_StringValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_metric_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 10,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_metric_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_metric_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_metric_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_metric_proto = out.File
+ file_google_monitoring_v3_metric_proto_rawDesc = nil
+ file_google_monitoring_v3_metric_proto_goTypes = nil
+ file_google_monitoring_v3_metric_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go
new file mode 100644
index 000000000..39b959524
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/metric_service.pb.go
@@ -0,0 +1,2293 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/metric_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ metric "google.golang.org/genproto/googleapis/api/metric"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ status "google.golang.org/genproto/googleapis/rpc/status"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status1 "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Controls which fields are returned by `ListTimeSeries*`.
+type ListTimeSeriesRequest_TimeSeriesView int32
+
+const (
+ // Returns the identity of the metric(s), the time series,
+ // and the time series data.
+ ListTimeSeriesRequest_FULL ListTimeSeriesRequest_TimeSeriesView = 0
+ // Returns the identity of the metric and the time series resource,
+ // but not the time series data.
+ ListTimeSeriesRequest_HEADERS ListTimeSeriesRequest_TimeSeriesView = 1
+)
+
+// Enum value maps for ListTimeSeriesRequest_TimeSeriesView.
+var (
+ ListTimeSeriesRequest_TimeSeriesView_name = map[int32]string{
+ 0: "FULL",
+ 1: "HEADERS",
+ }
+ ListTimeSeriesRequest_TimeSeriesView_value = map[string]int32{
+ "FULL": 0,
+ "HEADERS": 1,
+ }
+)
+
+func (x ListTimeSeriesRequest_TimeSeriesView) Enum() *ListTimeSeriesRequest_TimeSeriesView {
+ p := new(ListTimeSeriesRequest_TimeSeriesView)
+ *p = x
+ return p
+}
+
+func (x ListTimeSeriesRequest_TimeSeriesView) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ListTimeSeriesRequest_TimeSeriesView) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_metric_service_proto_enumTypes[0].Descriptor()
+}
+
+func (ListTimeSeriesRequest_TimeSeriesView) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_metric_service_proto_enumTypes[0]
+}
+
+func (x ListTimeSeriesRequest_TimeSeriesView) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ListTimeSeriesRequest_TimeSeriesView.Descriptor instead.
+func (ListTimeSeriesRequest_TimeSeriesView) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8, 0}
+}
+
+// The `ListMonitoredResourceDescriptors` request.
+type ListMonitoredResourceDescriptorsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional [filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // describing the descriptors to be returned. The filter can reference the
+ // descriptor's type and labels. For example, the following filter returns
+ // only Google Compute Engine descriptors that have an `id` label:
+ //
+ // resource.type = starts_with("gce_") AND resource.label:id
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A positive number that is the maximum number of results to return.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) Reset() {
+ *x = ListMonitoredResourceDescriptorsRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMonitoredResourceDescriptorsRequest) ProtoMessage() {}
+
+func (x *ListMonitoredResourceDescriptorsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMonitoredResourceDescriptorsRequest.ProtoReflect.Descriptor instead.
+func (*ListMonitoredResourceDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListMonitoredResourceDescriptorsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListMonitoredResourceDescriptors` response.
+type ListMonitoredResourceDescriptorsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The monitored resource descriptors that are available to this project
+ // and that match `filter`, if present.
+ ResourceDescriptors []*monitoredres.MonitoredResourceDescriptor `protobuf:"bytes,1,rep,name=resource_descriptors,json=resourceDescriptors,proto3" json:"resource_descriptors,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListMonitoredResourceDescriptorsResponse) Reset() {
+ *x = ListMonitoredResourceDescriptorsResponse{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListMonitoredResourceDescriptorsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMonitoredResourceDescriptorsResponse) ProtoMessage() {}
+
+func (x *ListMonitoredResourceDescriptorsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMonitoredResourceDescriptorsResponse.ProtoReflect.Descriptor instead.
+func (*ListMonitoredResourceDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListMonitoredResourceDescriptorsResponse) GetResourceDescriptors() []*monitoredres.MonitoredResourceDescriptor {
+ if x != nil {
+ return x.ResourceDescriptors
+ }
+ return nil
+}
+
+func (x *ListMonitoredResourceDescriptorsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `GetMonitoredResourceDescriptor` request.
+type GetMonitoredResourceDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The monitored resource descriptor to get. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/monitoredResourceDescriptors/[RESOURCE_TYPE]
+ //
+ // The `[RESOURCE_TYPE]` is a predefined type, such as
+ // `cloudsql_database`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetMonitoredResourceDescriptorRequest) Reset() {
+ *x = GetMonitoredResourceDescriptorRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetMonitoredResourceDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetMonitoredResourceDescriptorRequest) ProtoMessage() {}
+
+func (x *GetMonitoredResourceDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetMonitoredResourceDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*GetMonitoredResourceDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GetMonitoredResourceDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `ListMetricDescriptors` request.
+type ListMetricDescriptorsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. If this field is empty, all custom and
+ // system-defined metric descriptors are returned.
+ // Otherwise, the [filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifies which metric descriptors are to be
+ // returned. For example, the following filter matches all
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics):
+ //
+ // metric.type = starts_with("custom.googleapis.com/")
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. A positive number that is the maximum number of results to
+ // return. The default and maximum value is 10,000. If a page_size <= 0 or >
+ // 10,000 is submitted, will instead return a maximum of 10,000 results.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. If this field is not empty then it must contain the
+ // `nextPageToken` value returned by a previous call to this method. Using
+ // this field causes the method to return additional results from the previous
+ // method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // Optional. If true, only metrics and monitored resource types that have
+ // recent data (within roughly 25 hours) will be included in the response.
+ // - If a metric descriptor enumerates monitored resource types, only the
+ // monitored resource types for which the metric type has recent data will
+ // be included in the returned metric descriptor, and if none of them have
+ // recent data, the metric descriptor will not be returned.
+ // - If a metric descriptor does not enumerate the compatible monitored
+ // resource types, it will be returned only if the metric type has recent
+ // data for some monitored resource type. The returned descriptor will not
+ // enumerate any monitored resource types.
+ ActiveOnly bool `protobuf:"varint,6,opt,name=active_only,json=activeOnly,proto3" json:"active_only,omitempty"`
+}
+
+func (x *ListMetricDescriptorsRequest) Reset() {
+ *x = ListMetricDescriptorsRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListMetricDescriptorsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMetricDescriptorsRequest) ProtoMessage() {}
+
+func (x *ListMetricDescriptorsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMetricDescriptorsRequest.ProtoReflect.Descriptor instead.
+func (*ListMetricDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListMetricDescriptorsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListMetricDescriptorsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListMetricDescriptorsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListMetricDescriptorsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListMetricDescriptorsRequest) GetActiveOnly() bool {
+ if x != nil {
+ return x.ActiveOnly
+ }
+ return false
+}
+
+// The `ListMetricDescriptors` response.
+type ListMetricDescriptorsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The metric descriptors that are available to the project
+ // and that match the value of `filter`, if present.
+ MetricDescriptors []*metric.MetricDescriptor `protobuf:"bytes,1,rep,name=metric_descriptors,json=metricDescriptors,proto3" json:"metric_descriptors,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListMetricDescriptorsResponse) Reset() {
+ *x = ListMetricDescriptorsResponse{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListMetricDescriptorsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListMetricDescriptorsResponse) ProtoMessage() {}
+
+func (x *ListMetricDescriptorsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListMetricDescriptorsResponse.ProtoReflect.Descriptor instead.
+func (*ListMetricDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ListMetricDescriptorsResponse) GetMetricDescriptors() []*metric.MetricDescriptor {
+ if x != nil {
+ return x.MetricDescriptors
+ }
+ return nil
+}
+
+func (x *ListMetricDescriptorsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `GetMetricDescriptor` request.
+type GetMetricDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The metric descriptor on which to execute the request. The format
+ // is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]
+ //
+ // An example value of `[METRIC_ID]` is
+ // `"compute.googleapis.com/instance/disk/read_bytes_count"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetMetricDescriptorRequest) Reset() {
+ *x = GetMetricDescriptorRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetMetricDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetMetricDescriptorRequest) ProtoMessage() {}
+
+func (x *GetMetricDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetMetricDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*GetMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *GetMetricDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `CreateMetricDescriptor` request.
+type CreateMetricDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ // 4
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The new [custom
+ // metric](https://cloud.google.com/monitoring/custom-metrics) descriptor.
+ MetricDescriptor *metric.MetricDescriptor `protobuf:"bytes,2,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
+}
+
+func (x *CreateMetricDescriptorRequest) Reset() {
+ *x = CreateMetricDescriptorRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateMetricDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateMetricDescriptorRequest) ProtoMessage() {}
+
+func (x *CreateMetricDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateMetricDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*CreateMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *CreateMetricDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateMetricDescriptorRequest) GetMetricDescriptor() *metric.MetricDescriptor {
+ if x != nil {
+ return x.MetricDescriptor
+ }
+ return nil
+}
+
+// The `DeleteMetricDescriptor` request.
+type DeleteMetricDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The metric descriptor on which to execute the request. The format
+ // is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/metricDescriptors/[METRIC_ID]
+ //
+ // An example of `[METRIC_ID]` is:
+ // `"custom.googleapis.com/my_test_metric"`.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteMetricDescriptorRequest) Reset() {
+ *x = DeleteMetricDescriptorRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteMetricDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteMetricDescriptorRequest) ProtoMessage() {}
+
+func (x *DeleteMetricDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteMetricDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*DeleteMetricDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *DeleteMetricDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `ListTimeSeries` request.
+type ListTimeSeriesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name),
+ // organization or folder on which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ // organizations/[ORGANIZATION_ID]
+ // folders/[FOLDER_ID]
+ Name string `protobuf:"bytes,10,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. A [monitoring
+ // filter](https://cloud.google.com/monitoring/api/v3/filters) that specifies
+ // which time series should be returned. The filter must specify a single
+ // metric type, and can additionally specify metric labels and other
+ // information. For example:
+ //
+ // metric.type = "compute.googleapis.com/instance/cpu/usage_time" AND
+ // metric.labels.instance_name = "my-instance-name"
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Required. The time interval for which results should be returned. Only time
+ // series that contain data points in the specified interval are included in
+ // the response.
+ Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"`
+ // Specifies the alignment of data points in individual time series as
+ // well as how to combine the retrieved time series across specified labels.
+ //
+ // By default (if no `aggregation` is explicitly specified), the raw time
+ // series data is returned.
+ Aggregation *Aggregation `protobuf:"bytes,5,opt,name=aggregation,proto3" json:"aggregation,omitempty"`
+ // Apply a second aggregation after `aggregation` is applied. May only be
+ // specified if `aggregation` is specified.
+ SecondaryAggregation *Aggregation `protobuf:"bytes,11,opt,name=secondary_aggregation,json=secondaryAggregation,proto3" json:"secondary_aggregation,omitempty"`
+ // Unsupported: must be left blank. The points in each time series are
+ // currently returned in reverse time order (most recent to oldest).
+ OrderBy string `protobuf:"bytes,6,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // Required. Specifies which information is returned about the time series.
+ View ListTimeSeriesRequest_TimeSeriesView `protobuf:"varint,7,opt,name=view,proto3,enum=google.monitoring.v3.ListTimeSeriesRequest_TimeSeriesView" json:"view,omitempty"`
+ // A positive number that is the maximum number of results to return. If
+ // `page_size` is empty or more than 100,000 results, the effective
+ // `page_size` is 100,000 results. If `view` is set to `FULL`, this is the
+ // maximum number of `Points` returned. If `view` is set to `HEADERS`, this is
+ // the maximum number of `TimeSeries` returned.
+ PageSize int32 `protobuf:"varint,8,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,9,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListTimeSeriesRequest) Reset() {
+ *x = ListTimeSeriesRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListTimeSeriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListTimeSeriesRequest) ProtoMessage() {}
+
+func (x *ListTimeSeriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListTimeSeriesRequest.ProtoReflect.Descriptor instead.
+func (*ListTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *ListTimeSeriesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListTimeSeriesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListTimeSeriesRequest) GetInterval() *TimeInterval {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesRequest) GetAggregation() *Aggregation {
+ if x != nil {
+ return x.Aggregation
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesRequest) GetSecondaryAggregation() *Aggregation {
+ if x != nil {
+ return x.SecondaryAggregation
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesRequest) GetOrderBy() string {
+ if x != nil {
+ return x.OrderBy
+ }
+ return ""
+}
+
+func (x *ListTimeSeriesRequest) GetView() ListTimeSeriesRequest_TimeSeriesView {
+ if x != nil {
+ return x.View
+ }
+ return ListTimeSeriesRequest_FULL
+}
+
+func (x *ListTimeSeriesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListTimeSeriesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListTimeSeries` response.
+type ListTimeSeriesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // One or more time series that match the filter included in the request.
+ TimeSeries []*TimeSeries `protobuf:"bytes,1,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // Query execution errors that may have caused the time series data returned
+ // to be incomplete.
+ ExecutionErrors []*status.Status `protobuf:"bytes,3,rep,name=execution_errors,json=executionErrors,proto3" json:"execution_errors,omitempty"`
+ // The unit in which all `time_series` point values are reported. `unit`
+ // follows the UCUM format for units as seen in
+ // https://unitsofmeasure.org/ucum.html.
+ // If different `time_series` have different units (for example, because they
+ // come from different metric types, or a unit is absent), then `unit` will be
+ // "{not_a_unit}".
+ Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
+}
+
+func (x *ListTimeSeriesResponse) Reset() {
+ *x = ListTimeSeriesResponse{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListTimeSeriesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListTimeSeriesResponse) ProtoMessage() {}
+
+func (x *ListTimeSeriesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListTimeSeriesResponse.ProtoReflect.Descriptor instead.
+func (*ListTimeSeriesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ListTimeSeriesResponse) GetTimeSeries() []*TimeSeries {
+ if x != nil {
+ return x.TimeSeries
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListTimeSeriesResponse) GetExecutionErrors() []*status.Status {
+ if x != nil {
+ return x.ExecutionErrors
+ }
+ return nil
+}
+
+func (x *ListTimeSeriesResponse) GetUnit() string {
+ if x != nil {
+ return x.Unit
+ }
+ return ""
+}
+
+// The `CreateTimeSeries` request.
+type CreateTimeSeriesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The new data to be added to a list of time series.
+ // Adds at most one data point to each of several time series. The new data
+ // point must be more recent than any other point in its time series. Each
+ // `TimeSeries` value must fully specify a unique time series by supplying
+ // all label values for the metric and the monitored resource.
+ //
+ // The maximum number of `TimeSeries` objects per `Create` request is 200.
+ TimeSeries []*TimeSeries `protobuf:"bytes,2,rep,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+}
+
+func (x *CreateTimeSeriesRequest) Reset() {
+ *x = CreateTimeSeriesRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateTimeSeriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesRequest) ProtoMessage() {}
+
+func (x *CreateTimeSeriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesRequest.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *CreateTimeSeriesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateTimeSeriesRequest) GetTimeSeries() []*TimeSeries {
+ if x != nil {
+ return x.TimeSeries
+ }
+ return nil
+}
+
+// DEPRECATED. Used to hold per-time-series error status.
+type CreateTimeSeriesError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // DEPRECATED. Time series ID that resulted in the `status` error.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+ TimeSeries *TimeSeries `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ // DEPRECATED. The status of the requested write operation for `time_series`.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+ Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+}
+
+func (x *CreateTimeSeriesError) Reset() {
+ *x = CreateTimeSeriesError{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateTimeSeriesError) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesError) ProtoMessage() {}
+
+func (x *CreateTimeSeriesError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesError.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesError) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{11}
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+func (x *CreateTimeSeriesError) GetTimeSeries() *TimeSeries {
+ if x != nil {
+ return x.TimeSeries
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+func (x *CreateTimeSeriesError) GetStatus() *status.Status {
+ if x != nil {
+ return x.Status
+ }
+ return nil
+}
+
+// Summary of the result of a failed request to write data to a time series.
+type CreateTimeSeriesSummary struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of points in the request.
+ TotalPointCount int32 `protobuf:"varint,1,opt,name=total_point_count,json=totalPointCount,proto3" json:"total_point_count,omitempty"`
+ // The number of points that were successfully written.
+ SuccessPointCount int32 `protobuf:"varint,2,opt,name=success_point_count,json=successPointCount,proto3" json:"success_point_count,omitempty"`
+ // The number of points that failed to be written. Order is not guaranteed.
+ Errors []*CreateTimeSeriesSummary_Error `protobuf:"bytes,3,rep,name=errors,proto3" json:"errors,omitempty"`
+}
+
+func (x *CreateTimeSeriesSummary) Reset() {
+ *x = CreateTimeSeriesSummary{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateTimeSeriesSummary) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesSummary) ProtoMessage() {}
+
+func (x *CreateTimeSeriesSummary) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesSummary.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesSummary) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *CreateTimeSeriesSummary) GetTotalPointCount() int32 {
+ if x != nil {
+ return x.TotalPointCount
+ }
+ return 0
+}
+
+func (x *CreateTimeSeriesSummary) GetSuccessPointCount() int32 {
+ if x != nil {
+ return x.SuccessPointCount
+ }
+ return 0
+}
+
+func (x *CreateTimeSeriesSummary) GetErrors() []*CreateTimeSeriesSummary_Error {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+// The `QueryTimeSeries` request. For information about the status of
+// Monitoring Query Language (MQL), see the [MQL deprecation
+// notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
+//
+// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+type QueryTimeSeriesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The query in the [Monitoring Query
+ // Language](https://cloud.google.com/monitoring/mql/reference) format.
+ // The default time zone is in UTC.
+ Query string `protobuf:"bytes,7,opt,name=query,proto3" json:"query,omitempty"`
+ // A positive number that is the maximum number of time_series_data to return.
+ PageSize int32 `protobuf:"varint,9,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,10,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *QueryTimeSeriesRequest) Reset() {
+ *x = QueryTimeSeriesRequest{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryTimeSeriesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryTimeSeriesRequest) ProtoMessage() {}
+
+func (x *QueryTimeSeriesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryTimeSeriesRequest.ProtoReflect.Descriptor instead.
+func (*QueryTimeSeriesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *QueryTimeSeriesRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *QueryTimeSeriesRequest) GetQuery() string {
+ if x != nil {
+ return x.Query
+ }
+ return ""
+}
+
+func (x *QueryTimeSeriesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *QueryTimeSeriesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `QueryTimeSeries` response. For information about the status of
+// Monitoring Query Language (MQL), see the [MQL deprecation
+// notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
+//
+// Deprecated: Marked as deprecated in google/monitoring/v3/metric_service.proto.
+type QueryTimeSeriesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The descriptor for the time series data.
+ TimeSeriesDescriptor *TimeSeriesDescriptor `protobuf:"bytes,8,opt,name=time_series_descriptor,json=timeSeriesDescriptor,proto3" json:"time_series_descriptor,omitempty"`
+ // The time series data.
+ TimeSeriesData []*TimeSeriesData `protobuf:"bytes,9,rep,name=time_series_data,json=timeSeriesData,proto3" json:"time_series_data,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results, use that value as
+ // `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,10,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // Query execution errors that may have caused the time series data returned
+ // to be incomplete. The available data will be available in the
+ // response.
+ PartialErrors []*status.Status `protobuf:"bytes,11,rep,name=partial_errors,json=partialErrors,proto3" json:"partial_errors,omitempty"`
+}
+
+func (x *QueryTimeSeriesResponse) Reset() {
+ *x = QueryTimeSeriesResponse{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryTimeSeriesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryTimeSeriesResponse) ProtoMessage() {}
+
+func (x *QueryTimeSeriesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryTimeSeriesResponse.ProtoReflect.Descriptor instead.
+func (*QueryTimeSeriesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *QueryTimeSeriesResponse) GetTimeSeriesDescriptor() *TimeSeriesDescriptor {
+ if x != nil {
+ return x.TimeSeriesDescriptor
+ }
+ return nil
+}
+
+func (x *QueryTimeSeriesResponse) GetTimeSeriesData() []*TimeSeriesData {
+ if x != nil {
+ return x.TimeSeriesData
+ }
+ return nil
+}
+
+func (x *QueryTimeSeriesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *QueryTimeSeriesResponse) GetPartialErrors() []*status.Status {
+ if x != nil {
+ return x.PartialErrors
+ }
+ return nil
+}
+
+// This is an error detail intended to be used with INVALID_ARGUMENT errors.
+type QueryErrorList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Errors in parsing the time series query language text. The number of errors
+ // in the response may be limited.
+ Errors []*QueryError `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
+ // A summary of all the errors.
+ ErrorSummary string `protobuf:"bytes,2,opt,name=error_summary,json=errorSummary,proto3" json:"error_summary,omitempty"`
+}
+
+func (x *QueryErrorList) Reset() {
+ *x = QueryErrorList{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *QueryErrorList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QueryErrorList) ProtoMessage() {}
+
+func (x *QueryErrorList) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QueryErrorList.ProtoReflect.Descriptor instead.
+func (*QueryErrorList) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *QueryErrorList) GetErrors() []*QueryError {
+ if x != nil {
+ return x.Errors
+ }
+ return nil
+}
+
+func (x *QueryErrorList) GetErrorSummary() string {
+ if x != nil {
+ return x.ErrorSummary
+ }
+ return ""
+}
+
+// Detailed information about an error category.
+type CreateTimeSeriesSummary_Error struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The status of the requested write operation.
+ Status *status.Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"`
+ // The number of points that couldn't be written because of `status`.
+ PointCount int32 `protobuf:"varint,2,opt,name=point_count,json=pointCount,proto3" json:"point_count,omitempty"`
+}
+
+func (x *CreateTimeSeriesSummary_Error) Reset() {
+ *x = CreateTimeSeriesSummary_Error{}
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateTimeSeriesSummary_Error) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateTimeSeriesSummary_Error) ProtoMessage() {}
+
+func (x *CreateTimeSeriesSummary_Error) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_metric_service_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateTimeSeriesSummary_Error.ProtoReflect.Descriptor instead.
+func (*CreateTimeSeriesSummary_Error) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_metric_service_proto_rawDescGZIP(), []int{12, 0}
+}
+
+func (x *CreateTimeSeriesSummary_Error) GetStatus() *status.Status {
+ if x != nil {
+ return x.Status
+ }
+ return nil
+}
+
+func (x *CreateTimeSeriesSummary_Error) GetPointCount() int32 {
+ if x != nil {
+ return x.PointCount
+ }
+ return 0
+}
+
+var File_google_monitoring_v3_metric_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_metric_service_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
+ 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd0,
+ 0x01, 0x0a, 0x27, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x37,
+ 0x12, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
+ 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x22, 0xae, 0x01, 0x0a, 0x28, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a,
+ 0x0a, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x13, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65,
+ 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x22, 0x7a, 0x0a, 0x25, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x51, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3d, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x37, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xef,
+ 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61,
+ 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x24, 0x0a, 0x0b, 0x61, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79,
+ 0x22, 0x94, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x12, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x11, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12,
+ 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x64, 0x0a, 0x1a, 0x47, 0x65, 0x74, 0x4d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01,
+ 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x12, 0x2a, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x11, 0x6d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x67, 0x0a, 0x1d, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x46, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x32, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2c, 0x0a, 0x2a,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x22, 0xad, 0x04, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72,
+ 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2c, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x26,
+ 0x12, 0x24, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x69, 0x6d, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x06,
+ 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74,
+ 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x43,
+ 0x0a, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65,
+ 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x56, 0x0a, 0x15, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79,
+ 0x5f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x14, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79,
+ 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x6f,
+ 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f,
+ 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x53, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x07,
+ 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x56, 0x69, 0x65, 0x77,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x76, 0x69, 0x65, 0x77, 0x12, 0x1b, 0x0a, 0x09, 0x70,
+ 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08,
+ 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x27, 0x0a, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x56, 0x69, 0x65, 0x77, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c,
+ 0x4c, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x53, 0x10, 0x01,
+ 0x22, 0xd6, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72,
+ 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x74,
+ 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x26,
+ 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3d, 0x0a, 0x10, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46,
+ 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72,
+ 0x12, 0x45, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x98, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d,
+ 0x61, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
+ 0x2e, 0x0a, 0x13, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x73, 0x75,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
+ 0x4b, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x2e, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x1a, 0x54, 0x0a, 0x05,
+ 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+ 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x75,
+ 0x6e, 0x74, 0x22, 0x8c, 0x01, 0x0a, 0x16, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72,
+ 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, 0x02, 0x18,
+ 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x60, 0x0a,
+ 0x16, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x14, 0x74, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12,
+ 0x4e, 0x0a, 0x10, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5f, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x52,
+ 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x44, 0x61, 0x74, 0x61, 0x12,
+ 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
+ 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x74, 0x69,
+ 0x61, 0x6c, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x73, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x6f, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x51, 0x75, 0x65, 0x72, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x75, 0x6d, 0x6d,
+ 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x32, 0xbc, 0x0f, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0xe4, 0x01, 0x0a, 0x20, 0x4c, 0x69,
+ 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda,
+ 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76,
+ 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73,
+ 0x12, 0xcc, 0x01, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65,
+ 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x44, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x12, 0x35, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12,
+ 0xb8, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x36, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x29, 0x12, 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x13, 0x47,
+ 0x65, 0x74, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74,
+ 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12, 0xc8, 0x01,
+ 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x5b, 0xda, 0x41, 0x16,
+ 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x3a, 0x11, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22,
+ 0x27, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xa0, 0x01, 0x0a, 0x16, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
+ 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a,
+ 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x2a, 0x7d, 0x12, 0xfe, 0x01, 0x0a, 0x0e,
+ 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65,
+ 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x90, 0x01, 0xda, 0x41, 0x19, 0x6e,
+ 0x61, 0x6d, 0x65, 0x2c, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2c, 0x69, 0x6e, 0x74, 0x65, 0x72,
+ 0x76, 0x61, 0x6c, 0x2c, 0x76, 0x69, 0x65, 0x77, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x6e, 0x5a, 0x27,
+ 0x12, 0x25, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x6f, 0x72, 0x67, 0x61,
+ 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x5a, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x20, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x99, 0x01, 0x0a,
+ 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3e, 0xda, 0x41, 0x10, 0x6e, 0x61, 0x6d,
+ 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x25, 0x3a, 0x01, 0x2a, 0x22, 0x20, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0xae, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65,
+ 0x72, 0x69, 0x65, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x4c, 0xda, 0x41, 0x10,
+ 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x33, 0x3a, 0x01, 0x2a, 0x22, 0x2e, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x3a, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0xda, 0x01, 0xca, 0x41, 0x19, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xba, 0x01, 0x68, 0x74, 0x74, 0x70,
+ 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73,
+ 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
+ 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0x89, 0x08, 0xea, 0x41, 0xf0, 0x01, 0x0a, 0x2a, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f,
+ 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x39, 0x66,
+ 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x73, 0x2f, 0x7b, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0xea, 0x41, 0xb7,
+ 0x02, 0x0a, 0x35, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x4f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x59, 0x6f, 0x72, 0x67, 0x61, 0x6e,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65,
+ 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x4d, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66,
+ 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0xea, 0x41, 0x51, 0x0a, 0x23, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x57, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65,
+ 0x12, 0x12, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x7d, 0x12, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73,
+ 0x2f, 0x7b, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x70, 0x61, 0x63, 0x65, 0x7d, 0xea, 0x41, 0xb5, 0x01,
+ 0x0a, 0x24, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x69, 0x6d, 0x65,
+ 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69,
+ 0x65, 0x73, 0x7d, 0x12, 0x35, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x7d, 0x12, 0x29, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2f, 0x7b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x69, 0x65, 0x73, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42,
+ 0x12, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c,
+ 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a,
+ 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_metric_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_metric_service_proto_rawDescData = file_google_monitoring_v3_metric_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_metric_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_metric_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_metric_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_metric_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_metric_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_metric_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_monitoring_v3_metric_service_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
+var file_google_monitoring_v3_metric_service_proto_goTypes = []any{
+ (ListTimeSeriesRequest_TimeSeriesView)(0), // 0: google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView
+ (*ListMonitoredResourceDescriptorsRequest)(nil), // 1: google.monitoring.v3.ListMonitoredResourceDescriptorsRequest
+ (*ListMonitoredResourceDescriptorsResponse)(nil), // 2: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse
+ (*GetMonitoredResourceDescriptorRequest)(nil), // 3: google.monitoring.v3.GetMonitoredResourceDescriptorRequest
+ (*ListMetricDescriptorsRequest)(nil), // 4: google.monitoring.v3.ListMetricDescriptorsRequest
+ (*ListMetricDescriptorsResponse)(nil), // 5: google.monitoring.v3.ListMetricDescriptorsResponse
+ (*GetMetricDescriptorRequest)(nil), // 6: google.monitoring.v3.GetMetricDescriptorRequest
+ (*CreateMetricDescriptorRequest)(nil), // 7: google.monitoring.v3.CreateMetricDescriptorRequest
+ (*DeleteMetricDescriptorRequest)(nil), // 8: google.monitoring.v3.DeleteMetricDescriptorRequest
+ (*ListTimeSeriesRequest)(nil), // 9: google.monitoring.v3.ListTimeSeriesRequest
+ (*ListTimeSeriesResponse)(nil), // 10: google.monitoring.v3.ListTimeSeriesResponse
+ (*CreateTimeSeriesRequest)(nil), // 11: google.monitoring.v3.CreateTimeSeriesRequest
+ (*CreateTimeSeriesError)(nil), // 12: google.monitoring.v3.CreateTimeSeriesError
+ (*CreateTimeSeriesSummary)(nil), // 13: google.monitoring.v3.CreateTimeSeriesSummary
+ (*QueryTimeSeriesRequest)(nil), // 14: google.monitoring.v3.QueryTimeSeriesRequest
+ (*QueryTimeSeriesResponse)(nil), // 15: google.monitoring.v3.QueryTimeSeriesResponse
+ (*QueryErrorList)(nil), // 16: google.monitoring.v3.QueryErrorList
+ (*CreateTimeSeriesSummary_Error)(nil), // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error
+ (*monitoredres.MonitoredResourceDescriptor)(nil), // 18: google.api.MonitoredResourceDescriptor
+ (*metric.MetricDescriptor)(nil), // 19: google.api.MetricDescriptor
+ (*TimeInterval)(nil), // 20: google.monitoring.v3.TimeInterval
+ (*Aggregation)(nil), // 21: google.monitoring.v3.Aggregation
+ (*TimeSeries)(nil), // 22: google.monitoring.v3.TimeSeries
+ (*status.Status)(nil), // 23: google.rpc.Status
+ (*TimeSeriesDescriptor)(nil), // 24: google.monitoring.v3.TimeSeriesDescriptor
+ (*TimeSeriesData)(nil), // 25: google.monitoring.v3.TimeSeriesData
+ (*QueryError)(nil), // 26: google.monitoring.v3.QueryError
+ (*emptypb.Empty)(nil), // 27: google.protobuf.Empty
+}
+var file_google_monitoring_v3_metric_service_proto_depIdxs = []int32{
+ 18, // 0: google.monitoring.v3.ListMonitoredResourceDescriptorsResponse.resource_descriptors:type_name -> google.api.MonitoredResourceDescriptor
+ 19, // 1: google.monitoring.v3.ListMetricDescriptorsResponse.metric_descriptors:type_name -> google.api.MetricDescriptor
+ 19, // 2: google.monitoring.v3.CreateMetricDescriptorRequest.metric_descriptor:type_name -> google.api.MetricDescriptor
+ 20, // 3: google.monitoring.v3.ListTimeSeriesRequest.interval:type_name -> google.monitoring.v3.TimeInterval
+ 21, // 4: google.monitoring.v3.ListTimeSeriesRequest.aggregation:type_name -> google.monitoring.v3.Aggregation
+ 21, // 5: google.monitoring.v3.ListTimeSeriesRequest.secondary_aggregation:type_name -> google.monitoring.v3.Aggregation
+ 0, // 6: google.monitoring.v3.ListTimeSeriesRequest.view:type_name -> google.monitoring.v3.ListTimeSeriesRequest.TimeSeriesView
+ 22, // 7: google.monitoring.v3.ListTimeSeriesResponse.time_series:type_name -> google.monitoring.v3.TimeSeries
+ 23, // 8: google.monitoring.v3.ListTimeSeriesResponse.execution_errors:type_name -> google.rpc.Status
+ 22, // 9: google.monitoring.v3.CreateTimeSeriesRequest.time_series:type_name -> google.monitoring.v3.TimeSeries
+ 22, // 10: google.monitoring.v3.CreateTimeSeriesError.time_series:type_name -> google.monitoring.v3.TimeSeries
+ 23, // 11: google.monitoring.v3.CreateTimeSeriesError.status:type_name -> google.rpc.Status
+ 17, // 12: google.monitoring.v3.CreateTimeSeriesSummary.errors:type_name -> google.monitoring.v3.CreateTimeSeriesSummary.Error
+ 24, // 13: google.monitoring.v3.QueryTimeSeriesResponse.time_series_descriptor:type_name -> google.monitoring.v3.TimeSeriesDescriptor
+ 25, // 14: google.monitoring.v3.QueryTimeSeriesResponse.time_series_data:type_name -> google.monitoring.v3.TimeSeriesData
+ 23, // 15: google.monitoring.v3.QueryTimeSeriesResponse.partial_errors:type_name -> google.rpc.Status
+ 26, // 16: google.monitoring.v3.QueryErrorList.errors:type_name -> google.monitoring.v3.QueryError
+ 23, // 17: google.monitoring.v3.CreateTimeSeriesSummary.Error.status:type_name -> google.rpc.Status
+ 1, // 18: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:input_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsRequest
+ 3, // 19: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:input_type -> google.monitoring.v3.GetMonitoredResourceDescriptorRequest
+ 4, // 20: google.monitoring.v3.MetricService.ListMetricDescriptors:input_type -> google.monitoring.v3.ListMetricDescriptorsRequest
+ 6, // 21: google.monitoring.v3.MetricService.GetMetricDescriptor:input_type -> google.monitoring.v3.GetMetricDescriptorRequest
+ 7, // 22: google.monitoring.v3.MetricService.CreateMetricDescriptor:input_type -> google.monitoring.v3.CreateMetricDescriptorRequest
+ 8, // 23: google.monitoring.v3.MetricService.DeleteMetricDescriptor:input_type -> google.monitoring.v3.DeleteMetricDescriptorRequest
+ 9, // 24: google.monitoring.v3.MetricService.ListTimeSeries:input_type -> google.monitoring.v3.ListTimeSeriesRequest
+ 11, // 25: google.monitoring.v3.MetricService.CreateTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest
+ 11, // 26: google.monitoring.v3.MetricService.CreateServiceTimeSeries:input_type -> google.monitoring.v3.CreateTimeSeriesRequest
+ 2, // 27: google.monitoring.v3.MetricService.ListMonitoredResourceDescriptors:output_type -> google.monitoring.v3.ListMonitoredResourceDescriptorsResponse
+ 18, // 28: google.monitoring.v3.MetricService.GetMonitoredResourceDescriptor:output_type -> google.api.MonitoredResourceDescriptor
+ 5, // 29: google.monitoring.v3.MetricService.ListMetricDescriptors:output_type -> google.monitoring.v3.ListMetricDescriptorsResponse
+ 19, // 30: google.monitoring.v3.MetricService.GetMetricDescriptor:output_type -> google.api.MetricDescriptor
+ 19, // 31: google.monitoring.v3.MetricService.CreateMetricDescriptor:output_type -> google.api.MetricDescriptor
+ 27, // 32: google.monitoring.v3.MetricService.DeleteMetricDescriptor:output_type -> google.protobuf.Empty
+ 10, // 33: google.monitoring.v3.MetricService.ListTimeSeries:output_type -> google.monitoring.v3.ListTimeSeriesResponse
+ 27, // 34: google.monitoring.v3.MetricService.CreateTimeSeries:output_type -> google.protobuf.Empty
+ 27, // 35: google.monitoring.v3.MetricService.CreateServiceTimeSeries:output_type -> google.protobuf.Empty
+ 27, // [27:36] is the sub-list for method output_type
+ 18, // [18:27] is the sub-list for method input_type
+ 18, // [18:18] is the sub-list for extension type_name
+ 18, // [18:18] is the sub-list for extension extendee
+ 0, // [0:18] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_metric_service_proto_init() }
+func file_google_monitoring_v3_metric_service_proto_init() {
+ if File_google_monitoring_v3_metric_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_metric_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_metric_service_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 17,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_metric_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_metric_service_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_metric_service_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_metric_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_metric_service_proto = out.File
+ file_google_monitoring_v3_metric_service_proto_rawDesc = nil
+ file_google_monitoring_v3_metric_service_proto_goTypes = nil
+ file_google_monitoring_v3_metric_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// MetricServiceClient is the client API for MetricService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricServiceClient interface {
+ // Lists monitored resource descriptors that match a filter.
+ ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error)
+ // Gets a single monitored resource descriptor.
+ GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error)
+ // Lists metric descriptors that match a filter.
+ ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error)
+ // Gets a single metric descriptor.
+ GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error)
+ // Creates a new metric descriptor.
+ // The creation is executed asynchronously.
+ // User-created metric descriptors define
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics).
+ // The metric descriptor is updated if it already exists,
+ // except that metric labels are never removed.
+ CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error)
+ // Deletes a metric descriptor. Only user-created
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be
+ // deleted.
+ DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Lists time series that match a filter.
+ ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error)
+ // Creates or adds data to one or more time series.
+ // The response is empty if all time series in the request were written.
+ // If any time series could not be written, a corresponding failure message is
+ // included in the error response.
+ // This method does not support
+ // [resource locations constraint of an organization
+ // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
+ CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Creates or adds data to one or more service time series. A service time
+ // series is a time series for a metric from a Google Cloud service. The
+ // response is empty if all time series in the request were written. If any
+ // time series could not be written, a corresponding failure message is
+ // included in the error response. This endpoint rejects writes to
+ // user-defined metrics.
+ // This method is only for use by Google Cloud services. Use
+ // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries]
+ // instead.
+ CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+}
+
+type metricServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewMetricServiceClient(cc grpc.ClientConnInterface) MetricServiceClient {
+ return &metricServiceClient{cc}
+}
+
+func (c *metricServiceClient) ListMonitoredResourceDescriptors(ctx context.Context, in *ListMonitoredResourceDescriptorsRequest, opts ...grpc.CallOption) (*ListMonitoredResourceDescriptorsResponse, error) {
+ out := new(ListMonitoredResourceDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) GetMonitoredResourceDescriptor(ctx context.Context, in *GetMonitoredResourceDescriptorRequest, opts ...grpc.CallOption) (*monitoredres.MonitoredResourceDescriptor, error) {
+ out := new(monitoredres.MonitoredResourceDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) ListMetricDescriptors(ctx context.Context, in *ListMetricDescriptorsRequest, opts ...grpc.CallOption) (*ListMetricDescriptorsResponse, error) {
+ out := new(ListMetricDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListMetricDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) GetMetricDescriptor(ctx context.Context, in *GetMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) {
+ out := new(metric.MetricDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/GetMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) CreateMetricDescriptor(ctx context.Context, in *CreateMetricDescriptorRequest, opts ...grpc.CallOption) (*metric.MetricDescriptor, error) {
+ out := new(metric.MetricDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) DeleteMetricDescriptor(ctx context.Context, in *DeleteMetricDescriptorRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/DeleteMetricDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) ListTimeSeries(ctx context.Context, in *ListTimeSeriesRequest, opts ...grpc.CallOption) (*ListTimeSeriesResponse, error) {
+ out := new(ListTimeSeriesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/ListTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) CreateTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *metricServiceClient) CreateServiceTimeSeries(ctx context.Context, in *CreateTimeSeriesRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.MetricService/CreateServiceTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MetricServiceServer is the server API for MetricService service.
+type MetricServiceServer interface {
+ // Lists monitored resource descriptors that match a filter.
+ ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error)
+ // Gets a single monitored resource descriptor.
+ GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error)
+ // Lists metric descriptors that match a filter.
+ ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error)
+ // Gets a single metric descriptor.
+ GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error)
+ // Creates a new metric descriptor.
+ // The creation is executed asynchronously.
+ // User-created metric descriptors define
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics).
+ // The metric descriptor is updated if it already exists,
+ // except that metric labels are never removed.
+ CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error)
+ // Deletes a metric descriptor. Only user-created
+ // [custom metrics](https://cloud.google.com/monitoring/custom-metrics) can be
+ // deleted.
+ DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error)
+ // Lists time series that match a filter.
+ ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error)
+ // Creates or adds data to one or more time series.
+ // The response is empty if all time series in the request were written.
+ // If any time series could not be written, a corresponding failure message is
+ // included in the error response.
+ // This method does not support
+ // [resource locations constraint of an organization
+ // policy](https://cloud.google.com/resource-manager/docs/organization-policy/defining-locations#setting_the_organization_policy).
+ CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error)
+ // Creates or adds data to one or more service time series. A service time
+ // series is a time series for a metric from a Google Cloud service. The
+ // response is empty if all time series in the request were written. If any
+ // time series could not be written, a corresponding failure message is
+ // included in the error response. This endpoint rejects writes to
+ // user-defined metrics.
+ // This method is only for use by Google Cloud services. Use
+ // [projects.timeSeries.create][google.monitoring.v3.MetricService.CreateTimeSeries]
+ // instead.
+ CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error)
+}
+
+// UnimplementedMetricServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedMetricServiceServer struct {
+}
+
+func (*UnimplementedMetricServiceServer) ListMonitoredResourceDescriptors(context.Context, *ListMonitoredResourceDescriptorsRequest) (*ListMonitoredResourceDescriptorsResponse, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method ListMonitoredResourceDescriptors not implemented")
+}
+func (*UnimplementedMetricServiceServer) GetMonitoredResourceDescriptor(context.Context, *GetMonitoredResourceDescriptorRequest) (*monitoredres.MonitoredResourceDescriptor, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method GetMonitoredResourceDescriptor not implemented")
+}
+func (*UnimplementedMetricServiceServer) ListMetricDescriptors(context.Context, *ListMetricDescriptorsRequest) (*ListMetricDescriptorsResponse, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method ListMetricDescriptors not implemented")
+}
+func (*UnimplementedMetricServiceServer) GetMetricDescriptor(context.Context, *GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method GetMetricDescriptor not implemented")
+}
+func (*UnimplementedMetricServiceServer) CreateMetricDescriptor(context.Context, *CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method CreateMetricDescriptor not implemented")
+}
+func (*UnimplementedMetricServiceServer) DeleteMetricDescriptor(context.Context, *DeleteMetricDescriptorRequest) (*emptypb.Empty, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method DeleteMetricDescriptor not implemented")
+}
+func (*UnimplementedMetricServiceServer) ListTimeSeries(context.Context, *ListTimeSeriesRequest) (*ListTimeSeriesResponse, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method ListTimeSeries not implemented")
+}
+func (*UnimplementedMetricServiceServer) CreateTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method CreateTimeSeries not implemented")
+}
+func (*UnimplementedMetricServiceServer) CreateServiceTimeSeries(context.Context, *CreateTimeSeriesRequest) (*emptypb.Empty, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method CreateServiceTimeSeries not implemented")
+}
+
+func RegisterMetricServiceServer(s *grpc.Server, srv MetricServiceServer) {
+ s.RegisterService(&_MetricService_serviceDesc, srv)
+}
+
+func _MetricService_ListMonitoredResourceDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMonitoredResourceDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListMonitoredResourceDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListMonitoredResourceDescriptors(ctx, req.(*ListMonitoredResourceDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_GetMonitoredResourceDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetMonitoredResourceDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/GetMonitoredResourceDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).GetMonitoredResourceDescriptor(ctx, req.(*GetMonitoredResourceDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_ListMetricDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListMetricDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListMetricDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListMetricDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListMetricDescriptors(ctx, req.(*ListMetricDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_GetMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).GetMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/GetMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).GetMetricDescriptor(ctx, req.(*GetMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_CreateMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/CreateMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).CreateMetricDescriptor(ctx, req.(*CreateMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_DeleteMetricDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteMetricDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/DeleteMetricDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).DeleteMetricDescriptor(ctx, req.(*DeleteMetricDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_ListTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).ListTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/ListTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).ListTimeSeries(ctx, req.(*ListTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_CreateTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).CreateTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/CreateTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).CreateTimeSeries(ctx, req.(*CreateTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _MetricService_CreateServiceTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.MetricService/CreateServiceTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MetricServiceServer).CreateServiceTimeSeries(ctx, req.(*CreateTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _MetricService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.MetricService",
+ HandlerType: (*MetricServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListMonitoredResourceDescriptors",
+ Handler: _MetricService_ListMonitoredResourceDescriptors_Handler,
+ },
+ {
+ MethodName: "GetMonitoredResourceDescriptor",
+ Handler: _MetricService_GetMonitoredResourceDescriptor_Handler,
+ },
+ {
+ MethodName: "ListMetricDescriptors",
+ Handler: _MetricService_ListMetricDescriptors_Handler,
+ },
+ {
+ MethodName: "GetMetricDescriptor",
+ Handler: _MetricService_GetMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "CreateMetricDescriptor",
+ Handler: _MetricService_CreateMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "DeleteMetricDescriptor",
+ Handler: _MetricService_DeleteMetricDescriptor_Handler,
+ },
+ {
+ MethodName: "ListTimeSeries",
+ Handler: _MetricService_ListTimeSeries_Handler,
+ },
+ {
+ MethodName: "CreateTimeSeries",
+ Handler: _MetricService_CreateTimeSeries_Handler,
+ },
+ {
+ MethodName: "CreateServiceTimeSeries",
+ Handler: _MetricService_CreateServiceTimeSeries_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/metric_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go
new file mode 100644
index 000000000..e03d89efe
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/mutation_record.pb.go
@@ -0,0 +1,176 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/mutation_record.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Describes a change made to a configuration.
+type MutationRecord struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // When the change occurred.
+ MutateTime *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=mutate_time,json=mutateTime,proto3" json:"mutate_time,omitempty"`
+ // The email address of the user making the change.
+ MutatedBy string `protobuf:"bytes,2,opt,name=mutated_by,json=mutatedBy,proto3" json:"mutated_by,omitempty"`
+}
+
+func (x *MutationRecord) Reset() {
+ *x = MutationRecord{}
+ mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MutationRecord) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MutationRecord) ProtoMessage() {}
+
+func (x *MutationRecord) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_mutation_record_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MutationRecord.ProtoReflect.Descriptor instead.
+func (*MutationRecord) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_mutation_record_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MutationRecord) GetMutateTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.MutateTime
+ }
+ return nil
+}
+
+func (x *MutationRecord) GetMutatedBy() string {
+ if x != nil {
+ return x.MutatedBy
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_mutation_record_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_mutation_record_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0x6c, 0x0a, 0x0e, 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x5f,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x54, 0x69,
+ 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x79,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x65, 0x64, 0x42,
+ 0x79, 0x42, 0xce, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x13,
+ 0x4d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c,
+ 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a,
+ 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_mutation_record_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_mutation_record_proto_rawDescData = file_google_monitoring_v3_mutation_record_proto_rawDesc
+)
+
+func file_google_monitoring_v3_mutation_record_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_mutation_record_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_mutation_record_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_mutation_record_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_mutation_record_proto_rawDescData
+}
+
+var file_google_monitoring_v3_mutation_record_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_monitoring_v3_mutation_record_proto_goTypes = []any{
+ (*MutationRecord)(nil), // 0: google.monitoring.v3.MutationRecord
+ (*timestamppb.Timestamp)(nil), // 1: google.protobuf.Timestamp
+}
+var file_google_monitoring_v3_mutation_record_proto_depIdxs = []int32{
+ 1, // 0: google.monitoring.v3.MutationRecord.mutate_time:type_name -> google.protobuf.Timestamp
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_mutation_record_proto_init() }
+func file_google_monitoring_v3_mutation_record_proto_init() {
+ if File_google_monitoring_v3_mutation_record_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_mutation_record_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_mutation_record_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_mutation_record_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_mutation_record_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_mutation_record_proto = out.File
+ file_google_monitoring_v3_mutation_record_proto_rawDesc = nil
+ file_google_monitoring_v3_mutation_record_proto_goTypes = nil
+ file_google_monitoring_v3_mutation_record_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go
new file mode 100644
index 000000000..0d5cacbec
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification.pb.go
@@ -0,0 +1,619 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/notification.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ api "google.golang.org/genproto/googleapis/api"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ label "google.golang.org/genproto/googleapis/api/label"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Indicates whether the channel has been verified or not. It is illegal
+// to specify this field in a
+// [`CreateNotificationChannel`][google.monitoring.v3.NotificationChannelService.CreateNotificationChannel]
+// or an
+// [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+// operation.
+type NotificationChannel_VerificationStatus int32
+
+const (
+ // Sentinel value used to indicate that the state is unknown, omitted, or
+ // is not applicable (as in the case of channels that neither support
+ // nor require verification in order to function).
+ NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED NotificationChannel_VerificationStatus = 0
+ // The channel has yet to be verified and requires verification to function.
+ // Note that this state also applies to the case where the verification
+ // process has been initiated by sending a verification code but where
+ // the verification code has not been submitted to complete the process.
+ NotificationChannel_UNVERIFIED NotificationChannel_VerificationStatus = 1
+ // It has been proven that notifications can be received on this
+ // notification channel and that someone on the project has access
+ // to messages that are delivered to that channel.
+ NotificationChannel_VERIFIED NotificationChannel_VerificationStatus = 2
+)
+
+// Enum value maps for NotificationChannel_VerificationStatus.
+var (
+ NotificationChannel_VerificationStatus_name = map[int32]string{
+ 0: "VERIFICATION_STATUS_UNSPECIFIED",
+ 1: "UNVERIFIED",
+ 2: "VERIFIED",
+ }
+ NotificationChannel_VerificationStatus_value = map[string]int32{
+ "VERIFICATION_STATUS_UNSPECIFIED": 0,
+ "UNVERIFIED": 1,
+ "VERIFIED": 2,
+ }
+)
+
+func (x NotificationChannel_VerificationStatus) Enum() *NotificationChannel_VerificationStatus {
+ p := new(NotificationChannel_VerificationStatus)
+ *p = x
+ return p
+}
+
+func (x NotificationChannel_VerificationStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (NotificationChannel_VerificationStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_notification_proto_enumTypes[0].Descriptor()
+}
+
+func (NotificationChannel_VerificationStatus) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_notification_proto_enumTypes[0]
+}
+
+func (x NotificationChannel_VerificationStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use NotificationChannel_VerificationStatus.Descriptor instead.
+func (NotificationChannel_VerificationStatus) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// A description of a notification channel. The descriptor includes
+// the properties of the channel and the set of labels or fields that
+// must be specified to configure channels of a given type.
+type NotificationChannelDescriptor struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The full REST resource name for this descriptor. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[TYPE]
+ //
+ // In the above, `[TYPE]` is the value of the `type` field.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // The type of notification channel, such as "email" and "sms". To view the
+ // full list of channels, see
+ // [Channel
+ // descriptors](https://cloud.google.com/monitoring/alerts/using-channels-api#ncd).
+ // Notification channel types are globally unique.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // A human-readable name for the notification channel type. This
+ // form of the name is suitable for a user interface.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // A human-readable description of the notification channel
+ // type. The description may include a description of the properties
+ // of the channel and pointers to external documentation.
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ // The set of labels that must be defined to identify a particular
+ // channel of the corresponding type. Each label includes a
+ // description for how that field should be populated.
+ Labels []*label.LabelDescriptor `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty"`
+ // The tiers that support this notification channel; the project service tier
+ // must be one of the supported_tiers.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto.
+ SupportedTiers []ServiceTier `protobuf:"varint,5,rep,packed,name=supported_tiers,json=supportedTiers,proto3,enum=google.monitoring.v3.ServiceTier" json:"supported_tiers,omitempty"`
+ // The product launch stage for channels of this type.
+ LaunchStage api.LaunchStage `protobuf:"varint,7,opt,name=launch_stage,json=launchStage,proto3,enum=google.api.LaunchStage" json:"launch_stage,omitempty"`
+}
+
+func (x *NotificationChannelDescriptor) Reset() {
+ *x = NotificationChannelDescriptor{}
+ mi := &file_google_monitoring_v3_notification_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *NotificationChannelDescriptor) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NotificationChannelDescriptor) ProtoMessage() {}
+
+func (x *NotificationChannelDescriptor) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NotificationChannelDescriptor.ProtoReflect.Descriptor instead.
+func (*NotificationChannelDescriptor) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *NotificationChannelDescriptor) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *NotificationChannelDescriptor) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *NotificationChannelDescriptor) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *NotificationChannelDescriptor) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *NotificationChannelDescriptor) GetLabels() []*label.LabelDescriptor {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/notification.proto.
+func (x *NotificationChannelDescriptor) GetSupportedTiers() []ServiceTier {
+ if x != nil {
+ return x.SupportedTiers
+ }
+ return nil
+}
+
+func (x *NotificationChannelDescriptor) GetLaunchStage() api.LaunchStage {
+ if x != nil {
+ return x.LaunchStage
+ }
+ return api.LaunchStage(0)
+}
+
+// A `NotificationChannel` is a medium through which an alert is
+// delivered when a policy violation is detected. Examples of channels
+// include email, SMS, and third-party messaging applications. Fields
+// containing sensitive information like authentication tokens or
+// contact info are only partially populated on retrieval.
+type NotificationChannel struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of the notification channel. This field matches the
+ // value of the
+ // [NotificationChannelDescriptor.type][google.monitoring.v3.NotificationChannelDescriptor.type]
+ // field.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Identifier. The full REST resource name for this channel. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ //
+ // The `[CHANNEL_ID]` is automatically assigned by the server on creation.
+ Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"`
+ // An optional human-readable name for this notification channel. It is
+ // recommended that you specify a non-empty and unique name in order to
+ // make it easier to identify the channels in your project, though this is
+ // not enforced. The display name is limited to 512 Unicode characters.
+ DisplayName string `protobuf:"bytes,3,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // An optional human-readable description of this notification channel. This
+ // description may provide additional details, beyond the display
+ // name, for the channel. This may not exceed 1024 Unicode characters.
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+ // Configuration fields that define the channel and its behavior. The
+ // permissible and required labels are specified in the
+ // [NotificationChannelDescriptor.labels][google.monitoring.v3.NotificationChannelDescriptor.labels]
+ // of the `NotificationChannelDescriptor` corresponding to the `type` field.
+ Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // User-supplied key/value data that does not need to conform to
+ // the corresponding `NotificationChannelDescriptor`'s schema, unlike
+ // the `labels` field. This field is intended to be used for organizing
+ // and identifying the `NotificationChannel` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ UserLabels map[string]string `protobuf:"bytes,8,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Indicates whether this channel has been verified or not. On a
+ // [`ListNotificationChannels`][google.monitoring.v3.NotificationChannelService.ListNotificationChannels]
+ // or
+ // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
+ // operation, this field is expected to be populated.
+ //
+ // If the value is `UNVERIFIED`, then it indicates that the channel is
+ // non-functioning (it both requires verification and lacks verification);
+ // otherwise, it is assumed that the channel works.
+ //
+ // If the channel is neither `VERIFIED` nor `UNVERIFIED`, it implies that
+ // the channel is of a type that does not require verification or that
+ // this specific channel has been exempted from verification because it was
+ // created prior to verification being required for channels of this type.
+ //
+ // This field cannot be modified using a standard
+ // [`UpdateNotificationChannel`][google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel]
+ // operation. To change the value of this field, you must call
+ // [`VerifyNotificationChannel`][google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel].
+ VerificationStatus NotificationChannel_VerificationStatus `protobuf:"varint,9,opt,name=verification_status,json=verificationStatus,proto3,enum=google.monitoring.v3.NotificationChannel_VerificationStatus" json:"verification_status,omitempty"`
+ // Whether notifications are forwarded to the described channel. This makes
+ // it possible to disable delivery of notifications to a particular channel
+ // without removing the channel from all alerting policies that reference
+ // the channel. This is a more convenient approach when the change is
+ // temporary and you want to receive notifications from the same set
+ // of alerting policies on the channel at some point in the future.
+ Enabled *wrapperspb.BoolValue `protobuf:"bytes,11,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // Record of the creation of this channel.
+ CreationRecord *MutationRecord `protobuf:"bytes,12,opt,name=creation_record,json=creationRecord,proto3" json:"creation_record,omitempty"`
+ // Records of the modification of this channel.
+ MutationRecords []*MutationRecord `protobuf:"bytes,13,rep,name=mutation_records,json=mutationRecords,proto3" json:"mutation_records,omitempty"`
+}
+
+func (x *NotificationChannel) Reset() {
+ *x = NotificationChannel{}
+ mi := &file_google_monitoring_v3_notification_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *NotificationChannel) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NotificationChannel) ProtoMessage() {}
+
+func (x *NotificationChannel) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NotificationChannel.ProtoReflect.Descriptor instead.
+func (*NotificationChannel) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *NotificationChannel) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *NotificationChannel) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *NotificationChannel) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *NotificationChannel) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *NotificationChannel) GetLabels() map[string]string {
+ if x != nil {
+ return x.Labels
+ }
+ return nil
+}
+
+func (x *NotificationChannel) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+func (x *NotificationChannel) GetVerificationStatus() NotificationChannel_VerificationStatus {
+ if x != nil {
+ return x.VerificationStatus
+ }
+ return NotificationChannel_VERIFICATION_STATUS_UNSPECIFIED
+}
+
+func (x *NotificationChannel) GetEnabled() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.Enabled
+ }
+ return nil
+}
+
+func (x *NotificationChannel) GetCreationRecord() *MutationRecord {
+ if x != nil {
+ return x.CreationRecord
+ }
+ return nil
+}
+
+func (x *NotificationChannel) GetMutationRecords() []*MutationRecord {
+ if x != nil {
+ return x.MutationRecords
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_notification_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_notification_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x75, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x22, 0xf0, 0x04, 0x0a, 0x1d, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64,
+ 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20,
+ 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x33, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61,
+ 0x62, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x06, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x4e, 0x0a, 0x0f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
+ 0x65, 0x64, 0x5f, 0x74, 0x69, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x21,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x69, 0x65,
+ 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0e, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64,
+ 0x54, 0x69, 0x65, 0x72, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
+ 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
+ 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
+ 0x65, 0x3a, 0xa0, 0x02, 0xea, 0x41, 0x9c, 0x02, 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x12, 0x46, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x50, 0x6f, 0x72, 0x67, 0x61, 0x6e,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x7d, 0x12, 0x44, 0x66, 0x6f, 0x6c,
+ 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x7b, 0x63, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x7d, 0x12, 0x01, 0x2a, 0x22, 0xcb, 0x08, 0x0a, 0x13, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73,
+ 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4d,
+ 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5a, 0x0a,
+ 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x08, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x2e, 0x55, 0x73,
+ 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75,
+ 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x6d, 0x0a, 0x13, 0x76, 0x65, 0x72,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x12, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62,
+ 0x6c, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x4d,
+ 0x0a, 0x0f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72,
+ 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
+ 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0e, 0x63,
+ 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x4f, 0x0a,
+ 0x10, 0x6d, 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
+ 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
+ 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x0f, 0x6d,
+ 0x75, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x1a, 0x39,
+ 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65,
+ 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x57, 0x0a, 0x12, 0x56, 0x65, 0x72, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23,
+ 0x0a, 0x1f, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53,
+ 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
+ 0x02, 0x3a, 0xfe, 0x01, 0xea, 0x41, 0xfa, 0x01, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x3e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73,
+ 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12, 0x48, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x7d, 0x12, 0x3c, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x7d, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x7d, 0x12,
+ 0x01, 0x2a, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42,
+ 0x11, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56,
+ 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56,
+ 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_notification_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_notification_proto_rawDescData = file_google_monitoring_v3_notification_proto_rawDesc
+)
+
+func file_google_monitoring_v3_notification_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_notification_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_notification_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_notification_proto_rawDescData
+}
+
+var file_google_monitoring_v3_notification_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_monitoring_v3_notification_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_google_monitoring_v3_notification_proto_goTypes = []any{
+ (NotificationChannel_VerificationStatus)(0), // 0: google.monitoring.v3.NotificationChannel.VerificationStatus
+ (*NotificationChannelDescriptor)(nil), // 1: google.monitoring.v3.NotificationChannelDescriptor
+ (*NotificationChannel)(nil), // 2: google.monitoring.v3.NotificationChannel
+ nil, // 3: google.monitoring.v3.NotificationChannel.LabelsEntry
+ nil, // 4: google.monitoring.v3.NotificationChannel.UserLabelsEntry
+ (*label.LabelDescriptor)(nil), // 5: google.api.LabelDescriptor
+ (ServiceTier)(0), // 6: google.monitoring.v3.ServiceTier
+ (api.LaunchStage)(0), // 7: google.api.LaunchStage
+ (*wrapperspb.BoolValue)(nil), // 8: google.protobuf.BoolValue
+ (*MutationRecord)(nil), // 9: google.monitoring.v3.MutationRecord
+}
+var file_google_monitoring_v3_notification_proto_depIdxs = []int32{
+ 5, // 0: google.monitoring.v3.NotificationChannelDescriptor.labels:type_name -> google.api.LabelDescriptor
+ 6, // 1: google.monitoring.v3.NotificationChannelDescriptor.supported_tiers:type_name -> google.monitoring.v3.ServiceTier
+ 7, // 2: google.monitoring.v3.NotificationChannelDescriptor.launch_stage:type_name -> google.api.LaunchStage
+ 3, // 3: google.monitoring.v3.NotificationChannel.labels:type_name -> google.monitoring.v3.NotificationChannel.LabelsEntry
+ 4, // 4: google.monitoring.v3.NotificationChannel.user_labels:type_name -> google.monitoring.v3.NotificationChannel.UserLabelsEntry
+ 0, // 5: google.monitoring.v3.NotificationChannel.verification_status:type_name -> google.monitoring.v3.NotificationChannel.VerificationStatus
+ 8, // 6: google.monitoring.v3.NotificationChannel.enabled:type_name -> google.protobuf.BoolValue
+ 9, // 7: google.monitoring.v3.NotificationChannel.creation_record:type_name -> google.monitoring.v3.MutationRecord
+ 9, // 8: google.monitoring.v3.NotificationChannel.mutation_records:type_name -> google.monitoring.v3.MutationRecord
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_notification_proto_init() }
+func file_google_monitoring_v3_notification_proto_init() {
+ if File_google_monitoring_v3_notification_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ file_google_monitoring_v3_mutation_record_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_notification_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_notification_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_notification_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_notification_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_notification_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_notification_proto = out.File
+ file_google_monitoring_v3_notification_proto_rawDesc = nil
+ file_google_monitoring_v3_notification_proto_goTypes = nil
+ file_google_monitoring_v3_notification_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go
new file mode 100644
index 000000000..fd0230036
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/notification_service.pb.go
@@ -0,0 +1,1819 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/notification_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `ListNotificationChannelDescriptors` request.
+type ListNotificationChannelDescriptorsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The REST resource name of the parent from which to retrieve
+ // the notification channel descriptors. The expected syntax is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // Note that this
+ // [names](https://cloud.google.com/monitoring/api/v3#project_name) the parent
+ // container in which to look for the descriptors; to retrieve a single
+ // descriptor by name, use the
+ // [GetNotificationChannelDescriptor][google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor]
+ // operation, instead.
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // The maximum number of results to return in a single response. If
+ // not set to a positive number, a reasonable value will be chosen by the
+ // service.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If non-empty, `page_token` must contain a value returned as the
+ // `next_page_token` in a previous response to request the next set
+ // of results.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) Reset() {
+ *x = ListNotificationChannelDescriptorsRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListNotificationChannelDescriptorsRequest) ProtoMessage() {}
+
+func (x *ListNotificationChannelDescriptorsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListNotificationChannelDescriptorsRequest.ProtoReflect.Descriptor instead.
+func (*ListNotificationChannelDescriptorsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListNotificationChannelDescriptorsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListNotificationChannelDescriptors` response.
+type ListNotificationChannelDescriptorsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The monitored resource descriptors supported for the specified
+ // project, optionally filtered.
+ ChannelDescriptors []*NotificationChannelDescriptor `protobuf:"bytes,1,rep,name=channel_descriptors,json=channelDescriptors,proto3" json:"channel_descriptors,omitempty"`
+ // If not empty, indicates that there may be more results that match
+ // the request. Use the value in the `page_token` field in a
+ // subsequent request to fetch the next set of results. If empty,
+ // all results have been returned.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListNotificationChannelDescriptorsResponse) Reset() {
+ *x = ListNotificationChannelDescriptorsResponse{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListNotificationChannelDescriptorsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListNotificationChannelDescriptorsResponse) ProtoMessage() {}
+
+func (x *ListNotificationChannelDescriptorsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListNotificationChannelDescriptorsResponse.ProtoReflect.Descriptor instead.
+func (*ListNotificationChannelDescriptorsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListNotificationChannelDescriptorsResponse) GetChannelDescriptors() []*NotificationChannelDescriptor {
+ if x != nil {
+ return x.ChannelDescriptors
+ }
+ return nil
+}
+
+func (x *ListNotificationChannelDescriptorsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `GetNotificationChannelDescriptor` response.
+type GetNotificationChannelDescriptorRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The channel type for which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannelDescriptors/[CHANNEL_TYPE]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetNotificationChannelDescriptorRequest) Reset() {
+ *x = GetNotificationChannelDescriptorRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetNotificationChannelDescriptorRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelDescriptorRequest) ProtoMessage() {}
+
+func (x *GetNotificationChannelDescriptorRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelDescriptorRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelDescriptorRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GetNotificationChannelDescriptorRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `CreateNotificationChannel` request.
+type CreateNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // This names the container into which the channel will be
+ // written, this does not name the newly created channel. The resulting
+ // channel's name will have a normalized version of this field as a prefix,
+ // but will add `/notificationChannels/[CHANNEL_ID]` to identify the channel.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The definition of the `NotificationChannel` to create.
+ NotificationChannel *NotificationChannel `protobuf:"bytes,2,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"`
+}
+
+func (x *CreateNotificationChannelRequest) Reset() {
+ *x = CreateNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateNotificationChannelRequest) ProtoMessage() {}
+
+func (x *CreateNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*CreateNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *CreateNotificationChannelRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CreateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
+ if x != nil {
+ return x.NotificationChannel
+ }
+ return nil
+}
+
+// The `ListNotificationChannels` request.
+type ListNotificationChannelsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) on which
+ // to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ //
+ // This names the container
+ // in which to look for the notification channels; it does not name a
+ // specific channel. To query a specific channel by REST resource name, use
+ // the
+ // [`GetNotificationChannel`][google.monitoring.v3.NotificationChannelService.GetNotificationChannel]
+ // operation.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // Optional. If provided, this field specifies the criteria that must be met
+ // by notification channels to be included in the response.
+ //
+ // For more details, see [sorting and
+ // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
+ Filter string `protobuf:"bytes,6,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. A comma-separated list of fields by which to sort the result.
+ // Supports the same set of fields as in `filter`. Entries can be prefixed
+ // with a minus sign to sort in descending rather than ascending order.
+ //
+ // For more details, see [sorting and
+ // filtering](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering).
+ OrderBy string `protobuf:"bytes,7,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
+ // Optional. The maximum number of results to return in a single response. If
+ // not set to a positive number, a reasonable value will be chosen by the
+ // service.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. If non-empty, `page_token` must contain a value returned as the
+ // `next_page_token` in a previous response to request the next set
+ // of results.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListNotificationChannelsRequest) Reset() {
+ *x = ListNotificationChannelsRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListNotificationChannelsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListNotificationChannelsRequest) ProtoMessage() {}
+
+func (x *ListNotificationChannelsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListNotificationChannelsRequest.ProtoReflect.Descriptor instead.
+func (*ListNotificationChannelsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ListNotificationChannelsRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelsRequest) GetOrderBy() string {
+ if x != nil {
+ return x.OrderBy
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListNotificationChannelsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListNotificationChannels` response.
+type ListNotificationChannelsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The notification channels defined for the specified project.
+ NotificationChannels []*NotificationChannel `protobuf:"bytes,3,rep,name=notification_channels,json=notificationChannels,proto3" json:"notification_channels,omitempty"`
+ // If not empty, indicates that there may be more results that match
+ // the request. Use the value in the `page_token` field in a
+ // subsequent request to fetch the next set of results. If empty,
+ // all results have been returned.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of notification channels in all pages. This number is only
+ // an estimate, and may change in subsequent pages. https://aip.dev/158
+ TotalSize int32 `protobuf:"varint,4,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+}
+
+func (x *ListNotificationChannelsResponse) Reset() {
+ *x = ListNotificationChannelsResponse{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListNotificationChannelsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListNotificationChannelsResponse) ProtoMessage() {}
+
+func (x *ListNotificationChannelsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListNotificationChannelsResponse.ProtoReflect.Descriptor instead.
+func (*ListNotificationChannelsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListNotificationChannelsResponse) GetNotificationChannels() []*NotificationChannel {
+ if x != nil {
+ return x.NotificationChannels
+ }
+ return nil
+}
+
+func (x *ListNotificationChannelsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListNotificationChannelsResponse) GetTotalSize() int32 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+// The `GetNotificationChannel` request.
+type GetNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The channel for which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetNotificationChannelRequest) Reset() {
+ *x = GetNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelRequest) ProtoMessage() {}
+
+func (x *GetNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *GetNotificationChannelRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `UpdateNotificationChannel` request.
+type UpdateNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. The fields to update.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. A description of the changes to be applied to the specified
+ // notification channel. The description must provide a definition for
+ // fields to be updated; the names of these fields should also be
+ // included in the `update_mask`.
+ NotificationChannel *NotificationChannel `protobuf:"bytes,3,opt,name=notification_channel,json=notificationChannel,proto3" json:"notification_channel,omitempty"`
+}
+
+func (x *UpdateNotificationChannelRequest) Reset() {
+ *x = UpdateNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateNotificationChannelRequest) ProtoMessage() {}
+
+func (x *UpdateNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*UpdateNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *UpdateNotificationChannelRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+func (x *UpdateNotificationChannelRequest) GetNotificationChannel() *NotificationChannel {
+ if x != nil {
+ return x.NotificationChannel
+ }
+ return nil
+}
+
+// The `DeleteNotificationChannel` request.
+type DeleteNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The channel for which to execute the request. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/notificationChannels/[CHANNEL_ID]
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // If true, the notification channel will be deleted regardless of its
+ // use in alert policies (the policies will be updated to remove the
+ // channel). If false, this operation will fail if the notification channel
+ // is referenced by existing alerting policies.
+ Force bool `protobuf:"varint,5,opt,name=force,proto3" json:"force,omitempty"`
+}
+
+func (x *DeleteNotificationChannelRequest) Reset() {
+ *x = DeleteNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteNotificationChannelRequest) ProtoMessage() {}
+
+func (x *DeleteNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*DeleteNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *DeleteNotificationChannelRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *DeleteNotificationChannelRequest) GetForce() bool {
+ if x != nil {
+ return x.Force
+ }
+ return false
+}
+
+// The `SendNotificationChannelVerificationCode` request.
+type SendNotificationChannelVerificationCodeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The notification channel to which to send a verification code.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *SendNotificationChannelVerificationCodeRequest) Reset() {
+ *x = SendNotificationChannelVerificationCodeRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SendNotificationChannelVerificationCodeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SendNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+
+func (x *SendNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SendNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead.
+func (*SendNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *SendNotificationChannelVerificationCodeRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `GetNotificationChannelVerificationCode` request.
+type GetNotificationChannelVerificationCodeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The notification channel for which a verification code is to be
+ // generated and retrieved. This must name a channel that is already verified;
+ // if the specified channel is not verified, the request will fail.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The desired expiration time. If specified, the API will guarantee that
+ // the returned code will not be valid after the specified timestamp;
+ // however, the API cannot guarantee that the returned code will be
+ // valid for at least as long as the requested time (the API puts an upper
+ // bound on the amount of time for which a code may be valid). If omitted,
+ // a default expiration will be used, which may be less than the max
+ // permissible expiration (so specifying an expiration may extend the
+ // code's lifetime over omitting an expiration, even though the API does
+ // impose an upper limit on the maximum expiration that is permitted).
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) Reset() {
+ *x = GetNotificationChannelVerificationCodeRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelVerificationCodeRequest) ProtoMessage() {}
+
+func (x *GetNotificationChannelVerificationCodeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelVerificationCodeRequest.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelVerificationCodeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *GetNotificationChannelVerificationCodeRequest) GetExpireTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpireTime
+ }
+ return nil
+}
+
+// The `GetNotificationChannelVerificationCode` request.
+type GetNotificationChannelVerificationCodeResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The verification code, which may be used to verify other channels
+ // that have an equivalent identity (i.e. other channels of the same
+ // type with the same fingerprint such as other email channels with
+ // the same email address or other sms channels with the same number).
+ Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"`
+ // The expiration time associated with the code that was returned. If
+ // an expiration was provided in the request, this is the minimum of the
+ // requested expiration in the request and the max permitted expiration.
+ ExpireTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
+}
+
+func (x *GetNotificationChannelVerificationCodeResponse) Reset() {
+ *x = GetNotificationChannelVerificationCodeResponse{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetNotificationChannelVerificationCodeResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetNotificationChannelVerificationCodeResponse) ProtoMessage() {}
+
+func (x *GetNotificationChannelVerificationCodeResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetNotificationChannelVerificationCodeResponse.ProtoReflect.Descriptor instead.
+func (*GetNotificationChannelVerificationCodeResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *GetNotificationChannelVerificationCodeResponse) GetCode() string {
+ if x != nil {
+ return x.Code
+ }
+ return ""
+}
+
+func (x *GetNotificationChannelVerificationCodeResponse) GetExpireTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpireTime
+ }
+ return nil
+}
+
+// The `VerifyNotificationChannel` request.
+type VerifyNotificationChannelRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The notification channel to verify.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. The verification code that was delivered to the channel as
+ // a result of invoking the `SendNotificationChannelVerificationCode` API
+ // method or that was retrieved from a verified channel via
+ // `GetNotificationChannelVerificationCode`. For example, one might have
+ // "G-123456" or "TKNZGhhd2EyN3I1MnRnMjRv" (in general, one is only
+ // guaranteed that the code is valid UTF-8; one should not
+ // make any assumptions regarding the structure or format of the code).
+ Code string `protobuf:"bytes,2,opt,name=code,proto3" json:"code,omitempty"`
+}
+
+func (x *VerifyNotificationChannelRequest) Reset() {
+ *x = VerifyNotificationChannelRequest{}
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *VerifyNotificationChannelRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VerifyNotificationChannelRequest) ProtoMessage() {}
+
+func (x *VerifyNotificationChannelRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_notification_service_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VerifyNotificationChannelRequest.ProtoReflect.Descriptor instead.
+func (*VerifyNotificationChannelRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_notification_service_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *VerifyNotificationChannelRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *VerifyNotificationChannelRequest) GetCode() string {
+ if x != nil {
+ return x.Code
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_notification_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_notification_service_proto_rawDesc = []byte{
+ 0x0a, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64,
+ 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0xbc, 0x01, 0x0a, 0x29, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39, 0x12, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x22, 0xba, 0x01, 0x0a, 0x2a, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x64, 0x0a, 0x13, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x5f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x52, 0x12, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22,
+ 0x7e, 0x0a, 0x27, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x3f, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x39,
+ 0x0a, 0x37, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22,
+ 0xd0, 0x01, 0x0a, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x13, 0x6e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x22, 0xef, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x12, 0x2d, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1e,
+ 0x0a, 0x08, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x5f, 0x62, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x42, 0x79, 0x12, 0x20,
+ 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65,
+ 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc9, 0x01, 0x0a, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x15, 0x6e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
+ 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65,
+ 0x22, 0x6a, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc7, 0x01, 0x0a,
+ 0x20, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
+ 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d,
+ 0x61, 0x73, 0x6b, 0x12, 0x61, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x83, 0x01, 0x0a, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x22, 0x7b, 0x0a, 0x2e,
+ 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41,
+ 0x02, 0xfa, 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xb7, 0x01, 0x0a, 0x2d, 0x47, 0x65,
+ 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x54,
+ 0x69, 0x6d, 0x65, 0x22, 0x81, 0x01, 0x0a, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65,
+ 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x78,
+ 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70,
+ 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x86, 0x01, 0x0a, 0x20, 0x56, 0x65, 0x72, 0x69,
+ 0x66, 0x79, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x35, 0xe0, 0x41, 0x02, 0xfa,
+ 0x41, 0x2f, 0x0a, 0x2d, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65,
+ 0x32, 0xea, 0x12, 0x0a, 0x1a, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
+ 0xec, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c,
+ 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x12, 0x34, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x12, 0xdd,
+ 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x6f, 0x72, 0x12, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x44, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x22, 0x45, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xc4,
+ 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, 0xda, 0x41, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61,
+ 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x12, 0xb5, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c,
+ 0x22, 0x3b, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12,
+ 0x2c, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xe4, 0x01,
+ 0x0a, 0x19, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66,
+ 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x64,
+ 0xda, 0x41, 0x19, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93,
+ 0x02, 0x42, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x2a, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
+ 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x73, 0x12, 0x83, 0x02, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x82, 0x01, 0xda, 0x41, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x59, 0x3a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x32, 0x41, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x19, 0x44,
+ 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, 0xda, 0x41, 0x0a, 0x6e, 0x61, 0x6d,
+ 0x65, 0x2c, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f,
+ 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xdc, 0x01, 0x0a, 0x27,
+ 0x53, 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x6e, 0x64, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x53, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3,
+ 0xe4, 0x93, 0x02, 0x46, 0x3a, 0x01, 0x2a, 0x22, 0x41, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65,
+ 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x6e, 0x64, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x87, 0x02, 0x0a, 0x26, 0x47,
+ 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x43, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x6f, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x44, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x22, 0x52, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x45, 0x3a,
+ 0x01, 0x2a, 0x22, 0x40, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x3a, 0x67, 0x65, 0x74, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x64, 0x65, 0x12, 0xca, 0x01, 0x0a, 0x19, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x4e,
+ 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e, 0x6e,
+ 0x65, 0x6c, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79,
+ 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68, 0x61, 0x6e,
+ 0x6e, 0x65, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x68,
+ 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x22, 0x4a, 0xda, 0x41, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63,
+ 0x6f, 0x64, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x3a, 0x01, 0x2a, 0x22, 0x33, 0x2f, 0x76,
+ 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
+ 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x76, 0x65, 0x72, 0x69, 0x66,
+ 0x79, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
+ 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f,
+ 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75,
+ 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74,
+ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xd3, 0x01,
+ 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x18, 0x4e, 0x6f, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a,
+ 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_notification_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_notification_service_proto_rawDescData = file_google_monitoring_v3_notification_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_notification_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_notification_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_notification_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_notification_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_notification_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_notification_service_proto_msgTypes = make([]protoimpl.MessageInfo, 13)
+var file_google_monitoring_v3_notification_service_proto_goTypes = []any{
+ (*ListNotificationChannelDescriptorsRequest)(nil), // 0: google.monitoring.v3.ListNotificationChannelDescriptorsRequest
+ (*ListNotificationChannelDescriptorsResponse)(nil), // 1: google.monitoring.v3.ListNotificationChannelDescriptorsResponse
+ (*GetNotificationChannelDescriptorRequest)(nil), // 2: google.monitoring.v3.GetNotificationChannelDescriptorRequest
+ (*CreateNotificationChannelRequest)(nil), // 3: google.monitoring.v3.CreateNotificationChannelRequest
+ (*ListNotificationChannelsRequest)(nil), // 4: google.monitoring.v3.ListNotificationChannelsRequest
+ (*ListNotificationChannelsResponse)(nil), // 5: google.monitoring.v3.ListNotificationChannelsResponse
+ (*GetNotificationChannelRequest)(nil), // 6: google.monitoring.v3.GetNotificationChannelRequest
+ (*UpdateNotificationChannelRequest)(nil), // 7: google.monitoring.v3.UpdateNotificationChannelRequest
+ (*DeleteNotificationChannelRequest)(nil), // 8: google.monitoring.v3.DeleteNotificationChannelRequest
+ (*SendNotificationChannelVerificationCodeRequest)(nil), // 9: google.monitoring.v3.SendNotificationChannelVerificationCodeRequest
+ (*GetNotificationChannelVerificationCodeRequest)(nil), // 10: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest
+ (*GetNotificationChannelVerificationCodeResponse)(nil), // 11: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse
+ (*VerifyNotificationChannelRequest)(nil), // 12: google.monitoring.v3.VerifyNotificationChannelRequest
+ (*NotificationChannelDescriptor)(nil), // 13: google.monitoring.v3.NotificationChannelDescriptor
+ (*NotificationChannel)(nil), // 14: google.monitoring.v3.NotificationChannel
+ (*fieldmaskpb.FieldMask)(nil), // 15: google.protobuf.FieldMask
+ (*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp
+ (*emptypb.Empty)(nil), // 17: google.protobuf.Empty
+}
+var file_google_monitoring_v3_notification_service_proto_depIdxs = []int32{
+ 13, // 0: google.monitoring.v3.ListNotificationChannelDescriptorsResponse.channel_descriptors:type_name -> google.monitoring.v3.NotificationChannelDescriptor
+ 14, // 1: google.monitoring.v3.CreateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel
+ 14, // 2: google.monitoring.v3.ListNotificationChannelsResponse.notification_channels:type_name -> google.monitoring.v3.NotificationChannel
+ 15, // 3: google.monitoring.v3.UpdateNotificationChannelRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 14, // 4: google.monitoring.v3.UpdateNotificationChannelRequest.notification_channel:type_name -> google.monitoring.v3.NotificationChannel
+ 16, // 5: google.monitoring.v3.GetNotificationChannelVerificationCodeRequest.expire_time:type_name -> google.protobuf.Timestamp
+ 16, // 6: google.monitoring.v3.GetNotificationChannelVerificationCodeResponse.expire_time:type_name -> google.protobuf.Timestamp
+ 0, // 7: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:input_type -> google.monitoring.v3.ListNotificationChannelDescriptorsRequest
+ 2, // 8: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:input_type -> google.monitoring.v3.GetNotificationChannelDescriptorRequest
+ 4, // 9: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:input_type -> google.monitoring.v3.ListNotificationChannelsRequest
+ 6, // 10: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:input_type -> google.monitoring.v3.GetNotificationChannelRequest
+ 3, // 11: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:input_type -> google.monitoring.v3.CreateNotificationChannelRequest
+ 7, // 12: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:input_type -> google.monitoring.v3.UpdateNotificationChannelRequest
+ 8, // 13: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:input_type -> google.monitoring.v3.DeleteNotificationChannelRequest
+ 9, // 14: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:input_type -> google.monitoring.v3.SendNotificationChannelVerificationCodeRequest
+ 10, // 15: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:input_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeRequest
+ 12, // 16: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:input_type -> google.monitoring.v3.VerifyNotificationChannelRequest
+ 1, // 17: google.monitoring.v3.NotificationChannelService.ListNotificationChannelDescriptors:output_type -> google.monitoring.v3.ListNotificationChannelDescriptorsResponse
+ 13, // 18: google.monitoring.v3.NotificationChannelService.GetNotificationChannelDescriptor:output_type -> google.monitoring.v3.NotificationChannelDescriptor
+ 5, // 19: google.monitoring.v3.NotificationChannelService.ListNotificationChannels:output_type -> google.monitoring.v3.ListNotificationChannelsResponse
+ 14, // 20: google.monitoring.v3.NotificationChannelService.GetNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
+ 14, // 21: google.monitoring.v3.NotificationChannelService.CreateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
+ 14, // 22: google.monitoring.v3.NotificationChannelService.UpdateNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
+ 17, // 23: google.monitoring.v3.NotificationChannelService.DeleteNotificationChannel:output_type -> google.protobuf.Empty
+ 17, // 24: google.monitoring.v3.NotificationChannelService.SendNotificationChannelVerificationCode:output_type -> google.protobuf.Empty
+ 11, // 25: google.monitoring.v3.NotificationChannelService.GetNotificationChannelVerificationCode:output_type -> google.monitoring.v3.GetNotificationChannelVerificationCodeResponse
+ 14, // 26: google.monitoring.v3.NotificationChannelService.VerifyNotificationChannel:output_type -> google.monitoring.v3.NotificationChannel
+ 17, // [17:27] is the sub-list for method output_type
+ 7, // [7:17] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_notification_service_proto_init() }
+func file_google_monitoring_v3_notification_service_proto_init() {
+ if File_google_monitoring_v3_notification_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_notification_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_notification_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 13,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_notification_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_notification_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_notification_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_notification_service_proto = out.File
+ file_google_monitoring_v3_notification_service_proto_rawDesc = nil
+ file_google_monitoring_v3_notification_service_proto_goTypes = nil
+ file_google_monitoring_v3_notification_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// NotificationChannelServiceClient is the client API for NotificationChannelService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type NotificationChannelServiceClient interface {
+ // Lists the descriptors for supported channel types. The use of descriptors
+ // makes it possible for new channel types to be dynamically added.
+ ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error)
+ // Gets a single channel descriptor. The descriptor indicates which fields
+ // are expected / permitted for a notification channel of the given type.
+ GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error)
+ // Lists the notification channels that have been created for the project.
+ // To list the types of notification channels that are supported, use
+ // the `ListNotificationChannelDescriptors` method.
+ ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error)
+ // Gets a single notification channel. The channel includes the relevant
+ // configuration details with which the channel was created. However, the
+ // response may truncate or omit passwords, API keys, or other private key
+ // matter and thus the response may not be 100% identical to the information
+ // that was supplied in the call to the create method.
+ GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Creates a new notification channel, representing a single notification
+ // endpoint such as an email address, SMS number, or PagerDuty service.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Updates a notification channel. Fields not specified in the field mask
+ // remain unchanged.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+ // Deletes a notification channel.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Causes a verification code to be delivered to the channel. The code
+ // can then be supplied in `VerifyNotificationChannel` to verify the channel.
+ SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Requests a verification code for an already verified channel that can then
+ // be used in a call to VerifyNotificationChannel() on a different channel
+ // with an equivalent identity in the same or in a different project. This
+ // makes it possible to copy a channel between projects without requiring
+ // manual reverification of the channel. If the channel is not in the
+ // verified state, this method will fail (in other words, this may only be
+ // used if the SendNotificationChannelVerificationCode and
+ // VerifyNotificationChannel paths have already been used to put the given
+ // channel into the verified state).
+ //
+ // There is no guarantee that the verification codes returned by this method
+ // will be of a similar structure or form as the ones that are delivered
+ // to the channel via SendNotificationChannelVerificationCode; while
+ // VerifyNotificationChannel() will recognize both the codes delivered via
+ // SendNotificationChannelVerificationCode() and returned from
+ // GetNotificationChannelVerificationCode(), it is typically the case that
+ // the verification codes delivered via
+ // SendNotificationChannelVerificationCode() will be shorter and also
+ // have a shorter expiration (e.g. codes such as "G-123456") whereas
+ // GetVerificationCode() will typically return a much longer, websafe base
+ // 64 encoded string that has a longer expiration time.
+ GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error)
+ // Verifies a `NotificationChannel` by proving receipt of the code
+ // delivered to the channel as a result of calling
+ // `SendNotificationChannelVerificationCode`.
+ VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error)
+}
+
+type notificationChannelServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewNotificationChannelServiceClient(cc grpc.ClientConnInterface) NotificationChannelServiceClient {
+ return ¬ificationChannelServiceClient{cc}
+}
+
+func (c *notificationChannelServiceClient) ListNotificationChannelDescriptors(ctx context.Context, in *ListNotificationChannelDescriptorsRequest, opts ...grpc.CallOption) (*ListNotificationChannelDescriptorsResponse, error) {
+ out := new(ListNotificationChannelDescriptorsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannelDescriptor(ctx context.Context, in *GetNotificationChannelDescriptorRequest, opts ...grpc.CallOption) (*NotificationChannelDescriptor, error) {
+ out := new(NotificationChannelDescriptor)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) ListNotificationChannels(ctx context.Context, in *ListNotificationChannelsRequest, opts ...grpc.CallOption) (*ListNotificationChannelsResponse, error) {
+ out := new(ListNotificationChannelsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannel(ctx context.Context, in *GetNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) CreateNotificationChannel(ctx context.Context, in *CreateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) UpdateNotificationChannel(ctx context.Context, in *UpdateNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) DeleteNotificationChannel(ctx context.Context, in *DeleteNotificationChannelRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) SendNotificationChannelVerificationCode(ctx context.Context, in *SendNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) GetNotificationChannelVerificationCode(ctx context.Context, in *GetNotificationChannelVerificationCodeRequest, opts ...grpc.CallOption) (*GetNotificationChannelVerificationCodeResponse, error) {
+ out := new(GetNotificationChannelVerificationCodeResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *notificationChannelServiceClient) VerifyNotificationChannel(ctx context.Context, in *VerifyNotificationChannelRequest, opts ...grpc.CallOption) (*NotificationChannel, error) {
+ out := new(NotificationChannel)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// NotificationChannelServiceServer is the server API for NotificationChannelService service.
+type NotificationChannelServiceServer interface {
+ // Lists the descriptors for supported channel types. The use of descriptors
+ // makes it possible for new channel types to be dynamically added.
+ ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error)
+ // Gets a single channel descriptor. The descriptor indicates which fields
+ // are expected / permitted for a notification channel of the given type.
+ GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error)
+ // Lists the notification channels that have been created for the project.
+ // To list the types of notification channels that are supported, use
+ // the `ListNotificationChannelDescriptors` method.
+ ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error)
+ // Gets a single notification channel. The channel includes the relevant
+ // configuration details with which the channel was created. However, the
+ // response may truncate or omit passwords, API keys, or other private key
+ // matter and thus the response may not be 100% identical to the information
+ // that was supplied in the call to the create method.
+ GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error)
+ // Creates a new notification channel, representing a single notification
+ // endpoint such as an email address, SMS number, or PagerDuty service.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error)
+ // Updates a notification channel. Fields not specified in the field mask
+ // remain unchanged.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error)
+ // Deletes a notification channel.
+ //
+ // Design your application to single-thread API calls that modify the state of
+ // notification channels in a single project. This includes calls to
+ // CreateNotificationChannel, DeleteNotificationChannel and
+ // UpdateNotificationChannel.
+ DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error)
+ // Causes a verification code to be delivered to the channel. The code
+ // can then be supplied in `VerifyNotificationChannel` to verify the channel.
+ SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error)
+ // Requests a verification code for an already verified channel that can then
+ // be used in a call to VerifyNotificationChannel() on a different channel
+ // with an equivalent identity in the same or in a different project. This
+ // makes it possible to copy a channel between projects without requiring
+ // manual reverification of the channel. If the channel is not in the
+ // verified state, this method will fail (in other words, this may only be
+ // used if the SendNotificationChannelVerificationCode and
+ // VerifyNotificationChannel paths have already been used to put the given
+ // channel into the verified state).
+ //
+ // There is no guarantee that the verification codes returned by this method
+ // will be of a similar structure or form as the ones that are delivered
+ // to the channel via SendNotificationChannelVerificationCode; while
+ // VerifyNotificationChannel() will recognize both the codes delivered via
+ // SendNotificationChannelVerificationCode() and returned from
+ // GetNotificationChannelVerificationCode(), it is typically the case that
+ // the verification codes delivered via
+ // SendNotificationChannelVerificationCode() will be shorter and also
+ // have a shorter expiration (e.g. codes such as "G-123456") whereas
+ // GetVerificationCode() will typically return a much longer, websafe base
+ // 64 encoded string that has a longer expiration time.
+ GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error)
+ // Verifies a `NotificationChannel` by proving receipt of the code
+ // delivered to the channel as a result of calling
+ // `SendNotificationChannelVerificationCode`.
+ VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error)
+}
+
+// UnimplementedNotificationChannelServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedNotificationChannelServiceServer struct {
+}
+
+func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannelDescriptors(context.Context, *ListNotificationChannelDescriptorsRequest) (*ListNotificationChannelDescriptorsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannelDescriptors not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelDescriptor(context.Context, *GetNotificationChannelDescriptorRequest) (*NotificationChannelDescriptor, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelDescriptor not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) ListNotificationChannels(context.Context, *ListNotificationChannelsRequest) (*ListNotificationChannelsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListNotificationChannels not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannel(context.Context, *GetNotificationChannelRequest) (*NotificationChannel, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannel not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) CreateNotificationChannel(context.Context, *CreateNotificationChannelRequest) (*NotificationChannel, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationChannel not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) UpdateNotificationChannel(context.Context, *UpdateNotificationChannelRequest) (*NotificationChannel, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateNotificationChannel not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) DeleteNotificationChannel(context.Context, *DeleteNotificationChannelRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationChannel not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) SendNotificationChannelVerificationCode(context.Context, *SendNotificationChannelVerificationCodeRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method SendNotificationChannelVerificationCode not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) GetNotificationChannelVerificationCode(context.Context, *GetNotificationChannelVerificationCodeRequest) (*GetNotificationChannelVerificationCodeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetNotificationChannelVerificationCode not implemented")
+}
+func (*UnimplementedNotificationChannelServiceServer) VerifyNotificationChannel(context.Context, *VerifyNotificationChannelRequest) (*NotificationChannel, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method VerifyNotificationChannel not implemented")
+}
+
+func RegisterNotificationChannelServiceServer(s *grpc.Server, srv NotificationChannelServiceServer) {
+ s.RegisterService(&_NotificationChannelService_serviceDesc, srv)
+}
+
+func _NotificationChannelService_ListNotificationChannelDescriptors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationChannelDescriptorsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannelDescriptors",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannelDescriptors(ctx, req.(*ListNotificationChannelDescriptorsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannelDescriptor_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelDescriptorRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelDescriptor",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelDescriptor(ctx, req.(*GetNotificationChannelDescriptorRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_ListNotificationChannels_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListNotificationChannelsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/ListNotificationChannels",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).ListNotificationChannels(ctx, req.(*ListNotificationChannelsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannel(ctx, req.(*GetNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_CreateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/CreateNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).CreateNotificationChannel(ctx, req.(*CreateNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_UpdateNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/UpdateNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).UpdateNotificationChannel(ctx, req.(*UpdateNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_DeleteNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/DeleteNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).DeleteNotificationChannel(ctx, req.(*DeleteNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_SendNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(SendNotificationChannelVerificationCodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/SendNotificationChannelVerificationCode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).SendNotificationChannelVerificationCode(ctx, req.(*SendNotificationChannelVerificationCodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_GetNotificationChannelVerificationCode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetNotificationChannelVerificationCodeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/GetNotificationChannelVerificationCode",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).GetNotificationChannelVerificationCode(ctx, req.(*GetNotificationChannelVerificationCodeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _NotificationChannelService_VerifyNotificationChannel_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(VerifyNotificationChannelRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.NotificationChannelService/VerifyNotificationChannel",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(NotificationChannelServiceServer).VerifyNotificationChannel(ctx, req.(*VerifyNotificationChannelRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _NotificationChannelService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.NotificationChannelService",
+ HandlerType: (*NotificationChannelServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListNotificationChannelDescriptors",
+ Handler: _NotificationChannelService_ListNotificationChannelDescriptors_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannelDescriptor",
+ Handler: _NotificationChannelService_GetNotificationChannelDescriptor_Handler,
+ },
+ {
+ MethodName: "ListNotificationChannels",
+ Handler: _NotificationChannelService_ListNotificationChannels_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannel",
+ Handler: _NotificationChannelService_GetNotificationChannel_Handler,
+ },
+ {
+ MethodName: "CreateNotificationChannel",
+ Handler: _NotificationChannelService_CreateNotificationChannel_Handler,
+ },
+ {
+ MethodName: "UpdateNotificationChannel",
+ Handler: _NotificationChannelService_UpdateNotificationChannel_Handler,
+ },
+ {
+ MethodName: "DeleteNotificationChannel",
+ Handler: _NotificationChannelService_DeleteNotificationChannel_Handler,
+ },
+ {
+ MethodName: "SendNotificationChannelVerificationCode",
+ Handler: _NotificationChannelService_SendNotificationChannelVerificationCode_Handler,
+ },
+ {
+ MethodName: "GetNotificationChannelVerificationCode",
+ Handler: _NotificationChannelService_GetNotificationChannelVerificationCode_Handler,
+ },
+ {
+ MethodName: "VerifyNotificationChannel",
+ Handler: _NotificationChannelService_VerifyNotificationChannel_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/notification_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go
new file mode 100644
index 000000000..6402f18ca
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/query_service.pb.go
@@ -0,0 +1,221 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/query_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var File_google_monitoring_v3_query_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_query_service_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65,
+ 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x32, 0xe1, 0x02, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x0f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x51, 0x75, 0x65,
+ 0x72, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x3a, 0x01, 0x2a, 0x22,
+ 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65,
+ 0x73, 0x3a, 0x71, 0x75, 0x65, 0x72, 0x79, 0x88, 0x02, 0x01, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74,
+ 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70,
+ 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcc, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x42, 0x11, 0x51, 0x75, 0x65, 0x72, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76,
+ 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_google_monitoring_v3_query_service_proto_goTypes = []any{
+ (*QueryTimeSeriesRequest)(nil), // 0: google.monitoring.v3.QueryTimeSeriesRequest
+ (*QueryTimeSeriesResponse)(nil), // 1: google.monitoring.v3.QueryTimeSeriesResponse
+}
+var file_google_monitoring_v3_query_service_proto_depIdxs = []int32{
+ 0, // 0: google.monitoring.v3.QueryService.QueryTimeSeries:input_type -> google.monitoring.v3.QueryTimeSeriesRequest
+ 1, // 1: google.monitoring.v3.QueryService.QueryTimeSeries:output_type -> google.monitoring.v3.QueryTimeSeriesResponse
+ 1, // [1:2] is the sub-list for method output_type
+ 0, // [0:1] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_query_service_proto_init() }
+func file_google_monitoring_v3_query_service_proto_init() {
+ if File_google_monitoring_v3_query_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_metric_service_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_query_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_query_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_query_service_proto_depIdxs,
+ }.Build()
+ File_google_monitoring_v3_query_service_proto = out.File
+ file_google_monitoring_v3_query_service_proto_rawDesc = nil
+ file_google_monitoring_v3_query_service_proto_goTypes = nil
+ file_google_monitoring_v3_query_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// QueryServiceClient is the client API for QueryService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type QueryServiceClient interface {
+ // Deprecated: Do not use.
+ // Queries time series by using Monitoring Query Language (MQL). We recommend
+ // using PromQL instead of MQL. For more information about the status of MQL,
+ // see the [MQL deprecation
+ // notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
+ QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error)
+}
+
+type queryServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewQueryServiceClient(cc grpc.ClientConnInterface) QueryServiceClient {
+ return &queryServiceClient{cc}
+}
+
+// Deprecated: Do not use.
+func (c *queryServiceClient) QueryTimeSeries(ctx context.Context, in *QueryTimeSeriesRequest, opts ...grpc.CallOption) (*QueryTimeSeriesResponse, error) {
+ out := new(QueryTimeSeriesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.QueryService/QueryTimeSeries", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// QueryServiceServer is the server API for QueryService service.
+type QueryServiceServer interface {
+ // Deprecated: Do not use.
+ // Queries time series by using Monitoring Query Language (MQL). We recommend
+ // using PromQL instead of MQL. For more information about the status of MQL,
+ // see the [MQL deprecation
+ // notice](https://cloud.google.com/stackdriver/docs/deprecations/mql).
+ QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error)
+}
+
+// UnimplementedQueryServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedQueryServiceServer struct {
+}
+
+func (*UnimplementedQueryServiceServer) QueryTimeSeries(context.Context, *QueryTimeSeriesRequest) (*QueryTimeSeriesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method QueryTimeSeries not implemented")
+}
+
+func RegisterQueryServiceServer(s *grpc.Server, srv QueryServiceServer) {
+ s.RegisterService(&_QueryService_serviceDesc, srv)
+}
+
+func _QueryService_QueryTimeSeries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(QueryTimeSeriesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(QueryServiceServer).QueryTimeSeries(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.QueryService/QueryTimeSeries",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(QueryServiceServer).QueryTimeSeries(ctx, req.(*QueryTimeSeriesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _QueryService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.QueryService",
+ HandlerType: (*QueryServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "QueryTimeSeries",
+ Handler: _QueryService_QueryTimeSeries_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/query_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go
new file mode 100644
index 000000000..a9d2ae8cb
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service.pb.go
@@ -0,0 +1,2755 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/service.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ calendarperiod "google.golang.org/genproto/googleapis/type/calendarperiod"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// `ServiceLevelObjective.View` determines what form of
+// `ServiceLevelObjective` is returned from `GetServiceLevelObjective`,
+// `ListServiceLevelObjectives`, and `ListServiceLevelObjectiveVersions` RPCs.
+type ServiceLevelObjective_View int32
+
+const (
+ // Same as FULL.
+ ServiceLevelObjective_VIEW_UNSPECIFIED ServiceLevelObjective_View = 0
+ // Return the embedded `ServiceLevelIndicator` in the form in which it was
+ // defined. If it was defined using a `BasicSli`, return that `BasicSli`.
+ ServiceLevelObjective_FULL ServiceLevelObjective_View = 2
+ // For `ServiceLevelIndicator`s using `BasicSli` articulation, instead
+ // return the `ServiceLevelIndicator` with its mode of computation fully
+ // spelled out as a `RequestBasedSli`. For `ServiceLevelIndicator`s using
+ // `RequestBasedSli` or `WindowsBasedSli`, return the
+ // `ServiceLevelIndicator` as it was provided.
+ ServiceLevelObjective_EXPLICIT ServiceLevelObjective_View = 1
+)
+
+// Enum value maps for ServiceLevelObjective_View.
+var (
+ ServiceLevelObjective_View_name = map[int32]string{
+ 0: "VIEW_UNSPECIFIED",
+ 2: "FULL",
+ 1: "EXPLICIT",
+ }
+ ServiceLevelObjective_View_value = map[string]int32{
+ "VIEW_UNSPECIFIED": 0,
+ "FULL": 2,
+ "EXPLICIT": 1,
+ }
+)
+
+func (x ServiceLevelObjective_View) Enum() *ServiceLevelObjective_View {
+ p := new(ServiceLevelObjective_View)
+ *p = x
+ return p
+}
+
+func (x ServiceLevelObjective_View) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ServiceLevelObjective_View) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_service_proto_enumTypes[0].Descriptor()
+}
+
+func (ServiceLevelObjective_View) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_service_proto_enumTypes[0]
+}
+
+func (x ServiceLevelObjective_View) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ServiceLevelObjective_View.Descriptor instead.
+func (ServiceLevelObjective_View) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1, 0}
+}
+
+// A `Service` is a discrete, autonomous, and network-accessible unit, designed
+// to solve an individual concern
+// ([Wikipedia](https://en.wikipedia.org/wiki/Service-orientation)). In
+// Cloud Monitoring, a `Service` acts as the root resource under which
+// operational aspects of the service are accessible.
+type Service struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. Resource name for this Service. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Name used for UI elements listing this Service.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // REQUIRED. Service-identifying atoms specifying the underlying service.
+ //
+ // Types that are assignable to Identifier:
+ //
+ // *Service_Custom_
+ // *Service_AppEngine_
+ // *Service_CloudEndpoints_
+ // *Service_ClusterIstio_
+ // *Service_MeshIstio_
+ // *Service_IstioCanonicalService_
+ // *Service_CloudRun_
+ // *Service_GkeNamespace_
+ // *Service_GkeWorkload_
+ // *Service_GkeService_
+ Identifier isService_Identifier `protobuf_oneof:"identifier"`
+ // Message that contains the service type and service labels of this service
+ // if it is a basic service.
+ // Documentation and examples
+ // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).
+ BasicService *Service_BasicService `protobuf:"bytes,19,opt,name=basic_service,json=basicService,proto3" json:"basic_service,omitempty"`
+ // Configuration for how to query telemetry on a Service.
+ Telemetry *Service_Telemetry `protobuf:"bytes,13,opt,name=telemetry,proto3" json:"telemetry,omitempty"`
+ // Labels which have been used to annotate the service. Label keys must start
+ // with a letter. Label keys and values may contain lowercase letters,
+ // numbers, underscores, and dashes. Label keys and values have a maximum
+ // length of 63 characters, and must be less than 128 bytes in size. Up to 64
+ // label entries may be stored. For labels which do not have a semantic value,
+ // the empty string may be supplied for the label value.
+ UserLabels map[string]string `protobuf:"bytes,14,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Service) Reset() {
+ *x = Service{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service) ProtoMessage() {}
+
+func (x *Service) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service.ProtoReflect.Descriptor instead.
+func (*Service) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Service) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Service) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (m *Service) GetIdentifier() isService_Identifier {
+ if m != nil {
+ return m.Identifier
+ }
+ return nil
+}
+
+func (x *Service) GetCustom() *Service_Custom {
+ if x, ok := x.GetIdentifier().(*Service_Custom_); ok {
+ return x.Custom
+ }
+ return nil
+}
+
+func (x *Service) GetAppEngine() *Service_AppEngine {
+ if x, ok := x.GetIdentifier().(*Service_AppEngine_); ok {
+ return x.AppEngine
+ }
+ return nil
+}
+
+func (x *Service) GetCloudEndpoints() *Service_CloudEndpoints {
+ if x, ok := x.GetIdentifier().(*Service_CloudEndpoints_); ok {
+ return x.CloudEndpoints
+ }
+ return nil
+}
+
+func (x *Service) GetClusterIstio() *Service_ClusterIstio {
+ if x, ok := x.GetIdentifier().(*Service_ClusterIstio_); ok {
+ return x.ClusterIstio
+ }
+ return nil
+}
+
+func (x *Service) GetMeshIstio() *Service_MeshIstio {
+ if x, ok := x.GetIdentifier().(*Service_MeshIstio_); ok {
+ return x.MeshIstio
+ }
+ return nil
+}
+
+func (x *Service) GetIstioCanonicalService() *Service_IstioCanonicalService {
+ if x, ok := x.GetIdentifier().(*Service_IstioCanonicalService_); ok {
+ return x.IstioCanonicalService
+ }
+ return nil
+}
+
+func (x *Service) GetCloudRun() *Service_CloudRun {
+ if x, ok := x.GetIdentifier().(*Service_CloudRun_); ok {
+ return x.CloudRun
+ }
+ return nil
+}
+
+func (x *Service) GetGkeNamespace() *Service_GkeNamespace {
+ if x, ok := x.GetIdentifier().(*Service_GkeNamespace_); ok {
+ return x.GkeNamespace
+ }
+ return nil
+}
+
+func (x *Service) GetGkeWorkload() *Service_GkeWorkload {
+ if x, ok := x.GetIdentifier().(*Service_GkeWorkload_); ok {
+ return x.GkeWorkload
+ }
+ return nil
+}
+
+func (x *Service) GetGkeService() *Service_GkeService {
+ if x, ok := x.GetIdentifier().(*Service_GkeService_); ok {
+ return x.GkeService
+ }
+ return nil
+}
+
+func (x *Service) GetBasicService() *Service_BasicService {
+ if x != nil {
+ return x.BasicService
+ }
+ return nil
+}
+
+func (x *Service) GetTelemetry() *Service_Telemetry {
+ if x != nil {
+ return x.Telemetry
+ }
+ return nil
+}
+
+func (x *Service) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+type isService_Identifier interface {
+ isService_Identifier()
+}
+
+type Service_Custom_ struct {
+ // Custom service type.
+ Custom *Service_Custom `protobuf:"bytes,6,opt,name=custom,proto3,oneof"`
+}
+
+type Service_AppEngine_ struct {
+ // Type used for App Engine services.
+ AppEngine *Service_AppEngine `protobuf:"bytes,7,opt,name=app_engine,json=appEngine,proto3,oneof"`
+}
+
+type Service_CloudEndpoints_ struct {
+ // Type used for Cloud Endpoints services.
+ CloudEndpoints *Service_CloudEndpoints `protobuf:"bytes,8,opt,name=cloud_endpoints,json=cloudEndpoints,proto3,oneof"`
+}
+
+type Service_ClusterIstio_ struct {
+ // Type used for Istio services that live in a Kubernetes cluster.
+ ClusterIstio *Service_ClusterIstio `protobuf:"bytes,9,opt,name=cluster_istio,json=clusterIstio,proto3,oneof"`
+}
+
+type Service_MeshIstio_ struct {
+ // Type used for Istio services scoped to an Istio mesh.
+ MeshIstio *Service_MeshIstio `protobuf:"bytes,10,opt,name=mesh_istio,json=meshIstio,proto3,oneof"`
+}
+
+type Service_IstioCanonicalService_ struct {
+ // Type used for canonical services scoped to an Istio mesh.
+ // Metrics for Istio are
+ // [documented here](https://istio.io/latest/docs/reference/config/metrics/)
+ IstioCanonicalService *Service_IstioCanonicalService `protobuf:"bytes,11,opt,name=istio_canonical_service,json=istioCanonicalService,proto3,oneof"`
+}
+
+type Service_CloudRun_ struct {
+ // Type used for Cloud Run services.
+ CloudRun *Service_CloudRun `protobuf:"bytes,12,opt,name=cloud_run,json=cloudRun,proto3,oneof"`
+}
+
+type Service_GkeNamespace_ struct {
+ // Type used for GKE Namespaces.
+ GkeNamespace *Service_GkeNamespace `protobuf:"bytes,15,opt,name=gke_namespace,json=gkeNamespace,proto3,oneof"`
+}
+
+type Service_GkeWorkload_ struct {
+ // Type used for GKE Workloads.
+ GkeWorkload *Service_GkeWorkload `protobuf:"bytes,16,opt,name=gke_workload,json=gkeWorkload,proto3,oneof"`
+}
+
+type Service_GkeService_ struct {
+ // Type used for GKE Services (the Kubernetes concept of a service).
+ GkeService *Service_GkeService `protobuf:"bytes,17,opt,name=gke_service,json=gkeService,proto3,oneof"`
+}
+
+func (*Service_Custom_) isService_Identifier() {}
+
+func (*Service_AppEngine_) isService_Identifier() {}
+
+func (*Service_CloudEndpoints_) isService_Identifier() {}
+
+func (*Service_ClusterIstio_) isService_Identifier() {}
+
+func (*Service_MeshIstio_) isService_Identifier() {}
+
+func (*Service_IstioCanonicalService_) isService_Identifier() {}
+
+func (*Service_CloudRun_) isService_Identifier() {}
+
+func (*Service_GkeNamespace_) isService_Identifier() {}
+
+func (*Service_GkeWorkload_) isService_Identifier() {}
+
+func (*Service_GkeService_) isService_Identifier() {}
+
+// A Service-Level Objective (SLO) describes a level of desired good service. It
+// consists of a service-level indicator (SLI), a performance goal, and a period
+// over which the objective is to be evaluated against that goal. The SLO can
+// use SLIs defined in a number of different manners. Typical SLOs might include
+// "99% of requests in each rolling week have latency below 200 milliseconds" or
+// "99.5% of requests in each calendar month return successfully."
+type ServiceLevelObjective struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. Resource name for this `ServiceLevelObjective`. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Name used for UI elements listing this SLO.
+ DisplayName string `protobuf:"bytes,11,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The definition of good service, used to measure and calculate the quality
+ // of the `Service`'s performance with respect to a single aspect of service
+ // quality.
+ ServiceLevelIndicator *ServiceLevelIndicator `protobuf:"bytes,3,opt,name=service_level_indicator,json=serviceLevelIndicator,proto3" json:"service_level_indicator,omitempty"`
+ // The fraction of service that must be good in order for this objective to be
+ // met. `0 < goal <= 0.9999`.
+ Goal float64 `protobuf:"fixed64,4,opt,name=goal,proto3" json:"goal,omitempty"`
+ // The time period over which the objective will be evaluated.
+ //
+ // Types that are assignable to Period:
+ //
+ // *ServiceLevelObjective_RollingPeriod
+ // *ServiceLevelObjective_CalendarPeriod
+ Period isServiceLevelObjective_Period `protobuf_oneof:"period"`
+ // Labels which have been used to annotate the service-level objective. Label
+ // keys must start with a letter. Label keys and values may contain lowercase
+ // letters, numbers, underscores, and dashes. Label keys and values have a
+ // maximum length of 63 characters, and must be less than 128 bytes in size.
+ // Up to 64 label entries may be stored. For labels which do not have a
+ // semantic value, the empty string may be supplied for the label value.
+ UserLabels map[string]string `protobuf:"bytes,12,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ServiceLevelObjective) Reset() {
+ *x = ServiceLevelObjective{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceLevelObjective) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceLevelObjective) ProtoMessage() {}
+
+func (x *ServiceLevelObjective) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceLevelObjective.ProtoReflect.Descriptor instead.
+func (*ServiceLevelObjective) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ServiceLevelObjective) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ServiceLevelObjective) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *ServiceLevelObjective) GetServiceLevelIndicator() *ServiceLevelIndicator {
+ if x != nil {
+ return x.ServiceLevelIndicator
+ }
+ return nil
+}
+
+func (x *ServiceLevelObjective) GetGoal() float64 {
+ if x != nil {
+ return x.Goal
+ }
+ return 0
+}
+
+func (m *ServiceLevelObjective) GetPeriod() isServiceLevelObjective_Period {
+ if m != nil {
+ return m.Period
+ }
+ return nil
+}
+
+func (x *ServiceLevelObjective) GetRollingPeriod() *durationpb.Duration {
+ if x, ok := x.GetPeriod().(*ServiceLevelObjective_RollingPeriod); ok {
+ return x.RollingPeriod
+ }
+ return nil
+}
+
+func (x *ServiceLevelObjective) GetCalendarPeriod() calendarperiod.CalendarPeriod {
+ if x, ok := x.GetPeriod().(*ServiceLevelObjective_CalendarPeriod); ok {
+ return x.CalendarPeriod
+ }
+ return calendarperiod.CalendarPeriod(0)
+}
+
+func (x *ServiceLevelObjective) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+type isServiceLevelObjective_Period interface {
+ isServiceLevelObjective_Period()
+}
+
+type ServiceLevelObjective_RollingPeriod struct {
+ // A rolling time period, semantically "in the past ``".
+ // Must be an integer multiple of 1 day no larger than 30 days.
+ RollingPeriod *durationpb.Duration `protobuf:"bytes,5,opt,name=rolling_period,json=rollingPeriod,proto3,oneof"`
+}
+
+type ServiceLevelObjective_CalendarPeriod struct {
+ // A calendar period, semantically "since the start of the current
+ // ``". At this time, only `DAY`, `WEEK`, `FORTNIGHT`, and
+ // `MONTH` are supported.
+ CalendarPeriod calendarperiod.CalendarPeriod `protobuf:"varint,6,opt,name=calendar_period,json=calendarPeriod,proto3,enum=google.type.CalendarPeriod,oneof"`
+}
+
+func (*ServiceLevelObjective_RollingPeriod) isServiceLevelObjective_Period() {}
+
+func (*ServiceLevelObjective_CalendarPeriod) isServiceLevelObjective_Period() {}
+
+// A Service-Level Indicator (SLI) describes the "performance" of a service. For
+// some services, the SLI is well-defined. In such cases, the SLI can be
+// described easily by referencing the well-known SLI and providing the needed
+// parameters. Alternatively, a "custom" SLI can be defined with a query to the
+// underlying metric store. An SLI is defined to be `good_service /
+// total_service` over any queried time interval. The value of performance
+// always falls into the range `0 <= performance <= 1`. A custom SLI describes
+// how to compute this ratio, whether this is by dividing values from a pair of
+// time series, cutting a `Distribution` into good and bad counts, or counting
+// time windows in which the service complies with a criterion. For separation
+// of concerns, a single Service-Level Indicator measures performance for only
+// one aspect of service quality, such as fraction of successful queries or
+// fast-enough queries.
+type ServiceLevelIndicator struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Service level indicators can be grouped by whether the "unit" of service
+ // being measured is based on counts of good requests or on counts of good
+ // time windows
+ //
+ // Types that are assignable to Type:
+ //
+ // *ServiceLevelIndicator_BasicSli
+ // *ServiceLevelIndicator_RequestBased
+ // *ServiceLevelIndicator_WindowsBased
+ Type isServiceLevelIndicator_Type `protobuf_oneof:"type"`
+}
+
+func (x *ServiceLevelIndicator) Reset() {
+ *x = ServiceLevelIndicator{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceLevelIndicator) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceLevelIndicator) ProtoMessage() {}
+
+func (x *ServiceLevelIndicator) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceLevelIndicator.ProtoReflect.Descriptor instead.
+func (*ServiceLevelIndicator) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (m *ServiceLevelIndicator) GetType() isServiceLevelIndicator_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (x *ServiceLevelIndicator) GetBasicSli() *BasicSli {
+ if x, ok := x.GetType().(*ServiceLevelIndicator_BasicSli); ok {
+ return x.BasicSli
+ }
+ return nil
+}
+
+func (x *ServiceLevelIndicator) GetRequestBased() *RequestBasedSli {
+ if x, ok := x.GetType().(*ServiceLevelIndicator_RequestBased); ok {
+ return x.RequestBased
+ }
+ return nil
+}
+
+func (x *ServiceLevelIndicator) GetWindowsBased() *WindowsBasedSli {
+ if x, ok := x.GetType().(*ServiceLevelIndicator_WindowsBased); ok {
+ return x.WindowsBased
+ }
+ return nil
+}
+
+type isServiceLevelIndicator_Type interface {
+ isServiceLevelIndicator_Type()
+}
+
+type ServiceLevelIndicator_BasicSli struct {
+ // Basic SLI on a well-known service type.
+ BasicSli *BasicSli `protobuf:"bytes,4,opt,name=basic_sli,json=basicSli,proto3,oneof"`
+}
+
+type ServiceLevelIndicator_RequestBased struct {
+ // Request-based SLIs
+ RequestBased *RequestBasedSli `protobuf:"bytes,1,opt,name=request_based,json=requestBased,proto3,oneof"`
+}
+
+type ServiceLevelIndicator_WindowsBased struct {
+ // Windows-based SLIs
+ WindowsBased *WindowsBasedSli `protobuf:"bytes,2,opt,name=windows_based,json=windowsBased,proto3,oneof"`
+}
+
+func (*ServiceLevelIndicator_BasicSli) isServiceLevelIndicator_Type() {}
+
+func (*ServiceLevelIndicator_RequestBased) isServiceLevelIndicator_Type() {}
+
+func (*ServiceLevelIndicator_WindowsBased) isServiceLevelIndicator_Type() {}
+
+// An SLI measuring performance on a well-known service type. Performance will
+// be computed on the basis of pre-defined metrics. The type of the
+// `service_resource` determines the metrics to use and the
+// `service_resource.labels` and `metric_labels` are used to construct a
+// monitoring filter to filter that metric down to just the data relevant to
+// this service.
+type BasicSli struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // OPTIONAL: The set of RPCs to which this SLI is relevant. Telemetry from
+ // other methods will not be used to calculate performance for this SLI. If
+ // omitted, this SLI applies to all the Service's methods. For service types
+ // that don't support breaking down by method, setting this field will result
+ // in an error.
+ Method []string `protobuf:"bytes,7,rep,name=method,proto3" json:"method,omitempty"`
+ // OPTIONAL: The set of locations to which this SLI is relevant. Telemetry
+ // from other locations will not be used to calculate performance for this
+ // SLI. If omitted, this SLI applies to all locations in which the Service has
+ // activity. For service types that don't support breaking down by location,
+ // setting this field will result in an error.
+ Location []string `protobuf:"bytes,8,rep,name=location,proto3" json:"location,omitempty"`
+ // OPTIONAL: The set of API versions to which this SLI is relevant. Telemetry
+ // from other API versions will not be used to calculate performance for this
+ // SLI. If omitted, this SLI applies to all API versions. For service types
+ // that don't support breaking down by version, setting this field will result
+ // in an error.
+ Version []string `protobuf:"bytes,9,rep,name=version,proto3" json:"version,omitempty"`
+ // This SLI can be evaluated on the basis of availability or latency.
+ //
+ // Types that are assignable to SliCriteria:
+ //
+ // *BasicSli_Availability
+ // *BasicSli_Latency
+ SliCriteria isBasicSli_SliCriteria `protobuf_oneof:"sli_criteria"`
+}
+
+func (x *BasicSli) Reset() {
+ *x = BasicSli{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BasicSli) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BasicSli) ProtoMessage() {}
+
+func (x *BasicSli) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BasicSli.ProtoReflect.Descriptor instead.
+func (*BasicSli) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *BasicSli) GetMethod() []string {
+ if x != nil {
+ return x.Method
+ }
+ return nil
+}
+
+func (x *BasicSli) GetLocation() []string {
+ if x != nil {
+ return x.Location
+ }
+ return nil
+}
+
+func (x *BasicSli) GetVersion() []string {
+ if x != nil {
+ return x.Version
+ }
+ return nil
+}
+
+func (m *BasicSli) GetSliCriteria() isBasicSli_SliCriteria {
+ if m != nil {
+ return m.SliCriteria
+ }
+ return nil
+}
+
+func (x *BasicSli) GetAvailability() *BasicSli_AvailabilityCriteria {
+ if x, ok := x.GetSliCriteria().(*BasicSli_Availability); ok {
+ return x.Availability
+ }
+ return nil
+}
+
+func (x *BasicSli) GetLatency() *BasicSli_LatencyCriteria {
+ if x, ok := x.GetSliCriteria().(*BasicSli_Latency); ok {
+ return x.Latency
+ }
+ return nil
+}
+
+type isBasicSli_SliCriteria interface {
+ isBasicSli_SliCriteria()
+}
+
+type BasicSli_Availability struct {
+ // Good service is defined to be the count of requests made to this service
+ // that return successfully.
+ Availability *BasicSli_AvailabilityCriteria `protobuf:"bytes,2,opt,name=availability,proto3,oneof"`
+}
+
+type BasicSli_Latency struct {
+ // Good service is defined to be the count of requests made to this service
+ // that are fast enough with respect to `latency.threshold`.
+ Latency *BasicSli_LatencyCriteria `protobuf:"bytes,3,opt,name=latency,proto3,oneof"`
+}
+
+func (*BasicSli_Availability) isBasicSli_SliCriteria() {}
+
+func (*BasicSli_Latency) isBasicSli_SliCriteria() {}
+
+// Range of numerical values within `min` and `max`.
+type Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Range minimum.
+ Min float64 `protobuf:"fixed64,1,opt,name=min,proto3" json:"min,omitempty"`
+ // Range maximum.
+ Max float64 `protobuf:"fixed64,2,opt,name=max,proto3" json:"max,omitempty"`
+}
+
+func (x *Range) Reset() {
+ *x = Range{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Range) ProtoMessage() {}
+
+func (x *Range) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Range.ProtoReflect.Descriptor instead.
+func (*Range) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Range) GetMin() float64 {
+ if x != nil {
+ return x.Min
+ }
+ return 0
+}
+
+func (x *Range) GetMax() float64 {
+ if x != nil {
+ return x.Max
+ }
+ return 0
+}
+
+// Service Level Indicators for which atomic units of service are counted
+// directly.
+type RequestBasedSli struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The means to compute a ratio of `good_service` to `total_service`.
+ //
+ // Types that are assignable to Method:
+ //
+ // *RequestBasedSli_GoodTotalRatio
+ // *RequestBasedSli_DistributionCut
+ Method isRequestBasedSli_Method `protobuf_oneof:"method"`
+}
+
+func (x *RequestBasedSli) Reset() {
+ *x = RequestBasedSli{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *RequestBasedSli) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestBasedSli) ProtoMessage() {}
+
+func (x *RequestBasedSli) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestBasedSli.ProtoReflect.Descriptor instead.
+func (*RequestBasedSli) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (m *RequestBasedSli) GetMethod() isRequestBasedSli_Method {
+ if m != nil {
+ return m.Method
+ }
+ return nil
+}
+
+func (x *RequestBasedSli) GetGoodTotalRatio() *TimeSeriesRatio {
+ if x, ok := x.GetMethod().(*RequestBasedSli_GoodTotalRatio); ok {
+ return x.GoodTotalRatio
+ }
+ return nil
+}
+
+func (x *RequestBasedSli) GetDistributionCut() *DistributionCut {
+ if x, ok := x.GetMethod().(*RequestBasedSli_DistributionCut); ok {
+ return x.DistributionCut
+ }
+ return nil
+}
+
+type isRequestBasedSli_Method interface {
+ isRequestBasedSli_Method()
+}
+
+type RequestBasedSli_GoodTotalRatio struct {
+ // `good_total_ratio` is used when the ratio of `good_service` to
+ // `total_service` is computed from two `TimeSeries`.
+ GoodTotalRatio *TimeSeriesRatio `protobuf:"bytes,1,opt,name=good_total_ratio,json=goodTotalRatio,proto3,oneof"`
+}
+
+type RequestBasedSli_DistributionCut struct {
+ // `distribution_cut` is used when `good_service` is a count of values
+ // aggregated in a `Distribution` that fall into a good range. The
+ // `total_service` is the total count of all values aggregated in the
+ // `Distribution`.
+ DistributionCut *DistributionCut `protobuf:"bytes,3,opt,name=distribution_cut,json=distributionCut,proto3,oneof"`
+}
+
+func (*RequestBasedSli_GoodTotalRatio) isRequestBasedSli_Method() {}
+
+func (*RequestBasedSli_DistributionCut) isRequestBasedSli_Method() {}
+
+// A `TimeSeriesRatio` specifies two `TimeSeries` to use for computing the
+// `good_service / total_service` ratio. The specified `TimeSeries` must have
+// `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind =
+// DELTA` or `MetricKind = CUMULATIVE`. The `TimeSeriesRatio` must specify
+// exactly two of good, bad, and total, and the relationship `good_service +
+// bad_service = total_service` will be assumed.
+type TimeSeriesRatio struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` quantifying good service provided. Must have
+ // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind =
+ // DELTA` or `MetricKind = CUMULATIVE`.
+ GoodServiceFilter string `protobuf:"bytes,4,opt,name=good_service_filter,json=goodServiceFilter,proto3" json:"good_service_filter,omitempty"`
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` quantifying bad service, either demanded service
+ // that was not provided or demanded service that was of inadequate quality.
+ // Must have `ValueType = DOUBLE` or `ValueType = INT64` and must have
+ // `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.
+ BadServiceFilter string `protobuf:"bytes,5,opt,name=bad_service_filter,json=badServiceFilter,proto3" json:"bad_service_filter,omitempty"`
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` quantifying total demanded service. Must have
+ // `ValueType = DOUBLE` or `ValueType = INT64` and must have `MetricKind =
+ // DELTA` or `MetricKind = CUMULATIVE`.
+ TotalServiceFilter string `protobuf:"bytes,6,opt,name=total_service_filter,json=totalServiceFilter,proto3" json:"total_service_filter,omitempty"`
+}
+
+func (x *TimeSeriesRatio) Reset() {
+ *x = TimeSeriesRatio{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *TimeSeriesRatio) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TimeSeriesRatio) ProtoMessage() {}
+
+func (x *TimeSeriesRatio) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TimeSeriesRatio.ProtoReflect.Descriptor instead.
+func (*TimeSeriesRatio) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *TimeSeriesRatio) GetGoodServiceFilter() string {
+ if x != nil {
+ return x.GoodServiceFilter
+ }
+ return ""
+}
+
+func (x *TimeSeriesRatio) GetBadServiceFilter() string {
+ if x != nil {
+ return x.BadServiceFilter
+ }
+ return ""
+}
+
+func (x *TimeSeriesRatio) GetTotalServiceFilter() string {
+ if x != nil {
+ return x.TotalServiceFilter
+ }
+ return ""
+}
+
+// A `DistributionCut` defines a `TimeSeries` and thresholds used for measuring
+// good service and total service. The `TimeSeries` must have `ValueType =
+// DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`. The
+// computed `good_service` will be the estimated count of values in the
+// `Distribution` that fall within the specified `min` and `max`.
+type DistributionCut struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` aggregating values. Must have `ValueType =
+ // DISTRIBUTION` and `MetricKind = DELTA` or `MetricKind = CUMULATIVE`.
+ DistributionFilter string `protobuf:"bytes,4,opt,name=distribution_filter,json=distributionFilter,proto3" json:"distribution_filter,omitempty"`
+ // Range of values considered "good." For a one-sided range, set one bound to
+ // an infinite value.
+ Range *Range `protobuf:"bytes,5,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *DistributionCut) Reset() {
+ *x = DistributionCut{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DistributionCut) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DistributionCut) ProtoMessage() {}
+
+func (x *DistributionCut) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DistributionCut.ProtoReflect.Descriptor instead.
+func (*DistributionCut) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *DistributionCut) GetDistributionFilter() string {
+ if x != nil {
+ return x.DistributionFilter
+ }
+ return ""
+}
+
+func (x *DistributionCut) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+// A `WindowsBasedSli` defines `good_service` as the count of time windows for
+// which the provided service was of good quality. Criteria for determining
+// if service was good are embedded in the `window_criterion`.
+type WindowsBasedSli struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The criterion to use for evaluating window goodness.
+ //
+ // Types that are assignable to WindowCriterion:
+ //
+ // *WindowsBasedSli_GoodBadMetricFilter
+ // *WindowsBasedSli_GoodTotalRatioThreshold
+ // *WindowsBasedSli_MetricMeanInRange
+ // *WindowsBasedSli_MetricSumInRange
+ WindowCriterion isWindowsBasedSli_WindowCriterion `protobuf_oneof:"window_criterion"`
+ // Duration over which window quality is evaluated. Must be an integer
+ // fraction of a day and at least `60s`.
+ WindowPeriod *durationpb.Duration `protobuf:"bytes,4,opt,name=window_period,json=windowPeriod,proto3" json:"window_period,omitempty"`
+}
+
+func (x *WindowsBasedSli) Reset() {
+ *x = WindowsBasedSli{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WindowsBasedSli) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WindowsBasedSli) ProtoMessage() {}
+
+func (x *WindowsBasedSli) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WindowsBasedSli.ProtoReflect.Descriptor instead.
+func (*WindowsBasedSli) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (m *WindowsBasedSli) GetWindowCriterion() isWindowsBasedSli_WindowCriterion {
+ if m != nil {
+ return m.WindowCriterion
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli) GetGoodBadMetricFilter() string {
+ if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodBadMetricFilter); ok {
+ return x.GoodBadMetricFilter
+ }
+ return ""
+}
+
+func (x *WindowsBasedSli) GetGoodTotalRatioThreshold() *WindowsBasedSli_PerformanceThreshold {
+ if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_GoodTotalRatioThreshold); ok {
+ return x.GoodTotalRatioThreshold
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli) GetMetricMeanInRange() *WindowsBasedSli_MetricRange {
+ if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricMeanInRange); ok {
+ return x.MetricMeanInRange
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli) GetMetricSumInRange() *WindowsBasedSli_MetricRange {
+ if x, ok := x.GetWindowCriterion().(*WindowsBasedSli_MetricSumInRange); ok {
+ return x.MetricSumInRange
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli) GetWindowPeriod() *durationpb.Duration {
+ if x != nil {
+ return x.WindowPeriod
+ }
+ return nil
+}
+
+type isWindowsBasedSli_WindowCriterion interface {
+ isWindowsBasedSli_WindowCriterion()
+}
+
+type WindowsBasedSli_GoodBadMetricFilter struct {
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying a `TimeSeries` with `ValueType = BOOL`. The window is good if
+ // any `true` values appear in the window.
+ GoodBadMetricFilter string `protobuf:"bytes,5,opt,name=good_bad_metric_filter,json=goodBadMetricFilter,proto3,oneof"`
+}
+
+type WindowsBasedSli_GoodTotalRatioThreshold struct {
+ // A window is good if its `performance` is high enough.
+ GoodTotalRatioThreshold *WindowsBasedSli_PerformanceThreshold `protobuf:"bytes,2,opt,name=good_total_ratio_threshold,json=goodTotalRatioThreshold,proto3,oneof"`
+}
+
+type WindowsBasedSli_MetricMeanInRange struct {
+ // A window is good if the metric's value is in a good range, averaged
+ // across returned streams.
+ MetricMeanInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,6,opt,name=metric_mean_in_range,json=metricMeanInRange,proto3,oneof"`
+}
+
+type WindowsBasedSli_MetricSumInRange struct {
+ // A window is good if the metric's value is in a good range, summed across
+ // returned streams.
+ MetricSumInRange *WindowsBasedSli_MetricRange `protobuf:"bytes,7,opt,name=metric_sum_in_range,json=metricSumInRange,proto3,oneof"`
+}
+
+func (*WindowsBasedSli_GoodBadMetricFilter) isWindowsBasedSli_WindowCriterion() {}
+
+func (*WindowsBasedSli_GoodTotalRatioThreshold) isWindowsBasedSli_WindowCriterion() {}
+
+func (*WindowsBasedSli_MetricMeanInRange) isWindowsBasedSli_WindowCriterion() {}
+
+func (*WindowsBasedSli_MetricSumInRange) isWindowsBasedSli_WindowCriterion() {}
+
+// Use a custom service to designate a service that you want to monitor
+// when none of the other service types (like App Engine, Cloud Run, or
+// a GKE type) matches your intended service.
+type Service_Custom struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *Service_Custom) Reset() {
+ *x = Service_Custom{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_Custom) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_Custom) ProtoMessage() {}
+
+func (x *Service_Custom) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_Custom.ProtoReflect.Descriptor instead.
+func (*Service_Custom) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// App Engine service. Learn more at https://cloud.google.com/appengine.
+type Service_AppEngine struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The ID of the App Engine module underlying this service. Corresponds to
+ // the `module_id` resource label in the [`gae_app` monitored
+ // resource](https://cloud.google.com/monitoring/api/resources#tag_gae_app).
+ ModuleId string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,proto3" json:"module_id,omitempty"`
+}
+
+func (x *Service_AppEngine) Reset() {
+ *x = Service_AppEngine{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_AppEngine) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_AppEngine) ProtoMessage() {}
+
+func (x *Service_AppEngine) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_AppEngine.ProtoReflect.Descriptor instead.
+func (*Service_AppEngine) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *Service_AppEngine) GetModuleId() string {
+ if x != nil {
+ return x.ModuleId
+ }
+ return ""
+}
+
+// Cloud Endpoints service. Learn more at https://cloud.google.com/endpoints.
+type Service_CloudEndpoints struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the Cloud Endpoints service underlying this service.
+ // Corresponds to the `service` resource label in the [`api` monitored
+ // resource](https://cloud.google.com/monitoring/api/resources#tag_api).
+ Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *Service_CloudEndpoints) Reset() {
+ *x = Service_CloudEndpoints{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_CloudEndpoints) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_CloudEndpoints) ProtoMessage() {}
+
+func (x *Service_CloudEndpoints) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_CloudEndpoints.ProtoReflect.Descriptor instead.
+func (*Service_CloudEndpoints) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *Service_CloudEndpoints) GetService() string {
+ if x != nil {
+ return x.Service
+ }
+ return ""
+}
+
+// Istio service scoped to a single Kubernetes cluster. Learn more at
+// https://istio.io. Clusters running OSS Istio will have their services
+// ingested as this type.
+type Service_ClusterIstio struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The location of the Kubernetes cluster in which this Istio service is
+ // defined. Corresponds to the `location` resource label in `k8s_cluster`
+ // resources.
+ Location string `protobuf:"bytes,1,opt,name=location,proto3" json:"location,omitempty"`
+ // The name of the Kubernetes cluster in which this Istio service is
+ // defined. Corresponds to the `cluster_name` resource label in
+ // `k8s_cluster` resources.
+ ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The namespace of the Istio service underlying this service. Corresponds
+ // to the `destination_service_namespace` metric label in Istio metrics.
+ ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"`
+ // The name of the Istio service underlying this service. Corresponds to the
+ // `destination_service_name` metric label in Istio metrics.
+ ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+}
+
+func (x *Service_ClusterIstio) Reset() {
+ *x = Service_ClusterIstio{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_ClusterIstio) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_ClusterIstio) ProtoMessage() {}
+
+func (x *Service_ClusterIstio) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_ClusterIstio.ProtoReflect.Descriptor instead.
+func (*Service_ClusterIstio) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 3}
+}
+
+func (x *Service_ClusterIstio) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *Service_ClusterIstio) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *Service_ClusterIstio) GetServiceNamespace() string {
+ if x != nil {
+ return x.ServiceNamespace
+ }
+ return ""
+}
+
+func (x *Service_ClusterIstio) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+// Istio service scoped to an Istio mesh. Anthos clusters running ASM < 1.6.8
+// will have their services ingested as this type.
+type Service_MeshIstio struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier for the mesh in which this Istio service is defined.
+ // Corresponds to the `mesh_uid` metric label in Istio metrics.
+ MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"`
+ // The namespace of the Istio service underlying this service. Corresponds
+ // to the `destination_service_namespace` metric label in Istio metrics.
+ ServiceNamespace string `protobuf:"bytes,3,opt,name=service_namespace,json=serviceNamespace,proto3" json:"service_namespace,omitempty"`
+ // The name of the Istio service underlying this service. Corresponds to the
+ // `destination_service_name` metric label in Istio metrics.
+ ServiceName string `protobuf:"bytes,4,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+}
+
+func (x *Service_MeshIstio) Reset() {
+ *x = Service_MeshIstio{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_MeshIstio) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_MeshIstio) ProtoMessage() {}
+
+func (x *Service_MeshIstio) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_MeshIstio.ProtoReflect.Descriptor instead.
+func (*Service_MeshIstio) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 4}
+}
+
+func (x *Service_MeshIstio) GetMeshUid() string {
+ if x != nil {
+ return x.MeshUid
+ }
+ return ""
+}
+
+func (x *Service_MeshIstio) GetServiceNamespace() string {
+ if x != nil {
+ return x.ServiceNamespace
+ }
+ return ""
+}
+
+func (x *Service_MeshIstio) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+// Canonical service scoped to an Istio mesh. Anthos clusters running ASM >=
+// 1.6.8 will have their services ingested as this type.
+type Service_IstioCanonicalService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier for the Istio mesh in which this canonical service is defined.
+ // Corresponds to the `mesh_uid` metric label in
+ // [Istio metrics](https://cloud.google.com/monitoring/api/metrics_istio).
+ MeshUid string `protobuf:"bytes,1,opt,name=mesh_uid,json=meshUid,proto3" json:"mesh_uid,omitempty"`
+ // The namespace of the canonical service underlying this service.
+ // Corresponds to the `destination_canonical_service_namespace` metric
+ // label in [Istio
+ // metrics](https://cloud.google.com/monitoring/api/metrics_istio).
+ CanonicalServiceNamespace string `protobuf:"bytes,3,opt,name=canonical_service_namespace,json=canonicalServiceNamespace,proto3" json:"canonical_service_namespace,omitempty"`
+ // The name of the canonical service underlying this service.
+ // Corresponds to the `destination_canonical_service_name` metric label in
+ // label in [Istio
+ // metrics](https://cloud.google.com/monitoring/api/metrics_istio).
+ CanonicalService string `protobuf:"bytes,4,opt,name=canonical_service,json=canonicalService,proto3" json:"canonical_service,omitempty"`
+}
+
+func (x *Service_IstioCanonicalService) Reset() {
+ *x = Service_IstioCanonicalService{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_IstioCanonicalService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_IstioCanonicalService) ProtoMessage() {}
+
+func (x *Service_IstioCanonicalService) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_IstioCanonicalService.ProtoReflect.Descriptor instead.
+func (*Service_IstioCanonicalService) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 5}
+}
+
+func (x *Service_IstioCanonicalService) GetMeshUid() string {
+ if x != nil {
+ return x.MeshUid
+ }
+ return ""
+}
+
+func (x *Service_IstioCanonicalService) GetCanonicalServiceNamespace() string {
+ if x != nil {
+ return x.CanonicalServiceNamespace
+ }
+ return ""
+}
+
+func (x *Service_IstioCanonicalService) GetCanonicalService() string {
+ if x != nil {
+ return x.CanonicalService
+ }
+ return ""
+}
+
+// Cloud Run service. Learn more at https://cloud.google.com/run.
+type Service_CloudRun struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the Cloud Run service. Corresponds to the `service_name`
+ // resource label in the [`cloud_run_revision` monitored
+ // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision).
+ ServiceName string `protobuf:"bytes,1,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+ // The location the service is run. Corresponds to the `location`
+ // resource label in the [`cloud_run_revision` monitored
+ // resource](https://cloud.google.com/monitoring/api/resources#tag_cloud_run_revision).
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+}
+
+func (x *Service_CloudRun) Reset() {
+ *x = Service_CloudRun{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_CloudRun) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_CloudRun) ProtoMessage() {}
+
+func (x *Service_CloudRun) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_CloudRun.ProtoReflect.Descriptor instead.
+func (*Service_CloudRun) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 6}
+}
+
+func (x *Service_CloudRun) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+func (x *Service_CloudRun) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+// GKE Namespace. The field names correspond to the resource metadata labels
+// on monitored resources that fall under a namespace (for example,
+// `k8s_container` or `k8s_pod`).
+type Service_GkeNamespace struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The project this resource lives in. For legacy services
+ // migrated from the `Custom` type, this may be a distinct project from the
+ // one parenting the service itself.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ // The location of the parent cluster. This may be a zone or region.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The name of the parent cluster.
+ ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The name of this namespace.
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"`
+}
+
+func (x *Service_GkeNamespace) Reset() {
+ *x = Service_GkeNamespace{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_GkeNamespace) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_GkeNamespace) ProtoMessage() {}
+
+func (x *Service_GkeNamespace) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_GkeNamespace.ProtoReflect.Descriptor instead.
+func (*Service_GkeNamespace) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 7}
+}
+
+func (x *Service_GkeNamespace) GetProjectId() string {
+ if x != nil {
+ return x.ProjectId
+ }
+ return ""
+}
+
+func (x *Service_GkeNamespace) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *Service_GkeNamespace) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *Service_GkeNamespace) GetNamespaceName() string {
+ if x != nil {
+ return x.NamespaceName
+ }
+ return ""
+}
+
+// A GKE Workload (Deployment, StatefulSet, etc). The field names correspond
+// to the metadata labels on monitored resources that fall under a workload
+// (for example, `k8s_container` or `k8s_pod`).
+type Service_GkeWorkload struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The project this resource lives in. For legacy services
+ // migrated from the `Custom` type, this may be a distinct project from the
+ // one parenting the service itself.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ // The location of the parent cluster. This may be a zone or region.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The name of the parent cluster.
+ ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The name of the parent namespace.
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"`
+ // The type of this workload (for example, "Deployment" or "DaemonSet")
+ TopLevelControllerType string `protobuf:"bytes,5,opt,name=top_level_controller_type,json=topLevelControllerType,proto3" json:"top_level_controller_type,omitempty"`
+ // The name of this workload.
+ TopLevelControllerName string `protobuf:"bytes,6,opt,name=top_level_controller_name,json=topLevelControllerName,proto3" json:"top_level_controller_name,omitempty"`
+}
+
+func (x *Service_GkeWorkload) Reset() {
+ *x = Service_GkeWorkload{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_GkeWorkload) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_GkeWorkload) ProtoMessage() {}
+
+func (x *Service_GkeWorkload) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_GkeWorkload.ProtoReflect.Descriptor instead.
+func (*Service_GkeWorkload) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 8}
+}
+
+func (x *Service_GkeWorkload) GetProjectId() string {
+ if x != nil {
+ return x.ProjectId
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetNamespaceName() string {
+ if x != nil {
+ return x.NamespaceName
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetTopLevelControllerType() string {
+ if x != nil {
+ return x.TopLevelControllerType
+ }
+ return ""
+}
+
+func (x *Service_GkeWorkload) GetTopLevelControllerName() string {
+ if x != nil {
+ return x.TopLevelControllerName
+ }
+ return ""
+}
+
+// GKE Service. The "service" here represents a
+// [Kubernetes service
+// object](https://kubernetes.io/docs/concepts/services-networking/service).
+// The field names correspond to the resource labels on [`k8s_service`
+// monitored
+// resources](https://cloud.google.com/monitoring/api/resources#tag_k8s_service).
+type Service_GkeService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Output only. The project this resource lives in. For legacy services
+ // migrated from the `Custom` type, this may be a distinct project from the
+ // one parenting the service itself.
+ ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
+ // The location of the parent cluster. This may be a zone or region.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The name of the parent cluster.
+ ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
+ // The name of the parent namespace.
+ NamespaceName string `protobuf:"bytes,4,opt,name=namespace_name,json=namespaceName,proto3" json:"namespace_name,omitempty"`
+ // The name of this service.
+ ServiceName string `protobuf:"bytes,5,opt,name=service_name,json=serviceName,proto3" json:"service_name,omitempty"`
+}
+
+func (x *Service_GkeService) Reset() {
+ *x = Service_GkeService{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_GkeService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_GkeService) ProtoMessage() {}
+
+func (x *Service_GkeService) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[18]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_GkeService.ProtoReflect.Descriptor instead.
+func (*Service_GkeService) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 9}
+}
+
+func (x *Service_GkeService) GetProjectId() string {
+ if x != nil {
+ return x.ProjectId
+ }
+ return ""
+}
+
+func (x *Service_GkeService) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *Service_GkeService) GetClusterName() string {
+ if x != nil {
+ return x.ClusterName
+ }
+ return ""
+}
+
+func (x *Service_GkeService) GetNamespaceName() string {
+ if x != nil {
+ return x.NamespaceName
+ }
+ return ""
+}
+
+func (x *Service_GkeService) GetServiceName() string {
+ if x != nil {
+ return x.ServiceName
+ }
+ return ""
+}
+
+// A well-known service type, defined by its service type and service labels.
+// Documentation and examples
+// [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).
+type Service_BasicService struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of service that this basic service defines, e.g.
+ // APP_ENGINE service type.
+ // Documentation and valid values
+ // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).
+ ServiceType string `protobuf:"bytes,1,opt,name=service_type,json=serviceType,proto3" json:"service_type,omitempty"`
+ // Labels that specify the resource that emits the monitoring data which
+ // is used for SLO reporting of this `Service`.
+ // Documentation and valid values for given service types
+ // [here](https://cloud.google.com/stackdriver/docs/solutions/slo-monitoring/api/api-structures#basic-svc-w-basic-sli).
+ ServiceLabels map[string]string `protobuf:"bytes,2,rep,name=service_labels,json=serviceLabels,proto3" json:"service_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Service_BasicService) Reset() {
+ *x = Service_BasicService{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_BasicService) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_BasicService) ProtoMessage() {}
+
+func (x *Service_BasicService) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[19]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_BasicService.ProtoReflect.Descriptor instead.
+func (*Service_BasicService) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 10}
+}
+
+func (x *Service_BasicService) GetServiceType() string {
+ if x != nil {
+ return x.ServiceType
+ }
+ return ""
+}
+
+func (x *Service_BasicService) GetServiceLabels() map[string]string {
+ if x != nil {
+ return x.ServiceLabels
+ }
+ return nil
+}
+
+// Configuration for how to query telemetry on a Service.
+type Service_Telemetry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The full name of the resource that defines this service. Formatted as
+ // described in https://cloud.google.com/apis/design/resource_names.
+ ResourceName string `protobuf:"bytes,1,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
+}
+
+func (x *Service_Telemetry) Reset() {
+ *x = Service_Telemetry{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Service_Telemetry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Service_Telemetry) ProtoMessage() {}
+
+func (x *Service_Telemetry) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[20]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Service_Telemetry.ProtoReflect.Descriptor instead.
+func (*Service_Telemetry) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{0, 11}
+}
+
+func (x *Service_Telemetry) GetResourceName() string {
+ if x != nil {
+ return x.ResourceName
+ }
+ return ""
+}
+
+// Future parameters for the availability SLI.
+type BasicSli_AvailabilityCriteria struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *BasicSli_AvailabilityCriteria) Reset() {
+ *x = BasicSli_AvailabilityCriteria{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BasicSli_AvailabilityCriteria) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BasicSli_AvailabilityCriteria) ProtoMessage() {}
+
+func (x *BasicSli_AvailabilityCriteria) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[24]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BasicSli_AvailabilityCriteria.ProtoReflect.Descriptor instead.
+func (*BasicSli_AvailabilityCriteria) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// Parameters for a latency threshold SLI.
+type BasicSli_LatencyCriteria struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Good service is defined to be the count of requests made to this service
+ // that return in no more than `threshold`.
+ Threshold *durationpb.Duration `protobuf:"bytes,3,opt,name=threshold,proto3" json:"threshold,omitempty"`
+}
+
+func (x *BasicSli_LatencyCriteria) Reset() {
+ *x = BasicSli_LatencyCriteria{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BasicSli_LatencyCriteria) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BasicSli_LatencyCriteria) ProtoMessage() {}
+
+func (x *BasicSli_LatencyCriteria) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[25]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BasicSli_LatencyCriteria.ProtoReflect.Descriptor instead.
+func (*BasicSli_LatencyCriteria) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *BasicSli_LatencyCriteria) GetThreshold() *durationpb.Duration {
+ if x != nil {
+ return x.Threshold
+ }
+ return nil
+}
+
+// A `PerformanceThreshold` is used when each window is good when that window
+// has a sufficiently high `performance`.
+type WindowsBasedSli_PerformanceThreshold struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The means, either a request-based SLI or a basic SLI, by which to compute
+ // performance over a window.
+ //
+ // Types that are assignable to Type:
+ //
+ // *WindowsBasedSli_PerformanceThreshold_Performance
+ // *WindowsBasedSli_PerformanceThreshold_BasicSliPerformance
+ Type isWindowsBasedSli_PerformanceThreshold_Type `protobuf_oneof:"type"`
+ // If window `performance >= threshold`, the window is counted as good.
+ Threshold float64 `protobuf:"fixed64,2,opt,name=threshold,proto3" json:"threshold,omitempty"`
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) Reset() {
+ *x = WindowsBasedSli_PerformanceThreshold{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WindowsBasedSli_PerformanceThreshold) ProtoMessage() {}
+
+func (x *WindowsBasedSli_PerformanceThreshold) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[26]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WindowsBasedSli_PerformanceThreshold.ProtoReflect.Descriptor instead.
+func (*WindowsBasedSli_PerformanceThreshold) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (m *WindowsBasedSli_PerformanceThreshold) GetType() isWindowsBasedSli_PerformanceThreshold_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) GetPerformance() *RequestBasedSli {
+ if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_Performance); ok {
+ return x.Performance
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) GetBasicSliPerformance() *BasicSli {
+ if x, ok := x.GetType().(*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance); ok {
+ return x.BasicSliPerformance
+ }
+ return nil
+}
+
+func (x *WindowsBasedSli_PerformanceThreshold) GetThreshold() float64 {
+ if x != nil {
+ return x.Threshold
+ }
+ return 0
+}
+
+type isWindowsBasedSli_PerformanceThreshold_Type interface {
+ isWindowsBasedSli_PerformanceThreshold_Type()
+}
+
+type WindowsBasedSli_PerformanceThreshold_Performance struct {
+ // `RequestBasedSli` to evaluate to judge window quality.
+ Performance *RequestBasedSli `protobuf:"bytes,1,opt,name=performance,proto3,oneof"`
+}
+
+type WindowsBasedSli_PerformanceThreshold_BasicSliPerformance struct {
+ // `BasicSli` to evaluate to judge window quality.
+ BasicSliPerformance *BasicSli `protobuf:"bytes,3,opt,name=basic_sli_performance,json=basicSliPerformance,proto3,oneof"`
+}
+
+func (*WindowsBasedSli_PerformanceThreshold_Performance) isWindowsBasedSli_PerformanceThreshold_Type() {
+}
+
+func (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance) isWindowsBasedSli_PerformanceThreshold_Type() {
+}
+
+// A `MetricRange` is used when each window is good when the value x of a
+// single `TimeSeries` satisfies `range.min <= x <= range.max`. The provided
+// `TimeSeries` must have `ValueType = INT64` or `ValueType = DOUBLE` and
+// `MetricKind = GAUGE`.
+type WindowsBasedSli_MetricRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A [monitoring filter](https://cloud.google.com/monitoring/api/v3/filters)
+ // specifying the `TimeSeries` to use for evaluating window quality.
+ TimeSeries string `protobuf:"bytes,1,opt,name=time_series,json=timeSeries,proto3" json:"time_series,omitempty"`
+ // Range of values considered "good." For a one-sided range, set one bound
+ // to an infinite value.
+ Range *Range `protobuf:"bytes,4,opt,name=range,proto3" json:"range,omitempty"`
+}
+
+func (x *WindowsBasedSli_MetricRange) Reset() {
+ *x = WindowsBasedSli_MetricRange{}
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WindowsBasedSli_MetricRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WindowsBasedSli_MetricRange) ProtoMessage() {}
+
+func (x *WindowsBasedSli_MetricRange) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WindowsBasedSli_MetricRange.ProtoReflect.Descriptor instead.
+func (*WindowsBasedSli_MetricRange) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_proto_rawDescGZIP(), []int{8, 1}
+}
+
+func (x *WindowsBasedSli_MetricRange) GetTimeSeries() string {
+ if x != nil {
+ return x.TimeSeries
+ }
+ return ""
+}
+
+func (x *WindowsBasedSli_MetricRange) GetRange() *Range {
+ if x != nil {
+ return x.Range
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_service_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68,
+ 0x61, 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74,
+ 0x79, 0x70, 0x65, 0x2f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f, 0x70, 0x65, 0x72,
+ 0x69, 0x6f, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa4, 0x16, 0x0a, 0x07, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21,
+ 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f,
+ 0x6d, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2e, 0x41, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x48, 0x00,
+ 0x52, 0x09, 0x61, 0x70, 0x70, 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x57, 0x0a, 0x0f, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e,
+ 0x74, 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x73, 0x12, 0x51, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x48, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x68, 0x5f,
+ 0x69, 0x73, 0x74, 0x69, 0x6f, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x73, 0x68, 0x49,
+ 0x73, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69,
+ 0x6f, 0x12, 0x6d, 0x0a, 0x17, 0x69, 0x73, 0x74, 0x69, 0x6f, 0x5f, 0x63, 0x61, 0x6e, 0x6f, 0x6e,
+ 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x2e, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x15, 0x69, 0x73, 0x74, 0x69, 0x6f,
+ 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x12, 0x45, 0x0a, 0x09, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f, 0x72, 0x75, 0x6e, 0x18, 0x0c, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x51, 0x0a, 0x0d, 0x67, 0x6b, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x6b,
+ 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0c, 0x67, 0x6b,
+ 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4e, 0x0a, 0x0c, 0x67, 0x6b,
+ 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x00, 0x52, 0x0b, 0x67,
+ 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x4b, 0x0a, 0x0b, 0x67, 0x6b,
+ 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47,
+ 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x67, 0x6b, 0x65,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4f, 0x0a, 0x0d, 0x62, 0x61, 0x73, 0x69, 0x63,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61,
+ 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0c, 0x62, 0x61, 0x73, 0x69,
+ 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x09, 0x74, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x74, 0x72, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x52, 0x09, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x12,
+ 0x4e, 0x0a, 0x0b, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0e,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a,
+ 0x08, 0x0a, 0x06, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x1a, 0x28, 0x0a, 0x09, 0x41, 0x70, 0x70,
+ 0x45, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x6f, 0x64, 0x75, 0x6c, 0x65,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6d, 0x6f, 0x64, 0x75, 0x6c,
+ 0x65, 0x49, 0x64, 0x1a, 0x2a, 0x0a, 0x0e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x6e, 0x64, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a,
+ 0x9d, 0x01, 0x0a, 0x0c, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x73, 0x74, 0x69, 0x6f,
+ 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12,
+ 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a,
+ 0x76, 0x0a, 0x09, 0x4d, 0x65, 0x73, 0x68, 0x49, 0x73, 0x74, 0x69, 0x6f, 0x12, 0x19, 0x0a, 0x08,
+ 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x10, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x15, 0x49, 0x73, 0x74, 0x69,
+ 0x6f, 0x43, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x68, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x68, 0x55, 0x69, 0x64, 0x12, 0x3e, 0x0a, 0x1b,
+ 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x19, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x2b, 0x0a, 0x11,
+ 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x61, 0x6e, 0x6f, 0x6e, 0x69, 0x63,
+ 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x49, 0x0a, 0x08, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x52, 0x75, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x98, 0x01, 0x0a, 0x0c, 0x47, 0x6b, 0x65, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a,
+ 0x8d, 0x02, 0x0a, 0x0b, 0x47, 0x6b, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x12,
+ 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x21, 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65,
+ 0x73, 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70,
+ 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65,
+ 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f,
+ 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x19, 0x74, 0x6f, 0x70, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x1a,
+ 0xb9, 0x01, 0x0a, 0x0a, 0x47, 0x6b, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x22,
+ 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21,
+ 0x0a, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d,
+ 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0xd9, 0x01, 0x0a, 0x0c,
+ 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12,
+ 0x64, 0x0a, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
+ 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x30, 0x0a, 0x09, 0x54, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65,
+ 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0xa7, 0x01, 0xea, 0x41, 0xa3, 0x01, 0x0a,
+ 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x12, 0x25, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x2f, 0x6f, 0x72, 0x67, 0x61, 0x6e,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69,
+ 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12, 0x23, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x12,
+ 0x01, 0x2a, 0x42, 0x0c, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x22, 0x82, 0x07, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c,
+ 0x61, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x63, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63,
+ 0x61, 0x74, 0x6f, 0x72, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x67,
+ 0x6f, 0x61, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x04, 0x67, 0x6f, 0x61, 0x6c, 0x12,
+ 0x42, 0x0a, 0x0e, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f,
+ 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x6f, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x50, 0x65, 0x72,
+ 0x69, 0x6f, 0x64, 0x12, 0x46, 0x0a, 0x0f, 0x63, 0x61, 0x6c, 0x65, 0x6e, 0x64, 0x61, 0x72, 0x5f,
+ 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x43, 0x61, 0x6c, 0x65, 0x6e,
+ 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x61, 0x6c,
+ 0x65, 0x6e, 0x64, 0x61, 0x72, 0x50, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x5c, 0x0a, 0x0b, 0x75,
+ 0x73, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c,
+ 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x55, 0x73,
+ 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75,
+ 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x55, 0x73, 0x65,
+ 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x34, 0x0a, 0x04, 0x56, 0x69, 0x65, 0x77,
+ 0x12, 0x14, 0x0a, 0x10, 0x56, 0x49, 0x45, 0x57, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x02,
+ 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, 0x01, 0x3a, 0xca,
+ 0x02, 0xea, 0x41, 0xc6, 0x02, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x56, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65,
+ 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x60,
+ 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6f,
+ 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d,
+ 0x12, 0x54, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65,
+ 0x72, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x7b, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x7d, 0x12, 0x01, 0x2a, 0x20, 0x01, 0x42, 0x08, 0x0a, 0x06, 0x70,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x22, 0xfa, 0x01, 0x0a, 0x15, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12,
+ 0x3d, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53,
+ 0x6c, 0x69, 0x48, 0x00, 0x52, 0x08, 0x62, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12, 0x4c,
+ 0x0a, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x12, 0x4c, 0x0a, 0x0d,
+ 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f,
+ 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0c, 0x77, 0x69,
+ 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x42, 0x06, 0x0a, 0x04, 0x74, 0x79,
+ 0x70, 0x65, 0x22, 0xf3, 0x02, 0x0a, 0x08, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x12,
+ 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
+ 0x0c, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63,
+ 0x53, 0x6c, 0x69, 0x2e, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79,
+ 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x0c, 0x61, 0x76, 0x61, 0x69,
+ 0x6c, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x4a, 0x0a, 0x07, 0x6c, 0x61, 0x74, 0x65,
+ 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x2e, 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63,
+ 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x48, 0x00, 0x52, 0x07, 0x6c, 0x61, 0x74,
+ 0x65, 0x6e, 0x63, 0x79, 0x1a, 0x16, 0x0a, 0x14, 0x41, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x69,
+ 0x6c, 0x69, 0x74, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x1a, 0x4a, 0x0a, 0x0f,
+ 0x4c, 0x61, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12,
+ 0x37, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x74,
+ 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x0e, 0x0a, 0x0c, 0x73, 0x6c, 0x69, 0x5f,
+ 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x22, 0x2b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x03,
+ 0x6d, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x03, 0x6d, 0x61, 0x78, 0x22, 0xc2, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x51, 0x0a, 0x10, 0x67, 0x6f, 0x6f,
+ 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x48, 0x00, 0x52, 0x0e, 0x67, 0x6f,
+ 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x52, 0x0a, 0x10,
+ 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x75, 0x74,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x69,
+ 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74, 0x48, 0x00, 0x52,
+ 0x0f, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75, 0x74,
+ 0x42, 0x08, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0xa1, 0x01, 0x0a, 0x0f, 0x54,
+ 0x69, 0x6d, 0x65, 0x53, 0x65, 0x72, 0x69, 0x65, 0x73, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x12, 0x2e,
+ 0x0a, 0x13, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x67, 0x6f, 0x6f,
+ 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x2c,
+ 0x0a, 0x12, 0x62, 0x61, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x62, 0x61, 0x64, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x30, 0x0a, 0x14,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61,
+ 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x75,
+ 0x0a, 0x0f, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x75,
+ 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12,
+ 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x05,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0xa4, 0x06, 0x0a, 0x0f, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77,
+ 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x12, 0x35, 0x0a, 0x16, 0x67, 0x6f, 0x6f,
+ 0x64, 0x5f, 0x62, 0x61, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x13, 0x67, 0x6f, 0x6f,
+ 0x64, 0x42, 0x61, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x79, 0x0a, 0x1a, 0x67, 0x6f, 0x6f, 0x64, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64,
+ 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x50, 0x65, 0x72, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64,
+ 0x48, 0x00, 0x52, 0x17, 0x67, 0x6f, 0x6f, 0x64, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x52, 0x61, 0x74,
+ 0x69, 0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x64, 0x0a, 0x14, 0x6d,
+ 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x5f, 0x72, 0x61,
+ 0x6e, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69,
+ 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x48, 0x00, 0x52, 0x11,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x4d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x62, 0x0a, 0x13, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x75, 0x6d, 0x5f,
+ 0x69, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x73, 0x42, 0x61, 0x73,
+ 0x65, 0x64, 0x53, 0x6c, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x48, 0x00, 0x52, 0x10, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x53, 0x75, 0x6d, 0x49, 0x6e,
+ 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x3e, 0x0a, 0x0d, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x5f,
+ 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x50,
+ 0x65, 0x72, 0x69, 0x6f, 0x64, 0x1a, 0xdd, 0x01, 0x0a, 0x14, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x49,
+ 0x0a, 0x0b, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x65,
+ 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x54, 0x0a, 0x15, 0x62, 0x61, 0x73,
+ 0x69, 0x63, 0x5f, 0x73, 0x6c, 0x69, 0x5f, 0x70, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e,
+ 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x42, 0x61, 0x73, 0x69, 0x63, 0x53, 0x6c, 0x69, 0x48, 0x00, 0x52, 0x13, 0x62, 0x61, 0x73, 0x69,
+ 0x63, 0x53, 0x6c, 0x69, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12,
+ 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x09, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x06, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x1a, 0x61, 0x0a, 0x0b, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65, 0x72,
+ 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x53,
+ 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x31, 0x0a, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x52, 0x05, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x77, 0x69, 0x6e, 0x64,
+ 0x6f, 0x77, 0x5f, 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x6f, 0x6e, 0x42, 0xd1, 0x01, 0x0a,
+ 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33,
+ 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_service_proto_rawDescData = file_google_monitoring_v3_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_service_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_monitoring_v3_service_proto_msgTypes = make([]protoimpl.MessageInfo, 28)
+var file_google_monitoring_v3_service_proto_goTypes = []any{
+ (ServiceLevelObjective_View)(0), // 0: google.monitoring.v3.ServiceLevelObjective.View
+ (*Service)(nil), // 1: google.monitoring.v3.Service
+ (*ServiceLevelObjective)(nil), // 2: google.monitoring.v3.ServiceLevelObjective
+ (*ServiceLevelIndicator)(nil), // 3: google.monitoring.v3.ServiceLevelIndicator
+ (*BasicSli)(nil), // 4: google.monitoring.v3.BasicSli
+ (*Range)(nil), // 5: google.monitoring.v3.Range
+ (*RequestBasedSli)(nil), // 6: google.monitoring.v3.RequestBasedSli
+ (*TimeSeriesRatio)(nil), // 7: google.monitoring.v3.TimeSeriesRatio
+ (*DistributionCut)(nil), // 8: google.monitoring.v3.DistributionCut
+ (*WindowsBasedSli)(nil), // 9: google.monitoring.v3.WindowsBasedSli
+ (*Service_Custom)(nil), // 10: google.monitoring.v3.Service.Custom
+ (*Service_AppEngine)(nil), // 11: google.monitoring.v3.Service.AppEngine
+ (*Service_CloudEndpoints)(nil), // 12: google.monitoring.v3.Service.CloudEndpoints
+ (*Service_ClusterIstio)(nil), // 13: google.monitoring.v3.Service.ClusterIstio
+ (*Service_MeshIstio)(nil), // 14: google.monitoring.v3.Service.MeshIstio
+ (*Service_IstioCanonicalService)(nil), // 15: google.monitoring.v3.Service.IstioCanonicalService
+ (*Service_CloudRun)(nil), // 16: google.monitoring.v3.Service.CloudRun
+ (*Service_GkeNamespace)(nil), // 17: google.monitoring.v3.Service.GkeNamespace
+ (*Service_GkeWorkload)(nil), // 18: google.monitoring.v3.Service.GkeWorkload
+ (*Service_GkeService)(nil), // 19: google.monitoring.v3.Service.GkeService
+ (*Service_BasicService)(nil), // 20: google.monitoring.v3.Service.BasicService
+ (*Service_Telemetry)(nil), // 21: google.monitoring.v3.Service.Telemetry
+ nil, // 22: google.monitoring.v3.Service.UserLabelsEntry
+ nil, // 23: google.monitoring.v3.Service.BasicService.ServiceLabelsEntry
+ nil, // 24: google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry
+ (*BasicSli_AvailabilityCriteria)(nil), // 25: google.monitoring.v3.BasicSli.AvailabilityCriteria
+ (*BasicSli_LatencyCriteria)(nil), // 26: google.monitoring.v3.BasicSli.LatencyCriteria
+ (*WindowsBasedSli_PerformanceThreshold)(nil), // 27: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold
+ (*WindowsBasedSli_MetricRange)(nil), // 28: google.monitoring.v3.WindowsBasedSli.MetricRange
+ (*durationpb.Duration)(nil), // 29: google.protobuf.Duration
+ (calendarperiod.CalendarPeriod)(0), // 30: google.type.CalendarPeriod
+}
+var file_google_monitoring_v3_service_proto_depIdxs = []int32{
+ 10, // 0: google.monitoring.v3.Service.custom:type_name -> google.monitoring.v3.Service.Custom
+ 11, // 1: google.monitoring.v3.Service.app_engine:type_name -> google.monitoring.v3.Service.AppEngine
+ 12, // 2: google.monitoring.v3.Service.cloud_endpoints:type_name -> google.monitoring.v3.Service.CloudEndpoints
+ 13, // 3: google.monitoring.v3.Service.cluster_istio:type_name -> google.monitoring.v3.Service.ClusterIstio
+ 14, // 4: google.monitoring.v3.Service.mesh_istio:type_name -> google.monitoring.v3.Service.MeshIstio
+ 15, // 5: google.monitoring.v3.Service.istio_canonical_service:type_name -> google.monitoring.v3.Service.IstioCanonicalService
+ 16, // 6: google.monitoring.v3.Service.cloud_run:type_name -> google.monitoring.v3.Service.CloudRun
+ 17, // 7: google.monitoring.v3.Service.gke_namespace:type_name -> google.monitoring.v3.Service.GkeNamespace
+ 18, // 8: google.monitoring.v3.Service.gke_workload:type_name -> google.monitoring.v3.Service.GkeWorkload
+ 19, // 9: google.monitoring.v3.Service.gke_service:type_name -> google.monitoring.v3.Service.GkeService
+ 20, // 10: google.monitoring.v3.Service.basic_service:type_name -> google.monitoring.v3.Service.BasicService
+ 21, // 11: google.monitoring.v3.Service.telemetry:type_name -> google.monitoring.v3.Service.Telemetry
+ 22, // 12: google.monitoring.v3.Service.user_labels:type_name -> google.monitoring.v3.Service.UserLabelsEntry
+ 3, // 13: google.monitoring.v3.ServiceLevelObjective.service_level_indicator:type_name -> google.monitoring.v3.ServiceLevelIndicator
+ 29, // 14: google.monitoring.v3.ServiceLevelObjective.rolling_period:type_name -> google.protobuf.Duration
+ 30, // 15: google.monitoring.v3.ServiceLevelObjective.calendar_period:type_name -> google.type.CalendarPeriod
+ 24, // 16: google.monitoring.v3.ServiceLevelObjective.user_labels:type_name -> google.monitoring.v3.ServiceLevelObjective.UserLabelsEntry
+ 4, // 17: google.monitoring.v3.ServiceLevelIndicator.basic_sli:type_name -> google.monitoring.v3.BasicSli
+ 6, // 18: google.monitoring.v3.ServiceLevelIndicator.request_based:type_name -> google.monitoring.v3.RequestBasedSli
+ 9, // 19: google.monitoring.v3.ServiceLevelIndicator.windows_based:type_name -> google.monitoring.v3.WindowsBasedSli
+ 25, // 20: google.monitoring.v3.BasicSli.availability:type_name -> google.monitoring.v3.BasicSli.AvailabilityCriteria
+ 26, // 21: google.monitoring.v3.BasicSli.latency:type_name -> google.monitoring.v3.BasicSli.LatencyCriteria
+ 7, // 22: google.monitoring.v3.RequestBasedSli.good_total_ratio:type_name -> google.monitoring.v3.TimeSeriesRatio
+ 8, // 23: google.monitoring.v3.RequestBasedSli.distribution_cut:type_name -> google.monitoring.v3.DistributionCut
+ 5, // 24: google.monitoring.v3.DistributionCut.range:type_name -> google.monitoring.v3.Range
+ 27, // 25: google.monitoring.v3.WindowsBasedSli.good_total_ratio_threshold:type_name -> google.monitoring.v3.WindowsBasedSli.PerformanceThreshold
+ 28, // 26: google.monitoring.v3.WindowsBasedSli.metric_mean_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange
+ 28, // 27: google.monitoring.v3.WindowsBasedSli.metric_sum_in_range:type_name -> google.monitoring.v3.WindowsBasedSli.MetricRange
+ 29, // 28: google.monitoring.v3.WindowsBasedSli.window_period:type_name -> google.protobuf.Duration
+ 23, // 29: google.monitoring.v3.Service.BasicService.service_labels:type_name -> google.monitoring.v3.Service.BasicService.ServiceLabelsEntry
+ 29, // 30: google.monitoring.v3.BasicSli.LatencyCriteria.threshold:type_name -> google.protobuf.Duration
+ 6, // 31: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.performance:type_name -> google.monitoring.v3.RequestBasedSli
+ 4, // 32: google.monitoring.v3.WindowsBasedSli.PerformanceThreshold.basic_sli_performance:type_name -> google.monitoring.v3.BasicSli
+ 5, // 33: google.monitoring.v3.WindowsBasedSli.MetricRange.range:type_name -> google.monitoring.v3.Range
+ 34, // [34:34] is the sub-list for method output_type
+ 34, // [34:34] is the sub-list for method input_type
+ 34, // [34:34] is the sub-list for extension type_name
+ 34, // [34:34] is the sub-list for extension extendee
+ 0, // [0:34] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_service_proto_init() }
+func file_google_monitoring_v3_service_proto_init() {
+ if File_google_monitoring_v3_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[0].OneofWrappers = []any{
+ (*Service_Custom_)(nil),
+ (*Service_AppEngine_)(nil),
+ (*Service_CloudEndpoints_)(nil),
+ (*Service_ClusterIstio_)(nil),
+ (*Service_MeshIstio_)(nil),
+ (*Service_IstioCanonicalService_)(nil),
+ (*Service_CloudRun_)(nil),
+ (*Service_GkeNamespace_)(nil),
+ (*Service_GkeWorkload_)(nil),
+ (*Service_GkeService_)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[1].OneofWrappers = []any{
+ (*ServiceLevelObjective_RollingPeriod)(nil),
+ (*ServiceLevelObjective_CalendarPeriod)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[2].OneofWrappers = []any{
+ (*ServiceLevelIndicator_BasicSli)(nil),
+ (*ServiceLevelIndicator_RequestBased)(nil),
+ (*ServiceLevelIndicator_WindowsBased)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[3].OneofWrappers = []any{
+ (*BasicSli_Availability)(nil),
+ (*BasicSli_Latency)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[5].OneofWrappers = []any{
+ (*RequestBasedSli_GoodTotalRatio)(nil),
+ (*RequestBasedSli_DistributionCut)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[8].OneofWrappers = []any{
+ (*WindowsBasedSli_GoodBadMetricFilter)(nil),
+ (*WindowsBasedSli_GoodTotalRatioThreshold)(nil),
+ (*WindowsBasedSli_MetricMeanInRange)(nil),
+ (*WindowsBasedSli_MetricSumInRange)(nil),
+ }
+ file_google_monitoring_v3_service_proto_msgTypes[26].OneofWrappers = []any{
+ (*WindowsBasedSli_PerformanceThreshold_Performance)(nil),
+ (*WindowsBasedSli_PerformanceThreshold_BasicSliPerformance)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_service_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 28,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_service_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_service_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_service_proto = out.File
+ file_google_monitoring_v3_service_proto_rawDesc = nil
+ file_google_monitoring_v3_service_proto_goTypes = nil
+ file_google_monitoring_v3_service_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go
new file mode 100644
index 000000000..08c2e08e2
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/service_service.pb.go
@@ -0,0 +1,1626 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/service_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The `CreateService` request.
+type CreateServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource
+ // [name](https://cloud.google.com/monitoring/api/v3#project_name) of the
+ // parent Metrics Scope. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. The Service id to use for this Service. If omitted, an id will be
+ // generated instead. Must match the pattern `[a-z0-9\-]+`
+ ServiceId string `protobuf:"bytes,3,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"`
+ // Required. The `Service` to create.
+ Service *Service `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"`
+}
+
+func (x *CreateServiceRequest) Reset() {
+ *x = CreateServiceRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateServiceRequest) ProtoMessage() {}
+
+func (x *CreateServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateServiceRequest.ProtoReflect.Descriptor instead.
+func (*CreateServiceRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateServiceRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateServiceRequest) GetServiceId() string {
+ if x != nil {
+ return x.ServiceId
+ }
+ return ""
+}
+
+func (x *CreateServiceRequest) GetService() *Service {
+ if x != nil {
+ return x.Service
+ }
+ return nil
+}
+
+// The `GetService` request.
+type GetServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the `Service`. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetServiceRequest) Reset() {
+ *x = GetServiceRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetServiceRequest) ProtoMessage() {}
+
+func (x *GetServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetServiceRequest.ProtoReflect.Descriptor instead.
+func (*GetServiceRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *GetServiceRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `ListServices` request.
+type ListServicesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the parent containing the listed services,
+ // either a [project](https://cloud.google.com/monitoring/api/v3#project_name)
+ // or a Monitoring Metrics Scope. The formats are:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ // workspaces/[HOST_PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // A filter specifying what `Service`s to return. The filter supports
+ // filtering on a particular service-identifier type or one of its attributes.
+ //
+ // To filter on a particular service-identifier type, the `identifier_case`
+ // refers to which option in the `identifier` field is populated. For example,
+ // the filter `identifier_case = "CUSTOM"` would match all services with a
+ // value for the `custom` field. Valid options include "CUSTOM", "APP_ENGINE",
+ // "MESH_ISTIO", and the other options listed at
+ // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service
+ //
+ // To filter on an attribute of a service-identifier type, apply the filter
+ // name by using the snake case of the service-identifier type and the
+ // attribute of that service-identifier type, and join the two with a period.
+ // For example, to filter by the `meshUid` field of the `MeshIstio`
+ // service-identifier type, you must filter on `mesh_istio.mesh_uid =
+ // "123"` to match all services with mesh UID "123". Service-identifier types
+ // and their attributes are described at
+ // https://cloud.google.com/monitoring/api/ref_v3/rest/v3/services#Service
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A non-negative number that is the maximum number of results to return.
+ // When 0, use default page size.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListServicesRequest) Reset() {
+ *x = ListServicesRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServicesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServicesRequest) ProtoMessage() {}
+
+func (x *ListServicesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServicesRequest.ProtoReflect.Descriptor instead.
+func (*ListServicesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListServicesRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListServicesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListServicesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListServicesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The `ListServices` response.
+type ListServicesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The `Service`s matching the specified filter.
+ Services []*Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListServicesResponse) Reset() {
+ *x = ListServicesResponse{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServicesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServicesResponse) ProtoMessage() {}
+
+func (x *ListServicesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServicesResponse.ProtoReflect.Descriptor instead.
+func (*ListServicesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *ListServicesResponse) GetServices() []*Service {
+ if x != nil {
+ return x.Services
+ }
+ return nil
+}
+
+func (x *ListServicesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `UpdateService` request.
+type UpdateServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The `Service` to draw updates from.
+ // The given `name` specifies the resource to update.
+ Service *Service `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+ // A set of field paths defining which fields to use for the update.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateServiceRequest) Reset() {
+ *x = UpdateServiceRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateServiceRequest) ProtoMessage() {}
+
+func (x *UpdateServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateServiceRequest.ProtoReflect.Descriptor instead.
+func (*UpdateServiceRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateServiceRequest) GetService() *Service {
+ if x != nil {
+ return x.Service
+ }
+ return nil
+}
+
+func (x *UpdateServiceRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+// The `DeleteService` request.
+type DeleteServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the `Service` to delete. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteServiceRequest) Reset() {
+ *x = DeleteServiceRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteServiceRequest) ProtoMessage() {}
+
+func (x *DeleteServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteServiceRequest.ProtoReflect.Descriptor instead.
+func (*DeleteServiceRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteServiceRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The `CreateServiceLevelObjective` request.
+type CreateServiceLevelObjectiveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the parent `Service`. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. The ServiceLevelObjective id to use for this
+ // ServiceLevelObjective. If omitted, an id will be generated instead. Must
+ // match the pattern `^[a-zA-Z0-9-_:.]+$`
+ ServiceLevelObjectiveId string `protobuf:"bytes,3,opt,name=service_level_objective_id,json=serviceLevelObjectiveId,proto3" json:"service_level_objective_id,omitempty"`
+ // Required. The `ServiceLevelObjective` to create.
+ // The provided `name` will be respected if no `ServiceLevelObjective` exists
+ // with this name.
+ ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,2,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"`
+}
+
+func (x *CreateServiceLevelObjectiveRequest) Reset() {
+ *x = CreateServiceLevelObjectiveRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateServiceLevelObjectiveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateServiceLevelObjectiveRequest) ProtoMessage() {}
+
+func (x *CreateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead.
+func (*CreateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *CreateServiceLevelObjectiveRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjectiveId() string {
+ if x != nil {
+ return x.ServiceLevelObjectiveId
+ }
+ return ""
+}
+
+func (x *CreateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective {
+ if x != nil {
+ return x.ServiceLevelObjective
+ }
+ return nil
+}
+
+// The `GetServiceLevelObjective` request.
+type GetServiceLevelObjectiveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the `ServiceLevelObjective` to get. The format
+ // is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // View of the `ServiceLevelObjective` to return. If `DEFAULT`, return the
+ // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the
+ // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the
+ // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed.
+ View ServiceLevelObjective_View `protobuf:"varint,2,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"`
+}
+
+func (x *GetServiceLevelObjectiveRequest) Reset() {
+ *x = GetServiceLevelObjectiveRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetServiceLevelObjectiveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetServiceLevelObjectiveRequest) ProtoMessage() {}
+
+func (x *GetServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead.
+func (*GetServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *GetServiceLevelObjectiveRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *GetServiceLevelObjectiveRequest) GetView() ServiceLevelObjective_View {
+ if x != nil {
+ return x.View
+ }
+ return ServiceLevelObjective_VIEW_UNSPECIFIED
+}
+
+// The `ListServiceLevelObjectives` request.
+type ListServiceLevelObjectivesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the parent containing the listed SLOs, either a
+ // project or a Monitoring Metrics Scope. The formats are:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]
+ // workspaces/[HOST_PROJECT_ID_OR_NUMBER]/services/-
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // A filter specifying what `ServiceLevelObjective`s to return.
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // A non-negative number that is the maximum number of results to return.
+ // When 0, use default page size.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return additional results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // View of the `ServiceLevelObjective`s to return. If `DEFAULT`, return each
+ // `ServiceLevelObjective` as originally defined. If `EXPLICIT` and the
+ // `ServiceLevelObjective` is defined in terms of a `BasicSli`, replace the
+ // `BasicSli` with a `RequestBasedSli` spelling out how the SLI is computed.
+ View ServiceLevelObjective_View `protobuf:"varint,5,opt,name=view,proto3,enum=google.monitoring.v3.ServiceLevelObjective_View" json:"view,omitempty"`
+}
+
+func (x *ListServiceLevelObjectivesRequest) Reset() {
+ *x = ListServiceLevelObjectivesRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServiceLevelObjectivesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceLevelObjectivesRequest) ProtoMessage() {}
+
+func (x *ListServiceLevelObjectivesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceLevelObjectivesRequest.ProtoReflect.Descriptor instead.
+func (*ListServiceLevelObjectivesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListServiceLevelObjectivesRequest) GetView() ServiceLevelObjective_View {
+ if x != nil {
+ return x.View
+ }
+ return ServiceLevelObjective_VIEW_UNSPECIFIED
+}
+
+// The `ListServiceLevelObjectives` response.
+type ListServiceLevelObjectivesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The `ServiceLevelObjective`s matching the specified filter.
+ ServiceLevelObjectives []*ServiceLevelObjective `protobuf:"bytes,1,rep,name=service_level_objectives,json=serviceLevelObjectives,proto3" json:"service_level_objectives,omitempty"`
+ // If there are more results than have been returned, then this field is set
+ // to a non-empty value. To see the additional results,
+ // use that value as `page_token` in the next call to this method.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListServiceLevelObjectivesResponse) Reset() {
+ *x = ListServiceLevelObjectivesResponse{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServiceLevelObjectivesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceLevelObjectivesResponse) ProtoMessage() {}
+
+func (x *ListServiceLevelObjectivesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceLevelObjectivesResponse.ProtoReflect.Descriptor instead.
+func (*ListServiceLevelObjectivesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *ListServiceLevelObjectivesResponse) GetServiceLevelObjectives() []*ServiceLevelObjective {
+ if x != nil {
+ return x.ServiceLevelObjectives
+ }
+ return nil
+}
+
+func (x *ListServiceLevelObjectivesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The `UpdateServiceLevelObjective` request.
+type UpdateServiceLevelObjectiveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The `ServiceLevelObjective` to draw updates from.
+ // The given `name` specifies the resource to update.
+ ServiceLevelObjective *ServiceLevelObjective `protobuf:"bytes,1,opt,name=service_level_objective,json=serviceLevelObjective,proto3" json:"service_level_objective,omitempty"`
+ // A set of field paths defining which fields to use for the update.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateServiceLevelObjectiveRequest) Reset() {
+ *x = UpdateServiceLevelObjectiveRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateServiceLevelObjectiveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateServiceLevelObjectiveRequest) ProtoMessage() {}
+
+func (x *UpdateServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead.
+func (*UpdateServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *UpdateServiceLevelObjectiveRequest) GetServiceLevelObjective() *ServiceLevelObjective {
+ if x != nil {
+ return x.ServiceLevelObjective
+ }
+ return nil
+}
+
+func (x *UpdateServiceLevelObjectiveRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+// The `DeleteServiceLevelObjective` request.
+type DeleteServiceLevelObjectiveRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Resource name of the `ServiceLevelObjective` to delete. The
+ // format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/services/[SERVICE_ID]/serviceLevelObjectives/[SLO_NAME]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteServiceLevelObjectiveRequest) Reset() {
+ *x = DeleteServiceLevelObjectiveRequest{}
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteServiceLevelObjectiveRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteServiceLevelObjectiveRequest) ProtoMessage() {}
+
+func (x *DeleteServiceLevelObjectiveRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_service_service_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteServiceLevelObjectiveRequest.ProtoReflect.Descriptor instead.
+func (*DeleteServiceLevelObjectiveRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_service_service_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *DeleteServiceLevelObjectiveRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_service_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_service_service_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61,
+ 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61,
+ 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb6, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x22, 0x52, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xac, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x23, 0x12, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67,
+ 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x08,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x08, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22,
+ 0x91, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d,
+ 0x61, 0x73, 0x6b, 0x22, 0x55, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x8e, 0x02, 0x0a, 0x22, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
+ 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x49,
+ 0x64, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76,
+ 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0xb4, 0x01, 0x0a, 0x1f,
+ 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x44, 0x0a, 0x04,
+ 0x76, 0x69, 0x65, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52, 0x04, 0x76, 0x69,
+ 0x65, 0x77, 0x22, 0x80, 0x02, 0x0a, 0x21, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x29, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x23,
+ 0x0a, 0x21, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
+ 0x44, 0x0a, 0x04, 0x76, 0x69, 0x65, 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x56, 0x69, 0x65, 0x77, 0x52,
+ 0x04, 0x76, 0x69, 0x65, 0x77, 0x22, 0xb3, 0x01, 0x0a, 0x22, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x65, 0x0a, 0x18,
+ 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x16, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65,
+ 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65,
+ 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xcb, 0x01, 0x0a, 0x22,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x68, 0x0a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65,
+ 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x3b, 0x0a, 0x0b,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0x71, 0x0a, 0x22, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x4b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x37, 0xe0,
+ 0x41, 0x02, 0xfa, 0x41, 0x31, 0x0a, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xea, 0x0f, 0x0a,
+ 0x18, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x97, 0x01, 0x0a, 0x0d, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x3a,
+ 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x12, 0x7e, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e,
+ 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x12, 0x91, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0xda, 0x41, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x12, 0x19, 0x2f, 0x76,
+ 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x7d, 0x2f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x22, 0x3c, 0xda, 0x41, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x3a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x32,
+ 0x21, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6e, 0x61,
+ 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x12, 0x7d, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74,
+ 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x28, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1b, 0x2a, 0x19, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a,
+ 0x7d, 0x12, 0xfa, 0x01, 0x0a, 0x1b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76,
+ 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x74, 0xda, 0x41, 0x1e, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02,
+ 0x4d, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22, 0x32, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c,
+ 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0xc1,
+ 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x35, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x22,
+ 0x41, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32,
+ 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x12, 0xd4, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x73, 0x12, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82,
+ 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x12, 0x8c, 0x02, 0x0a, 0x1b, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65,
+ 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x22, 0x85, 0x01, 0xda, 0x41, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65,
+ 0x76, 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x82, 0xd3, 0xe4,
+ 0x93, 0x02, 0x65, 0x3a, 0x17, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76,
+ 0x65, 0x6c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x32, 0x4a, 0x2f, 0x76,
+ 0x33, 0x2f, 0x7b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d,
+ 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xb2, 0x01, 0x0a, 0x1b, 0x44, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x41, 0xda, 0x41, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x2a, 0x32, 0x2f, 0x76, 0x33, 0x2f, 0x7b,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x2a, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4c, 0x65, 0x76, 0x65, 0x6c,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01,
+ 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01,
+ 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
+ 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xd8, 0x01, 0x0a, 0x18, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x1d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32,
+ 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_service_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_service_service_proto_rawDescData = file_google_monitoring_v3_service_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_service_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_service_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_service_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_service_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_service_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_service_service_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_google_monitoring_v3_service_service_proto_goTypes = []any{
+ (*CreateServiceRequest)(nil), // 0: google.monitoring.v3.CreateServiceRequest
+ (*GetServiceRequest)(nil), // 1: google.monitoring.v3.GetServiceRequest
+ (*ListServicesRequest)(nil), // 2: google.monitoring.v3.ListServicesRequest
+ (*ListServicesResponse)(nil), // 3: google.monitoring.v3.ListServicesResponse
+ (*UpdateServiceRequest)(nil), // 4: google.monitoring.v3.UpdateServiceRequest
+ (*DeleteServiceRequest)(nil), // 5: google.monitoring.v3.DeleteServiceRequest
+ (*CreateServiceLevelObjectiveRequest)(nil), // 6: google.monitoring.v3.CreateServiceLevelObjectiveRequest
+ (*GetServiceLevelObjectiveRequest)(nil), // 7: google.monitoring.v3.GetServiceLevelObjectiveRequest
+ (*ListServiceLevelObjectivesRequest)(nil), // 8: google.monitoring.v3.ListServiceLevelObjectivesRequest
+ (*ListServiceLevelObjectivesResponse)(nil), // 9: google.monitoring.v3.ListServiceLevelObjectivesResponse
+ (*UpdateServiceLevelObjectiveRequest)(nil), // 10: google.monitoring.v3.UpdateServiceLevelObjectiveRequest
+ (*DeleteServiceLevelObjectiveRequest)(nil), // 11: google.monitoring.v3.DeleteServiceLevelObjectiveRequest
+ (*Service)(nil), // 12: google.monitoring.v3.Service
+ (*fieldmaskpb.FieldMask)(nil), // 13: google.protobuf.FieldMask
+ (*ServiceLevelObjective)(nil), // 14: google.monitoring.v3.ServiceLevelObjective
+ (ServiceLevelObjective_View)(0), // 15: google.monitoring.v3.ServiceLevelObjective.View
+ (*emptypb.Empty)(nil), // 16: google.protobuf.Empty
+}
+var file_google_monitoring_v3_service_service_proto_depIdxs = []int32{
+ 12, // 0: google.monitoring.v3.CreateServiceRequest.service:type_name -> google.monitoring.v3.Service
+ 12, // 1: google.monitoring.v3.ListServicesResponse.services:type_name -> google.monitoring.v3.Service
+ 12, // 2: google.monitoring.v3.UpdateServiceRequest.service:type_name -> google.monitoring.v3.Service
+ 13, // 3: google.monitoring.v3.UpdateServiceRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 14, // 4: google.monitoring.v3.CreateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective
+ 15, // 5: google.monitoring.v3.GetServiceLevelObjectiveRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View
+ 15, // 6: google.monitoring.v3.ListServiceLevelObjectivesRequest.view:type_name -> google.monitoring.v3.ServiceLevelObjective.View
+ 14, // 7: google.monitoring.v3.ListServiceLevelObjectivesResponse.service_level_objectives:type_name -> google.monitoring.v3.ServiceLevelObjective
+ 14, // 8: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.service_level_objective:type_name -> google.monitoring.v3.ServiceLevelObjective
+ 13, // 9: google.monitoring.v3.UpdateServiceLevelObjectiveRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 0, // 10: google.monitoring.v3.ServiceMonitoringService.CreateService:input_type -> google.monitoring.v3.CreateServiceRequest
+ 1, // 11: google.monitoring.v3.ServiceMonitoringService.GetService:input_type -> google.monitoring.v3.GetServiceRequest
+ 2, // 12: google.monitoring.v3.ServiceMonitoringService.ListServices:input_type -> google.monitoring.v3.ListServicesRequest
+ 4, // 13: google.monitoring.v3.ServiceMonitoringService.UpdateService:input_type -> google.monitoring.v3.UpdateServiceRequest
+ 5, // 14: google.monitoring.v3.ServiceMonitoringService.DeleteService:input_type -> google.monitoring.v3.DeleteServiceRequest
+ 6, // 15: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:input_type -> google.monitoring.v3.CreateServiceLevelObjectiveRequest
+ 7, // 16: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:input_type -> google.monitoring.v3.GetServiceLevelObjectiveRequest
+ 8, // 17: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:input_type -> google.monitoring.v3.ListServiceLevelObjectivesRequest
+ 10, // 18: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:input_type -> google.monitoring.v3.UpdateServiceLevelObjectiveRequest
+ 11, // 19: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:input_type -> google.monitoring.v3.DeleteServiceLevelObjectiveRequest
+ 12, // 20: google.monitoring.v3.ServiceMonitoringService.CreateService:output_type -> google.monitoring.v3.Service
+ 12, // 21: google.monitoring.v3.ServiceMonitoringService.GetService:output_type -> google.monitoring.v3.Service
+ 3, // 22: google.monitoring.v3.ServiceMonitoringService.ListServices:output_type -> google.monitoring.v3.ListServicesResponse
+ 12, // 23: google.monitoring.v3.ServiceMonitoringService.UpdateService:output_type -> google.monitoring.v3.Service
+ 16, // 24: google.monitoring.v3.ServiceMonitoringService.DeleteService:output_type -> google.protobuf.Empty
+ 14, // 25: google.monitoring.v3.ServiceMonitoringService.CreateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective
+ 14, // 26: google.monitoring.v3.ServiceMonitoringService.GetServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective
+ 9, // 27: google.monitoring.v3.ServiceMonitoringService.ListServiceLevelObjectives:output_type -> google.monitoring.v3.ListServiceLevelObjectivesResponse
+ 14, // 28: google.monitoring.v3.ServiceMonitoringService.UpdateServiceLevelObjective:output_type -> google.monitoring.v3.ServiceLevelObjective
+ 16, // 29: google.monitoring.v3.ServiceMonitoringService.DeleteServiceLevelObjective:output_type -> google.protobuf.Empty
+ 20, // [20:30] is the sub-list for method output_type
+ 10, // [10:20] is the sub-list for method input_type
+ 10, // [10:10] is the sub-list for extension type_name
+ 10, // [10:10] is the sub-list for extension extendee
+ 0, // [0:10] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_service_service_proto_init() }
+func file_google_monitoring_v3_service_service_proto_init() {
+ if File_google_monitoring_v3_service_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_service_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_service_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_service_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_service_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_service_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_service_service_proto = out.File
+ file_google_monitoring_v3_service_service_proto_rawDesc = nil
+ file_google_monitoring_v3_service_service_proto_goTypes = nil
+ file_google_monitoring_v3_service_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// ServiceMonitoringServiceClient is the client API for ServiceMonitoringService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ServiceMonitoringServiceClient interface {
+ // Create a `Service`.
+ CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error)
+ // Get the named `Service`.
+ GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error)
+ // List `Service`s for this Metrics Scope.
+ ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error)
+ // Update this `Service`.
+ UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error)
+ // Soft delete this `Service`.
+ DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Create a `ServiceLevelObjective` for the given `Service`.
+ CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error)
+ // Get a `ServiceLevelObjective` by name.
+ GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error)
+ // List the `ServiceLevelObjective`s for the given `Service`.
+ ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error)
+ // Update the given `ServiceLevelObjective`.
+ UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error)
+ // Delete the given `ServiceLevelObjective`.
+ DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+}
+
+type serviceMonitoringServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewServiceMonitoringServiceClient(cc grpc.ClientConnInterface) ServiceMonitoringServiceClient {
+ return &serviceMonitoringServiceClient{cc}
+}
+
+func (c *serviceMonitoringServiceClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*Service, error) {
+ out := new(Service)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*Service, error) {
+ out := new(Service)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) {
+ out := new(ListServicesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServices", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*Service, error) {
+ out := new(Service)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) DeleteService(ctx context.Context, in *DeleteServiceRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteService", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) CreateServiceLevelObjective(ctx context.Context, in *CreateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) {
+ out := new(ServiceLevelObjective)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) GetServiceLevelObjective(ctx context.Context, in *GetServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) {
+ out := new(ServiceLevelObjective)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) ListServiceLevelObjectives(ctx context.Context, in *ListServiceLevelObjectivesRequest, opts ...grpc.CallOption) (*ListServiceLevelObjectivesResponse, error) {
+ out := new(ListServiceLevelObjectivesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) UpdateServiceLevelObjective(ctx context.Context, in *UpdateServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*ServiceLevelObjective, error) {
+ out := new(ServiceLevelObjective)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *serviceMonitoringServiceClient) DeleteServiceLevelObjective(ctx context.Context, in *DeleteServiceLevelObjectiveRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ServiceMonitoringServiceServer is the server API for ServiceMonitoringService service.
+type ServiceMonitoringServiceServer interface {
+ // Create a `Service`.
+ CreateService(context.Context, *CreateServiceRequest) (*Service, error)
+ // Get the named `Service`.
+ GetService(context.Context, *GetServiceRequest) (*Service, error)
+ // List `Service`s for this Metrics Scope.
+ ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error)
+ // Update this `Service`.
+ UpdateService(context.Context, *UpdateServiceRequest) (*Service, error)
+ // Soft delete this `Service`.
+ DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error)
+ // Create a `ServiceLevelObjective` for the given `Service`.
+ CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error)
+ // Get a `ServiceLevelObjective` by name.
+ GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error)
+ // List the `ServiceLevelObjective`s for the given `Service`.
+ ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error)
+ // Update the given `ServiceLevelObjective`.
+ UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error)
+ // Delete the given `ServiceLevelObjective`.
+ DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error)
+}
+
+// UnimplementedServiceMonitoringServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedServiceMonitoringServiceServer struct {
+}
+
+func (*UnimplementedServiceMonitoringServiceServer) CreateService(context.Context, *CreateServiceRequest) (*Service, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateService not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) GetService(context.Context, *GetServiceRequest) (*Service, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetService not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListServices not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) UpdateService(context.Context, *UpdateServiceRequest) (*Service, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateService not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) DeleteService(context.Context, *DeleteServiceRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteService not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) CreateServiceLevelObjective(context.Context, *CreateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateServiceLevelObjective not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) GetServiceLevelObjective(context.Context, *GetServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetServiceLevelObjective not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) ListServiceLevelObjectives(context.Context, *ListServiceLevelObjectivesRequest) (*ListServiceLevelObjectivesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListServiceLevelObjectives not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) UpdateServiceLevelObjective(context.Context, *UpdateServiceLevelObjectiveRequest) (*ServiceLevelObjective, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateServiceLevelObjective not implemented")
+}
+func (*UnimplementedServiceMonitoringServiceServer) DeleteServiceLevelObjective(context.Context, *DeleteServiceLevelObjectiveRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteServiceLevelObjective not implemented")
+}
+
+func RegisterServiceMonitoringServiceServer(s *grpc.Server, srv ServiceMonitoringServiceServer) {
+ s.RegisterService(&_ServiceMonitoringService_serviceDesc, srv)
+}
+
+func _ServiceMonitoringService_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).CreateService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).CreateService(ctx, req.(*CreateServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).GetService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).GetService(ctx, req.(*GetServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListServicesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).ListServices(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServices",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).ListServices(ctx, req.(*ListServicesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).UpdateService(ctx, req.(*UpdateServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_DeleteService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteService",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).DeleteService(ctx, req.(*DeleteServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_CreateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateServiceLevelObjectiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/CreateServiceLevelObjective",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).CreateServiceLevelObjective(ctx, req.(*CreateServiceLevelObjectiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_GetServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetServiceLevelObjectiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/GetServiceLevelObjective",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).GetServiceLevelObjective(ctx, req.(*GetServiceLevelObjectiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_ListServiceLevelObjectives_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListServiceLevelObjectivesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/ListServiceLevelObjectives",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).ListServiceLevelObjectives(ctx, req.(*ListServiceLevelObjectivesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_UpdateServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateServiceLevelObjectiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/UpdateServiceLevelObjective",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).UpdateServiceLevelObjective(ctx, req.(*UpdateServiceLevelObjectiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _ServiceMonitoringService_DeleteServiceLevelObjective_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteServiceLevelObjectiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.ServiceMonitoringService/DeleteServiceLevelObjective",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ServiceMonitoringServiceServer).DeleteServiceLevelObjective(ctx, req.(*DeleteServiceLevelObjectiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _ServiceMonitoringService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.ServiceMonitoringService",
+ HandlerType: (*ServiceMonitoringServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "CreateService",
+ Handler: _ServiceMonitoringService_CreateService_Handler,
+ },
+ {
+ MethodName: "GetService",
+ Handler: _ServiceMonitoringService_GetService_Handler,
+ },
+ {
+ MethodName: "ListServices",
+ Handler: _ServiceMonitoringService_ListServices_Handler,
+ },
+ {
+ MethodName: "UpdateService",
+ Handler: _ServiceMonitoringService_UpdateService_Handler,
+ },
+ {
+ MethodName: "DeleteService",
+ Handler: _ServiceMonitoringService_DeleteService_Handler,
+ },
+ {
+ MethodName: "CreateServiceLevelObjective",
+ Handler: _ServiceMonitoringService_CreateServiceLevelObjective_Handler,
+ },
+ {
+ MethodName: "GetServiceLevelObjective",
+ Handler: _ServiceMonitoringService_GetServiceLevelObjective_Handler,
+ },
+ {
+ MethodName: "ListServiceLevelObjectives",
+ Handler: _ServiceMonitoringService_ListServiceLevelObjectives_Handler,
+ },
+ {
+ MethodName: "UpdateServiceLevelObjective",
+ Handler: _ServiceMonitoringService_UpdateServiceLevelObjective_Handler,
+ },
+ {
+ MethodName: "DeleteServiceLevelObjective",
+ Handler: _ServiceMonitoringService_DeleteServiceLevelObjective_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/service_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go
new file mode 100644
index 000000000..861e045f2
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze.pb.go
@@ -0,0 +1,310 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/snooze.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// A `Snooze` will prevent any alerts from being opened, and close any that
+// are already open. The `Snooze` will work on alerts that match the
+// criteria defined in the `Snooze`. The `Snooze` will be active from
+// `interval.start_time` through `interval.end_time`.
+type Snooze struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Identifier. The name of the `Snooze`. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID]
+ //
+ // The ID of the `Snooze` will be generated by the system.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Required. This defines the criteria for applying the `Snooze`. See
+ // `Criteria` for more information.
+ Criteria *Snooze_Criteria `protobuf:"bytes,3,opt,name=criteria,proto3" json:"criteria,omitempty"`
+ // Required. The `Snooze` will be active from `interval.start_time` through
+ // `interval.end_time`.
+ // `interval.start_time` cannot be in the past. There is a 15 second clock
+ // skew to account for the time it takes for a request to reach the API from
+ // the UI.
+ Interval *TimeInterval `protobuf:"bytes,4,opt,name=interval,proto3" json:"interval,omitempty"`
+ // Required. A display name for the `Snooze`. This can be, at most, 512
+ // unicode characters.
+ DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+}
+
+func (x *Snooze) Reset() {
+ *x = Snooze{}
+ mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Snooze) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Snooze) ProtoMessage() {}
+
+func (x *Snooze) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Snooze.ProtoReflect.Descriptor instead.
+func (*Snooze) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Snooze) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Snooze) GetCriteria() *Snooze_Criteria {
+ if x != nil {
+ return x.Criteria
+ }
+ return nil
+}
+
+func (x *Snooze) GetInterval() *TimeInterval {
+ if x != nil {
+ return x.Interval
+ }
+ return nil
+}
+
+func (x *Snooze) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+// Criteria specific to the `AlertPolicy`s that this `Snooze` applies to. The
+// `Snooze` will suppress alerts that come from one of the `AlertPolicy`s
+// whose names are supplied.
+type Snooze_Criteria struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The specific `AlertPolicy` names for the alert that should be snoozed.
+ // The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[POLICY_ID]
+ //
+ // There is a limit of 16 policies per snooze. This limit is checked during
+ // snooze creation.
+ // Exactly 1 alert policy is required if `filter` is specified at the same
+ // time.
+ Policies []string `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"`
+ // Optional. The filter string to match on Alert fields when silencing the
+ // alerts. It follows the standard https://google.aip.dev/160 syntax.
+ // A filter string used to apply the snooze to specific incidents
+ // that have matching filter values.
+ // Filters can be defined for snoozes that apply to one alerting
+ // policy.
+ // Filters must be a string formatted as one or more resource labels with
+ // specific label values. If multiple resource labels are used, then they
+ // must be connected with an AND operator. For example, the following filter
+ // applies the snooze to incidents that have an instance ID of
+ // `1234567890` and a zone of `us-central1-a`:
+ //
+ // resource.labels.instance_id="1234567890" AND
+ // resource.labels.zone="us-central1-a"
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+}
+
+func (x *Snooze_Criteria) Reset() {
+ *x = Snooze_Criteria{}
+ mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Snooze_Criteria) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Snooze_Criteria) ProtoMessage() {}
+
+func (x *Snooze_Criteria) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Snooze_Criteria.ProtoReflect.Descriptor instead.
+func (*Snooze_Criteria) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Snooze_Criteria) GetPolicies() []string {
+ if x != nil {
+ return x.Policies
+ }
+ return nil
+}
+
+func (x *Snooze_Criteria) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_snooze_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_snooze_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61,
+ 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x03, 0x0a, 0x06, 0x53, 0x6e, 0x6f,
+ 0x6f, 0x7a, 0x65, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x08,
+ 0x63, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x43, 0x72, 0x69,
+ 0x74, 0x65, 0x72, 0x69, 0x61, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x63, 0x72, 0x69, 0x74,
+ 0x65, 0x72, 0x69, 0x61, 0x12, 0x43, 0x0a, 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x08, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x26, 0x0a, 0x0c, 0x64, 0x69, 0x73,
+ 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61, 0x6d,
+ 0x65, 0x1a, 0x6a, 0x0a, 0x08, 0x43, 0x72, 0x69, 0x74, 0x65, 0x72, 0x69, 0x61, 0x12, 0x46, 0x0a,
+ 0x08, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42,
+ 0x2a, 0xfa, 0x41, 0x27, 0x0a, 0x25, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x41, 0x6c, 0x65, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x08, 0x70, 0x6f, 0x6c,
+ 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x3a, 0x4a, 0xea,
+ 0x41, 0x47, 0x0a, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x23, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73,
+ 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x7d, 0x42, 0xc6, 0x01, 0x0a, 0x18, 0x63, 0x6f,
+ 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43,
+ 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c,
+ 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a,
+ 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_snooze_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_snooze_proto_rawDescData = file_google_monitoring_v3_snooze_proto_rawDesc
+)
+
+func file_google_monitoring_v3_snooze_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_snooze_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_snooze_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_snooze_proto_rawDescData
+}
+
+var file_google_monitoring_v3_snooze_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_monitoring_v3_snooze_proto_goTypes = []any{
+ (*Snooze)(nil), // 0: google.monitoring.v3.Snooze
+ (*Snooze_Criteria)(nil), // 1: google.monitoring.v3.Snooze.Criteria
+ (*TimeInterval)(nil), // 2: google.monitoring.v3.TimeInterval
+}
+var file_google_monitoring_v3_snooze_proto_depIdxs = []int32{
+ 1, // 0: google.monitoring.v3.Snooze.criteria:type_name -> google.monitoring.v3.Snooze.Criteria
+ 2, // 1: google.monitoring.v3.Snooze.interval:type_name -> google.monitoring.v3.TimeInterval
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_snooze_proto_init() }
+func file_google_monitoring_v3_snooze_proto_init() {
+ if File_google_monitoring_v3_snooze_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_common_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_snooze_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_snooze_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_snooze_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_snooze_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_snooze_proto = out.File
+ file_google_monitoring_v3_snooze_proto_rawDesc = nil
+ file_google_monitoring_v3_snooze_proto_goTypes = nil
+ file_google_monitoring_v3_snooze_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go
new file mode 100644
index 000000000..c562d60bc
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/snooze_service.pb.go
@@ -0,0 +1,793 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/snooze_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The message definition for creating a `Snooze`. Users must provide the body
+// of the `Snooze` to be created but must omit the `Snooze` field, `name`.
+type CreateSnoozeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+ // a `Snooze` should be created. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The `Snooze` to create. Omit the `name` field, as it will be
+ // filled in by the API.
+ Snooze *Snooze `protobuf:"bytes,2,opt,name=snooze,proto3" json:"snooze,omitempty"`
+}
+
+func (x *CreateSnoozeRequest) Reset() {
+ *x = CreateSnoozeRequest{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateSnoozeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateSnoozeRequest) ProtoMessage() {}
+
+func (x *CreateSnoozeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateSnoozeRequest.ProtoReflect.Descriptor instead.
+func (*CreateSnoozeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CreateSnoozeRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateSnoozeRequest) GetSnooze() *Snooze {
+ if x != nil {
+ return x.Snooze
+ }
+ return nil
+}
+
+// The message definition for listing `Snooze`s associated with the given
+// `parent`, satisfying the optional `filter`.
+type ListSnoozesRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
+ // `Snooze`s should be listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Optional. Optional filter to restrict results to the given criteria. The
+ // following fields are supported.
+ //
+ // - `interval.start_time`
+ // - `interval.end_time`
+ //
+ // For example:
+ //
+ // interval.start_time > "2022-03-11T00:00:00-08:00" AND
+ // interval.end_time < "2022-03-12T00:00:00-08:00"
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Optional. The maximum number of results to return for a single query. The
+ // server may further constrain the maximum number of results returned in a
+ // single page. The value should be in the range [1, 1000]. If the value given
+ // is outside this range, the server will decide the number of results to be
+ // returned.
+ PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // Optional. The `next_page_token` from a previous call to
+ // `ListSnoozesRequest` to get the next page of results.
+ PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListSnoozesRequest) Reset() {
+ *x = ListSnoozesRequest{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListSnoozesRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListSnoozesRequest) ProtoMessage() {}
+
+func (x *ListSnoozesRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListSnoozesRequest.ProtoReflect.Descriptor instead.
+func (*ListSnoozesRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListSnoozesRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListSnoozesRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListSnoozesRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListSnoozesRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The results of a successful `ListSnoozes` call, containing the matching
+// `Snooze`s.
+type ListSnoozesResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // `Snooze`s matching this list call.
+ Snoozes []*Snooze `protobuf:"bytes,1,rep,name=snoozes,proto3" json:"snoozes,omitempty"`
+ // Page token for repeated calls to `ListSnoozes`, to fetch additional pages
+ // of results. If this is empty or missing, there are no more pages.
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListSnoozesResponse) Reset() {
+ *x = ListSnoozesResponse{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListSnoozesResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListSnoozesResponse) ProtoMessage() {}
+
+func (x *ListSnoozesResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListSnoozesResponse.ProtoReflect.Descriptor instead.
+func (*ListSnoozesResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ListSnoozesResponse) GetSnoozes() []*Snooze {
+ if x != nil {
+ return x.Snoozes
+ }
+ return nil
+}
+
+func (x *ListSnoozesResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+// The message definition for retrieving a `Snooze`. Users must specify the
+// field, `name`, which identifies the `Snooze`.
+type GetSnoozeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The ID of the `Snooze` to retrieve. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/snoozes/[SNOOZE_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetSnoozeRequest) Reset() {
+ *x = GetSnoozeRequest{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetSnoozeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetSnoozeRequest) ProtoMessage() {}
+
+func (x *GetSnoozeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetSnoozeRequest.ProtoReflect.Descriptor instead.
+func (*GetSnoozeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *GetSnoozeRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The message definition for updating a `Snooze`. The field, `snooze.name`
+// identifies the `Snooze` to be updated. The remainder of `snooze` gives the
+// content the `Snooze` in question will be assigned.
+//
+// What fields can be updated depends on the start time and end time of the
+// `Snooze`.
+//
+// - end time is in the past: These `Snooze`s are considered
+// read-only and cannot be updated.
+// - start time is in the past and end time is in the future: `display_name`
+// and `interval.end_time` can be updated.
+// - start time is in the future: `display_name`, `interval.start_time` and
+// `interval.end_time` can be updated.
+type UpdateSnoozeRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The `Snooze` to update. Must have the name field present.
+ Snooze *Snooze `protobuf:"bytes,1,opt,name=snooze,proto3" json:"snooze,omitempty"`
+ // Required. The fields to update.
+ //
+ // For each field listed in `update_mask`:
+ //
+ // - If the `Snooze` object supplied in the `UpdateSnoozeRequest` has a
+ // value for that field, the value of the field in the existing `Snooze`
+ // will be set to the value of the field in the supplied `Snooze`.
+ // - If the field does not have a value in the supplied `Snooze`, the field
+ // in the existing `Snooze` is set to its default value.
+ //
+ // Fields not listed retain their existing value.
+ //
+ // The following are the field names that are accepted in `update_mask`:
+ //
+ // - `display_name`
+ // - `interval.start_time`
+ // - `interval.end_time`
+ //
+ // That said, the start time and end time of the `Snooze` determines which
+ // fields can legally be updated. Before attempting an update, users should
+ // consult the documentation for `UpdateSnoozeRequest`, which talks about
+ // which fields can be updated.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+}
+
+func (x *UpdateSnoozeRequest) Reset() {
+ *x = UpdateSnoozeRequest{}
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateSnoozeRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateSnoozeRequest) ProtoMessage() {}
+
+func (x *UpdateSnoozeRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_snooze_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateSnoozeRequest.ProtoReflect.Descriptor instead.
+func (*UpdateSnoozeRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_snooze_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateSnoozeRequest) GetSnooze() *Snooze {
+ if x != nil {
+ return x.Snooze
+ }
+ return nil
+}
+
+func (x *UpdateSnoozeRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+var File_google_monitoring_v3_snooze_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_snooze_service_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d,
+ 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
+ 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0xb9,
+ 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x12, 0x20, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52,
+ 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x06, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x08, 0x70, 0x61,
+ 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x75, 0x0a, 0x13, 0x4c, 0x69,
+ 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x36, 0x0a, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65,
+ 0x52, 0x07, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
+ 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x0a, 0x20, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x22, 0x92, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x73,
+ 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06,
+ 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x32, 0x98, 0x06, 0x0a, 0x0d, 0x53, 0x6e, 0x6f,
+ 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x43,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x3f, 0xda, 0x41, 0x0d, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c,
+ 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x3a, 0x06, 0x73, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x94, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e,
+ 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
+ 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0xda, 0x41, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x33,
+ 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x12, 0x81, 0x01, 0x0a,
+ 0x09, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65,
+ 0x22, 0x2e, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12,
+ 0x1f, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d,
+ 0x12, 0xa4, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x6f, 0x6f, 0x7a,
+ 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53,
+ 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x22, 0x4b, 0xda, 0x41, 0x12, 0x73,
+ 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73,
+ 0x6b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x30, 0x3a, 0x06, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x32,
+ 0x26, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x73, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x2e, 0x6e, 0x61, 0x6d,
+ 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x6f,
+ 0x6f, 0x7a, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a,
+ 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d,
+ 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
+ 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
+ 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72,
+ 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x42, 0x12, 0x53, 0x6e, 0x6f, 0x6f, 0x7a, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a,
+ 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_snooze_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_snooze_service_proto_rawDescData = file_google_monitoring_v3_snooze_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_snooze_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_snooze_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_snooze_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_snooze_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_snooze_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_snooze_service_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_google_monitoring_v3_snooze_service_proto_goTypes = []any{
+ (*CreateSnoozeRequest)(nil), // 0: google.monitoring.v3.CreateSnoozeRequest
+ (*ListSnoozesRequest)(nil), // 1: google.monitoring.v3.ListSnoozesRequest
+ (*ListSnoozesResponse)(nil), // 2: google.monitoring.v3.ListSnoozesResponse
+ (*GetSnoozeRequest)(nil), // 3: google.monitoring.v3.GetSnoozeRequest
+ (*UpdateSnoozeRequest)(nil), // 4: google.monitoring.v3.UpdateSnoozeRequest
+ (*Snooze)(nil), // 5: google.monitoring.v3.Snooze
+ (*fieldmaskpb.FieldMask)(nil), // 6: google.protobuf.FieldMask
+}
+var file_google_monitoring_v3_snooze_service_proto_depIdxs = []int32{
+ 5, // 0: google.monitoring.v3.CreateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze
+ 5, // 1: google.monitoring.v3.ListSnoozesResponse.snoozes:type_name -> google.monitoring.v3.Snooze
+ 5, // 2: google.monitoring.v3.UpdateSnoozeRequest.snooze:type_name -> google.monitoring.v3.Snooze
+ 6, // 3: google.monitoring.v3.UpdateSnoozeRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 0, // 4: google.monitoring.v3.SnoozeService.CreateSnooze:input_type -> google.monitoring.v3.CreateSnoozeRequest
+ 1, // 5: google.monitoring.v3.SnoozeService.ListSnoozes:input_type -> google.monitoring.v3.ListSnoozesRequest
+ 3, // 6: google.monitoring.v3.SnoozeService.GetSnooze:input_type -> google.monitoring.v3.GetSnoozeRequest
+ 4, // 7: google.monitoring.v3.SnoozeService.UpdateSnooze:input_type -> google.monitoring.v3.UpdateSnoozeRequest
+ 5, // 8: google.monitoring.v3.SnoozeService.CreateSnooze:output_type -> google.monitoring.v3.Snooze
+ 2, // 9: google.monitoring.v3.SnoozeService.ListSnoozes:output_type -> google.monitoring.v3.ListSnoozesResponse
+ 5, // 10: google.monitoring.v3.SnoozeService.GetSnooze:output_type -> google.monitoring.v3.Snooze
+ 5, // 11: google.monitoring.v3.SnoozeService.UpdateSnooze:output_type -> google.monitoring.v3.Snooze
+ 8, // [8:12] is the sub-list for method output_type
+ 4, // [4:8] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_snooze_service_proto_init() }
+func file_google_monitoring_v3_snooze_service_proto_init() {
+ if File_google_monitoring_v3_snooze_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_snooze_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_snooze_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_snooze_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_snooze_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_snooze_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_snooze_service_proto = out.File
+ file_google_monitoring_v3_snooze_service_proto_rawDesc = nil
+ file_google_monitoring_v3_snooze_service_proto_goTypes = nil
+ file_google_monitoring_v3_snooze_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// SnoozeServiceClient is the client API for SnoozeService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type SnoozeServiceClient interface {
+ // Creates a `Snooze` that will prevent alerts, which match the provided
+ // criteria, from being opened. The `Snooze` applies for a specific time
+ // interval.
+ CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
+ // Lists the `Snooze`s associated with a project. Can optionally pass in
+ // `filter`, which specifies predicates to match `Snooze`s.
+ ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error)
+ // Retrieves a `Snooze` by `name`.
+ GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
+ // Updates a `Snooze`, identified by its `name`, with the parameters in the
+ // given `Snooze` object.
+ UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error)
+}
+
+type snoozeServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewSnoozeServiceClient(cc grpc.ClientConnInterface) SnoozeServiceClient {
+ return &snoozeServiceClient{cc}
+}
+
+func (c *snoozeServiceClient) CreateSnooze(ctx context.Context, in *CreateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
+ out := new(Snooze)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/CreateSnooze", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *snoozeServiceClient) ListSnoozes(ctx context.Context, in *ListSnoozesRequest, opts ...grpc.CallOption) (*ListSnoozesResponse, error) {
+ out := new(ListSnoozesResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/ListSnoozes", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *snoozeServiceClient) GetSnooze(ctx context.Context, in *GetSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
+ out := new(Snooze)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/GetSnooze", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *snoozeServiceClient) UpdateSnooze(ctx context.Context, in *UpdateSnoozeRequest, opts ...grpc.CallOption) (*Snooze, error) {
+ out := new(Snooze)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.SnoozeService/UpdateSnooze", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// SnoozeServiceServer is the server API for SnoozeService service.
+type SnoozeServiceServer interface {
+ // Creates a `Snooze` that will prevent alerts, which match the provided
+ // criteria, from being opened. The `Snooze` applies for a specific time
+ // interval.
+ CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error)
+ // Lists the `Snooze`s associated with a project. Can optionally pass in
+ // `filter`, which specifies predicates to match `Snooze`s.
+ ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error)
+ // Retrieves a `Snooze` by `name`.
+ GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error)
+ // Updates a `Snooze`, identified by its `name`, with the parameters in the
+ // given `Snooze` object.
+ UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error)
+}
+
+// UnimplementedSnoozeServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedSnoozeServiceServer struct {
+}
+
+func (*UnimplementedSnoozeServiceServer) CreateSnooze(context.Context, *CreateSnoozeRequest) (*Snooze, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateSnooze not implemented")
+}
+func (*UnimplementedSnoozeServiceServer) ListSnoozes(context.Context, *ListSnoozesRequest) (*ListSnoozesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListSnoozes not implemented")
+}
+func (*UnimplementedSnoozeServiceServer) GetSnooze(context.Context, *GetSnoozeRequest) (*Snooze, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetSnooze not implemented")
+}
+func (*UnimplementedSnoozeServiceServer) UpdateSnooze(context.Context, *UpdateSnoozeRequest) (*Snooze, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateSnooze not implemented")
+}
+
+func RegisterSnoozeServiceServer(s *grpc.Server, srv SnoozeServiceServer) {
+ s.RegisterService(&_SnoozeService_serviceDesc, srv)
+}
+
+func _SnoozeService_CreateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateSnoozeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SnoozeServiceServer).CreateSnooze(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.SnoozeService/CreateSnooze",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SnoozeServiceServer).CreateSnooze(ctx, req.(*CreateSnoozeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SnoozeService_ListSnoozes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListSnoozesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SnoozeServiceServer).ListSnoozes(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.SnoozeService/ListSnoozes",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SnoozeServiceServer).ListSnoozes(ctx, req.(*ListSnoozesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SnoozeService_GetSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetSnoozeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SnoozeServiceServer).GetSnooze(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.SnoozeService/GetSnooze",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SnoozeServiceServer).GetSnooze(ctx, req.(*GetSnoozeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _SnoozeService_UpdateSnooze_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateSnoozeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(SnoozeServiceServer).UpdateSnooze(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.SnoozeService/UpdateSnooze",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(SnoozeServiceServer).UpdateSnooze(ctx, req.(*UpdateSnoozeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _SnoozeService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.SnoozeService",
+ HandlerType: (*SnoozeServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "CreateSnooze",
+ Handler: _SnoozeService_CreateSnooze_Handler,
+ },
+ {
+ MethodName: "ListSnoozes",
+ Handler: _SnoozeService_ListSnoozes_Handler,
+ },
+ {
+ MethodName: "GetSnooze",
+ Handler: _SnoozeService_GetSnooze_Handler,
+ },
+ {
+ MethodName: "UpdateSnooze",
+ Handler: _SnoozeService_UpdateSnooze_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/snooze_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go
new file mode 100644
index 000000000..23f42835f
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/span_context.pb.go
@@ -0,0 +1,172 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/span_context.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The context of a span. This is attached to an
+// [Exemplar][google.api.Distribution.Exemplar]
+// in [Distribution][google.api.Distribution] values during aggregation.
+//
+// It contains the name of a span with format:
+//
+// projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]
+type SpanContext struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The resource name of the span. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/traces/[TRACE_ID]/spans/[SPAN_ID]
+ //
+ // `[TRACE_ID]` is a unique identifier for a trace within a project;
+ // it is a 32-character hexadecimal encoding of a 16-byte array.
+ //
+ // `[SPAN_ID]` is a unique identifier for a span within a trace; it
+ // is a 16-character hexadecimal encoding of an 8-byte array.
+ SpanName string `protobuf:"bytes,1,opt,name=span_name,json=spanName,proto3" json:"span_name,omitempty"`
+}
+
+func (x *SpanContext) Reset() {
+ *x = SpanContext{}
+ mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SpanContext) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SpanContext) ProtoMessage() {}
+
+func (x *SpanContext) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_span_context_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SpanContext.ProtoReflect.Descriptor instead.
+func (*SpanContext) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_span_context_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SpanContext) GetSpanName() string {
+ if x != nil {
+ return x.SpanName
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_span_context_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_span_context_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x78, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x22,
+ 0x2a, 0x0a, 0x0b, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b,
+ 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0xcb, 0x01, 0x0a, 0x18,
+ 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x10, 0x53, 0x70, 0x61, 0x6e, 0x43, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69,
+ 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0xaa,
+ 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca, 0x02, 0x1a, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_google_monitoring_v3_span_context_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_span_context_proto_rawDescData = file_google_monitoring_v3_span_context_proto_rawDesc
+)
+
+func file_google_monitoring_v3_span_context_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_span_context_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_span_context_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_span_context_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_span_context_proto_rawDescData
+}
+
+var file_google_monitoring_v3_span_context_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_monitoring_v3_span_context_proto_goTypes = []any{
+ (*SpanContext)(nil), // 0: google.monitoring.v3.SpanContext
+}
+var file_google_monitoring_v3_span_context_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_span_context_proto_init() }
+func file_google_monitoring_v3_span_context_proto_init() {
+ if File_google_monitoring_v3_span_context_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_span_context_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_span_context_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_span_context_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_span_context_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_span_context_proto = out.File
+ file_google_monitoring_v3_span_context_proto_rawDesc = nil
+ file_google_monitoring_v3_span_context_proto_goTypes = nil
+ file_google_monitoring_v3_span_context_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go
new file mode 100644
index 000000000..f303ac251
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime.pb.go
@@ -0,0 +1,2531 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/uptime.proto
+
+package monitoringpb
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ monitoredres "google.golang.org/genproto/googleapis/api/monitoredres"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The regions from which an Uptime check can be run.
+type UptimeCheckRegion int32
+
+const (
+ // Default value if no region is specified. Will result in Uptime checks
+ // running from all regions.
+ UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0
+ // Allows checks to run from locations within the United States of America.
+ UptimeCheckRegion_USA UptimeCheckRegion = 1
+ // Allows checks to run from locations within the continent of Europe.
+ UptimeCheckRegion_EUROPE UptimeCheckRegion = 2
+ // Allows checks to run from locations within the continent of South
+ // America.
+ UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3
+ // Allows checks to run from locations within the Asia Pacific area (ex:
+ // Singapore).
+ UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4
+ // Allows checks to run from locations within the western United States of
+ // America
+ UptimeCheckRegion_USA_OREGON UptimeCheckRegion = 5
+ // Allows checks to run from locations within the central United States of
+ // America
+ UptimeCheckRegion_USA_IOWA UptimeCheckRegion = 6
+ // Allows checks to run from locations within the eastern United States of
+ // America
+ UptimeCheckRegion_USA_VIRGINIA UptimeCheckRegion = 7
+)
+
+// Enum value maps for UptimeCheckRegion.
+var (
+ UptimeCheckRegion_name = map[int32]string{
+ 0: "REGION_UNSPECIFIED",
+ 1: "USA",
+ 2: "EUROPE",
+ 3: "SOUTH_AMERICA",
+ 4: "ASIA_PACIFIC",
+ 5: "USA_OREGON",
+ 6: "USA_IOWA",
+ 7: "USA_VIRGINIA",
+ }
+ UptimeCheckRegion_value = map[string]int32{
+ "REGION_UNSPECIFIED": 0,
+ "USA": 1,
+ "EUROPE": 2,
+ "SOUTH_AMERICA": 3,
+ "ASIA_PACIFIC": 4,
+ "USA_OREGON": 5,
+ "USA_IOWA": 6,
+ "USA_VIRGINIA": 7,
+ }
+)
+
+func (x UptimeCheckRegion) Enum() *UptimeCheckRegion {
+ p := new(UptimeCheckRegion)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckRegion) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckRegion) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[0].Descriptor()
+}
+
+func (UptimeCheckRegion) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[0]
+}
+
+func (x UptimeCheckRegion) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckRegion.Descriptor instead.
+func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0}
+}
+
+// The supported resource types that can be used as values of
+// `group_resource.resource_type`.
+// `INSTANCE` includes `gce_instance` and `aws_ec2_instance` resource types.
+// The resource types `gae_app` and `uptime_url` are not valid here because
+// group checks on App Engine modules and URLs are not allowed.
+type GroupResourceType int32
+
+const (
+ // Default value (not valid).
+ GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0
+ // A group of instances from Google Cloud Platform (GCP) or
+ // Amazon Web Services (AWS).
+ GroupResourceType_INSTANCE GroupResourceType = 1
+ // A group of Amazon ELB load balancers.
+ GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2
+)
+
+// Enum value maps for GroupResourceType.
+var (
+ GroupResourceType_name = map[int32]string{
+ 0: "RESOURCE_TYPE_UNSPECIFIED",
+ 1: "INSTANCE",
+ 2: "AWS_ELB_LOAD_BALANCER",
+ }
+ GroupResourceType_value = map[string]int32{
+ "RESOURCE_TYPE_UNSPECIFIED": 0,
+ "INSTANCE": 1,
+ "AWS_ELB_LOAD_BALANCER": 2,
+ }
+)
+
+func (x GroupResourceType) Enum() *GroupResourceType {
+ p := new(GroupResourceType)
+ *p = x
+ return p
+}
+
+func (x GroupResourceType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GroupResourceType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[1].Descriptor()
+}
+
+func (GroupResourceType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[1]
+}
+
+func (x GroupResourceType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GroupResourceType.Descriptor instead.
+func (GroupResourceType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1}
+}
+
+// Operational states for an internal checker.
+type InternalChecker_State int32
+
+const (
+ // An internal checker should never be in the unspecified state.
+ InternalChecker_UNSPECIFIED InternalChecker_State = 0
+ // The checker is being created, provisioned, and configured. A checker in
+ // this state can be returned by `ListInternalCheckers` or
+ // `GetInternalChecker`, as well as by examining the [long running
+ // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations)
+ // that created it.
+ InternalChecker_CREATING InternalChecker_State = 1
+ // The checker is running and available for use. A checker in this state
+ // can be returned by `ListInternalCheckers` or `GetInternalChecker` as
+ // well as by examining the [long running
+ // Operation](https://cloud.google.com/apis/design/design_patterns#long_running_operations)
+ // that created it.
+ // If a checker is being torn down, it is neither visible nor usable, so
+ // there is no "deleting" or "down" state.
+ InternalChecker_RUNNING InternalChecker_State = 2
+)
+
+// Enum value maps for InternalChecker_State.
+var (
+ InternalChecker_State_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "CREATING",
+ 2: "RUNNING",
+ }
+ InternalChecker_State_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "CREATING": 1,
+ "RUNNING": 2,
+ }
+)
+
+func (x InternalChecker_State) Enum() *InternalChecker_State {
+ p := new(InternalChecker_State)
+ *p = x
+ return p
+}
+
+func (x InternalChecker_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (InternalChecker_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[2].Descriptor()
+}
+
+func (InternalChecker_State) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[2]
+}
+
+func (x InternalChecker_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use InternalChecker_State.Descriptor instead.
+func (InternalChecker_State) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// What kind of checkers are available to be used by the check.
+type UptimeCheckConfig_CheckerType int32
+
+const (
+ // The default checker type. Currently converted to `STATIC_IP_CHECKERS`
+ // on creation, the default conversion behavior may change in the future.
+ UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED UptimeCheckConfig_CheckerType = 0
+ // `STATIC_IP_CHECKERS` are used for uptime checks that perform egress
+ // across the public internet. `STATIC_IP_CHECKERS` use the static IP
+ // addresses returned by `ListUptimeCheckIps`.
+ UptimeCheckConfig_STATIC_IP_CHECKERS UptimeCheckConfig_CheckerType = 1
+ // `VPC_CHECKERS` are used for uptime checks that perform egress using
+ // Service Directory and private network access. When using `VPC_CHECKERS`,
+ // the monitored resource type must be `servicedirectory_service`.
+ UptimeCheckConfig_VPC_CHECKERS UptimeCheckConfig_CheckerType = 3
+)
+
+// Enum value maps for UptimeCheckConfig_CheckerType.
+var (
+ UptimeCheckConfig_CheckerType_name = map[int32]string{
+ 0: "CHECKER_TYPE_UNSPECIFIED",
+ 1: "STATIC_IP_CHECKERS",
+ 3: "VPC_CHECKERS",
+ }
+ UptimeCheckConfig_CheckerType_value = map[string]int32{
+ "CHECKER_TYPE_UNSPECIFIED": 0,
+ "STATIC_IP_CHECKERS": 1,
+ "VPC_CHECKERS": 3,
+ }
+)
+
+func (x UptimeCheckConfig_CheckerType) Enum() *UptimeCheckConfig_CheckerType {
+ p := new(UptimeCheckConfig_CheckerType)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_CheckerType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_CheckerType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[3].Descriptor()
+}
+
+func (UptimeCheckConfig_CheckerType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[3]
+}
+
+func (x UptimeCheckConfig_CheckerType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_CheckerType.Descriptor instead.
+func (UptimeCheckConfig_CheckerType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0}
+}
+
+// The HTTP request method options.
+type UptimeCheckConfig_HttpCheck_RequestMethod int32
+
+const (
+ // No request method specified.
+ UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED UptimeCheckConfig_HttpCheck_RequestMethod = 0
+ // GET request.
+ UptimeCheckConfig_HttpCheck_GET UptimeCheckConfig_HttpCheck_RequestMethod = 1
+ // POST request.
+ UptimeCheckConfig_HttpCheck_POST UptimeCheckConfig_HttpCheck_RequestMethod = 2
+)
+
+// Enum value maps for UptimeCheckConfig_HttpCheck_RequestMethod.
+var (
+ UptimeCheckConfig_HttpCheck_RequestMethod_name = map[int32]string{
+ 0: "METHOD_UNSPECIFIED",
+ 1: "GET",
+ 2: "POST",
+ }
+ UptimeCheckConfig_HttpCheck_RequestMethod_value = map[string]int32{
+ "METHOD_UNSPECIFIED": 0,
+ "GET": 1,
+ "POST": 2,
+ }
+)
+
+func (x UptimeCheckConfig_HttpCheck_RequestMethod) Enum() *UptimeCheckConfig_HttpCheck_RequestMethod {
+ p := new(UptimeCheckConfig_HttpCheck_RequestMethod)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_HttpCheck_RequestMethod) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_HttpCheck_RequestMethod) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[4].Descriptor()
+}
+
+func (UptimeCheckConfig_HttpCheck_RequestMethod) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[4]
+}
+
+func (x UptimeCheckConfig_HttpCheck_RequestMethod) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_RequestMethod.Descriptor instead.
+func (UptimeCheckConfig_HttpCheck_RequestMethod) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0}
+}
+
+// Header options corresponding to the content type of a HTTP request body.
+type UptimeCheckConfig_HttpCheck_ContentType int32
+
+const (
+ // No content type specified.
+ UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ContentType = 0
+ // `body` is in URL-encoded form. Equivalent to setting the `Content-Type`
+ // to `application/x-www-form-urlencoded` in the HTTP request.
+ UptimeCheckConfig_HttpCheck_URL_ENCODED UptimeCheckConfig_HttpCheck_ContentType = 1
+ // `body` is in `custom_content_type` form. Equivalent to setting the
+ // `Content-Type` to the contents of `custom_content_type` in the HTTP
+ // request.
+ UptimeCheckConfig_HttpCheck_USER_PROVIDED UptimeCheckConfig_HttpCheck_ContentType = 2
+)
+
+// Enum value maps for UptimeCheckConfig_HttpCheck_ContentType.
+var (
+ UptimeCheckConfig_HttpCheck_ContentType_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "URL_ENCODED",
+ 2: "USER_PROVIDED",
+ }
+ UptimeCheckConfig_HttpCheck_ContentType_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "URL_ENCODED": 1,
+ "USER_PROVIDED": 2,
+ }
+)
+
+func (x UptimeCheckConfig_HttpCheck_ContentType) Enum() *UptimeCheckConfig_HttpCheck_ContentType {
+ p := new(UptimeCheckConfig_HttpCheck_ContentType)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_HttpCheck_ContentType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_HttpCheck_ContentType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[5].Descriptor()
+}
+
+func (UptimeCheckConfig_HttpCheck_ContentType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[5]
+}
+
+func (x UptimeCheckConfig_HttpCheck_ContentType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ContentType.Descriptor instead.
+func (UptimeCheckConfig_HttpCheck_ContentType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1}
+}
+
+// An HTTP status code class.
+type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass int32
+
+const (
+ // Default value that matches no status codes.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 0
+ // The class of status codes between 100 and 199.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_1XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 100
+ // The class of status codes between 200 and 299.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_2XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 200
+ // The class of status codes between 300 and 399.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_3XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 300
+ // The class of status codes between 400 and 499.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_4XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 400
+ // The class of status codes between 500 and 599.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_5XX UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 500
+ // The class of all status codes.
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_ANY UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass = 1000
+)
+
+// Enum value maps for UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.
+var (
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_name = map[int32]string{
+ 0: "STATUS_CLASS_UNSPECIFIED",
+ 100: "STATUS_CLASS_1XX",
+ 200: "STATUS_CLASS_2XX",
+ 300: "STATUS_CLASS_3XX",
+ 400: "STATUS_CLASS_4XX",
+ 500: "STATUS_CLASS_5XX",
+ 1000: "STATUS_CLASS_ANY",
+ }
+ UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_value = map[string]int32{
+ "STATUS_CLASS_UNSPECIFIED": 0,
+ "STATUS_CLASS_1XX": 100,
+ "STATUS_CLASS_2XX": 200,
+ "STATUS_CLASS_3XX": 300,
+ "STATUS_CLASS_4XX": 400,
+ "STATUS_CLASS_5XX": 500,
+ "STATUS_CLASS_ANY": 1000,
+ }
+)
+
+func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Enum() *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass {
+ p := new(UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[6].Descriptor()
+}
+
+func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[6]
+}
+
+func (x UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass.Descriptor instead.
+func (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1, 0}
+}
+
+// Type of authentication.
+type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType int32
+
+const (
+ // Default value, will result in OIDC Authentication.
+ UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 0
+ // OIDC Authentication
+ UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_OIDC_TOKEN UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType = 1
+)
+
+// Enum value maps for UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.
+var (
+ UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_name = map[int32]string{
+ 0: "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED",
+ 1: "OIDC_TOKEN",
+ }
+ UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType_value = map[string]int32{
+ "SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED": 0,
+ "OIDC_TOKEN": 1,
+ }
+)
+
+func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Enum() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType {
+ p := new(UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[7].Descriptor()
+}
+
+func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[7]
+}
+
+func (x UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType.Descriptor instead.
+func (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2, 0}
+}
+
+// Options to perform content matching.
+type UptimeCheckConfig_ContentMatcher_ContentMatcherOption int32
+
+const (
+ // No content matcher type specified (maintained for backward
+ // compatibility, but deprecated for future use).
+ // Treated as `CONTAINS_STRING`.
+ UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 0
+ // Selects substring matching. The match succeeds if the output contains
+ // the `content` string. This is the default value for checks without
+ // a `matcher` option, or where the value of `matcher` is
+ // `CONTENT_MATCHER_OPTION_UNSPECIFIED`.
+ UptimeCheckConfig_ContentMatcher_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 1
+ // Selects negation of substring matching. The match succeeds if the
+ // output does _NOT_ contain the `content` string.
+ UptimeCheckConfig_ContentMatcher_NOT_CONTAINS_STRING UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 2
+ // Selects regular-expression matching. The match succeeds if the output
+ // matches the regular expression specified in the `content` string.
+ // Regex matching is only supported for HTTP/HTTPS checks.
+ UptimeCheckConfig_ContentMatcher_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 3
+ // Selects negation of regular-expression matching. The match succeeds if
+ // the output does _NOT_ match the regular expression specified in the
+ // `content` string. Regex matching is only supported for HTTP/HTTPS
+ // checks.
+ UptimeCheckConfig_ContentMatcher_NOT_MATCHES_REGEX UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 4
+ // Selects JSONPath matching. See `JsonPathMatcher` for details on when
+ // the match succeeds. JSONPath matching is only supported for HTTP/HTTPS
+ // checks.
+ UptimeCheckConfig_ContentMatcher_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 5
+ // Selects JSONPath matching. See `JsonPathMatcher` for details on when
+ // the match succeeds. Succeeds when output does _NOT_ match as specified.
+ // JSONPath is only supported for HTTP/HTTPS checks.
+ UptimeCheckConfig_ContentMatcher_NOT_MATCHES_JSON_PATH UptimeCheckConfig_ContentMatcher_ContentMatcherOption = 6
+)
+
+// Enum value maps for UptimeCheckConfig_ContentMatcher_ContentMatcherOption.
+var (
+ UptimeCheckConfig_ContentMatcher_ContentMatcherOption_name = map[int32]string{
+ 0: "CONTENT_MATCHER_OPTION_UNSPECIFIED",
+ 1: "CONTAINS_STRING",
+ 2: "NOT_CONTAINS_STRING",
+ 3: "MATCHES_REGEX",
+ 4: "NOT_MATCHES_REGEX",
+ 5: "MATCHES_JSON_PATH",
+ 6: "NOT_MATCHES_JSON_PATH",
+ }
+ UptimeCheckConfig_ContentMatcher_ContentMatcherOption_value = map[string]int32{
+ "CONTENT_MATCHER_OPTION_UNSPECIFIED": 0,
+ "CONTAINS_STRING": 1,
+ "NOT_CONTAINS_STRING": 2,
+ "MATCHES_REGEX": 3,
+ "NOT_MATCHES_REGEX": 4,
+ "MATCHES_JSON_PATH": 5,
+ "NOT_MATCHES_JSON_PATH": 6,
+ }
+)
+
+func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_ContentMatcherOption {
+ p := new(UptimeCheckConfig_ContentMatcher_ContentMatcherOption)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[8].Descriptor()
+}
+
+func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[8]
+}
+
+func (x UptimeCheckConfig_ContentMatcher_ContentMatcherOption) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ContentMatcher_ContentMatcherOption.Descriptor instead.
+func (UptimeCheckConfig_ContentMatcher_ContentMatcherOption) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0}
+}
+
+// Options to perform JSONPath content matching.
+type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption int32
+
+const (
+ // No JSONPath matcher type specified (not valid).
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 0
+ // Selects 'exact string' matching. The match succeeds if the content at
+ // the `json_path` within the output is exactly the same as the
+ // `content` string.
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_EXACT_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 1
+ // Selects regular-expression matching. The match succeeds if the
+ // content at the `json_path` within the output matches the regular
+ // expression specified in the `content` string.
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_REGEX_MATCH UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption = 2
+)
+
+// Enum value maps for UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.
+var (
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_name = map[int32]string{
+ 0: "JSON_PATH_MATCHER_OPTION_UNSPECIFIED",
+ 1: "EXACT_MATCH",
+ 2: "REGEX_MATCH",
+ }
+ UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption_value = map[string]int32{
+ "JSON_PATH_MATCHER_OPTION_UNSPECIFIED": 0,
+ "EXACT_MATCH": 1,
+ "REGEX_MATCH": 2,
+ }
+)
+
+func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Enum() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption {
+ p := new(UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)
+ *p = x
+ return p
+}
+
+func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_monitoring_v3_uptime_proto_enumTypes[9].Descriptor()
+}
+
+func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Type() protoreflect.EnumType {
+ return &file_google_monitoring_v3_uptime_proto_enumTypes[9]
+}
+
+func (x UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption.Descriptor instead.
+func (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption) EnumDescriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0, 0}
+}
+
+// An internal checker allows Uptime checks to run on private/internal GCP
+// resources.
+//
+// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+type InternalChecker struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique resource name for this InternalChecker. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/internalCheckers/[INTERNAL_CHECKER_ID]
+ //
+ // `[PROJECT_ID_OR_NUMBER]` is the Cloud Monitoring Metrics Scope project for
+ // the Uptime check config associated with the internal checker.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The checker's human-readable name. The display name
+ // should be unique within a Cloud Monitoring Metrics Scope in order to make
+ // it easier to identify; however, uniqueness is not enforced.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The [GCP VPC network](https://cloud.google.com/vpc/docs/vpc) where the
+ // internal resource lives (ex: "default").
+ Network string `protobuf:"bytes,3,opt,name=network,proto3" json:"network,omitempty"`
+ // The GCP zone the Uptime check should egress from. Only respected for
+ // internal Uptime checks, where internal_network is specified.
+ GcpZone string `protobuf:"bytes,4,opt,name=gcp_zone,json=gcpZone,proto3" json:"gcp_zone,omitempty"`
+ // The GCP project ID where the internal checker lives. Not necessary
+ // the same as the Metrics Scope project.
+ PeerProjectId string `protobuf:"bytes,6,opt,name=peer_project_id,json=peerProjectId,proto3" json:"peer_project_id,omitempty"`
+ // The current operational state of the internal checker.
+ State InternalChecker_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.monitoring.v3.InternalChecker_State" json:"state,omitempty"`
+}
+
+func (x *InternalChecker) Reset() {
+ *x = InternalChecker{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *InternalChecker) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InternalChecker) ProtoMessage() {}
+
+func (x *InternalChecker) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InternalChecker.ProtoReflect.Descriptor instead.
+func (*InternalChecker) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *InternalChecker) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetNetwork() string {
+ if x != nil {
+ return x.Network
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetGcpZone() string {
+ if x != nil {
+ return x.GcpZone
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetPeerProjectId() string {
+ if x != nil {
+ return x.PeerProjectId
+ }
+ return ""
+}
+
+func (x *InternalChecker) GetState() InternalChecker_State {
+ if x != nil {
+ return x.State
+ }
+ return InternalChecker_UNSPECIFIED
+}
+
+// Describes a Synthetic Monitor to be invoked by Uptime.
+type SyntheticMonitorTarget struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies a Synthetic Monitor's execution stack.
+ //
+ // Types that are assignable to Target:
+ //
+ // *SyntheticMonitorTarget_CloudFunctionV2
+ Target isSyntheticMonitorTarget_Target `protobuf_oneof:"target"`
+}
+
+func (x *SyntheticMonitorTarget) Reset() {
+ *x = SyntheticMonitorTarget{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SyntheticMonitorTarget) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SyntheticMonitorTarget) ProtoMessage() {}
+
+func (x *SyntheticMonitorTarget) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SyntheticMonitorTarget.ProtoReflect.Descriptor instead.
+func (*SyntheticMonitorTarget) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *SyntheticMonitorTarget) GetTarget() isSyntheticMonitorTarget_Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (x *SyntheticMonitorTarget) GetCloudFunctionV2() *SyntheticMonitorTarget_CloudFunctionV2Target {
+ if x, ok := x.GetTarget().(*SyntheticMonitorTarget_CloudFunctionV2); ok {
+ return x.CloudFunctionV2
+ }
+ return nil
+}
+
+type isSyntheticMonitorTarget_Target interface {
+ isSyntheticMonitorTarget_Target()
+}
+
+type SyntheticMonitorTarget_CloudFunctionV2 struct {
+ // Target a Synthetic Monitor GCFv2 instance.
+ CloudFunctionV2 *SyntheticMonitorTarget_CloudFunctionV2Target `protobuf:"bytes,1,opt,name=cloud_function_v2,json=cloudFunctionV2,proto3,oneof"`
+}
+
+func (*SyntheticMonitorTarget_CloudFunctionV2) isSyntheticMonitorTarget_Target() {}
+
+// This message configures which resources and services to monitor for
+// availability.
+type UptimeCheckConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Identifier. A unique resource name for this Uptime check configuration. The
+ // format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
+ //
+ // `[PROJECT_ID_OR_NUMBER]` is the Workspace host project associated with the
+ // Uptime check.
+ //
+ // This field should be omitted when creating the Uptime check configuration;
+ // on create, the resource name is assigned by the server and included in the
+ // response.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A human-friendly name for the Uptime check configuration. The display name
+ // should be unique within a Cloud Monitoring Workspace in order to make it
+ // easier to identify; however, uniqueness is not enforced. Required.
+ DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
+ // The resource the check is checking. Required.
+ //
+ // Types that are assignable to Resource:
+ //
+ // *UptimeCheckConfig_MonitoredResource
+ // *UptimeCheckConfig_ResourceGroup_
+ // *UptimeCheckConfig_SyntheticMonitor
+ Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"`
+ // The type of Uptime check request.
+ //
+ // Types that are assignable to CheckRequestType:
+ //
+ // *UptimeCheckConfig_HttpCheck_
+ // *UptimeCheckConfig_TcpCheck_
+ CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"`
+ // How often, in seconds, the Uptime check is performed.
+ // Currently, the only supported values are `60s` (1 minute), `300s`
+ // (5 minutes), `600s` (10 minutes), and `900s` (15 minutes). Optional,
+ // defaults to `60s`.
+ Period *durationpb.Duration `protobuf:"bytes,7,opt,name=period,proto3" json:"period,omitempty"`
+ // The maximum amount of time to wait for the request to complete (must be
+ // between 1 and 60 seconds). Required.
+ Timeout *durationpb.Duration `protobuf:"bytes,8,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ // The content that is expected to appear in the data returned by the target
+ // server against which the check is run. Currently, only the first entry
+ // in the `content_matchers` list is supported, and additional entries will
+ // be ignored. This field is optional and should only be specified if a
+ // content match is required as part of the/ Uptime check.
+ ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers,proto3" json:"content_matchers,omitempty"`
+ // The type of checkers to use to execute the Uptime check.
+ CheckerType UptimeCheckConfig_CheckerType `protobuf:"varint,17,opt,name=checker_type,json=checkerType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_CheckerType" json:"checker_type,omitempty"`
+ // The list of regions from which the check will be run.
+ // Some regions contain one location, and others contain more than one.
+ // If this field is specified, enough regions must be provided to include a
+ // minimum of 3 locations. Not specifying this field will result in Uptime
+ // checks running from all available regions.
+ SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"`
+ // If this is `true`, then checks are made only from the 'internal_checkers'.
+ // If it is `false`, then checks are made only from the 'selected_regions'.
+ // It is an error to provide 'selected_regions' when is_internal is `true`,
+ // or to provide 'internal_checkers' when is_internal is `false`.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+ IsInternal bool `protobuf:"varint,15,opt,name=is_internal,json=isInternal,proto3" json:"is_internal,omitempty"`
+ // The internal checkers that this check will egress from. If `is_internal` is
+ // `true` and this list is empty, the check will egress from all the
+ // InternalCheckers configured for the project that owns this
+ // `UptimeCheckConfig`.
+ //
+ // Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+ InternalCheckers []*InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers,proto3" json:"internal_checkers,omitempty"`
+ // User-supplied key/value data to be used for organizing and
+ // identifying the `UptimeCheckConfig` objects.
+ //
+ // The field can contain up to 64 entries. Each key and value is limited to
+ // 63 Unicode characters or 128 bytes, whichever is smaller. Labels and
+ // values can contain only lowercase letters, numerals, underscores, and
+ // dashes. Keys must begin with a letter.
+ UserLabels map[string]string `protobuf:"bytes,20,rep,name=user_labels,json=userLabels,proto3" json:"user_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *UptimeCheckConfig) Reset() {
+ *x = UptimeCheckConfig{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig) ProtoMessage() {}
+
+func (x *UptimeCheckConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *UptimeCheckConfig) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig) GetDisplayName() string {
+ if x != nil {
+ return x.DisplayName
+ }
+ return ""
+}
+
+func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetMonitoredResource() *monitoredres.MonitoredResource {
+ if x, ok := x.GetResource().(*UptimeCheckConfig_MonitoredResource); ok {
+ return x.MonitoredResource
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup {
+ if x, ok := x.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok {
+ return x.ResourceGroup
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetSyntheticMonitor() *SyntheticMonitorTarget {
+ if x, ok := x.GetResource().(*UptimeCheckConfig_SyntheticMonitor); ok {
+ return x.SyntheticMonitor
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType {
+ if m != nil {
+ return m.CheckRequestType
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck {
+ if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok {
+ return x.HttpCheck
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck {
+ if x, ok := x.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok {
+ return x.TcpCheck
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetPeriod() *durationpb.Duration {
+ if x != nil {
+ return x.Period
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher {
+ if x != nil {
+ return x.ContentMatchers
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetCheckerType() UptimeCheckConfig_CheckerType {
+ if x != nil {
+ return x.CheckerType
+ }
+ return UptimeCheckConfig_CHECKER_TYPE_UNSPECIFIED
+}
+
+func (x *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion {
+ if x != nil {
+ return x.SelectedRegions
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+func (x *UptimeCheckConfig) GetIsInternal() bool {
+ if x != nil {
+ return x.IsInternal
+ }
+ return false
+}
+
+// Deprecated: Marked as deprecated in google/monitoring/v3/uptime.proto.
+func (x *UptimeCheckConfig) GetInternalCheckers() []*InternalChecker {
+ if x != nil {
+ return x.InternalCheckers
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig) GetUserLabels() map[string]string {
+ if x != nil {
+ return x.UserLabels
+ }
+ return nil
+}
+
+type isUptimeCheckConfig_Resource interface {
+ isUptimeCheckConfig_Resource()
+}
+
+type UptimeCheckConfig_MonitoredResource struct {
+ // The [monitored
+ // resource](https://cloud.google.com/monitoring/api/resources) associated
+ // with the configuration.
+ // The following monitored resource types are valid for this field:
+ //
+ // `uptime_url`,
+ // `gce_instance`,
+ // `gae_app`,
+ // `aws_ec2_instance`,
+ // `aws_elb_load_balancer`
+ // `k8s_service`
+ // `servicedirectory_service`
+ // `cloud_run_revision`
+ MonitoredResource *monitoredres.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,proto3,oneof"`
+}
+
+type UptimeCheckConfig_ResourceGroup_ struct {
+ // The group resource associated with the configuration.
+ ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,proto3,oneof"`
+}
+
+type UptimeCheckConfig_SyntheticMonitor struct {
+ // Specifies a Synthetic Monitor to invoke.
+ SyntheticMonitor *SyntheticMonitorTarget `protobuf:"bytes,21,opt,name=synthetic_monitor,json=syntheticMonitor,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {}
+
+func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {}
+
+func (*UptimeCheckConfig_SyntheticMonitor) isUptimeCheckConfig_Resource() {}
+
+type isUptimeCheckConfig_CheckRequestType interface {
+ isUptimeCheckConfig_CheckRequestType()
+}
+
+type UptimeCheckConfig_HttpCheck_ struct {
+ // Contains information needed to make an HTTP or HTTPS check.
+ HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,proto3,oneof"`
+}
+
+type UptimeCheckConfig_TcpCheck_ struct {
+ // Contains information needed to make a TCP check.
+ TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {}
+
+func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {}
+
+// Contains the region, location, and list of IP
+// addresses where checkers in the location run from.
+type UptimeCheckIp struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A broad region category in which the IP address is located.
+ Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,proto3,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"`
+ // A more specific location within the region that typically encodes
+ // a particular city/town/metro (and its containing state/province or country)
+ // within the broader umbrella region category.
+ Location string `protobuf:"bytes,2,opt,name=location,proto3" json:"location,omitempty"`
+ // The IP address from which the Uptime check originates. This is a fully
+ // specified IP address (not an IP address range). Most IP addresses, as of
+ // this publication, are in IPv4 format; however, one should not rely on the
+ // IP addresses being in IPv4 format indefinitely, and should support
+ // interpreting this field in either IPv4 or IPv6 format.
+ IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"`
+}
+
+func (x *UptimeCheckIp) Reset() {
+ *x = UptimeCheckIp{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckIp) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckIp) ProtoMessage() {}
+
+func (x *UptimeCheckIp) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckIp.ProtoReflect.Descriptor instead.
+func (*UptimeCheckIp) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *UptimeCheckIp) GetRegion() UptimeCheckRegion {
+ if x != nil {
+ return x.Region
+ }
+ return UptimeCheckRegion_REGION_UNSPECIFIED
+}
+
+func (x *UptimeCheckIp) GetLocation() string {
+ if x != nil {
+ return x.Location
+ }
+ return ""
+}
+
+func (x *UptimeCheckIp) GetIpAddress() string {
+ if x != nil {
+ return x.IpAddress
+ }
+ return ""
+}
+
+// A Synthetic Monitor deployed to a Cloud Functions V2 instance.
+type SyntheticMonitorTarget_CloudFunctionV2Target struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Fully qualified GCFv2 resource name
+ // i.e. `projects/{project}/locations/{location}/functions/{function}`
+ // Required.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Output only. The `cloud_run_revision` Monitored Resource associated with
+ // the GCFv2. The Synthetic Monitor execution results (metrics, logs, and
+ // spans) are reported against this Monitored Resource. This field is output
+ // only.
+ CloudRunRevision *monitoredres.MonitoredResource `protobuf:"bytes,2,opt,name=cloud_run_revision,json=cloudRunRevision,proto3" json:"cloud_run_revision,omitempty"`
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) Reset() {
+ *x = SyntheticMonitorTarget_CloudFunctionV2Target{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SyntheticMonitorTarget_CloudFunctionV2Target) ProtoMessage() {}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SyntheticMonitorTarget_CloudFunctionV2Target.ProtoReflect.Descriptor instead.
+func (*SyntheticMonitorTarget_CloudFunctionV2Target) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SyntheticMonitorTarget_CloudFunctionV2Target) GetCloudRunRevision() *monitoredres.MonitoredResource {
+ if x != nil {
+ return x.CloudRunRevision
+ }
+ return nil
+}
+
+// The resource submessage for group checks. It can be used instead of a
+// monitored resource, when multiple resources are being monitored.
+type UptimeCheckConfig_ResourceGroup struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The group of resources being monitored. Should be only the `[GROUP_ID]`,
+ // and not the full-path
+ // `projects/[PROJECT_ID_OR_NUMBER]/groups/[GROUP_ID]`.
+ GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+ // The resource type of the group members.
+ ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"`
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) Reset() {
+ *x = UptimeCheckConfig_ResourceGroup{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_ResourceGroup) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ResourceGroup.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) GetGroupId() string {
+ if x != nil {
+ return x.GroupId
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType {
+ if x != nil {
+ return x.ResourceType
+ }
+ return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED
+}
+
+// Information involved in sending ICMP pings alongside public HTTP/TCP
+// checks. For HTTP, the pings are performed for each part of the redirect
+// chain.
+type UptimeCheckConfig_PingConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Number of ICMP pings. A maximum of 3 ICMP pings is currently supported.
+ PingsCount int32 `protobuf:"varint,1,opt,name=pings_count,json=pingsCount,proto3" json:"pings_count,omitempty"`
+}
+
+func (x *UptimeCheckConfig_PingConfig) Reset() {
+ *x = UptimeCheckConfig_PingConfig{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_PingConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_PingConfig) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_PingConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_PingConfig.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_PingConfig) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *UptimeCheckConfig_PingConfig) GetPingsCount() int32 {
+ if x != nil {
+ return x.PingsCount
+ }
+ return 0
+}
+
+// Information involved in an HTTP/HTTPS Uptime check request.
+type UptimeCheckConfig_HttpCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The HTTP request method to use for the check. If set to
+ // `METHOD_UNSPECIFIED` then `request_method` defaults to `GET`.
+ RequestMethod UptimeCheckConfig_HttpCheck_RequestMethod `protobuf:"varint,8,opt,name=request_method,json=requestMethod,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_RequestMethod" json:"request_method,omitempty"`
+ // If `true`, use HTTPS instead of HTTP to run the check.
+ UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"`
+ // Optional (defaults to "/"). The path to the page against which to run
+ // the check. Will be combined with the `host` (specified within the
+ // `monitored_resource`) and `port` to construct the full URL. If the
+ // provided path does not begin with "/", a "/" will be prepended
+ // automatically.
+ Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+ // Optional (defaults to 80 when `use_ssl` is `false`, and 443 when
+ // `use_ssl` is `true`). The TCP port on the HTTP server against which to
+ // run the check. Will be combined with host (specified within the
+ // `monitored_resource`) and `path` to construct the full URL.
+ Port int32 `protobuf:"varint,3,opt,name=port,proto3" json:"port,omitempty"`
+ // The authentication information. Optional when creating an HTTP check;
+ // defaults to empty.
+ // Do not set both `auth_method` and `auth_info`.
+ AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo,proto3" json:"auth_info,omitempty"`
+ // Boolean specifying whether to encrypt the header information.
+ // Encryption should be specified for any headers related to authentication
+ // that you do not wish to be seen when retrieving the configuration. The
+ // server will be responsible for encrypting the headers.
+ // On Get/List calls, if `mask_headers` is set to `true` then the headers
+ // will be obscured with `******.`
+ MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders,proto3" json:"mask_headers,omitempty"`
+ // The list of headers to send as part of the Uptime check request.
+ // If two headers have the same key and different values, they should
+ // be entered as a single header, with the value being a comma-separated
+ // list of all the desired values as described at
+ // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31).
+ // Entering two separate headers with the same key in a Create call will
+ // cause the first to be overwritten by the second.
+ // The maximum number of headers allowed is 100.
+ Headers map[string]string `protobuf:"bytes,6,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The content type header to use for the check. The following
+ // configurations result in errors:
+ // 1. Content type is specified in both the `headers` field and the
+ // `content_type` field.
+ // 2. Request method is `GET` and `content_type` is not `TYPE_UNSPECIFIED`
+ // 3. Request method is `POST` and `content_type` is `TYPE_UNSPECIFIED`.
+ // 4. Request method is `POST` and a "Content-Type" header is provided via
+ // `headers` field. The `content_type` field should be used instead.
+ ContentType UptimeCheckConfig_HttpCheck_ContentType `protobuf:"varint,9,opt,name=content_type,json=contentType,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ContentType" json:"content_type,omitempty"`
+ // A user provided content type header to use for the check. The invalid
+ // configurations outlined in the `content_type` field apply to
+ // `custom_content_type`, as well as the following:
+ // 1. `content_type` is `URL_ENCODED` and `custom_content_type` is set.
+ // 2. `content_type` is `USER_PROVIDED` and `custom_content_type` is not
+ // set.
+ CustomContentType string `protobuf:"bytes,13,opt,name=custom_content_type,json=customContentType,proto3" json:"custom_content_type,omitempty"`
+ // Boolean specifying whether to include SSL certificate validation as a
+ // part of the Uptime check. Only applies to checks where
+ // `monitored_resource` is set to `uptime_url`. If `use_ssl` is `false`,
+ // setting `validate_ssl` to `true` has no effect.
+ ValidateSsl bool `protobuf:"varint,7,opt,name=validate_ssl,json=validateSsl,proto3" json:"validate_ssl,omitempty"`
+ // The request body associated with the HTTP POST request. If `content_type`
+ // is `URL_ENCODED`, the body passed in must be URL-encoded. Users can
+ // provide a `Content-Length` header via the `headers` field or the API will
+ // do so. If the `request_method` is `GET` and `body` is not empty, the API
+ // will return an error. The maximum byte size is 1 megabyte.
+ //
+ // Note: If client libraries aren't used (which performs the conversion
+ // automatically) base64 encode your `body` data since the field is of
+ // `bytes` type.
+ Body []byte `protobuf:"bytes,10,opt,name=body,proto3" json:"body,omitempty"`
+ // If present, the check will only pass if the HTTP response status code is
+ // in this set of status codes. If empty, the HTTP status code will only
+ // pass if the HTTP status code is 200-299.
+ AcceptedResponseStatusCodes []*UptimeCheckConfig_HttpCheck_ResponseStatusCode `protobuf:"bytes,11,rep,name=accepted_response_status_codes,json=acceptedResponseStatusCodes,proto3" json:"accepted_response_status_codes,omitempty"`
+ // Contains information needed to add pings to an HTTP check.
+ PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,12,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"`
+ // This field is optional and should be set only by users interested in
+ // an authenticated uptime check.
+ // Do not set both `auth_method` and `auth_info`.
+ //
+ // Types that are assignable to AuthMethod:
+ //
+ // *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_
+ AuthMethod isUptimeCheckConfig_HttpCheck_AuthMethod `protobuf_oneof:"auth_method"`
+}
+
+func (x *UptimeCheckConfig_HttpCheck) Reset() {
+ *x = UptimeCheckConfig_HttpCheck{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_HttpCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_HttpCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2}
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetRequestMethod() UptimeCheckConfig_HttpCheck_RequestMethod {
+ if x != nil {
+ return x.RequestMethod
+ }
+ return UptimeCheckConfig_HttpCheck_METHOD_UNSPECIFIED
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetUseSsl() bool {
+ if x != nil {
+ return x.UseSsl
+ }
+ return false
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetPort() int32 {
+ if x != nil {
+ return x.Port
+ }
+ return 0
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication {
+ if x != nil {
+ return x.AuthInfo
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool {
+ if x != nil {
+ return x.MaskHeaders
+ }
+ return false
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetContentType() UptimeCheckConfig_HttpCheck_ContentType {
+ if x != nil {
+ return x.ContentType
+ }
+ return UptimeCheckConfig_HttpCheck_TYPE_UNSPECIFIED
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetCustomContentType() string {
+ if x != nil {
+ return x.CustomContentType
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetValidateSsl() bool {
+ if x != nil {
+ return x.ValidateSsl
+ }
+ return false
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetBody() []byte {
+ if x != nil {
+ return x.Body
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetAcceptedResponseStatusCodes() []*UptimeCheckConfig_HttpCheck_ResponseStatusCode {
+ if x != nil {
+ return x.AcceptedResponseStatusCodes
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig {
+ if x != nil {
+ return x.PingConfig
+ }
+ return nil
+}
+
+func (m *UptimeCheckConfig_HttpCheck) GetAuthMethod() isUptimeCheckConfig_HttpCheck_AuthMethod {
+ if m != nil {
+ return m.AuthMethod
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck) GetServiceAgentAuthentication() *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication {
+ if x, ok := x.GetAuthMethod().(*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_); ok {
+ return x.ServiceAgentAuthentication
+ }
+ return nil
+}
+
+type isUptimeCheckConfig_HttpCheck_AuthMethod interface {
+ isUptimeCheckConfig_HttpCheck_AuthMethod()
+}
+
+type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ struct {
+ // If specified, Uptime will generate and attach an OIDC JWT token for the
+ // Monitoring service agent service account as an `Authorization` header
+ // in the HTTP request when probing.
+ ServiceAgentAuthentication *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication `protobuf:"bytes,14,opt,name=service_agent_authentication,json=serviceAgentAuthentication,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_) isUptimeCheckConfig_HttpCheck_AuthMethod() {
+}
+
+// Information required for a TCP Uptime check request.
+type UptimeCheckConfig_TcpCheck struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The TCP port on the server against which to run the check. Will be
+ // combined with host (specified within the `monitored_resource`) to
+ // construct the full URL. Required.
+ Port int32 `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+ // Contains information needed to add pings to a TCP check.
+ PingConfig *UptimeCheckConfig_PingConfig `protobuf:"bytes,2,opt,name=ping_config,json=pingConfig,proto3" json:"ping_config,omitempty"`
+}
+
+func (x *UptimeCheckConfig_TcpCheck) Reset() {
+ *x = UptimeCheckConfig_TcpCheck{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_TcpCheck) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_TcpCheck) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_TcpCheck.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 3}
+}
+
+func (x *UptimeCheckConfig_TcpCheck) GetPort() int32 {
+ if x != nil {
+ return x.Port
+ }
+ return 0
+}
+
+func (x *UptimeCheckConfig_TcpCheck) GetPingConfig() *UptimeCheckConfig_PingConfig {
+ if x != nil {
+ return x.PingConfig
+ }
+ return nil
+}
+
+// Optional. Used to perform content matching. This allows matching based on
+// substrings and regular expressions, together with their negations. Only the
+// first 4 MB of an HTTP or HTTPS check's response (and the first
+// 1 MB of a TCP check's response) are examined for purposes of content
+// matching.
+type UptimeCheckConfig_ContentMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // String, regex or JSON content to match. Maximum 1024 bytes. An empty
+ // `content` string indicates no content matching is to be performed.
+ Content string `protobuf:"bytes,1,opt,name=content,proto3" json:"content,omitempty"`
+ // The type of content matcher that will be applied to the server output,
+ // compared to the `content` string when the check is run.
+ Matcher UptimeCheckConfig_ContentMatcher_ContentMatcherOption `protobuf:"varint,2,opt,name=matcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_ContentMatcherOption" json:"matcher,omitempty"`
+ // Certain `ContentMatcherOption` types require additional information.
+ // `MATCHES_JSON_PATH` or `NOT_MATCHES_JSON_PATH` require a
+ // `JsonPathMatcher`; not used for other options.
+ //
+ // Types that are assignable to AdditionalMatcherInfo:
+ //
+ // *UptimeCheckConfig_ContentMatcher_JsonPathMatcher_
+ AdditionalMatcherInfo isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo `protobuf_oneof:"additional_matcher_info"`
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) Reset() {
+ *x = UptimeCheckConfig_ContentMatcher{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_ContentMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ContentMatcher.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4}
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) GetContent() string {
+ if x != nil {
+ return x.Content
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) GetMatcher() UptimeCheckConfig_ContentMatcher_ContentMatcherOption {
+ if x != nil {
+ return x.Matcher
+ }
+ return UptimeCheckConfig_ContentMatcher_CONTENT_MATCHER_OPTION_UNSPECIFIED
+}
+
+func (m *UptimeCheckConfig_ContentMatcher) GetAdditionalMatcherInfo() isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo {
+ if m != nil {
+ return m.AdditionalMatcherInfo
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_ContentMatcher) GetJsonPathMatcher() *UptimeCheckConfig_ContentMatcher_JsonPathMatcher {
+ if x, ok := x.GetAdditionalMatcherInfo().(*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_); ok {
+ return x.JsonPathMatcher
+ }
+ return nil
+}
+
+type isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo interface {
+ isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo()
+}
+
+type UptimeCheckConfig_ContentMatcher_JsonPathMatcher_ struct {
+ // Matcher information for `MATCHES_JSON_PATH` and `NOT_MATCHES_JSON_PATH`
+ JsonPathMatcher *UptimeCheckConfig_ContentMatcher_JsonPathMatcher `protobuf:"bytes,3,opt,name=json_path_matcher,json=jsonPathMatcher,proto3,oneof"`
+}
+
+func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_) isUptimeCheckConfig_ContentMatcher_AdditionalMatcherInfo() {
+}
+
+// The authentication parameters to provide to the specified resource or
+// URL that requires a username and password. Currently, only
+// [Basic HTTP authentication](https://tools.ietf.org/html/rfc7617) is
+// supported in Uptime checks.
+type UptimeCheckConfig_HttpCheck_BasicAuthentication struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The username to use when authenticating with the HTTP server.
+ Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
+ // The password to use when authenticating with the HTTP server.
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() {
+ *x = UptimeCheckConfig_HttpCheck_BasicAuthentication{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_BasicAuthentication.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 0}
+}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string {
+ if x != nil {
+ return x.Username
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string {
+ if x != nil {
+ return x.Password
+ }
+ return ""
+}
+
+// A status to accept. Either a status code class like "2xx", or an integer
+// status code like "200".
+type UptimeCheckConfig_HttpCheck_ResponseStatusCode struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Either a specific value or a class of status codes.
+ //
+ // Types that are assignable to StatusCode:
+ //
+ // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue
+ // *UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_
+ StatusCode isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode `protobuf_oneof:"status_code"`
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) Reset() {
+ *x = UptimeCheckConfig_HttpCheck_ResponseStatusCode{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ResponseStatusCode.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 1}
+}
+
+func (m *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusCode() isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode {
+ if m != nil {
+ return m.StatusCode
+ }
+ return nil
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusValue() int32 {
+ if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue); ok {
+ return x.StatusValue
+ }
+ return 0
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ResponseStatusCode) GetStatusClass() UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass {
+ if x, ok := x.GetStatusCode().(*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_); ok {
+ return x.StatusClass
+ }
+ return UptimeCheckConfig_HttpCheck_ResponseStatusCode_STATUS_CLASS_UNSPECIFIED
+}
+
+type isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode interface {
+ isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode()
+}
+
+type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue struct {
+ // A status code to accept.
+ StatusValue int32 `protobuf:"varint,1,opt,name=status_value,json=statusValue,proto3,oneof"`
+}
+
+type UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_ struct {
+ // A class of status codes to accept.
+ StatusClass UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass `protobuf:"varint,2,opt,name=status_class,json=statusClass,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass,oneof"`
+}
+
+func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() {
+}
+
+func (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_) isUptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusCode() {
+}
+
+// Contains information needed for generating either an
+// [OpenID Connect
+// token](https://developers.google.com/identity/protocols/OpenIDConnect) or
+// [OAuth token](https://developers.google.com/identity/protocols/oauth2).
+// The token will be generated for the Monitoring service agent service
+// account.
+type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Type of authentication.
+ Type UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType `protobuf:"varint,1,opt,name=type,proto3,enum=google.monitoring.v3.UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType" json:"type,omitempty"`
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Reset() {
+ *x = UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 2, 2}
+}
+
+func (x *UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication) GetType() UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType {
+ if x != nil {
+ return x.Type
+ }
+ return UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_SERVICE_AGENT_AUTHENTICATION_TYPE_UNSPECIFIED
+}
+
+// Information needed to perform a JSONPath content match.
+// Used for `ContentMatcherOption::MATCHES_JSON_PATH` and
+// `ContentMatcherOption::NOT_MATCHES_JSON_PATH`.
+type UptimeCheckConfig_ContentMatcher_JsonPathMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // JSONPath within the response output pointing to the expected
+ // `ContentMatcher::content` to match against.
+ JsonPath string `protobuf:"bytes,1,opt,name=json_path,json=jsonPath,proto3" json:"json_path,omitempty"`
+ // The type of JSONPath match that will be applied to the JSON output
+ // (`ContentMatcher.content`)
+ JsonMatcher UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption `protobuf:"varint,2,opt,name=json_matcher,json=jsonMatcher,proto3,enum=google.monitoring.v3.UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption" json:"json_matcher,omitempty"`
+}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Reset() {
+ *x = UptimeCheckConfig_ContentMatcher_JsonPathMatcher{}
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoMessage() {}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UptimeCheckConfig_ContentMatcher_JsonPathMatcher.ProtoReflect.Descriptor instead.
+func (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_proto_rawDescGZIP(), []int{2, 4, 0}
+}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonPath() string {
+ if x != nil {
+ return x.JsonPath
+ }
+ return ""
+}
+
+func (x *UptimeCheckConfig_ContentMatcher_JsonPathMatcher) GetJsonMatcher() UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption {
+ if x != nil {
+ return x.JsonMatcher
+ }
+ return UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JSON_PATH_MATCHER_OPTION_UNSPECIFIED
+}
+
+var File_google_monitoring_v3_uptime_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_uptime_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61,
+ 0x76, 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x61, 0x70, 0x69, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, 0x02, 0x0a, 0x0f, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x19, 0x0a, 0x08,
+ 0x67, 0x63, 0x70, 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x67, 0x63, 0x70, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x65, 0x65, 0x72, 0x5f,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x70, 0x65, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x49, 0x64, 0x12,
+ 0x41, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61,
+ 0x74, 0x65, 0x22, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08,
+ 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55,
+ 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x3a, 0x02, 0x18, 0x01, 0x22, 0xc4, 0x02, 0x0a, 0x16,
+ 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x70, 0x0a, 0x11, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f,
+ 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x76, 0x32, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74,
+ 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x2e,
+ 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x46, 0x75,
+ 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x1a, 0xad, 0x01, 0x0a, 0x15, 0x43, 0x6c, 0x6f,
+ 0x75, 0x64, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x56, 0x32, 0x54, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x12, 0x42, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x2e, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x28, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x5f,
+ 0x72, 0x75, 0x6e, 0x5f, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
+ 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x10, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x52, 0x75, 0x6e,
+ 0x52, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67,
+ 0x65, 0x74, 0x22, 0x94, 0x23, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x08, 0x52, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79,
+ 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x12, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65,
+ 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x48,
+ 0x00, 0x52, 0x11, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x5e, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x72,
+ 0x6f, 0x75, 0x70, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x12, 0x5b, 0x0a, 0x11, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69,
+ 0x63, 0x5f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63,
+ 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x48, 0x00, 0x52,
+ 0x10, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48,
+ 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x09, 0x68, 0x74, 0x74, 0x70,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x4f, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x48, 0x01, 0x52, 0x08, 0x74, 0x63,
+ 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x31, 0x0a, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x06, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x61,
+ 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x73, 0x12, 0x56, 0x0a, 0x0c, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x52, 0x0a, 0x10, 0x73, 0x65, 0x6c,
+ 0x65, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0a, 0x20,
+ 0x03, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x73, 0x65,
+ 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a,
+ 0x0b, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0f, 0x20, 0x01,
+ 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x69, 0x73, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x12, 0x56, 0x0a, 0x11, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x65, 0x72, 0x42, 0x02, 0x18, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x0b, 0x75, 0x73,
+ 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62,
+ 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x4c, 0x61,
+ 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x78, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64,
+ 0x12, 0x4c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47,
+ 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x1a, 0x2d,
+ 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x70, 0x69, 0x6e, 0x67, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x05, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xef, 0x0e,
+ 0x0a, 0x09, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x66, 0x0a, 0x0e, 0x72,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74,
+ 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x75, 0x73, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04,
+ 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68,
+ 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04,
+ 0x70, 0x6f, 0x72, 0x74, 0x12, 0x62, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63,
+ 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08,
+ 0x61, 0x75, 0x74, 0x68, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x73, 0x6b,
+ 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
+ 0x6d, 0x61, 0x73, 0x6b, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x58, 0x0a, 0x07, 0x68,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e,
+ 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f,
+ 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x61, 0x74, 0x65, 0x5f, 0x73, 0x73, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x73, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6f,
+ 0x64, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x12, 0x89,
+ 0x01, 0x0a, 0x1e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65,
+ 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x1b, 0x61,
+ 0x63, 0x63, 0x65, 0x70, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x0b, 0x70, 0x69,
+ 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x90, 0x01, 0x0a, 0x1c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x6e,
+ 0x74, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x4c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x1a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
+ 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x1a, 0x4d, 0x0a, 0x13, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65,
+ 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65,
+ 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65,
+ 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
+ 0x64, 0x1a, 0xf6, 0x02, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00,
+ 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x75, 0x0a,
+ 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x50, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x48, 0x74, 0x74,
+ 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x43, 0x6c, 0x61, 0x73, 0x73, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43,
+ 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41,
+ 0x53, 0x53, 0x5f, 0x31, 0x58, 0x58, 0x10, 0x64, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54,
+ 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x32, 0x58, 0x58, 0x10, 0xc8, 0x01, 0x12,
+ 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f,
+ 0x33, 0x58, 0x58, 0x10, 0xac, 0x02, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53,
+ 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x34, 0x58, 0x58, 0x10, 0x90, 0x03, 0x12, 0x15, 0x0a,
+ 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x35, 0x58,
+ 0x58, 0x10, 0xf4, 0x03, 0x12, 0x15, 0x0a, 0x10, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43,
+ 0x4c, 0x41, 0x53, 0x53, 0x5f, 0x41, 0x4e, 0x59, 0x10, 0xe8, 0x07, 0x42, 0x0d, 0x0a, 0x0b, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x1a, 0x82, 0x02, 0x0a, 0x1a, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65,
+ 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7f, 0x0a, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x6b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x48, 0x74, 0x74, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65,
+ 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x63, 0x0a, 0x1e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e,
+ 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x2d,
+ 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x41, 0x47, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x55,
+ 0x54, 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x59, 0x50,
+ 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x4f, 0x49, 0x44, 0x43, 0x5f, 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x10, 0x01, 0x1a,
+ 0x3a, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3a, 0x0a, 0x0d, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x16, 0x0a, 0x12,
+ 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
+ 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a,
+ 0x04, 0x50, 0x4f, 0x53, 0x54, 0x10, 0x02, 0x22, 0x47, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
+ 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b,
+ 0x55, 0x52, 0x4c, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x45, 0x44, 0x10, 0x01, 0x12, 0x11, 0x0a,
+ 0x0d, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x56, 0x49, 0x44, 0x45, 0x44, 0x10, 0x02,
+ 0x42, 0x0d, 0x0a, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x1a,
+ 0x73, 0x0a, 0x08, 0x54, 0x63, 0x70, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x70,
+ 0x6f, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x12,
+ 0x53, 0x0a, 0x0b, 0x70, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x69,
+ 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x69, 0x6e, 0x67, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x84, 0x06, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x12, 0x65, 0x0a, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x74, 0x0a, 0x11, 0x6a, 0x73, 0x6f, 0x6e,
+ 0x5f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e,
+ 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, 0x73, 0x6f, 0x6e,
+ 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x1a, 0x94,
+ 0x02, 0x0a, 0x0f, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x12,
+ 0x7f, 0x0a, 0x0c, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x5c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, 0x73,
+ 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4a, 0x73,
+ 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x22, 0x63, 0x0a, 0x15, 0x4a, 0x73, 0x6f, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x24, 0x4a, 0x53, 0x4f,
+ 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x52, 0x5f, 0x4f,
+ 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x41, 0x43, 0x54, 0x5f, 0x4d, 0x41, 0x54,
+ 0x43, 0x48, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x47, 0x45, 0x58, 0x5f, 0x4d, 0x41,
+ 0x54, 0x43, 0x48, 0x10, 0x02, 0x22, 0xc8, 0x01, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26,
+ 0x0a, 0x22, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45,
+ 0x52, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49,
+ 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49,
+ 0x4e, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49,
+ 0x4e, 0x47, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f,
+ 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x54, 0x5f, 0x4d,
+ 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x52, 0x45, 0x47, 0x45, 0x58, 0x10, 0x04, 0x12, 0x15,
+ 0x0a, 0x11, 0x4d, 0x41, 0x54, 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50,
+ 0x41, 0x54, 0x48, 0x10, 0x05, 0x12, 0x19, 0x0a, 0x15, 0x4e, 0x4f, 0x54, 0x5f, 0x4d, 0x41, 0x54,
+ 0x43, 0x48, 0x45, 0x53, 0x5f, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x50, 0x41, 0x54, 0x48, 0x10, 0x06,
+ 0x42, 0x19, 0x0a, 0x17, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x1a, 0x3d, 0x0a, 0x0f, 0x55,
+ 0x73, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x55, 0x0a, 0x0b, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x48, 0x45,
+ 0x43, 0x4b, 0x45, 0x52, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x54, 0x41, 0x54, 0x49,
+ 0x43, 0x5f, 0x49, 0x50, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x53, 0x10, 0x01, 0x12,
+ 0x10, 0x0a, 0x0c, 0x56, 0x50, 0x43, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x45, 0x52, 0x53, 0x10,
+ 0x03, 0x3a, 0xf3, 0x01, 0xea, 0x41, 0xef, 0x01, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f,
+ 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f,
+ 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x7d, 0x12, 0x45, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x7b, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d,
+ 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x12, 0x39, 0x66, 0x6f, 0x6c, 0x64, 0x65,
+ 0x72, 0x73, 0x2f, 0x7b, 0x66, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x7b,
+ 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x7d, 0x12, 0x01, 0x2a, 0x42, 0x0a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x42, 0x14, 0x0a, 0x12, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x22, 0x8b, 0x01, 0x0a, 0x0d, 0x55, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x12, 0x3f, 0x0a, 0x06, 0x72,
+ 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65,
+ 0x67, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x70,
+ 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2a, 0x95, 0x01, 0x0a, 0x11, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a,
+ 0x12, 0x52, 0x45, 0x47, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x55, 0x53, 0x41, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x45, 0x55, 0x52, 0x4f, 0x50, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x4f,
+ 0x55, 0x54, 0x48, 0x5f, 0x41, 0x4d, 0x45, 0x52, 0x49, 0x43, 0x41, 0x10, 0x03, 0x12, 0x10, 0x0a,
+ 0x0c, 0x41, 0x53, 0x49, 0x41, 0x5f, 0x50, 0x41, 0x43, 0x49, 0x46, 0x49, 0x43, 0x10, 0x04, 0x12,
+ 0x0e, 0x0a, 0x0a, 0x55, 0x53, 0x41, 0x5f, 0x4f, 0x52, 0x45, 0x47, 0x4f, 0x4e, 0x10, 0x05, 0x12,
+ 0x0c, 0x0a, 0x08, 0x55, 0x53, 0x41, 0x5f, 0x49, 0x4f, 0x57, 0x41, 0x10, 0x06, 0x12, 0x10, 0x0a,
+ 0x0c, 0x55, 0x53, 0x41, 0x5f, 0x56, 0x49, 0x52, 0x47, 0x49, 0x4e, 0x49, 0x41, 0x10, 0x07, 0x2a,
+ 0x5b, 0x0a, 0x11, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
+ 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x54, 0x41, 0x4e, 0x43, 0x45, 0x10,
+ 0x01, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x57, 0x53, 0x5f, 0x45, 0x4c, 0x42, 0x5f, 0x4c, 0x4f, 0x41,
+ 0x44, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x52, 0x10, 0x02, 0x42, 0xaf, 0x02, 0xea,
+ 0x41, 0x66, 0x0a, 0x26, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3c, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c,
+ 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x7d, 0x2f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x66,
+ 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x0a, 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x76, 0x33, 0x42, 0x0b, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e,
+ 0x67, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56,
+ 0x33, 0xca, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64,
+ 0x5c, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02,
+ 0x1d, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a,
+ 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_uptime_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_uptime_proto_rawDescData = file_google_monitoring_v3_uptime_proto_rawDesc
+)
+
+func file_google_monitoring_v3_uptime_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_uptime_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_uptime_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_uptime_proto_rawDescData
+}
+
+var file_google_monitoring_v3_uptime_proto_enumTypes = make([]protoimpl.EnumInfo, 10)
+var file_google_monitoring_v3_uptime_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_monitoring_v3_uptime_proto_goTypes = []any{
+ (UptimeCheckRegion)(0), // 0: google.monitoring.v3.UptimeCheckRegion
+ (GroupResourceType)(0), // 1: google.monitoring.v3.GroupResourceType
+ (InternalChecker_State)(0), // 2: google.monitoring.v3.InternalChecker.State
+ (UptimeCheckConfig_CheckerType)(0), // 3: google.monitoring.v3.UptimeCheckConfig.CheckerType
+ (UptimeCheckConfig_HttpCheck_RequestMethod)(0), // 4: google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod
+ (UptimeCheckConfig_HttpCheck_ContentType)(0), // 5: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType
+ (UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass)(0), // 6: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass
+ (UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_ServiceAgentAuthenticationType)(0), // 7: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType
+ (UptimeCheckConfig_ContentMatcher_ContentMatcherOption)(0), // 8: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption
+ (UptimeCheckConfig_ContentMatcher_JsonPathMatcher_JsonPathMatcherOption)(0), // 9: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption
+ (*InternalChecker)(nil), // 10: google.monitoring.v3.InternalChecker
+ (*SyntheticMonitorTarget)(nil), // 11: google.monitoring.v3.SyntheticMonitorTarget
+ (*UptimeCheckConfig)(nil), // 12: google.monitoring.v3.UptimeCheckConfig
+ (*UptimeCheckIp)(nil), // 13: google.monitoring.v3.UptimeCheckIp
+ (*SyntheticMonitorTarget_CloudFunctionV2Target)(nil), // 14: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target
+ (*UptimeCheckConfig_ResourceGroup)(nil), // 15: google.monitoring.v3.UptimeCheckConfig.ResourceGroup
+ (*UptimeCheckConfig_PingConfig)(nil), // 16: google.monitoring.v3.UptimeCheckConfig.PingConfig
+ (*UptimeCheckConfig_HttpCheck)(nil), // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck
+ (*UptimeCheckConfig_TcpCheck)(nil), // 18: google.monitoring.v3.UptimeCheckConfig.TcpCheck
+ (*UptimeCheckConfig_ContentMatcher)(nil), // 19: google.monitoring.v3.UptimeCheckConfig.ContentMatcher
+ nil, // 20: google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry
+ (*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication
+ (*UptimeCheckConfig_HttpCheck_ResponseStatusCode)(nil), // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode
+ (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication)(nil), // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication
+ nil, // 24: google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry
+ (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher)(nil), // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher
+ (*monitoredres.MonitoredResource)(nil), // 26: google.api.MonitoredResource
+ (*durationpb.Duration)(nil), // 27: google.protobuf.Duration
+}
+var file_google_monitoring_v3_uptime_proto_depIdxs = []int32{
+ 2, // 0: google.monitoring.v3.InternalChecker.state:type_name -> google.monitoring.v3.InternalChecker.State
+ 14, // 1: google.monitoring.v3.SyntheticMonitorTarget.cloud_function_v2:type_name -> google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target
+ 26, // 2: google.monitoring.v3.UptimeCheckConfig.monitored_resource:type_name -> google.api.MonitoredResource
+ 15, // 3: google.monitoring.v3.UptimeCheckConfig.resource_group:type_name -> google.monitoring.v3.UptimeCheckConfig.ResourceGroup
+ 11, // 4: google.monitoring.v3.UptimeCheckConfig.synthetic_monitor:type_name -> google.monitoring.v3.SyntheticMonitorTarget
+ 17, // 5: google.monitoring.v3.UptimeCheckConfig.http_check:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck
+ 18, // 6: google.monitoring.v3.UptimeCheckConfig.tcp_check:type_name -> google.monitoring.v3.UptimeCheckConfig.TcpCheck
+ 27, // 7: google.monitoring.v3.UptimeCheckConfig.period:type_name -> google.protobuf.Duration
+ 27, // 8: google.monitoring.v3.UptimeCheckConfig.timeout:type_name -> google.protobuf.Duration
+ 19, // 9: google.monitoring.v3.UptimeCheckConfig.content_matchers:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher
+ 3, // 10: google.monitoring.v3.UptimeCheckConfig.checker_type:type_name -> google.monitoring.v3.UptimeCheckConfig.CheckerType
+ 0, // 11: google.monitoring.v3.UptimeCheckConfig.selected_regions:type_name -> google.monitoring.v3.UptimeCheckRegion
+ 10, // 12: google.monitoring.v3.UptimeCheckConfig.internal_checkers:type_name -> google.monitoring.v3.InternalChecker
+ 20, // 13: google.monitoring.v3.UptimeCheckConfig.user_labels:type_name -> google.monitoring.v3.UptimeCheckConfig.UserLabelsEntry
+ 0, // 14: google.monitoring.v3.UptimeCheckIp.region:type_name -> google.monitoring.v3.UptimeCheckRegion
+ 26, // 15: google.monitoring.v3.SyntheticMonitorTarget.CloudFunctionV2Target.cloud_run_revision:type_name -> google.api.MonitoredResource
+ 1, // 16: google.monitoring.v3.UptimeCheckConfig.ResourceGroup.resource_type:type_name -> google.monitoring.v3.GroupResourceType
+ 4, // 17: google.monitoring.v3.UptimeCheckConfig.HttpCheck.request_method:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.RequestMethod
+ 21, // 18: google.monitoring.v3.UptimeCheckConfig.HttpCheck.auth_info:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication
+ 24, // 19: google.monitoring.v3.UptimeCheckConfig.HttpCheck.headers:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.HeadersEntry
+ 5, // 20: google.monitoring.v3.UptimeCheckConfig.HttpCheck.content_type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ContentType
+ 22, // 21: google.monitoring.v3.UptimeCheckConfig.HttpCheck.accepted_response_status_codes:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode
+ 16, // 22: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig
+ 23, // 23: google.monitoring.v3.UptimeCheckConfig.HttpCheck.service_agent_authentication:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication
+ 16, // 24: google.monitoring.v3.UptimeCheckConfig.TcpCheck.ping_config:type_name -> google.monitoring.v3.UptimeCheckConfig.PingConfig
+ 8, // 25: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.ContentMatcherOption
+ 25, // 26: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.json_path_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher
+ 6, // 27: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.status_class:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ResponseStatusCode.StatusClass
+ 7, // 28: google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.type:type_name -> google.monitoring.v3.UptimeCheckConfig.HttpCheck.ServiceAgentAuthentication.ServiceAgentAuthenticationType
+ 9, // 29: google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.json_matcher:type_name -> google.monitoring.v3.UptimeCheckConfig.ContentMatcher.JsonPathMatcher.JsonPathMatcherOption
+ 30, // [30:30] is the sub-list for method output_type
+ 30, // [30:30] is the sub-list for method input_type
+ 30, // [30:30] is the sub-list for extension type_name
+ 30, // [30:30] is the sub-list for extension extendee
+ 0, // [0:30] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_uptime_proto_init() }
+func file_google_monitoring_v3_uptime_proto_init() {
+ if File_google_monitoring_v3_uptime_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[1].OneofWrappers = []any{
+ (*SyntheticMonitorTarget_CloudFunctionV2)(nil),
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[2].OneofWrappers = []any{
+ (*UptimeCheckConfig_MonitoredResource)(nil),
+ (*UptimeCheckConfig_ResourceGroup_)(nil),
+ (*UptimeCheckConfig_SyntheticMonitor)(nil),
+ (*UptimeCheckConfig_HttpCheck_)(nil),
+ (*UptimeCheckConfig_TcpCheck_)(nil),
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[7].OneofWrappers = []any{
+ (*UptimeCheckConfig_HttpCheck_ServiceAgentAuthentication_)(nil),
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[9].OneofWrappers = []any{
+ (*UptimeCheckConfig_ContentMatcher_JsonPathMatcher_)(nil),
+ }
+ file_google_monitoring_v3_uptime_proto_msgTypes[12].OneofWrappers = []any{
+ (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusValue)(nil),
+ (*UptimeCheckConfig_HttpCheck_ResponseStatusCode_StatusClass_)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_uptime_proto_rawDesc,
+ NumEnums: 10,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_monitoring_v3_uptime_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_uptime_proto_depIdxs,
+ EnumInfos: file_google_monitoring_v3_uptime_proto_enumTypes,
+ MessageInfos: file_google_monitoring_v3_uptime_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_uptime_proto = out.File
+ file_google_monitoring_v3_uptime_proto_rawDesc = nil
+ file_google_monitoring_v3_uptime_proto_goTypes = nil
+ file_google_monitoring_v3_uptime_proto_depIdxs = nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go
new file mode 100644
index 000000000..9ea159bbd
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/monitoringpb/uptime_service.pb.go
@@ -0,0 +1,1112 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.35.2
+// protoc v4.25.3
+// source: google/monitoring/v3/uptime_service.proto
+
+package monitoringpb
+
+import (
+ context "context"
+ reflect "reflect"
+ sync "sync"
+
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ emptypb "google.golang.org/protobuf/types/known/emptypb"
+ fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The protocol for the `ListUptimeCheckConfigs` request.
+type ListUptimeCheckConfigsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) whose
+ // Uptime check configurations are listed. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // If provided, this field specifies the criteria that must be met by
+ // uptime checks to be included in the response.
+ //
+ // For more details, see [Filtering
+ // syntax](https://cloud.google.com/monitoring/api/v3/sorting-and-filtering#filter_syntax).
+ Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // The maximum number of results to return in a single response. The server
+ // may further constrain the maximum number of results returned in a single
+ // page. If the page_size is <=0, the server will decide the number of results
+ // to be returned.
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return more results from the previous method call.
+ PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListUptimeCheckConfigsRequest) Reset() {
+ *x = ListUptimeCheckConfigsRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListUptimeCheckConfigsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListUptimeCheckConfigsRequest) ProtoMessage() {}
+
+func (x *ListUptimeCheckConfigsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListUptimeCheckConfigsRequest.ProtoReflect.Descriptor instead.
+func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ListUptimeCheckConfigsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListUptimeCheckConfigsRequest) GetFilter() string {
+ if x != nil {
+ return x.Filter
+ }
+ return ""
+}
+
+func (x *ListUptimeCheckConfigsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListUptimeCheckConfigsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckConfigs` response.
+type ListUptimeCheckConfigsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The returned Uptime check configurations.
+ UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs,proto3" json:"uptime_check_configs,omitempty"`
+ // This field represents the pagination token to retrieve the next page of
+ // results. If the value is empty, it means no further results for the
+ // request. To retrieve the next page of results, the value of the
+ // next_page_token is passed to the subsequent List method call (in the
+ // request message's page_token field).
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // The total number of Uptime check configurations for the project,
+ // irrespective of any pagination.
+ TotalSize int32 `protobuf:"varint,3,opt,name=total_size,json=totalSize,proto3" json:"total_size,omitempty"`
+}
+
+func (x *ListUptimeCheckConfigsResponse) Reset() {
+ *x = ListUptimeCheckConfigsResponse{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListUptimeCheckConfigsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListUptimeCheckConfigsResponse) ProtoMessage() {}
+
+func (x *ListUptimeCheckConfigsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListUptimeCheckConfigsResponse.ProtoReflect.Descriptor instead.
+func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig {
+ if x != nil {
+ return x.UptimeCheckConfigs
+ }
+ return nil
+}
+
+func (x *ListUptimeCheckConfigsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+func (x *ListUptimeCheckConfigsResponse) GetTotalSize() int32 {
+ if x != nil {
+ return x.TotalSize
+ }
+ return 0
+}
+
+// The protocol for the `GetUptimeCheckConfig` request.
+type GetUptimeCheckConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The Uptime check configuration to retrieve. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *GetUptimeCheckConfigRequest) Reset() {
+ *x = GetUptimeCheckConfigRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GetUptimeCheckConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GetUptimeCheckConfigRequest) ProtoMessage() {}
+
+func (x *GetUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GetUptimeCheckConfigRequest.ProtoReflect.Descriptor instead.
+func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *GetUptimeCheckConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The protocol for the `CreateUptimeCheckConfig` request.
+type CreateUptimeCheckConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The
+ // [project](https://cloud.google.com/monitoring/api/v3#project_name) in which
+ // to create the Uptime check. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Required. The new Uptime check configuration.
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
+}
+
+func (x *CreateUptimeCheckConfigRequest) Reset() {
+ *x = CreateUptimeCheckConfigRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *CreateUptimeCheckConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CreateUptimeCheckConfigRequest) ProtoMessage() {}
+
+func (x *CreateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CreateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead.
+func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *CreateUptimeCheckConfigRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
+ if x != nil {
+ return x.UptimeCheckConfig
+ }
+ return nil
+}
+
+// The protocol for the `UpdateUptimeCheckConfig` request.
+type UpdateUptimeCheckConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional. If present, only the listed fields in the current Uptime check
+ // configuration are updated with values from the new configuration. If this
+ // field is empty, then the current configuration is completely replaced with
+ // the new configuration.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // Required. If an `updateMask` has been specified, this field gives
+ // the values for the set of fields mentioned in the `updateMask`. If an
+ // `updateMask` has not been given, this Uptime check configuration replaces
+ // the current configuration. If a field is mentioned in `updateMask` but
+ // the corresponding field is omitted in this partial Uptime check
+ // configuration, it has the effect of deleting/clearing the field from the
+ // configuration on the server.
+ //
+ // The following fields can be updated: `display_name`,
+ // `http_check`, `tcp_check`, `timeout`, `content_matchers`, and
+ // `selected_regions`.
+ UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig,proto3" json:"uptime_check_config,omitempty"`
+}
+
+func (x *UpdateUptimeCheckConfigRequest) Reset() {
+ *x = UpdateUptimeCheckConfigRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *UpdateUptimeCheckConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {}
+
+func (x *UpdateUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateUptimeCheckConfigRequest.ProtoReflect.Descriptor instead.
+func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *UpdateUptimeCheckConfigRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.UpdateMask
+ }
+ return nil
+}
+
+func (x *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig {
+ if x != nil {
+ return x.UptimeCheckConfig
+ }
+ return nil
+}
+
+// The protocol for the `DeleteUptimeCheckConfig` request.
+type DeleteUptimeCheckConfigRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The Uptime check configuration to delete. The format is:
+ //
+ // projects/[PROJECT_ID_OR_NUMBER]/uptimeCheckConfigs/[UPTIME_CHECK_ID]
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *DeleteUptimeCheckConfigRequest) Reset() {
+ *x = DeleteUptimeCheckConfigRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *DeleteUptimeCheckConfigRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {}
+
+func (x *DeleteUptimeCheckConfigRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DeleteUptimeCheckConfigRequest.ProtoReflect.Descriptor instead.
+func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *DeleteUptimeCheckConfigRequest) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckIps` request.
+type ListUptimeCheckIpsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The maximum number of results to return in a single response. The server
+ // may further constrain the maximum number of results returned in a single
+ // page. If the page_size is <=0, the server will decide the number of results
+ // to be returned.
+ // NOTE: this field is not yet implemented
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // If this field is not empty then it must contain the `nextPageToken` value
+ // returned by a previous call to this method. Using this field causes the
+ // method to return more results from the previous method call.
+ // NOTE: this field is not yet implemented
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+}
+
+func (x *ListUptimeCheckIpsRequest) Reset() {
+ *x = ListUptimeCheckIpsRequest{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListUptimeCheckIpsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListUptimeCheckIpsRequest) ProtoMessage() {}
+
+func (x *ListUptimeCheckIpsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListUptimeCheckIpsRequest.ProtoReflect.Descriptor instead.
+func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ListUptimeCheckIpsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListUptimeCheckIpsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+// The protocol for the `ListUptimeCheckIps` response.
+type ListUptimeCheckIpsResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The returned list of IP addresses (including region and location) that the
+ // checkers run from.
+ UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps,proto3" json:"uptime_check_ips,omitempty"`
+ // This field represents the pagination token to retrieve the next page of
+ // results. If the value is empty, it means no further results for the
+ // request. To retrieve the next page of results, the value of the
+ // next_page_token is passed to the subsequent List method call (in the
+ // request message's page_token field).
+ // NOTE: this field is not yet implemented
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+}
+
+func (x *ListUptimeCheckIpsResponse) Reset() {
+ *x = ListUptimeCheckIpsResponse{}
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListUptimeCheckIpsResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListUptimeCheckIpsResponse) ProtoMessage() {}
+
+func (x *ListUptimeCheckIpsResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_monitoring_v3_uptime_service_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListUptimeCheckIpsResponse.ProtoReflect.Descriptor instead.
+func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) {
+ return file_google_monitoring_v3_uptime_service_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp {
+ if x != nil {
+ return x.UptimeCheckIps
+ }
+ return nil
+}
+
+func (x *ListUptimeCheckIpsResponse) GetNextPageToken() string {
+ if x != nil {
+ return x.NextPageToken
+ }
+ return ""
+}
+
+var File_google_monitoring_v3_uptime_service_proto protoreflect.FileDescriptor
+
+var file_google_monitoring_v3_uptime_service_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76,
+ 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc0, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d,
+ 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a,
+ 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc2, 0x01, 0x0a, 0x1e, 0x4c, 0x69,
+ 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x14,
+ 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76,
+ 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x05, 0x52, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66,
+ 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xcb, 0x01, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x06, 0x70, 0x61, 0x72,
+ 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x2d, 0x12, 0x2b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x1e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x5c, 0x0a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x22, 0x69, 0x0a, 0x1e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x57, 0x0a,
+ 0x19, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61,
+ 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70,
+ 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f,
+ 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x93, 0x01, 0x0a, 0x1a, 0x4c, 0x69, 0x73, 0x74, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x10, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x49, 0x70, 0x52, 0x0e, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63,
+ 0x6b, 0x49, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67,
+ 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e,
+ 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0xbd, 0x0a, 0x0a,
+ 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0xc0, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x33,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69,
+ 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0xda, 0x41, 0x06, 0x70, 0x61,
+ 0x72, 0x65, 0x6e, 0x74, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xad, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x55, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x65, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x39, 0xda, 0x41, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x33, 0x2f,
+ 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a,
+ 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xde, 0x01, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x22, 0x64, 0xda, 0x41, 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x75, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x41, 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x2a, 0x2f, 0x76, 0x33,
+ 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0xeb, 0x01, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33,
+ 0x2e, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x22, 0x71, 0xda, 0x41, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x55,
+ 0x3a, 0x13, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x32, 0x3e, 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x75, 0x70, 0x74, 0x69,
+ 0x6d, 0x65, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f,
+ 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0xa2, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+ 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x55,
+ 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22,
+ 0x39, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a,
+ 0x2f, 0x76, 0x33, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x2f, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x12, 0x4c,
+ 0x69, 0x73, 0x74, 0x55, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70,
+ 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69, 0x74,
+ 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70, 0x74,
+ 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x55, 0x70,
+ 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x76,
+ 0x33, 0x2f, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x49, 0x70, 0x73,
+ 0x1a, 0xa9, 0x01, 0xca, 0x41, 0x19, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2,
+ 0x41, 0x89, 0x01, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75,
+ 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72,
+ 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74,
+ 0x68, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2c, 0x68, 0x74, 0x74,
+ 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6d, 0x6f, 0x6e,
+ 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x42, 0xcd, 0x01, 0x0a,
+ 0x18, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6d, 0x6f, 0x6e, 0x69,
+ 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x12, 0x55, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x41, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2f,
+ 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72,
+ 0x69, 0x6e, 0x67, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67,
+ 0x70, 0x62, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75,
+ 0x64, 0x2e, 0x4d, 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x56, 0x33, 0xca,
+ 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x4d,
+ 0x6f, 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x5c, 0x56, 0x33, 0xea, 0x02, 0x1d, 0x47,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x4d, 0x6f,
+ 0x6e, 0x69, 0x74, 0x6f, 0x72, 0x69, 0x6e, 0x67, 0x3a, 0x3a, 0x56, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_monitoring_v3_uptime_service_proto_rawDescOnce sync.Once
+ file_google_monitoring_v3_uptime_service_proto_rawDescData = file_google_monitoring_v3_uptime_service_proto_rawDesc
+)
+
+func file_google_monitoring_v3_uptime_service_proto_rawDescGZIP() []byte {
+ file_google_monitoring_v3_uptime_service_proto_rawDescOnce.Do(func() {
+ file_google_monitoring_v3_uptime_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_monitoring_v3_uptime_service_proto_rawDescData)
+ })
+ return file_google_monitoring_v3_uptime_service_proto_rawDescData
+}
+
+var file_google_monitoring_v3_uptime_service_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_google_monitoring_v3_uptime_service_proto_goTypes = []any{
+ (*ListUptimeCheckConfigsRequest)(nil), // 0: google.monitoring.v3.ListUptimeCheckConfigsRequest
+ (*ListUptimeCheckConfigsResponse)(nil), // 1: google.monitoring.v3.ListUptimeCheckConfigsResponse
+ (*GetUptimeCheckConfigRequest)(nil), // 2: google.monitoring.v3.GetUptimeCheckConfigRequest
+ (*CreateUptimeCheckConfigRequest)(nil), // 3: google.monitoring.v3.CreateUptimeCheckConfigRequest
+ (*UpdateUptimeCheckConfigRequest)(nil), // 4: google.monitoring.v3.UpdateUptimeCheckConfigRequest
+ (*DeleteUptimeCheckConfigRequest)(nil), // 5: google.monitoring.v3.DeleteUptimeCheckConfigRequest
+ (*ListUptimeCheckIpsRequest)(nil), // 6: google.monitoring.v3.ListUptimeCheckIpsRequest
+ (*ListUptimeCheckIpsResponse)(nil), // 7: google.monitoring.v3.ListUptimeCheckIpsResponse
+ (*UptimeCheckConfig)(nil), // 8: google.monitoring.v3.UptimeCheckConfig
+ (*fieldmaskpb.FieldMask)(nil), // 9: google.protobuf.FieldMask
+ (*UptimeCheckIp)(nil), // 10: google.monitoring.v3.UptimeCheckIp
+ (*emptypb.Empty)(nil), // 11: google.protobuf.Empty
+}
+var file_google_monitoring_v3_uptime_service_proto_depIdxs = []int32{
+ 8, // 0: google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs:type_name -> google.monitoring.v3.UptimeCheckConfig
+ 8, // 1: google.monitoring.v3.CreateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig
+ 9, // 2: google.monitoring.v3.UpdateUptimeCheckConfigRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 8, // 3: google.monitoring.v3.UpdateUptimeCheckConfigRequest.uptime_check_config:type_name -> google.monitoring.v3.UptimeCheckConfig
+ 10, // 4: google.monitoring.v3.ListUptimeCheckIpsResponse.uptime_check_ips:type_name -> google.monitoring.v3.UptimeCheckIp
+ 0, // 5: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:input_type -> google.monitoring.v3.ListUptimeCheckConfigsRequest
+ 2, // 6: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:input_type -> google.monitoring.v3.GetUptimeCheckConfigRequest
+ 3, // 7: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:input_type -> google.monitoring.v3.CreateUptimeCheckConfigRequest
+ 4, // 8: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:input_type -> google.monitoring.v3.UpdateUptimeCheckConfigRequest
+ 5, // 9: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:input_type -> google.monitoring.v3.DeleteUptimeCheckConfigRequest
+ 6, // 10: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:input_type -> google.monitoring.v3.ListUptimeCheckIpsRequest
+ 1, // 11: google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs:output_type -> google.monitoring.v3.ListUptimeCheckConfigsResponse
+ 8, // 12: google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig
+ 8, // 13: google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig
+ 8, // 14: google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig:output_type -> google.monitoring.v3.UptimeCheckConfig
+ 11, // 15: google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig:output_type -> google.protobuf.Empty
+ 7, // 16: google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps:output_type -> google.monitoring.v3.ListUptimeCheckIpsResponse
+ 11, // [11:17] is the sub-list for method output_type
+ 5, // [5:11] is the sub-list for method input_type
+ 5, // [5:5] is the sub-list for extension type_name
+ 5, // [5:5] is the sub-list for extension extendee
+ 0, // [0:5] is the sub-list for field type_name
+}
+
+func init() { file_google_monitoring_v3_uptime_service_proto_init() }
+func file_google_monitoring_v3_uptime_service_proto_init() {
+ if File_google_monitoring_v3_uptime_service_proto != nil {
+ return
+ }
+ file_google_monitoring_v3_uptime_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_monitoring_v3_uptime_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_google_monitoring_v3_uptime_service_proto_goTypes,
+ DependencyIndexes: file_google_monitoring_v3_uptime_service_proto_depIdxs,
+ MessageInfos: file_google_monitoring_v3_uptime_service_proto_msgTypes,
+ }.Build()
+ File_google_monitoring_v3_uptime_service_proto = out.File
+ file_google_monitoring_v3_uptime_service_proto_rawDesc = nil
+ file_google_monitoring_v3_uptime_service_proto_goTypes = nil
+ file_google_monitoring_v3_uptime_service_proto_depIdxs = nil
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConnInterface
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion6
+
+// UptimeCheckServiceClient is the client API for UptimeCheckService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type UptimeCheckServiceClient interface {
+ // Lists the existing valid Uptime check configurations for the project
+ // (leaving out any invalid configurations).
+ ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error)
+ // Gets a single Uptime check configuration.
+ GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Creates a new Uptime check configuration.
+ CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Updates an Uptime check configuration. You can either replace the entire
+ // configuration with a new one or replace only certain fields in the current
+ // configuration by specifying the fields to be updated via `updateMask`.
+ // Returns the updated configuration.
+ UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error)
+ // Deletes an Uptime check configuration. Note that this method will fail
+ // if the Uptime check configuration is referenced by an alert policy or
+ // other dependent configs that would be rendered invalid by the deletion.
+ DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
+ // Returns the list of IP addresses that checkers run from.
+ ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error)
+}
+
+type uptimeCheckServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewUptimeCheckServiceClient(cc grpc.ClientConnInterface) UptimeCheckServiceClient {
+ return &uptimeCheckServiceClient{cc}
+}
+
+func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) {
+ out := new(ListUptimeCheckConfigsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) {
+ out := new(UptimeCheckConfig)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
+ out := new(emptypb.Empty)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) {
+ out := new(ListUptimeCheckIpsResponse)
+ err := c.cc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// UptimeCheckServiceServer is the server API for UptimeCheckService service.
+type UptimeCheckServiceServer interface {
+ // Lists the existing valid Uptime check configurations for the project
+ // (leaving out any invalid configurations).
+ ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error)
+ // Gets a single Uptime check configuration.
+ GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Creates a new Uptime check configuration.
+ CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Updates an Uptime check configuration. You can either replace the entire
+ // configuration with a new one or replace only certain fields in the current
+ // configuration by specifying the fields to be updated via `updateMask`.
+ // Returns the updated configuration.
+ UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error)
+ // Deletes an Uptime check configuration. Note that this method will fail
+ // if the Uptime check configuration is referenced by an alert policy or
+ // other dependent configs that would be rendered invalid by the deletion.
+ DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error)
+ // Returns the list of IP addresses that checkers run from.
+ ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error)
+}
+
+// UnimplementedUptimeCheckServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedUptimeCheckServiceServer struct {
+}
+
+func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckConfigs not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method GetUptimeCheckConfig not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method CreateUptimeCheckConfig not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UpdateUptimeCheckConfig not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*emptypb.Empty, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteUptimeCheckConfig not implemented")
+}
+func (*UnimplementedUptimeCheckServiceServer) ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method ListUptimeCheckIps not implemented")
+}
+
+func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) {
+ s.RegisterService(&_UptimeCheckService_serviceDesc, srv)
+}
+
+func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListUptimeCheckConfigsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(GetUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CreateUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UpdateUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteUptimeCheckConfigRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ListUptimeCheckIpsRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "google.monitoring.v3.UptimeCheckService",
+ HandlerType: (*UptimeCheckServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "ListUptimeCheckConfigs",
+ Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler,
+ },
+ {
+ MethodName: "GetUptimeCheckConfig",
+ Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "CreateUptimeCheckConfig",
+ Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "UpdateUptimeCheckConfig",
+ Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "DeleteUptimeCheckConfig",
+ Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler,
+ },
+ {
+ MethodName: "ListUptimeCheckIps",
+ Handler: _UptimeCheckService_ListUptimeCheckIps_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "google/monitoring/v3/uptime_service.proto",
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go
new file mode 100644
index 000000000..3b36b219e
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/notification_channel_client.go
@@ -0,0 +1,622 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newNotificationChannelClientHook clientHook
+
+// NotificationChannelCallOptions contains the retry settings for each method of NotificationChannelClient.
+type NotificationChannelCallOptions struct {
+ ListNotificationChannelDescriptors []gax.CallOption
+ GetNotificationChannelDescriptor []gax.CallOption
+ ListNotificationChannels []gax.CallOption
+ GetNotificationChannel []gax.CallOption
+ CreateNotificationChannel []gax.CallOption
+ UpdateNotificationChannel []gax.CallOption
+ DeleteNotificationChannel []gax.CallOption
+ SendNotificationChannelVerificationCode []gax.CallOption
+ GetNotificationChannelVerificationCode []gax.CallOption
+ VerifyNotificationChannel []gax.CallOption
+}
+
+func defaultNotificationChannelGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultNotificationChannelCallOptions() *NotificationChannelCallOptions {
+ return &NotificationChannelCallOptions{
+ ListNotificationChannelDescriptors: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetNotificationChannelDescriptor: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListNotificationChannels: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ UpdateNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ SendNotificationChannelVerificationCode: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetNotificationChannelVerificationCode: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ VerifyNotificationChannel: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ }
+}
+
+// internalNotificationChannelClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalNotificationChannelClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListNotificationChannelDescriptors(context.Context, *monitoringpb.ListNotificationChannelDescriptorsRequest, ...gax.CallOption) *NotificationChannelDescriptorIterator
+ GetNotificationChannelDescriptor(context.Context, *monitoringpb.GetNotificationChannelDescriptorRequest, ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error)
+ ListNotificationChannels(context.Context, *monitoringpb.ListNotificationChannelsRequest, ...gax.CallOption) *NotificationChannelIterator
+ GetNotificationChannel(context.Context, *monitoringpb.GetNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
+ CreateNotificationChannel(context.Context, *monitoringpb.CreateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
+ UpdateNotificationChannel(context.Context, *monitoringpb.UpdateNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
+ DeleteNotificationChannel(context.Context, *monitoringpb.DeleteNotificationChannelRequest, ...gax.CallOption) error
+ SendNotificationChannelVerificationCode(context.Context, *monitoringpb.SendNotificationChannelVerificationCodeRequest, ...gax.CallOption) error
+ GetNotificationChannelVerificationCode(context.Context, *monitoringpb.GetNotificationChannelVerificationCodeRequest, ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error)
+ VerifyNotificationChannel(context.Context, *monitoringpb.VerifyNotificationChannelRequest, ...gax.CallOption) (*monitoringpb.NotificationChannel, error)
+}
+
+// NotificationChannelClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The Notification Channel API provides access to configuration that
+// controls how messages related to incidents are sent.
+type NotificationChannelClient struct {
+ // The internal transport-dependent client.
+ internalClient internalNotificationChannelClient
+
+ // The call options for this service.
+ CallOptions *NotificationChannelCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *NotificationChannelClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *NotificationChannelClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *NotificationChannelClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListNotificationChannelDescriptors lists the descriptors for supported channel types. The use of descriptors
+// makes it possible for new channel types to be dynamically added.
+func (c *NotificationChannelClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
+ return c.internalClient.ListNotificationChannelDescriptors(ctx, req, opts...)
+}
+
+// GetNotificationChannelDescriptor gets a single channel descriptor. The descriptor indicates which fields
+// are expected / permitted for a notification channel of the given type.
+func (c *NotificationChannelClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
+ return c.internalClient.GetNotificationChannelDescriptor(ctx, req, opts...)
+}
+
+// ListNotificationChannels lists the notification channels that have been created for the project.
+// To list the types of notification channels that are supported, use
+// the ListNotificationChannelDescriptors method.
+func (c *NotificationChannelClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
+ return c.internalClient.ListNotificationChannels(ctx, req, opts...)
+}
+
+// GetNotificationChannel gets a single notification channel. The channel includes the relevant
+// configuration details with which the channel was created. However, the
+// response may truncate or omit passwords, API keys, or other private key
+// matter and thus the response may not be 100% identical to the information
+// that was supplied in the call to the create method.
+func (c *NotificationChannelClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ return c.internalClient.GetNotificationChannel(ctx, req, opts...)
+}
+
+// CreateNotificationChannel creates a new notification channel, representing a single notification
+// endpoint such as an email address, SMS number, or PagerDuty service.
+//
+// Design your application to single-thread API calls that modify the state of
+// notification channels in a single project. This includes calls to
+// CreateNotificationChannel, DeleteNotificationChannel and
+// UpdateNotificationChannel.
+func (c *NotificationChannelClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ return c.internalClient.CreateNotificationChannel(ctx, req, opts...)
+}
+
+// UpdateNotificationChannel updates a notification channel. Fields not specified in the field mask
+// remain unchanged.
+//
+// Design your application to single-thread API calls that modify the state of
+// notification channels in a single project. This includes calls to
+// CreateNotificationChannel, DeleteNotificationChannel and
+// UpdateNotificationChannel.
+func (c *NotificationChannelClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ return c.internalClient.UpdateNotificationChannel(ctx, req, opts...)
+}
+
+// DeleteNotificationChannel deletes a notification channel.
+//
+// Design your application to single-thread API calls that modify the state of
+// notification channels in a single project. This includes calls to
+// CreateNotificationChannel, DeleteNotificationChannel and
+// UpdateNotificationChannel.
+func (c *NotificationChannelClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteNotificationChannel(ctx, req, opts...)
+}
+
+// SendNotificationChannelVerificationCode causes a verification code to be delivered to the channel. The code
+// can then be supplied in VerifyNotificationChannel to verify the channel.
+func (c *NotificationChannelClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error {
+ return c.internalClient.SendNotificationChannelVerificationCode(ctx, req, opts...)
+}
+
+// GetNotificationChannelVerificationCode requests a verification code for an already verified channel that can then
+// be used in a call to VerifyNotificationChannel() on a different channel
+// with an equivalent identity in the same or in a different project. This
+// makes it possible to copy a channel between projects without requiring
+// manual reverification of the channel. If the channel is not in the
+// verified state, this method will fail (in other words, this may only be
+// used if the SendNotificationChannelVerificationCode and
+// VerifyNotificationChannel paths have already been used to put the given
+// channel into the verified state).
+//
+// There is no guarantee that the verification codes returned by this method
+// will be of a similar structure or form as the ones that are delivered
+// to the channel via SendNotificationChannelVerificationCode; while
+// VerifyNotificationChannel() will recognize both the codes delivered via
+// SendNotificationChannelVerificationCode() and returned from
+// GetNotificationChannelVerificationCode(), it is typically the case that
+// the verification codes delivered via
+// SendNotificationChannelVerificationCode() will be shorter and also
+// have a shorter expiration (e.g. codes such as “G-123456”) whereas
+// GetVerificationCode() will typically return a much longer, websafe base
+// 64 encoded string that has a longer expiration time.
+func (c *NotificationChannelClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) {
+ return c.internalClient.GetNotificationChannelVerificationCode(ctx, req, opts...)
+}
+
+// VerifyNotificationChannel verifies a NotificationChannel by proving receipt of the code
+// delivered to the channel as a result of calling
+// SendNotificationChannelVerificationCode.
+func (c *NotificationChannelClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ return c.internalClient.VerifyNotificationChannel(ctx, req, opts...)
+}
+
+// notificationChannelGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type notificationChannelGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing NotificationChannelClient
+ CallOptions **NotificationChannelCallOptions
+
+ // The gRPC API client.
+ notificationChannelClient monitoringpb.NotificationChannelServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewNotificationChannelClient creates a new notification channel service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The Notification Channel API provides access to configuration that
+// controls how messages related to incidents are sent.
+func NewNotificationChannelClient(ctx context.Context, opts ...option.ClientOption) (*NotificationChannelClient, error) {
+ clientOpts := defaultNotificationChannelGRPCClientOptions()
+ if newNotificationChannelClientHook != nil {
+ hookOpts, err := newNotificationChannelClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := NotificationChannelClient{CallOptions: defaultNotificationChannelCallOptions()}
+
+ c := &notificationChannelGRPCClient{
+ connPool: connPool,
+ notificationChannelClient: monitoringpb.NewNotificationChannelServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *notificationChannelGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *notificationChannelGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *notificationChannelGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *notificationChannelGRPCClient) ListNotificationChannelDescriptors(ctx context.Context, req *monitoringpb.ListNotificationChannelDescriptorsRequest, opts ...gax.CallOption) *NotificationChannelDescriptorIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListNotificationChannelDescriptors[0:len((*c.CallOptions).ListNotificationChannelDescriptors):len((*c.CallOptions).ListNotificationChannelDescriptors)], opts...)
+ it := &NotificationChannelDescriptorIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListNotificationChannelDescriptorsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannelDescriptor, string, error) {
+ resp := &monitoringpb.ListNotificationChannelDescriptorsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.ListNotificationChannelDescriptors, req, settings.GRPC, c.logger, "ListNotificationChannelDescriptors")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetChannelDescriptors(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *notificationChannelGRPCClient) GetNotificationChannelDescriptor(ctx context.Context, req *monitoringpb.GetNotificationChannelDescriptorRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannelDescriptor, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetNotificationChannelDescriptor[0:len((*c.CallOptions).GetNotificationChannelDescriptor):len((*c.CallOptions).GetNotificationChannelDescriptor)], opts...)
+ var resp *monitoringpb.NotificationChannelDescriptor
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannelDescriptor, req, settings.GRPC, c.logger, "GetNotificationChannelDescriptor")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) ListNotificationChannels(ctx context.Context, req *monitoringpb.ListNotificationChannelsRequest, opts ...gax.CallOption) *NotificationChannelIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListNotificationChannels[0:len((*c.CallOptions).ListNotificationChannels):len((*c.CallOptions).ListNotificationChannels)], opts...)
+ it := &NotificationChannelIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListNotificationChannelsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.NotificationChannel, string, error) {
+ resp := &monitoringpb.ListNotificationChannelsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.ListNotificationChannels, req, settings.GRPC, c.logger, "ListNotificationChannels")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetNotificationChannels(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *notificationChannelGRPCClient) GetNotificationChannel(ctx context.Context, req *monitoringpb.GetNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetNotificationChannel[0:len((*c.CallOptions).GetNotificationChannel):len((*c.CallOptions).GetNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannel, req, settings.GRPC, c.logger, "GetNotificationChannel")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) CreateNotificationChannel(ctx context.Context, req *monitoringpb.CreateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateNotificationChannel[0:len((*c.CallOptions).CreateNotificationChannel):len((*c.CallOptions).CreateNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.CreateNotificationChannel, req, settings.GRPC, c.logger, "CreateNotificationChannel")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) UpdateNotificationChannel(ctx context.Context, req *monitoringpb.UpdateNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "notification_channel.name", url.QueryEscape(req.GetNotificationChannel().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateNotificationChannel[0:len((*c.CallOptions).UpdateNotificationChannel):len((*c.CallOptions).UpdateNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.UpdateNotificationChannel, req, settings.GRPC, c.logger, "UpdateNotificationChannel")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) DeleteNotificationChannel(ctx context.Context, req *monitoringpb.DeleteNotificationChannelRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteNotificationChannel[0:len((*c.CallOptions).DeleteNotificationChannel):len((*c.CallOptions).DeleteNotificationChannel)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.notificationChannelClient.DeleteNotificationChannel, req, settings.GRPC, c.logger, "DeleteNotificationChannel")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *notificationChannelGRPCClient) SendNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.SendNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).SendNotificationChannelVerificationCode[0:len((*c.CallOptions).SendNotificationChannelVerificationCode):len((*c.CallOptions).SendNotificationChannelVerificationCode)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.notificationChannelClient.SendNotificationChannelVerificationCode, req, settings.GRPC, c.logger, "SendNotificationChannelVerificationCode")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *notificationChannelGRPCClient) GetNotificationChannelVerificationCode(ctx context.Context, req *monitoringpb.GetNotificationChannelVerificationCodeRequest, opts ...gax.CallOption) (*monitoringpb.GetNotificationChannelVerificationCodeResponse, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetNotificationChannelVerificationCode[0:len((*c.CallOptions).GetNotificationChannelVerificationCode):len((*c.CallOptions).GetNotificationChannelVerificationCode)], opts...)
+ var resp *monitoringpb.GetNotificationChannelVerificationCodeResponse
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.GetNotificationChannelVerificationCode, req, settings.GRPC, c.logger, "GetNotificationChannelVerificationCode")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *notificationChannelGRPCClient) VerifyNotificationChannel(ctx context.Context, req *monitoringpb.VerifyNotificationChannelRequest, opts ...gax.CallOption) (*monitoringpb.NotificationChannel, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).VerifyNotificationChannel[0:len((*c.CallOptions).VerifyNotificationChannel):len((*c.CallOptions).VerifyNotificationChannel)], opts...)
+ var resp *monitoringpb.NotificationChannel
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.notificationChannelClient.VerifyNotificationChannel, req, settings.GRPC, c.logger, "VerifyNotificationChannel")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go
new file mode 100644
index 000000000..f792f2bd7
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/query_client.go
@@ -0,0 +1,242 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+)
+
+var newQueryClientHook clientHook
+
+// QueryCallOptions contains the retry settings for each method of QueryClient.
+type QueryCallOptions struct {
+ QueryTimeSeries []gax.CallOption
+}
+
+func defaultQueryGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultQueryCallOptions() *QueryCallOptions {
+ return &QueryCallOptions{
+ QueryTimeSeries: []gax.CallOption{},
+ }
+}
+
+// internalQueryClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalQueryClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ QueryTimeSeries(context.Context, *monitoringpb.QueryTimeSeriesRequest, ...gax.CallOption) *TimeSeriesDataIterator
+}
+
+// QueryClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The QueryService API is used to manage time series data in Cloud
+// Monitoring. Time series data is a collection of data points that describes
+// the time-varying values of a metric.
+type QueryClient struct {
+ // The internal transport-dependent client.
+ internalClient internalQueryClient
+
+ // The call options for this service.
+ CallOptions *QueryCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *QueryClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *QueryClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *QueryClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// QueryTimeSeries queries time series by using Monitoring Query Language (MQL). We recommend
+// using PromQL instead of MQL. For more information about the status of MQL,
+// see the MQL deprecation
+// notice (at https://cloud.google.com/stackdriver/docs/deprecations/mql).
+//
+// Deprecated: QueryTimeSeries may be removed in a future version.
+func (c *QueryClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator {
+ return c.internalClient.QueryTimeSeries(ctx, req, opts...)
+}
+
+// queryGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type queryGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing QueryClient
+ CallOptions **QueryCallOptions
+
+ // The gRPC API client.
+ queryClient monitoringpb.QueryServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewQueryClient creates a new query service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The QueryService API is used to manage time series data in Cloud
+// Monitoring. Time series data is a collection of data points that describes
+// the time-varying values of a metric.
+func NewQueryClient(ctx context.Context, opts ...option.ClientOption) (*QueryClient, error) {
+ clientOpts := defaultQueryGRPCClientOptions()
+ if newQueryClientHook != nil {
+ hookOpts, err := newQueryClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := QueryClient{CallOptions: defaultQueryCallOptions()}
+
+ c := &queryGRPCClient{
+ connPool: connPool,
+ queryClient: monitoringpb.NewQueryServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *queryGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *queryGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *queryGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *queryGRPCClient) QueryTimeSeries(ctx context.Context, req *monitoringpb.QueryTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesDataIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).QueryTimeSeries[0:len((*c.CallOptions).QueryTimeSeries):len((*c.CallOptions).QueryTimeSeries)], opts...)
+ it := &TimeSeriesDataIterator{}
+ req = proto.Clone(req).(*monitoringpb.QueryTimeSeriesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeriesData, string, error) {
+ resp := &monitoringpb.QueryTimeSeriesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.queryClient.QueryTimeSeries, req, settings.GRPC, c.logger, "QueryTimeSeries")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetTimeSeriesData(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go
new file mode 100644
index 000000000..7dc66e373
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/service_monitoring_client.go
@@ -0,0 +1,569 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newServiceMonitoringClientHook clientHook
+
+// ServiceMonitoringCallOptions contains the retry settings for each method of ServiceMonitoringClient.
+type ServiceMonitoringCallOptions struct {
+ CreateService []gax.CallOption
+ GetService []gax.CallOption
+ ListServices []gax.CallOption
+ UpdateService []gax.CallOption
+ DeleteService []gax.CallOption
+ CreateServiceLevelObjective []gax.CallOption
+ GetServiceLevelObjective []gax.CallOption
+ ListServiceLevelObjectives []gax.CallOption
+ UpdateServiceLevelObjective []gax.CallOption
+ DeleteServiceLevelObjective []gax.CallOption
+}
+
+func defaultServiceMonitoringGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultServiceMonitoringCallOptions() *ServiceMonitoringCallOptions {
+ return &ServiceMonitoringCallOptions{
+ CreateService: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetService: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListServices: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateService: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteService: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateServiceLevelObjective: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ GetServiceLevelObjective: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListServiceLevelObjectives: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateServiceLevelObjective: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteServiceLevelObjective: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ }
+}
+
+// internalServiceMonitoringClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalServiceMonitoringClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ CreateService(context.Context, *monitoringpb.CreateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
+ GetService(context.Context, *monitoringpb.GetServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
+ ListServices(context.Context, *monitoringpb.ListServicesRequest, ...gax.CallOption) *ServiceIterator
+ UpdateService(context.Context, *monitoringpb.UpdateServiceRequest, ...gax.CallOption) (*monitoringpb.Service, error)
+ DeleteService(context.Context, *monitoringpb.DeleteServiceRequest, ...gax.CallOption) error
+ CreateServiceLevelObjective(context.Context, *monitoringpb.CreateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
+ GetServiceLevelObjective(context.Context, *monitoringpb.GetServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
+ ListServiceLevelObjectives(context.Context, *monitoringpb.ListServiceLevelObjectivesRequest, ...gax.CallOption) *ServiceLevelObjectiveIterator
+ UpdateServiceLevelObjective(context.Context, *monitoringpb.UpdateServiceLevelObjectiveRequest, ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error)
+ DeleteServiceLevelObjective(context.Context, *monitoringpb.DeleteServiceLevelObjectiveRequest, ...gax.CallOption) error
+}
+
+// ServiceMonitoringClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for
+// managing and querying aspects of a Metrics Scope’s services. These include
+// the Service's monitored resources, its Service-Level Objectives, and a
+// taxonomy of categorized Health Metrics.
+type ServiceMonitoringClient struct {
+ // The internal transport-dependent client.
+ internalClient internalServiceMonitoringClient
+
+ // The call options for this service.
+ CallOptions *ServiceMonitoringCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *ServiceMonitoringClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *ServiceMonitoringClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *ServiceMonitoringClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// CreateService create a Service.
+func (c *ServiceMonitoringClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ return c.internalClient.CreateService(ctx, req, opts...)
+}
+
+// GetService get the named Service.
+func (c *ServiceMonitoringClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ return c.internalClient.GetService(ctx, req, opts...)
+}
+
+// ListServices list Services for this Metrics Scope.
+func (c *ServiceMonitoringClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator {
+ return c.internalClient.ListServices(ctx, req, opts...)
+}
+
+// UpdateService update this Service.
+func (c *ServiceMonitoringClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ return c.internalClient.UpdateService(ctx, req, opts...)
+}
+
+// DeleteService soft delete this Service.
+func (c *ServiceMonitoringClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteService(ctx, req, opts...)
+}
+
+// CreateServiceLevelObjective create a ServiceLevelObjective for the given Service.
+func (c *ServiceMonitoringClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ return c.internalClient.CreateServiceLevelObjective(ctx, req, opts...)
+}
+
+// GetServiceLevelObjective get a ServiceLevelObjective by name.
+func (c *ServiceMonitoringClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ return c.internalClient.GetServiceLevelObjective(ctx, req, opts...)
+}
+
+// ListServiceLevelObjectives list the ServiceLevelObjectives for the given Service.
+func (c *ServiceMonitoringClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator {
+ return c.internalClient.ListServiceLevelObjectives(ctx, req, opts...)
+}
+
+// UpdateServiceLevelObjective update the given ServiceLevelObjective.
+func (c *ServiceMonitoringClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ return c.internalClient.UpdateServiceLevelObjective(ctx, req, opts...)
+}
+
+// DeleteServiceLevelObjective delete the given ServiceLevelObjective.
+func (c *ServiceMonitoringClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteServiceLevelObjective(ctx, req, opts...)
+}
+
+// serviceMonitoringGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type serviceMonitoringGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing ServiceMonitoringClient
+ CallOptions **ServiceMonitoringCallOptions
+
+ // The gRPC API client.
+ serviceMonitoringClient monitoringpb.ServiceMonitoringServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewServiceMonitoringClient creates a new service monitoring service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The Cloud Monitoring Service-Oriented Monitoring API has endpoints for
+// managing and querying aspects of a Metrics Scope’s services. These include
+// the Service's monitored resources, its Service-Level Objectives, and a
+// taxonomy of categorized Health Metrics.
+func NewServiceMonitoringClient(ctx context.Context, opts ...option.ClientOption) (*ServiceMonitoringClient, error) {
+ clientOpts := defaultServiceMonitoringGRPCClientOptions()
+ if newServiceMonitoringClientHook != nil {
+ hookOpts, err := newServiceMonitoringClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := ServiceMonitoringClient{CallOptions: defaultServiceMonitoringCallOptions()}
+
+ c := &serviceMonitoringGRPCClient{
+ connPool: connPool,
+ serviceMonitoringClient: monitoringpb.NewServiceMonitoringServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *serviceMonitoringGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *serviceMonitoringGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *serviceMonitoringGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *serviceMonitoringGRPCClient) CreateService(ctx context.Context, req *monitoringpb.CreateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateService[0:len((*c.CallOptions).CreateService):len((*c.CallOptions).CreateService)], opts...)
+ var resp *monitoringpb.Service
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.CreateService, req, settings.GRPC, c.logger, "CreateService")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) GetService(ctx context.Context, req *monitoringpb.GetServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetService[0:len((*c.CallOptions).GetService):len((*c.CallOptions).GetService)], opts...)
+ var resp *monitoringpb.Service
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.GetService, req, settings.GRPC, c.logger, "GetService")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) ListServices(ctx context.Context, req *monitoringpb.ListServicesRequest, opts ...gax.CallOption) *ServiceIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListServices[0:len((*c.CallOptions).ListServices):len((*c.CallOptions).ListServices)], opts...)
+ it := &ServiceIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListServicesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Service, string, error) {
+ resp := &monitoringpb.ListServicesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.ListServices, req, settings.GRPC, c.logger, "ListServices")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetServices(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *serviceMonitoringGRPCClient) UpdateService(ctx context.Context, req *monitoringpb.UpdateServiceRequest, opts ...gax.CallOption) (*monitoringpb.Service, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service.name", url.QueryEscape(req.GetService().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateService[0:len((*c.CallOptions).UpdateService):len((*c.CallOptions).UpdateService)], opts...)
+ var resp *monitoringpb.Service
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.UpdateService, req, settings.GRPC, c.logger, "UpdateService")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) DeleteService(ctx context.Context, req *monitoringpb.DeleteServiceRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteService[0:len((*c.CallOptions).DeleteService):len((*c.CallOptions).DeleteService)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.serviceMonitoringClient.DeleteService, req, settings.GRPC, c.logger, "DeleteService")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *serviceMonitoringGRPCClient) CreateServiceLevelObjective(ctx context.Context, req *monitoringpb.CreateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateServiceLevelObjective[0:len((*c.CallOptions).CreateServiceLevelObjective):len((*c.CallOptions).CreateServiceLevelObjective)], opts...)
+ var resp *monitoringpb.ServiceLevelObjective
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.CreateServiceLevelObjective, req, settings.GRPC, c.logger, "CreateServiceLevelObjective")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) GetServiceLevelObjective(ctx context.Context, req *monitoringpb.GetServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetServiceLevelObjective[0:len((*c.CallOptions).GetServiceLevelObjective):len((*c.CallOptions).GetServiceLevelObjective)], opts...)
+ var resp *monitoringpb.ServiceLevelObjective
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.GetServiceLevelObjective, req, settings.GRPC, c.logger, "GetServiceLevelObjective")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) ListServiceLevelObjectives(ctx context.Context, req *monitoringpb.ListServiceLevelObjectivesRequest, opts ...gax.CallOption) *ServiceLevelObjectiveIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListServiceLevelObjectives[0:len((*c.CallOptions).ListServiceLevelObjectives):len((*c.CallOptions).ListServiceLevelObjectives)], opts...)
+ it := &ServiceLevelObjectiveIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListServiceLevelObjectivesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.ServiceLevelObjective, string, error) {
+ resp := &monitoringpb.ListServiceLevelObjectivesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.ListServiceLevelObjectives, req, settings.GRPC, c.logger, "ListServiceLevelObjectives")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetServiceLevelObjectives(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *serviceMonitoringGRPCClient) UpdateServiceLevelObjective(ctx context.Context, req *monitoringpb.UpdateServiceLevelObjectiveRequest, opts ...gax.CallOption) (*monitoringpb.ServiceLevelObjective, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "service_level_objective.name", url.QueryEscape(req.GetServiceLevelObjective().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateServiceLevelObjective[0:len((*c.CallOptions).UpdateServiceLevelObjective):len((*c.CallOptions).UpdateServiceLevelObjective)], opts...)
+ var resp *monitoringpb.ServiceLevelObjective
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.serviceMonitoringClient.UpdateServiceLevelObjective, req, settings.GRPC, c.logger, "UpdateServiceLevelObjective")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *serviceMonitoringGRPCClient) DeleteServiceLevelObjective(ctx context.Context, req *monitoringpb.DeleteServiceLevelObjectiveRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteServiceLevelObjective[0:len((*c.CallOptions).DeleteServiceLevelObjective):len((*c.CallOptions).DeleteServiceLevelObjective)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.serviceMonitoringClient.DeleteServiceLevelObjective, req, settings.GRPC, c.logger, "DeleteServiceLevelObjective")
+ return err
+ }, opts...)
+ return err
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go
new file mode 100644
index 000000000..5b76a486b
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/snooze_client.go
@@ -0,0 +1,347 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newSnoozeClientHook clientHook
+
+// SnoozeCallOptions contains the retry settings for each method of SnoozeClient.
+type SnoozeCallOptions struct {
+ CreateSnooze []gax.CallOption
+ ListSnoozes []gax.CallOption
+ GetSnooze []gax.CallOption
+ UpdateSnooze []gax.CallOption
+}
+
+func defaultSnoozeGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultSnoozeCallOptions() *SnoozeCallOptions {
+ return &SnoozeCallOptions{
+ CreateSnooze: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ ListSnoozes: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetSnooze: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ UpdateSnooze: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ }
+}
+
+// internalSnoozeClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalSnoozeClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ CreateSnooze(context.Context, *monitoringpb.CreateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
+ ListSnoozes(context.Context, *monitoringpb.ListSnoozesRequest, ...gax.CallOption) *SnoozeIterator
+ GetSnooze(context.Context, *monitoringpb.GetSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
+ UpdateSnooze(context.Context, *monitoringpb.UpdateSnoozeRequest, ...gax.CallOption) (*monitoringpb.Snooze, error)
+}
+
+// SnoozeClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The SnoozeService API is used to temporarily prevent an alert policy from
+// generating alerts. A Snooze is a description of the criteria under which one
+// or more alert policies should not fire alerts for the specified duration.
+type SnoozeClient struct {
+ // The internal transport-dependent client.
+ internalClient internalSnoozeClient
+
+ // The call options for this service.
+ CallOptions *SnoozeCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *SnoozeClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *SnoozeClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *SnoozeClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// CreateSnooze creates a Snooze that will prevent alerts, which match the provided
+// criteria, from being opened. The Snooze applies for a specific time
+// interval.
+func (c *SnoozeClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ return c.internalClient.CreateSnooze(ctx, req, opts...)
+}
+
+// ListSnoozes lists the Snoozes associated with a project. Can optionally pass in
+// filter, which specifies predicates to match Snoozes.
+func (c *SnoozeClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator {
+ return c.internalClient.ListSnoozes(ctx, req, opts...)
+}
+
+// GetSnooze retrieves a Snooze by name.
+func (c *SnoozeClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ return c.internalClient.GetSnooze(ctx, req, opts...)
+}
+
+// UpdateSnooze updates a Snooze, identified by its name, with the parameters in the
+// given Snooze object.
+func (c *SnoozeClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ return c.internalClient.UpdateSnooze(ctx, req, opts...)
+}
+
+// snoozeGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type snoozeGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing SnoozeClient
+ CallOptions **SnoozeCallOptions
+
+ // The gRPC API client.
+ snoozeClient monitoringpb.SnoozeServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewSnoozeClient creates a new snooze service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The SnoozeService API is used to temporarily prevent an alert policy from
+// generating alerts. A Snooze is a description of the criteria under which one
+// or more alert policies should not fire alerts for the specified duration.
+func NewSnoozeClient(ctx context.Context, opts ...option.ClientOption) (*SnoozeClient, error) {
+ clientOpts := defaultSnoozeGRPCClientOptions()
+ if newSnoozeClientHook != nil {
+ hookOpts, err := newSnoozeClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := SnoozeClient{CallOptions: defaultSnoozeCallOptions()}
+
+ c := &snoozeGRPCClient{
+ connPool: connPool,
+ snoozeClient: monitoringpb.NewSnoozeServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *snoozeGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *snoozeGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *snoozeGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *snoozeGRPCClient) CreateSnooze(ctx context.Context, req *monitoringpb.CreateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateSnooze[0:len((*c.CallOptions).CreateSnooze):len((*c.CallOptions).CreateSnooze)], opts...)
+ var resp *monitoringpb.Snooze
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.snoozeClient.CreateSnooze, req, settings.GRPC, c.logger, "CreateSnooze")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *snoozeGRPCClient) ListSnoozes(ctx context.Context, req *monitoringpb.ListSnoozesRequest, opts ...gax.CallOption) *SnoozeIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListSnoozes[0:len((*c.CallOptions).ListSnoozes):len((*c.CallOptions).ListSnoozes)], opts...)
+ it := &SnoozeIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListSnoozesRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Snooze, string, error) {
+ resp := &monitoringpb.ListSnoozesResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.snoozeClient.ListSnoozes, req, settings.GRPC, c.logger, "ListSnoozes")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetSnoozes(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *snoozeGRPCClient) GetSnooze(ctx context.Context, req *monitoringpb.GetSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetSnooze[0:len((*c.CallOptions).GetSnooze):len((*c.CallOptions).GetSnooze)], opts...)
+ var resp *monitoringpb.Snooze
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.snoozeClient.GetSnooze, req, settings.GRPC, c.logger, "GetSnooze")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *snoozeGRPCClient) UpdateSnooze(ctx context.Context, req *monitoringpb.UpdateSnoozeRequest, opts ...gax.CallOption) (*monitoringpb.Snooze, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "snooze.name", url.QueryEscape(req.GetSnooze().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateSnooze[0:len((*c.CallOptions).UpdateSnooze):len((*c.CallOptions).UpdateSnooze)], opts...)
+ var resp *monitoringpb.Snooze
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.snoozeClient.UpdateSnooze, req, settings.GRPC, c.logger, "UpdateSnooze")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go
new file mode 100644
index 000000000..df0ec2957
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/uptime_check_client.go
@@ -0,0 +1,454 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package monitoring
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "math"
+ "net/url"
+ "time"
+
+ monitoringpb "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/iterator"
+ "google.golang.org/api/option"
+ "google.golang.org/api/option/internaloption"
+ gtransport "google.golang.org/api/transport/grpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+)
+
+var newUptimeCheckClientHook clientHook
+
+// UptimeCheckCallOptions contains the retry settings for each method of UptimeCheckClient.
+type UptimeCheckCallOptions struct {
+ ListUptimeCheckConfigs []gax.CallOption
+ GetUptimeCheckConfig []gax.CallOption
+ CreateUptimeCheckConfig []gax.CallOption
+ UpdateUptimeCheckConfig []gax.CallOption
+ DeleteUptimeCheckConfig []gax.CallOption
+ ListUptimeCheckIps []gax.CallOption
+}
+
+func defaultUptimeCheckGRPCClientOptions() []option.ClientOption {
+ return []option.ClientOption{
+ internaloption.WithDefaultEndpoint("monitoring.googleapis.com:443"),
+ internaloption.WithDefaultEndpointTemplate("monitoring.UNIVERSE_DOMAIN:443"),
+ internaloption.WithDefaultMTLSEndpoint("monitoring.mtls.googleapis.com:443"),
+ internaloption.WithDefaultUniverseDomain("googleapis.com"),
+ internaloption.WithDefaultAudience("https://monitoring.googleapis.com/"),
+ internaloption.WithDefaultScopes(DefaultAuthScopes()...),
+ internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
+ option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
+ grpc.MaxCallRecvMsgSize(math.MaxInt32))),
+ }
+}
+
+func defaultUptimeCheckCallOptions() *UptimeCheckCallOptions {
+ return &UptimeCheckCallOptions{
+ ListUptimeCheckConfigs: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ GetUptimeCheckConfig: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ CreateUptimeCheckConfig: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ UpdateUptimeCheckConfig: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ },
+ DeleteUptimeCheckConfig: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ ListUptimeCheckIps: []gax.CallOption{
+ gax.WithTimeout(30000 * time.Millisecond),
+ gax.WithRetry(func() gax.Retryer {
+ return gax.OnCodes([]codes.Code{
+ codes.Unavailable,
+ }, gax.Backoff{
+ Initial: 100 * time.Millisecond,
+ Max: 30000 * time.Millisecond,
+ Multiplier: 1.30,
+ })
+ }),
+ },
+ }
+}
+
+// internalUptimeCheckClient is an interface that defines the methods available from Cloud Monitoring API.
+type internalUptimeCheckClient interface {
+ Close() error
+ setGoogleClientInfo(...string)
+ Connection() *grpc.ClientConn
+ ListUptimeCheckConfigs(context.Context, *monitoringpb.ListUptimeCheckConfigsRequest, ...gax.CallOption) *UptimeCheckConfigIterator
+ GetUptimeCheckConfig(context.Context, *monitoringpb.GetUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
+ CreateUptimeCheckConfig(context.Context, *monitoringpb.CreateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
+ UpdateUptimeCheckConfig(context.Context, *monitoringpb.UpdateUptimeCheckConfigRequest, ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error)
+ DeleteUptimeCheckConfig(context.Context, *monitoringpb.DeleteUptimeCheckConfigRequest, ...gax.CallOption) error
+ ListUptimeCheckIps(context.Context, *monitoringpb.ListUptimeCheckIpsRequest, ...gax.CallOption) *UptimeCheckIpIterator
+}
+
+// UptimeCheckClient is a client for interacting with Cloud Monitoring API.
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+//
+// The UptimeCheckService API is used to manage (list, create, delete, edit)
+// Uptime check configurations in the Cloud Monitoring product. An Uptime
+// check is a piece of configuration that determines which resources and
+// services to monitor for availability. These configurations can also be
+// configured interactively by navigating to the [Cloud console]
+// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project,
+// clicking on “Monitoring” on the left-hand side to navigate to Cloud
+// Monitoring, and then clicking on “Uptime”.
+type UptimeCheckClient struct {
+ // The internal transport-dependent client.
+ internalClient internalUptimeCheckClient
+
+ // The call options for this service.
+ CallOptions *UptimeCheckCallOptions
+}
+
+// Wrapper methods routed to the internal client.
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *UptimeCheckClient) Close() error {
+ return c.internalClient.Close()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *UptimeCheckClient) setGoogleClientInfo(keyval ...string) {
+ c.internalClient.setGoogleClientInfo(keyval...)
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *UptimeCheckClient) Connection() *grpc.ClientConn {
+ return c.internalClient.Connection()
+}
+
+// ListUptimeCheckConfigs lists the existing valid Uptime check configurations for the project
+// (leaving out any invalid configurations).
+func (c *UptimeCheckClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator {
+ return c.internalClient.ListUptimeCheckConfigs(ctx, req, opts...)
+}
+
+// GetUptimeCheckConfig gets a single Uptime check configuration.
+func (c *UptimeCheckClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ return c.internalClient.GetUptimeCheckConfig(ctx, req, opts...)
+}
+
+// CreateUptimeCheckConfig creates a new Uptime check configuration.
+func (c *UptimeCheckClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ return c.internalClient.CreateUptimeCheckConfig(ctx, req, opts...)
+}
+
+// UpdateUptimeCheckConfig updates an Uptime check configuration. You can either replace the entire
+// configuration with a new one or replace only certain fields in the current
+// configuration by specifying the fields to be updated via updateMask.
+// Returns the updated configuration.
+func (c *UptimeCheckClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ return c.internalClient.UpdateUptimeCheckConfig(ctx, req, opts...)
+}
+
+// DeleteUptimeCheckConfig deletes an Uptime check configuration. Note that this method will fail
+// if the Uptime check configuration is referenced by an alert policy or
+// other dependent configs that would be rendered invalid by the deletion.
+func (c *UptimeCheckClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error {
+ return c.internalClient.DeleteUptimeCheckConfig(ctx, req, opts...)
+}
+
+// ListUptimeCheckIps returns the list of IP addresses that checkers run from.
+func (c *UptimeCheckClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
+ return c.internalClient.ListUptimeCheckIps(ctx, req, opts...)
+}
+
+// uptimeCheckGRPCClient is a client for interacting with Cloud Monitoring API over gRPC transport.
+//
+// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls.
+type uptimeCheckGRPCClient struct {
+ // Connection pool of gRPC connections to the service.
+ connPool gtransport.ConnPool
+
+ // Points back to the CallOptions field of the containing UptimeCheckClient
+ CallOptions **UptimeCheckCallOptions
+
+ // The gRPC API client.
+ uptimeCheckClient monitoringpb.UptimeCheckServiceClient
+
+ // The x-goog-* metadata to be sent with each request.
+ xGoogHeaders []string
+
+ logger *slog.Logger
+}
+
+// NewUptimeCheckClient creates a new uptime check service client based on gRPC.
+// The returned client must be Closed when it is done being used to clean up its underlying connections.
+//
+// The UptimeCheckService API is used to manage (list, create, delete, edit)
+// Uptime check configurations in the Cloud Monitoring product. An Uptime
+// check is a piece of configuration that determines which resources and
+// services to monitor for availability. These configurations can also be
+// configured interactively by navigating to the [Cloud console]
+// (https://console.cloud.google.com (at https://console.cloud.google.com)), selecting the appropriate project,
+// clicking on “Monitoring” on the left-hand side to navigate to Cloud
+// Monitoring, and then clicking on “Uptime”.
+func NewUptimeCheckClient(ctx context.Context, opts ...option.ClientOption) (*UptimeCheckClient, error) {
+ clientOpts := defaultUptimeCheckGRPCClientOptions()
+ if newUptimeCheckClientHook != nil {
+ hookOpts, err := newUptimeCheckClientHook(ctx, clientHookParams{})
+ if err != nil {
+ return nil, err
+ }
+ clientOpts = append(clientOpts, hookOpts...)
+ }
+
+ connPool, err := gtransport.DialPool(ctx, append(clientOpts, opts...)...)
+ if err != nil {
+ return nil, err
+ }
+ client := UptimeCheckClient{CallOptions: defaultUptimeCheckCallOptions()}
+
+ c := &uptimeCheckGRPCClient{
+ connPool: connPool,
+ uptimeCheckClient: monitoringpb.NewUptimeCheckServiceClient(connPool),
+ CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
+ }
+ c.setGoogleClientInfo()
+
+ client.internalClient = c
+
+ return &client, nil
+}
+
+// Connection returns a connection to the API service.
+//
+// Deprecated: Connections are now pooled so this method does not always
+// return the same resource.
+func (c *uptimeCheckGRPCClient) Connection() *grpc.ClientConn {
+ return c.connPool.Conn()
+}
+
+// setGoogleClientInfo sets the name and version of the application in
+// the `x-goog-api-client` header passed on each request. Intended for
+// use by Google-written clients.
+func (c *uptimeCheckGRPCClient) setGoogleClientInfo(keyval ...string) {
+ kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
+ kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
+}
+
+// Close closes the connection to the API service. The user should invoke this when
+// the client is no longer required.
+func (c *uptimeCheckGRPCClient) Close() error {
+ return c.connPool.Close()
+}
+
+func (c *uptimeCheckGRPCClient) ListUptimeCheckConfigs(ctx context.Context, req *monitoringpb.ListUptimeCheckConfigsRequest, opts ...gax.CallOption) *UptimeCheckConfigIterator {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).ListUptimeCheckConfigs[0:len((*c.CallOptions).ListUptimeCheckConfigs):len((*c.CallOptions).ListUptimeCheckConfigs)], opts...)
+ it := &UptimeCheckConfigIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListUptimeCheckConfigsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckConfig, string, error) {
+ resp := &monitoringpb.ListUptimeCheckConfigsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.ListUptimeCheckConfigs, req, settings.GRPC, c.logger, "ListUptimeCheckConfigs")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetUptimeCheckConfigs(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
+
+func (c *uptimeCheckGRPCClient) GetUptimeCheckConfig(ctx context.Context, req *monitoringpb.GetUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).GetUptimeCheckConfig[0:len((*c.CallOptions).GetUptimeCheckConfig):len((*c.CallOptions).GetUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.GetUptimeCheckConfig, req, settings.GRPC, c.logger, "GetUptimeCheckConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *uptimeCheckGRPCClient) CreateUptimeCheckConfig(ctx context.Context, req *monitoringpb.CreateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "parent", url.QueryEscape(req.GetParent()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).CreateUptimeCheckConfig[0:len((*c.CallOptions).CreateUptimeCheckConfig):len((*c.CallOptions).CreateUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.CreateUptimeCheckConfig, req, settings.GRPC, c.logger, "CreateUptimeCheckConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *uptimeCheckGRPCClient) UpdateUptimeCheckConfig(ctx context.Context, req *monitoringpb.UpdateUptimeCheckConfigRequest, opts ...gax.CallOption) (*monitoringpb.UptimeCheckConfig, error) {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "uptime_check_config.name", url.QueryEscape(req.GetUptimeCheckConfig().GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).UpdateUptimeCheckConfig[0:len((*c.CallOptions).UpdateUptimeCheckConfig):len((*c.CallOptions).UpdateUptimeCheckConfig)], opts...)
+ var resp *monitoringpb.UptimeCheckConfig
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.UpdateUptimeCheckConfig, req, settings.GRPC, c.logger, "UpdateUptimeCheckConfig")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *uptimeCheckGRPCClient) DeleteUptimeCheckConfig(ctx context.Context, req *monitoringpb.DeleteUptimeCheckConfigRequest, opts ...gax.CallOption) error {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))}
+
+ hds = append(c.xGoogHeaders, hds...)
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+ opts = append((*c.CallOptions).DeleteUptimeCheckConfig[0:len((*c.CallOptions).DeleteUptimeCheckConfig):len((*c.CallOptions).DeleteUptimeCheckConfig)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ _, err = executeRPC(ctx, c.uptimeCheckClient.DeleteUptimeCheckConfig, req, settings.GRPC, c.logger, "DeleteUptimeCheckConfig")
+ return err
+ }, opts...)
+ return err
+}
+
+func (c *uptimeCheckGRPCClient) ListUptimeCheckIps(ctx context.Context, req *monitoringpb.ListUptimeCheckIpsRequest, opts ...gax.CallOption) *UptimeCheckIpIterator {
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...)
+ opts = append((*c.CallOptions).ListUptimeCheckIps[0:len((*c.CallOptions).ListUptimeCheckIps):len((*c.CallOptions).ListUptimeCheckIps)], opts...)
+ it := &UptimeCheckIpIterator{}
+ req = proto.Clone(req).(*monitoringpb.ListUptimeCheckIpsRequest)
+ it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.UptimeCheckIp, string, error) {
+ resp := &monitoringpb.ListUptimeCheckIpsResponse{}
+ if pageToken != "" {
+ req.PageToken = pageToken
+ }
+ if pageSize > math.MaxInt32 {
+ req.PageSize = math.MaxInt32
+ } else if pageSize != 0 {
+ req.PageSize = int32(pageSize)
+ }
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ resp, err = executeRPC(ctx, c.uptimeCheckClient.ListUptimeCheckIps, req, settings.GRPC, c.logger, "ListUptimeCheckIps")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, "", err
+ }
+
+ it.Response = resp
+ return resp.GetUptimeCheckIps(), resp.GetNextPageToken(), nil
+ }
+ fetch := func(pageSize int, pageToken string) (string, error) {
+ items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
+ if err != nil {
+ return "", err
+ }
+ it.items = append(it.items, items...)
+ return nextPageToken, nil
+ }
+
+ it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
+ it.pageInfo.MaxSize = int(req.GetPageSize())
+ it.pageInfo.Token = req.GetPageToken()
+
+ return it
+}
diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go
new file mode 100644
index 000000000..accff0f5e
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/apiv3/v2/version.go
@@ -0,0 +1,23 @@
+// Copyright 2023 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by gapicgen. DO NOT EDIT.
+
+package monitoring
+
+import "cloud.google.com/go/monitoring/internal"
+
+func init() {
+ versionClient = internal.Version
+}
diff --git a/vendor/cloud.google.com/go/monitoring/internal/version.go b/vendor/cloud.google.com/go/monitoring/internal/version.go
new file mode 100644
index 000000000..e199c1168
--- /dev/null
+++ b/vendor/cloud.google.com/go/monitoring/internal/version.go
@@ -0,0 +1,18 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+// Version is the current tagged release of the library.
+const Version = "1.24.2"
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index 2da498b8e..e90454d01 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,194 @@
# Changes
+## [1.50.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.49.0...storage/v1.50.0) (2025-01-09)
+
+
+### Features
+
+* **storage/internal:** Add new appendable Object to BidiWrite API ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
+* **storage/internal:** Add new preview BidiReadObject API ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
+* **storage:** Add support for gRPC bi-directional multi-range reads. This API is in private preview and is not yet available for general use. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
+* **storage:** Add support for ReadHandle, a gRPC feature that allows for accelerated resumption of streams when one is interrupted. ReadHandle requires the bi-directional read API, which is in private preview and is not yet available for general use. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
+* **storage:** Support appendable semantics for writes in gRPC. This API is in preview. ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
+* **storage:** Refactor gRPC writer flow ([#11377](https://github.com/googleapis/google-cloud-go/issues/11377)) ([b4d86a5](https://github.com/googleapis/google-cloud-go/commit/b4d86a52bd319a602115cdb710a743c71494a88b))
+
+
+### Bug Fixes
+
+* **storage:** Add mutex around uses of mrd variables ([#11405](https://github.com/googleapis/google-cloud-go/issues/11405)) ([54bfc32](https://github.com/googleapis/google-cloud-go/commit/54bfc32db7a0ff40a493de4d466f21ad624de04e))
+* **storage:** Return the appropriate error for method not supported ([#11416](https://github.com/googleapis/google-cloud-go/issues/11416)) ([56d704e](https://github.com/googleapis/google-cloud-go/commit/56d704e3037840aeb87b22cc83f2b6088c79bcee))
+
+
+### Documentation
+
+* **storage/internal:** Add IAM information to RPC comments for reference documentation ([2e4feb9](https://github.com/googleapis/google-cloud-go/commit/2e4feb938ce9ab023c8aa6bd1dbdf36fe589213a))
+* **storage:** Add preview comment to NewMultiRangeDownloader ([#11420](https://github.com/googleapis/google-cloud-go/issues/11420)) ([4ec1d66](https://github.com/googleapis/google-cloud-go/commit/4ec1d66ee180e800606568e8693a282645ec7369))
+
+## [1.49.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.48.0...storage/v1.49.0) (2024-12-21)
+
+
+### Features
+
+* **storage/internal:** Add finalize_time field in Object metadata ([46fc993](https://github.com/googleapis/google-cloud-go/commit/46fc993a3195203a230e2831bee456baaa9f7b1c))
+* **storage/internal:** Add MoveObject RPC ([46fc993](https://github.com/googleapis/google-cloud-go/commit/46fc993a3195203a230e2831bee456baaa9f7b1c))
+* **storage:** Add ObjectHandle.Move method ([#11302](https://github.com/googleapis/google-cloud-go/issues/11302)) ([a3cb8c4](https://github.com/googleapis/google-cloud-go/commit/a3cb8c4fc48883b54d4e830ae5f5ef4f1a3b8ca3))
+* **storage:** Return file metadata on read ([#11212](https://github.com/googleapis/google-cloud-go/issues/11212)) ([d49263b](https://github.com/googleapis/google-cloud-go/commit/d49263b2ab614cad801e26b4a169eafe08d4a2a0))
+
+
+### Bug Fixes
+
+* **storage/dataflux:** Address deadlock when reading from ranges ([#11303](https://github.com/googleapis/google-cloud-go/issues/11303)) ([32cbf56](https://github.com/googleapis/google-cloud-go/commit/32cbf561590541eb0387787bf729be6ddf68e4ee))
+* **storage:** Disable allow non-default credentials flag ([#11337](https://github.com/googleapis/google-cloud-go/issues/11337)) ([145ddf4](https://github.com/googleapis/google-cloud-go/commit/145ddf4f6123d9561856d2b6adeefdfae462b3f7))
+* **storage:** Monitored resource detection ([#11197](https://github.com/googleapis/google-cloud-go/issues/11197)) ([911bcd8](https://github.com/googleapis/google-cloud-go/commit/911bcd8b1816256482bd52e85da7eaf00c315293))
+* **storage:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
+
+## [1.48.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.47.0...storage/v1.48.0) (2024-12-05)
+
+
+### Features
+
+* **storage/dataflux:** Run worksteal listing parallel to sequential listing ([#10966](https://github.com/googleapis/google-cloud-go/issues/10966)) ([3005f5a](https://github.com/googleapis/google-cloud-go/commit/3005f5a86c18254e569b8b1782bf014aa62f33cc))
+* **storage:** Add Writer.ChunkTransferTimeout ([#11111](https://github.com/googleapis/google-cloud-go/issues/11111)) ([fd1db20](https://github.com/googleapis/google-cloud-go/commit/fd1db203d0de898891b9920aacb141ea39228609))
+* **storage:** Allow non default service account ([#11137](https://github.com/googleapis/google-cloud-go/issues/11137)) ([19f01c3](https://github.com/googleapis/google-cloud-go/commit/19f01c3c48ed1272c8fc0af9e5f69646cb662808))
+
+
+### Bug Fixes
+
+* **storage:** Add backoff to gRPC write retries ([#11200](https://github.com/googleapis/google-cloud-go/issues/11200)) ([a7db927](https://github.com/googleapis/google-cloud-go/commit/a7db927da9cf4c6cf242a5db83e44a16d75a8291))
+* **storage:** Correct direct connectivity check ([#11152](https://github.com/googleapis/google-cloud-go/issues/11152)) ([a75c8b0](https://github.com/googleapis/google-cloud-go/commit/a75c8b0f72c38d9a85c908715c3e37eb5cffb131))
+* **storage:** Disable soft delete policy using 0 retentionDurationSeconds ([#11226](https://github.com/googleapis/google-cloud-go/issues/11226)) ([f087721](https://github.com/googleapis/google-cloud-go/commit/f087721b7b20ad28ded1d0a84756a8bbaa2bb95a))
+* **storage:** Retry SignBlob call for URL signing ([#11154](https://github.com/googleapis/google-cloud-go/issues/11154)) ([f198452](https://github.com/googleapis/google-cloud-go/commit/f198452fd2b29e779e9080ba79d7e873eb0c32ef))
+
+## [1.47.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.46.0...storage/v1.47.0) (2024-11-14)
+
+
+### Features
+
+* **storage:** Introduce dp detector based on grpc metrics ([#11100](https://github.com/googleapis/google-cloud-go/issues/11100)) ([60c2323](https://github.com/googleapis/google-cloud-go/commit/60c2323102b623e042fc508e2b1bb830a03f9577))
+
+
+### Bug Fixes
+
+* **storage:** Bump auth dep ([#11135](https://github.com/googleapis/google-cloud-go/issues/11135)) ([9620a51](https://github.com/googleapis/google-cloud-go/commit/9620a51b2c6904d8d93e124494bc297fb98553d2))
+
+## [1.46.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.45.0...storage/v1.46.0) (2024-10-31)
+
+### Features
+
+* **storage:** Add grpc metrics experimental options ([#10984](https://github.com/googleapis/google-cloud-go/issues/10984)) ([5b7397b](https://github.com/googleapis/google-cloud-go/commit/5b7397b169176f030049e1511859a883422c774e))
+
+
+### Bug Fixes
+
+* **storage:** Skip only specific transport tests. ([#11016](https://github.com/googleapis/google-cloud-go/issues/11016)) ([d40fbff](https://github.com/googleapis/google-cloud-go/commit/d40fbff9c1984aeed0224a4ac93eb95c5af17126))
+* **storage:** Update google.golang.org/api to v0.203.0 ([8bb87d5](https://github.com/googleapis/google-cloud-go/commit/8bb87d56af1cba736e0fe243979723e747e5e11e))
+* **storage:** WARNING: On approximately Dec 1, 2024, an update to Protobuf will change service registration function signatures to use an interface instead of a concrete type in generated .pb.go files. This change is expected to affect very few if any users of this client library. For more information, see https://togithub.com/googleapis/google-cloud-go/issues/11020. ([2b8ca4b](https://github.com/googleapis/google-cloud-go/commit/2b8ca4b4127ce3025c7a21cc7247510e07cc5625))
+
+
+### Miscellaneous Chores
+
+* **storage/internal:** Remove notification, service account, and hmac RPCS. These API have been migrated to Storage Control and are available via the JSON API. ([#11008](https://github.com/googleapis/google-cloud-go/issues/11008)) ([e0759f4](https://github.com/googleapis/google-cloud-go/commit/e0759f46639b4c542e5b49e4dc81340d8e123370))
+
+## [1.45.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.44.0...storage/v1.45.0) (2024-10-17)
+
+
+### Features
+
+* **storage/internal:** Adds support for restore token ([70d82fe](https://github.com/googleapis/google-cloud-go/commit/70d82fe93f60f1075298a077ce1616f9ae7e13fe))
+* **storage:** Adding bucket-specific dynamicDelay ([#10987](https://github.com/googleapis/google-cloud-go/issues/10987)) ([a807a7e](https://github.com/googleapis/google-cloud-go/commit/a807a7e7f9fb002374407622c126102c5e61af82))
+* **storage:** Dynamic read request stall timeout ([#10958](https://github.com/googleapis/google-cloud-go/issues/10958)) ([a09f00e](https://github.com/googleapis/google-cloud-go/commit/a09f00eeecac82af98ae769bab284ee58a3a66cb))
+
+
+### Documentation
+
+* **storage:** Remove preview wording from NewGRPCClient ([#11002](https://github.com/googleapis/google-cloud-go/issues/11002)) ([40c3a5b](https://github.com/googleapis/google-cloud-go/commit/40c3a5b9c4cd4db2f1695e180419197b6a03ed7f))
+
+## [1.44.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.43.0...storage/v1.44.0) (2024-10-03)
+
+
+### Features
+
+* **storage/dataflux:** Add dataflux interface ([#10748](https://github.com/googleapis/google-cloud-go/issues/10748)) ([cb7b0a1](https://github.com/googleapis/google-cloud-go/commit/cb7b0a1b285de9d4182155a123747419232dd35f))
+* **storage/dataflux:** Add range_splitter [#10748](https://github.com/googleapis/google-cloud-go/issues/10748) ([#10899](https://github.com/googleapis/google-cloud-go/issues/10899)) ([d49da26](https://github.com/googleapis/google-cloud-go/commit/d49da26be7dc52fad37c392c2876f62b1a5625a2))
+* **storage/dataflux:** Add worksteal algorithm to fast-listing ([#10913](https://github.com/googleapis/google-cloud-go/issues/10913)) ([015b52c](https://github.com/googleapis/google-cloud-go/commit/015b52c345df75408be3edcfda96d37145794f9f))
+* **storage/internal:** Add managed folder to testIamPermissions method ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+* **storage/transfermanager:** Add option to StripPrefix on directory download ([#10894](https://github.com/googleapis/google-cloud-go/issues/10894)) ([607534c](https://github.com/googleapis/google-cloud-go/commit/607534cdd5edf2d15d3de891cf6a0b6cbaa7d545))
+* **storage/transfermanager:** Add SkipIfExists option ([#10893](https://github.com/googleapis/google-cloud-go/issues/10893)) ([7daa1bd](https://github.com/googleapis/google-cloud-go/commit/7daa1bdc78844adac80f6378b1f6f2dd415b80a8))
+* **storage/transfermanager:** Checksum full object downloads ([#10569](https://github.com/googleapis/google-cloud-go/issues/10569)) ([c366c90](https://github.com/googleapis/google-cloud-go/commit/c366c908534ef09442f1f3e8a4f74bd545a474fb))
+* **storage:** Add direct google access side-effect imports by default ([#10757](https://github.com/googleapis/google-cloud-go/issues/10757)) ([9ad8324](https://github.com/googleapis/google-cloud-go/commit/9ad83248a7049c82580bc45d9685c329811bce88))
+* **storage:** Add full object checksum to reader.Attrs ([#10538](https://github.com/googleapis/google-cloud-go/issues/10538)) ([245d2ea](https://github.com/googleapis/google-cloud-go/commit/245d2eaddb4862da7c8d1892d5d462bf390adb2b))
+* **storage:** Add support for Go 1.23 iterators ([84461c0](https://github.com/googleapis/google-cloud-go/commit/84461c0ba464ec2f951987ba60030e37c8a8fc18))
+* **storage:** Add update time in bucketAttrs ([#10710](https://github.com/googleapis/google-cloud-go/issues/10710)) ([5f06ae1](https://github.com/googleapis/google-cloud-go/commit/5f06ae1a331c46ded47c96c205b3f1be92d64d29)), refs [#9361](https://github.com/googleapis/google-cloud-go/issues/9361)
+* **storage:** GA gRPC client ([#10859](https://github.com/googleapis/google-cloud-go/issues/10859)) ([c7a55a2](https://github.com/googleapis/google-cloud-go/commit/c7a55a26c645905317fe27505d503c338f50ee34))
+* **storage:** Introduce gRPC client-side metrics ([#10639](https://github.com/googleapis/google-cloud-go/issues/10639)) ([437bcb1](https://github.com/googleapis/google-cloud-go/commit/437bcb1e0b514959648eed36ba3963aa4fbeffc8))
+* **storage:** Support IncludeFoldersAsPrefixes for gRPC ([#10767](https://github.com/googleapis/google-cloud-go/issues/10767)) ([65bcc59](https://github.com/googleapis/google-cloud-go/commit/65bcc59a6c0753f8fbd66c8792bc69300e95ec62))
+
+
+### Bug Fixes
+
+* **storage/transfermanager:** Correct Attrs.StartOffset for sharded downloads ([#10512](https://github.com/googleapis/google-cloud-go/issues/10512)) ([01a5cbb](https://github.com/googleapis/google-cloud-go/commit/01a5cbba6d9d9f425f045b58fa16d8c85804c29c))
+* **storage:** Add retryalways policy to encryption test ([#10644](https://github.com/googleapis/google-cloud-go/issues/10644)) ([59cfd12](https://github.com/googleapis/google-cloud-go/commit/59cfd12ce5650279c99787da4a273db1e3253c76)), refs [#10567](https://github.com/googleapis/google-cloud-go/issues/10567)
+* **storage:** Add unknown host to retriable errors ([#10619](https://github.com/googleapis/google-cloud-go/issues/10619)) ([4ec0452](https://github.com/googleapis/google-cloud-go/commit/4ec0452a393341b1036ac6e1e7287843f097d978))
+* **storage:** Bump dependencies ([2ddeb15](https://github.com/googleapis/google-cloud-go/commit/2ddeb1544a53188a7592046b98913982f1b0cf04))
+* **storage:** Bump google.golang.org/grpc@v1.64.1 ([8ecc4e9](https://github.com/googleapis/google-cloud-go/commit/8ecc4e9622e5bbe9b90384d5848ab816027226c5))
+* **storage:** Check for grpc NotFound error in HMAC test ([#10645](https://github.com/googleapis/google-cloud-go/issues/10645)) ([3c8e88a](https://github.com/googleapis/google-cloud-go/commit/3c8e88a085bab3142dfff6ef9a8e49c29a5c877d))
+* **storage:** Disable grpc metrics using emulator ([#10870](https://github.com/googleapis/google-cloud-go/issues/10870)) ([35ad73d](https://github.com/googleapis/google-cloud-go/commit/35ad73d3be5485ac592e2ef1ea6c0854f1eff4a0))
+* **storage:** Retry gRPC DEADLINE_EXCEEDED errors ([#10635](https://github.com/googleapis/google-cloud-go/issues/10635)) ([0018415](https://github.com/googleapis/google-cloud-go/commit/0018415295a5fd964b923db6a4785e9eed46a2e2))
+* **storage:** Update dependencies ([257c40b](https://github.com/googleapis/google-cloud-go/commit/257c40bd6d7e59730017cf32bda8823d7a232758))
+* **storage:** Update google.golang.org/api to v0.191.0 ([5b32644](https://github.com/googleapis/google-cloud-go/commit/5b32644eb82eb6bd6021f80b4fad471c60fb9d73))
+
+
+### Performance Improvements
+
+* **storage:** GRPC zerocopy codec ([#10888](https://github.com/googleapis/google-cloud-go/issues/10888)) ([aeba28f](https://github.com/googleapis/google-cloud-go/commit/aeba28ffffcd82ac5540e45247112bdacc5c530d))
+
+
+### Documentation
+
+* **storage/internal:** Clarify possible objectAccessControl roles ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+* **storage/internal:** Update dual-region bucket link ([2f0aec8](https://github.com/googleapis/google-cloud-go/commit/2f0aec894179304d234be6c792d82cf4336b6d0a))
+
+## [1.43.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.42.0...storage/v1.43.0) (2024-07-03)
+
+
+### Features
+
+* **storage/transfermanager:** Add DownloadDirectory ([#10430](https://github.com/googleapis/google-cloud-go/issues/10430)) ([0d0e5dd](https://github.com/googleapis/google-cloud-go/commit/0d0e5dd5214769cc2c197991c2ece1303bd600de))
+* **storage/transfermanager:** Automatically shard downloads ([#10379](https://github.com/googleapis/google-cloud-go/issues/10379)) ([05816f9](https://github.com/googleapis/google-cloud-go/commit/05816f9fafd3132c371da37f3a879bb9e8e7e604))
+
+
+### Bug Fixes
+
+* **storage/transfermanager:** WaitAndClose waits for Callbacks to finish ([#10504](https://github.com/googleapis/google-cloud-go/issues/10504)) ([0e81002](https://github.com/googleapis/google-cloud-go/commit/0e81002b3a5e560c874d814d28a35a102311d9ef)), refs [#10502](https://github.com/googleapis/google-cloud-go/issues/10502)
+* **storage:** Allow empty soft delete on Create ([#10394](https://github.com/googleapis/google-cloud-go/issues/10394)) ([d8bd2c1](https://github.com/googleapis/google-cloud-go/commit/d8bd2c1ffc4f27503a74ded438d8bfbdd7707c63)), refs [#10380](https://github.com/googleapis/google-cloud-go/issues/10380)
+* **storage:** Bump google.golang.org/api@v0.187.0 ([8fa9e39](https://github.com/googleapis/google-cloud-go/commit/8fa9e398e512fd8533fd49060371e61b5725a85b))
+* **storage:** Retry broken pipe error ([#10374](https://github.com/googleapis/google-cloud-go/issues/10374)) ([2f4daa1](https://github.com/googleapis/google-cloud-go/commit/2f4daa11acf9d3f260fa888333090359c4d9198e)), refs [#9178](https://github.com/googleapis/google-cloud-go/issues/9178)
+
+
+### Documentation
+
+* **storage/control:** Remove allowlist note from Folders RPCs ([d6c543c](https://github.com/googleapis/google-cloud-go/commit/d6c543c3969016c63e158a862fc173dff60fb8d9))
+
+## [1.42.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.41.0...storage/v1.42.0) (2024-06-10)
+
+
+### Features
+
+* **storage:** Add new package transfermanager. This package is intended for parallel uploads and downloads, and is in preview. It is not stable, and is likely to change. ([#10045](https://github.com/googleapis/google-cloud-go/issues/10045)) ([cde5cbb](https://github.com/googleapis/google-cloud-go/commit/cde5cbba3145d5a702683656a42158621234fe71))
+* **storage:** Add bucket HierarchicalNamespace ([#10315](https://github.com/googleapis/google-cloud-go/issues/10315)) ([b92406c](https://github.com/googleapis/google-cloud-go/commit/b92406ccfadfdcee379e86d6f78c901d772401a9)), refs [#10146](https://github.com/googleapis/google-cloud-go/issues/10146)
+* **storage:** Add BucketName to BucketHandle ([#10127](https://github.com/googleapis/google-cloud-go/issues/10127)) ([203cc59](https://github.com/googleapis/google-cloud-go/commit/203cc599e5e2f2f821dc75b47c5a4c9073333f05))
+
+
+### Bug Fixes
+
+* **storage:** Set invocation headers on xml reads ([#10250](https://github.com/googleapis/google-cloud-go/issues/10250)) ([c87e1ab](https://github.com/googleapis/google-cloud-go/commit/c87e1ab6f9618b8b3f4d0005ac159abd87b0daaf))
+
+
+### Documentation
+
+* **storage:** Update autoclass doc ([#10135](https://github.com/googleapis/google-cloud-go/issues/10135)) ([e4b2737](https://github.com/googleapis/google-cloud-go/commit/e4b2737ddc16d3bf8139a6def7326ac905f62acd))
+
## [1.41.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.40.0...storage/v1.41.0) (2024-05-13)
diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go
index 74799e55e..560a5605d 100644
--- a/vendor/cloud.google.com/go/storage/acl.go
+++ b/vendor/cloud.google.com/go/storage/acl.go
@@ -16,8 +16,6 @@ package storage
import (
"context"
- "net/http"
- "reflect"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
@@ -162,15 +160,6 @@ func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
return a.c.tc.DeleteObjectACL(ctx, a.bucket, a.object, entity, opts...)
}
-func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
- vc := reflect.ValueOf(call)
- vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
- if a.userProject != "" {
- vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(a.userProject)})
- }
- setClientHeader(call.Header())
-}
-
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index d2da86e91..43161f783 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -116,6 +116,11 @@ func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
return &b.defaultObjectACL
}
+// BucketName returns the name of the bucket.
+func (b *BucketHandle) BucketName() string {
+ return b.name
+}
+
// Object returns an ObjectHandle, which provides operations on the named object.
// This call does not perform any network operations such as fetching the object or verifying its existence.
// Use methods on ObjectHandle to perform network operations.
@@ -321,11 +326,14 @@ func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte,
if err != nil {
return nil, fmt.Errorf("unable to create iamcredentials client: %w", err)
}
-
- resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
- Payload: base64.StdEncoding.EncodeToString(in),
- }).Do()
- if err != nil {
+ // Do the SignBlob call with a retry for transient errors.
+ var resp *iamcredentials.SignBlobResponse
+ if err := run(ctx, func(ctx context.Context) error {
+ resp, err = svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
+ Payload: base64.StdEncoding.EncodeToString(in),
+ }).Do()
+ return err
+ }, b.retry, true); err != nil {
return nil, fmt.Errorf("unable to sign bytes: %w", err)
}
out, err := base64.StdEncoding.DecodeString(resp.SignedBlob)
@@ -411,6 +419,10 @@ type BucketAttrs struct {
// This field is read-only.
Created time.Time
+ // Updated is the time at which the bucket was last modified.
+ // This field is read-only.
+ Updated time.Time
+
// VersioningEnabled reports whether this bucket has versioning enabled.
VersioningEnabled bool
@@ -486,6 +498,13 @@ type BucketAttrs struct {
// 7 day retention duration. In order to fully disable soft delete, you need
// to set a policy with a RetentionDuration of 0.
SoftDeletePolicy *SoftDeletePolicy
+
+ // HierarchicalNamespace contains the bucket's hierarchical namespace
+ // configuration. Hierarchical namespace enabled buckets can contain
+ // [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources.
+ // It cannot be modified after bucket creation time.
+ // UniformBucketLevelAccess must also be enabled on the bucket.
+ HierarchicalNamespace *HierarchicalNamespace
}
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -767,6 +786,7 @@ type Autoclass struct {
// TerminalStorageClass: The storage class that objects in the bucket
// eventually transition to if they are not read for a certain length of
// time. Valid values are NEARLINE and ARCHIVE.
+ // To modify TerminalStorageClass, Enabled must be set to true.
TerminalStorageClass string
// TerminalStorageClassUpdateTime represents the time of the most recent
// update to "TerminalStorageClass".
@@ -786,6 +806,15 @@ type SoftDeletePolicy struct {
RetentionDuration time.Duration
}
+// HierarchicalNamespace contains the bucket's hierarchical namespace
+// configuration. Hierarchical namespace enabled buckets can contain
+// [cloud.google.com/go/storage/control/apiv2/controlpb.Folder] resources.
+type HierarchicalNamespace struct {
+ // Enabled indicates whether hierarchical namespace features are enabled on
+ // the bucket. This can only be set at bucket creation time currently.
+ Enabled bool
+}
+
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil, nil
@@ -802,6 +831,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
DefaultEventBasedHold: b.DefaultEventBasedHold,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
+ Updated: convertTime(b.Updated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
ACL: toBucketACLRules(b.Acl),
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
@@ -824,6 +854,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
CustomPlacementConfig: customPlacementFromRaw(b.CustomPlacementConfig),
Autoclass: toAutoclassFromRaw(b.Autoclass),
SoftDeletePolicy: toSoftDeletePolicyFromRaw(b.SoftDeletePolicy),
+ HierarchicalNamespace: toHierarchicalNamespaceFromRaw(b.HierarchicalNamespace),
}, nil
}
@@ -838,6 +869,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
DefaultEventBasedHold: b.GetDefaultEventBasedHold(),
StorageClass: b.GetStorageClass(),
Created: b.GetCreateTime().AsTime(),
+ Updated: b.GetUpdateTime().AsTime(),
VersioningEnabled: b.GetVersioning().GetEnabled(),
ACL: toBucketACLRulesFromProto(b.GetAcl()),
DefaultObjectACL: toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
@@ -858,6 +890,7 @@ func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
ProjectNumber: parseProjectNumber(b.GetProject()), // this can return 0 the project resource name is ID based
Autoclass: toAutoclassFromProto(b.GetAutoclass()),
SoftDeletePolicy: toSoftDeletePolicyFromProto(b.SoftDeletePolicy),
+ HierarchicalNamespace: toHierarchicalNamespaceFromProto(b.HierarchicalNamespace),
}
}
@@ -914,6 +947,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
Autoclass: b.Autoclass.toRawAutoclass(),
SoftDeletePolicy: b.SoftDeletePolicy.toRawSoftDeletePolicy(),
+ HierarchicalNamespace: b.HierarchicalNamespace.toRawHierarchicalNamespace(),
}
}
@@ -975,6 +1009,7 @@ func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
Autoclass: b.Autoclass.toProtoAutoclass(),
SoftDeletePolicy: b.SoftDeletePolicy.toProtoSoftDeletePolicy(),
+ HierarchicalNamespace: b.HierarchicalNamespace.toProtoHierarchicalNamespace(),
}
}
@@ -1174,6 +1209,9 @@ type BucketAttrsToUpdate struct {
RPO RPO
// If set, updates the autoclass configuration of the bucket.
+ // To disable autoclass on the bucket, set to an empty &Autoclass{}.
+ // To update the configuration for Autoclass.TerminalStorageClass,
+ // Autoclass.Enabled must also be set to true.
// See https://cloud.google.com/storage/docs/using-autoclass for more information.
Autoclass *Autoclass
@@ -1303,8 +1341,10 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
}
if ua.SoftDeletePolicy != nil {
if ua.SoftDeletePolicy.RetentionDuration == 0 {
- rb.NullFields = append(rb.NullFields, "SoftDeletePolicy")
- rb.SoftDeletePolicy = nil
+ rb.SoftDeletePolicy = &raw.BucketSoftDeletePolicy{
+ RetentionDurationSeconds: 0,
+ ForceSendFields: []string{"RetentionDurationSeconds"},
+ }
} else {
rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy()
}
@@ -2094,8 +2134,11 @@ func (p *SoftDeletePolicy) toRawSoftDeletePolicy() *raw.BucketSoftDeletePolicy {
return nil
}
// Excluding read only field EffectiveTime.
+ // ForceSendFields must be set to send a zero value for RetentionDuration and disable
+ // soft delete.
return &raw.BucketSoftDeletePolicy{
RetentionDurationSeconds: int64(p.RetentionDuration.Seconds()),
+ ForceSendFields: []string{"RetentionDurationSeconds"},
}
}
@@ -2136,6 +2179,42 @@ func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDele
}
}
+func (hns *HierarchicalNamespace) toProtoHierarchicalNamespace() *storagepb.Bucket_HierarchicalNamespace {
+ if hns == nil {
+ return nil
+ }
+ return &storagepb.Bucket_HierarchicalNamespace{
+ Enabled: hns.Enabled,
+ }
+}
+
+func (hns *HierarchicalNamespace) toRawHierarchicalNamespace() *raw.BucketHierarchicalNamespace {
+ if hns == nil {
+ return nil
+ }
+ return &raw.BucketHierarchicalNamespace{
+ Enabled: hns.Enabled,
+ }
+}
+
+func toHierarchicalNamespaceFromProto(p *storagepb.Bucket_HierarchicalNamespace) *HierarchicalNamespace {
+ if p == nil {
+ return nil
+ }
+ return &HierarchicalNamespace{
+ Enabled: p.Enabled,
+ }
+}
+
+func toHierarchicalNamespaceFromRaw(r *raw.BucketHierarchicalNamespace) *HierarchicalNamespace {
+ if r == nil {
+ return nil
+ }
+ return &HierarchicalNamespace{
+ Enabled: r.Enabled,
+ }
+}
+
// Objects returns an iterator over the objects in the bucket that match the
// Query q. If q is nil, no filtering is done. Objects will be iterated over
// lexicographically by name.
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go
index bbe89276a..1ea1d98ce 100644
--- a/vendor/cloud.google.com/go/storage/client.go
+++ b/vendor/cloud.google.com/go/storage/client.go
@@ -62,6 +62,7 @@ type storageClient interface {
GetObject(ctx context.Context, params *getObjectParams, opts ...storageOption) (*ObjectAttrs, error)
UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)
RestoreObject(ctx context.Context, params *restoreObjectParams, opts ...storageOption) (*ObjectAttrs, error)
+ MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error)
// Default Object ACL methods.
@@ -107,6 +108,8 @@ type storageClient interface {
ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (map[string]*Notification, error)
CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (*Notification, error)
DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) error
+
+ NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (*MultiRangeDownloader, error)
}
// settings contains transport-agnostic configuration for API calls made via
@@ -122,7 +125,7 @@ type settings struct {
gax []gax.CallOption
// idempotent indicates if the call is idempotent or not when considering
- // if the call should be retired or not.
+ // if the call should be retried or not.
idempotent bool
// clientOption is a set of option.ClientOption to be used during client
@@ -132,6 +135,8 @@ type settings struct {
// userProject is the user project that should be billed for the request.
userProject string
+
+ metricsContext *metricsContext
}
func initSettings(opts ...storageOption) *settings {
@@ -235,7 +240,8 @@ type openWriterParams struct {
chunkSize int
// chunkRetryDeadline - see `Writer.ChunkRetryDeadline`.
// Optional.
- chunkRetryDeadline time.Duration
+ chunkRetryDeadline time.Duration
+ chunkTransferTimeout time.Duration
// Object/request properties
@@ -257,6 +263,9 @@ type openWriterParams struct {
// sendCRC32C - see `Writer.SendCRC32C`.
// Optional.
sendCRC32C bool
+ // append - Write with appendable object semantics.
+ // Optional.
+ append bool
// Writer callbacks
@@ -274,6 +283,15 @@ type openWriterParams struct {
setObj func(*ObjectAttrs)
}
+type newMultiRangeDownloaderParams struct {
+ bucket string
+ conds *Conditions
+ encryptionKey []byte
+ gen int64
+ object string
+ handle *ReadHandle
+}
+
type newRangeReaderParams struct {
bucket string
conds *Conditions
@@ -283,6 +301,7 @@ type newRangeReaderParams struct {
object string
offset int64
readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
+ handle *ReadHandle
}
type getObjectParams struct {
@@ -310,6 +329,13 @@ type restoreObjectParams struct {
copySourceACL bool
}
+type moveObjectParams struct {
+ bucket, srcObject, dstObject string
+ srcConds *Conditions
+ dstConds *Conditions
+ encryptionKey []byte
+}
+
type composeObjectRequest struct {
dstBucket string
dstObject destinationObject
diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go
index c274c762e..4fcfb7326 100644
--- a/vendor/cloud.google.com/go/storage/doc.go
+++ b/vendor/cloud.google.com/go/storage/doc.go
@@ -331,14 +331,14 @@ to add a [custom audit logging] header:
// Use client as usual with the context and the additional headers will be sent.
client.Bucket("my-bucket").Attrs(ctx)
-# Experimental gRPC API
+# gRPC API
-This package includes support for the Cloud Storage gRPC API, which is currently
-in preview. This implementation uses gRPC rather than the current JSON & XML
-APIs to make requests to Cloud Storage. Kindly contact the Google Cloud Storage gRPC
-team at gcs-grpc-contact@google.com with a list of GCS buckets you would like to
-allowlist to access this API. The Go Storage gRPC library is not yet generally
-available, so it may be subject to breaking changes.
+This package includes support for the Cloud Storage gRPC API. The
+implementation uses gRPC rather than the default
+JSON & XML APIs to make requests to Cloud Storage.
+The Go Storage gRPC client is generally available.
+The Notifications, Service Account HMAC
+and GetServiceAccount RPCs are not supported through the gRPC client.
To create a client which will use gRPC, use the alternate constructor:
@@ -349,15 +349,43 @@ To create a client which will use gRPC, use the alternate constructor:
}
// Use client as usual.
-If the application is running within GCP, users may get better performance by
-enabling Direct Google Access (enabling requests to skip some proxy steps). To enable,
-set the environment variable `GOOGLE_CLOUD_ENABLE_DIRECT_PATH_XDS=true` and add
-the following side-effect imports to your application:
+Using the gRPC API inside GCP with a bucket in the same region can allow for
+[Direct Connectivity] (enabling requests to skip some proxy steps and reducing
+response latency). A warning is emitted if gRPC is not used within GCP to
+warn that Direct Connectivity could not be initialized. Direct Connectivity
+is not required to access the gRPC API.
- import (
- _ "google.golang.org/grpc/balancer/rls"
- _ "google.golang.org/grpc/xds/googledirectpath"
- )
+Dependencies for the gRPC API may slightly increase the size of binaries for
+applications depending on this package. If you are not using gRPC, you can use
+the build tag `disable_grpc_modules` to opt out of these dependencies and
+reduce the binary size.
+
+The gRPC client emits metrics by default and will export the
+gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
+[Google Cloud Monitoring]. The metrics are accessible through Cloud Monitoring
+API and you incur no additional cost for publishing the metrics. Google Cloud
+Support can use this information to more quickly diagnose problems related to
+GCS and gRPC.
+Sending this data does not incur any billing charges, and requires minimal
+CPU (a single RPC every minute) or memory (a few KiB to batch the
+telemetry).
+
+To access the metrics you can view them through Cloud Monitoring
+[metric explorer] with the prefix `storage.googleapis.com/client`. Metrics are emitted
+every minute.
+
+You can disable metrics using the following example when creating a new gRPC
+client using [WithDisabledClientMetrics].
+
+The metrics exporter uses Cloud Monitoring API which determines
+project ID and credentials by doing the following:
+
+* Project ID is determined using OTel Resource Detector for the environment
+otherwise it falls back to the project provided by [google.FindCredentials].
+
+* Credentials are determined using [Application Default Credentials]. The
+principal must have `roles/monitoring.metricWriter` role granted. If not, a
+warning will be logged. Subsequent warnings are silenced to prevent noisy logs.
# Storage Control API
@@ -366,6 +394,11 @@ and Managed Folder operations) are supported via the autogenerated Storage Contr
client, which is available as a subpackage in this module. See package docs at
[cloud.google.com/go/storage/control/apiv2] or reference the [Storage Control API] docs.
+[Application Default Credentials]: https://cloud.google.com/docs/authentication/application-default-credentials
+[google.FindCredentials]: https://pkg.go.dev/golang.org/x/oauth2/google#FindDefaultCredentials
+[gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
+[gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
+[Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
[Cloud Storage IAM docs]: https://cloud.google.com/storage/docs/access-control/iam
[XML POST Object docs]: https://cloud.google.com/storage/docs/xml-api/post-object
[Cloud Storage retry docs]: https://cloud.google.com/storage/docs/retry-strategy
@@ -375,5 +408,7 @@ client, which is available as a subpackage in this module. See package docs at
[IAM Service Account Credentials API]: https://console.developers.google.com/apis/api/iamcredentials.googleapis.com/overview
[custom audit logging]: https://cloud.google.com/storage/docs/audit-logging#add-custom-metadata
[Storage Control API]: https://cloud.google.com/storage/docs/reference/rpc/google.storage.control.v2
+[metric explorer]: https://console.cloud.google.com/projectselector/monitoring/metrics-explorer
+[Direct Connectivity]: https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity#direct-connectivity
*/
package storage // import "cloud.google.com/go/storage"
diff --git a/vendor/cloud.google.com/go/storage/dynamic_delay.go b/vendor/cloud.google.com/go/storage/dynamic_delay.go
new file mode 100644
index 000000000..5944f515d
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/dynamic_delay.go
@@ -0,0 +1,237 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "time"
+)
+
+// dynamicDelay dynamically calculates the delay at a fixed percentile, based on
+// delay samples.
+//
+// dynamicDelay is goroutine-safe.
+type dynamicDelay struct {
+ increaseFactor float64
+ decreaseFactor float64
+ minDelay time.Duration
+ maxDelay time.Duration
+ value time.Duration
+
+ // Guards the value
+ mu *sync.RWMutex
+}
+
+// validateDynamicDelayParams ensures,
+// targetPercentile is a valid fraction (between 0 and 1).
+// increaseRate is a positive number.
+// minDelay is less than maxDelay.
+func validateDynamicDelayParams(targetPercentile, increaseRate float64, minDelay, maxDelay time.Duration) error {
+ if targetPercentile < 0 || targetPercentile > 1 {
+ return fmt.Errorf("invalid targetPercentile (%v): must be within [0, 1]", targetPercentile)
+ }
+ if increaseRate <= 0 {
+ return fmt.Errorf("invalid increaseRate (%v): must be > 0", increaseRate)
+ }
+ if minDelay >= maxDelay {
+ return fmt.Errorf("invalid minDelay (%v) and maxDelay (%v) combination: minDelay must be smaller than maxDelay", minDelay, maxDelay)
+ }
+ return nil
+}
+
+// NewDynamicDelay returns a dynamicDelay.
+//
+// targetPercentile is the desired percentile to be computed. For example, a
+// targetPercentile of 0.99 computes the delay at the 99th percentile. Must be
+// in the range [0, 1].
+//
+// increaseRate (must be > 0) determines how many increase calls it takes for
+// Value to double.
+//
+// initialDelay is the start value of the delay.
+//
+// decrease can never lower the delay past minDelay, increase can never raise
+// the delay past maxDelay.
+func newDynamicDelay(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) *dynamicDelay {
+ if initialDelay < minDelay {
+ initialDelay = minDelay
+ }
+ if initialDelay > maxDelay {
+ initialDelay = maxDelay
+ }
+
+ // Compute increaseFactor and decreaseFactor such that:
+ // (increaseFactor ^ (1 - targetPercentile)) * (decreaseFactor ^ targetPercentile) = 1
+ increaseFactor := math.Exp(math.Log(2) / increaseRate)
+ if increaseFactor < 1.001 {
+ increaseFactor = 1.001
+ }
+ decreaseFactor := math.Exp(-math.Log(increaseFactor) * (1 - targetPercentile) / targetPercentile)
+ if decreaseFactor > 0.9999 {
+ decreaseFactor = 0.9999
+ }
+
+ return &dynamicDelay{
+ increaseFactor: increaseFactor,
+ decreaseFactor: decreaseFactor,
+ minDelay: minDelay,
+ maxDelay: maxDelay,
+ value: initialDelay,
+ mu: &sync.RWMutex{},
+ }
+}
+
+func (d *dynamicDelay) unsafeIncrease() {
+ v := time.Duration(float64(d.value) * d.increaseFactor)
+ if v > d.maxDelay {
+ d.value = d.maxDelay
+ } else {
+ d.value = v
+ }
+}
+
+// increase notes that the operation took longer than the delay returned by Value.
+func (d *dynamicDelay) increase() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.unsafeIncrease()
+}
+
+func (d *dynamicDelay) unsafeDecrease() {
+ v := time.Duration(float64(d.value) * d.decreaseFactor)
+ if v < d.minDelay {
+ d.value = d.minDelay
+ } else {
+ d.value = v
+ }
+}
+
+// decrease notes that the operation completed before the delay returned by getValue.
+func (d *dynamicDelay) decrease() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.unsafeDecrease()
+}
+
+// update updates the delay value depending on the specified latency.
+func (d *dynamicDelay) update(latency time.Duration) {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ if latency > d.value {
+ d.unsafeIncrease()
+ } else {
+ d.unsafeDecrease()
+ }
+}
+
+// getValue returns the desired delay to wait before retry the operation.
+func (d *dynamicDelay) getValue() time.Duration {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ return d.value
+}
+
+// printDelay prints the state of delay, helpful in debugging.
+func (d *dynamicDelay) printDelay() {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ fmt.Println("IncreaseFactor: ", d.increaseFactor)
+ fmt.Println("DecreaseFactor: ", d.decreaseFactor)
+ fmt.Println("MinDelay: ", d.minDelay)
+ fmt.Println("MaxDelay: ", d.maxDelay)
+ fmt.Println("Value: ", d.value)
+}
+
+// bucketDelayManager wraps dynamicDelay to provide bucket-specific delays.
+type bucketDelayManager struct {
+ targetPercentile float64
+ increaseRate float64
+ initialDelay time.Duration
+ minDelay time.Duration
+ maxDelay time.Duration
+
+ // delays maps bucket names to their dynamic delay instance.
+ delays map[string]*dynamicDelay
+
+ // mu guards delays.
+ mu *sync.RWMutex
+}
+
+// newBucketDelayManager returns a new bucketDelayManager instance.
+func newBucketDelayManager(targetPercentile float64, increaseRate float64, initialDelay, minDelay, maxDelay time.Duration) (*bucketDelayManager, error) {
+ err := validateDynamicDelayParams(targetPercentile, increaseRate, minDelay, maxDelay)
+ if err != nil {
+ return nil, err
+ }
+
+ return &bucketDelayManager{
+ targetPercentile: targetPercentile,
+ increaseRate: increaseRate,
+ initialDelay: initialDelay,
+ minDelay: minDelay,
+ maxDelay: maxDelay,
+ delays: make(map[string]*dynamicDelay),
+ mu: &sync.RWMutex{},
+ }, nil
+}
+
+// getDelay retrieves the dynamicDelay instance for the given bucket name. If no delay
+// exists for the bucket, a new one is created with the configured parameters.
+func (b *bucketDelayManager) getDelay(bucketName string) *dynamicDelay {
+ b.mu.RLock()
+ delay, ok := b.delays[bucketName]
+ b.mu.RUnlock()
+
+ if !ok {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // Check again, as someone might create b/w the execution of mu.RUnlock() and mu.Lock().
+ delay, ok = b.delays[bucketName]
+ if !ok {
+ // Create a new dynamicDelay for the bucket if it doesn't exist
+ delay = newDynamicDelay(b.targetPercentile, b.increaseRate, b.initialDelay, b.minDelay, b.maxDelay)
+ b.delays[bucketName] = delay
+ }
+ }
+ return delay
+}
+
+// increase notes that the operation took longer than the delay for the given bucket.
+func (b *bucketDelayManager) increase(bucketName string) {
+ b.getDelay(bucketName).increase()
+}
+
+// decrease notes that the operation completed before the delay for the given bucket.
+func (b *bucketDelayManager) decrease(bucketName string) {
+ b.getDelay(bucketName).decrease()
+}
+
+// update updates the delay value for the bucket depending on the specified latency.
+func (b *bucketDelayManager) update(bucketName string, latency time.Duration) {
+ b.getDelay(bucketName).update(latency)
+}
+
+// getValue returns the desired delay to wait before retrying the operation for the given bucket.
+func (b *bucketDelayManager) getValue(bucketName string) time.Duration {
+ return b.getDelay(bucketName).getValue()
+}
diff --git a/vendor/cloud.google.com/go/storage/emulator_test.sh b/vendor/cloud.google.com/go/storage/emulator_test.sh
index 7bad7cf39..258201ec9 100644
--- a/vendor/cloud.google.com/go/storage/emulator_test.sh
+++ b/vendor/cloud.google.com/go/storage/emulator_test.sh
@@ -89,4 +89,4 @@ then
fi
# Run tests
-go test -v -timeout 10m ./ -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log
+go test -v -timeout 15m ./ ./dataflux -run="^Test(RetryConformance|.*Emulated)$" -short 2>&1 | tee -a sponge_log.log
diff --git a/vendor/cloud.google.com/go/storage/experimental/experimental.go b/vendor/cloud.google.com/go/storage/experimental/experimental.go
new file mode 100644
index 000000000..5bcc59ad2
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/experimental/experimental.go
@@ -0,0 +1,87 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package experimental is a collection of experimental features that might
+// have some rough edges to them. Housing experimental features in this package
+// results in a user accessing these APIs as `experimental.Foo`, thereby making
+// it explicit that the feature is experimental and using them in production
+// code is at their own risk.
+//
+// All APIs in this package are experimental.
+package experimental
+
+import (
+ "time"
+
+ "cloud.google.com/go/storage/internal"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "google.golang.org/api/option"
+)
+
+// WithMetricInterval provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient].
+// It sets how often to emit metrics [metric.WithInterval] when using
+// [metric.NewPeriodicReader]
+// When using Cloud Monitoring interval must be at minimum 1 [time.Minute].
+func WithMetricInterval(metricInterval time.Duration) option.ClientOption {
+ return internal.WithMetricInterval.(func(time.Duration) option.ClientOption)(metricInterval)
+}
+
+// WithMetricExporter provides a [option.ClientOption] that may be passed to [storage.NewGRPCClient].
+// Set an alternate client-side metric Exporter to emit metrics through.
+// Must implement [metric.Exporter]
+func WithMetricExporter(ex *metric.Exporter) option.ClientOption {
+ return internal.WithMetricExporter.(func(*metric.Exporter) option.ClientOption)(ex)
+}
+
+// WithReadStallTimeout provides a [option.ClientOption] that may be passed to [storage.NewClient].
+// It enables the client to retry stalled requests when starting a download from
+// Cloud Storage. If the timeout elapses with no response from the server, the request
+// is automatically retried.
+// The timeout is initially set to ReadStallTimeoutConfig.Min. The client tracks
+// latency across all read requests from the client for each bucket accessed, and can
+// adjust the timeout higher to the target percentile when latency for request to that
+// bucket is high.
+// Currently, this is supported only for downloads ([storage.NewReader] and
+// [storage.NewRangeReader] calls) and only for the XML API. Other read APIs (gRPC & JSON)
+// will be supported soon.
+func WithReadStallTimeout(rstc *ReadStallTimeoutConfig) option.ClientOption {
+ return internal.WithReadStallTimeout.(func(config *ReadStallTimeoutConfig) option.ClientOption)(rstc)
+}
+
+// ReadStallTimeoutConfig defines the timeout which is adjusted dynamically based on
+// past observed latencies.
+type ReadStallTimeoutConfig struct {
+ // Min is the minimum duration of the timeout. The default value is 500ms. Requests
+ // taking shorter than this value to return response headers will never time out.
+ // In general, you should choose a Min value that is greater than the typical value
+ // for the target percentile.
+ Min time.Duration
+
+ // TargetPercentile is the percentile to target for the dynamic timeout. The default
+ // value is 0.99. At the default percentile, at most 1% of requests will be timed out
+ // and retried.
+ TargetPercentile float64
+}
+
+// WithGRPCBidiReads provides an [option.ClientOption] that may be passed to
+// [cloud.google.com/go/storage.NewGRPCClient].
+// It enables the client to use bi-directional gRPC APIs for downloads rather than the
+// server streaming API. In particular, it allows users to use the [storage.MultiRangeDownloader]
+// surface, which requires bi-directional streaming.
+//
+// The bi-directional API is in private preview; please contact your account manager if
+// interested.
+func WithGRPCBidiReads() option.ClientOption {
+ return internal.WithGRPCBidiReads.(func() option.ClientOption)()
+}
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index d81a17b6b..2d243bf9f 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -16,13 +16,15 @@ package storage
import (
"context"
- "encoding/base64"
+ "encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
+ "log"
"net/url"
"os"
+ "sync"
"cloud.google.com/go/iam/apiv1/iampb"
"cloud.google.com/go/internal/trace"
@@ -33,9 +35,11 @@ import (
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
+ "google.golang.org/api/transport"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/encoding/protowire"
@@ -95,10 +99,13 @@ func defaultGRPCOptions() []option.ClientOption {
option.WithEndpoint(host),
option.WithGRPCDialOption(grpc.WithInsecure()),
option.WithoutAuthentication(),
+ WithDisabledClientMetrics(),
)
} else {
// Only enable DirectPath when the emulator is not being targeted.
- defaults = append(defaults, internaloption.EnableDirectPath(true))
+ defaults = append(defaults,
+ internaloption.EnableDirectPath(true),
+ internaloption.EnableDirectPathXds())
}
return defaults
@@ -109,6 +116,25 @@ func defaultGRPCOptions() []option.ClientOption {
type grpcStorageClient struct {
raw *gapic.Client
settings *settings
+ config *storageConfig
+}
+
+func enableClientMetrics(ctx context.Context, s *settings, config storageConfig) (*metricsContext, error) {
+ var project string
+ // TODO: use new auth client
+ c, err := transport.Creds(ctx, s.clientOption...)
+ if err == nil {
+ project = c.ProjectID
+ }
+ metricsContext, err := newGRPCMetricContext(ctx, metricsConfig{
+ project: project,
+ interval: config.metricInterval,
+ manualReader: config.manualReader},
+ )
+ if err != nil {
+ return nil, fmt.Errorf("gRPC Metrics: %w", err)
+ }
+ return metricsContext, nil
}
// newGRPCStorageClient initializes a new storageClient that uses the gRPC
@@ -124,6 +150,15 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl
return nil, errors.New("storage: GRPC is incompatible with any option that specifies an API for reads")
}
+ if !config.disableClientMetrics {
+ // Do not fail client creation if enabling metrics fails.
+ if metricsContext, err := enableClientMetrics(ctx, s, config); err == nil {
+ s.metricsContext = metricsContext
+ s.clientOption = append(s.clientOption, metricsContext.clientOpts...)
+ } else {
+ log.Printf("Failed to enable client metrics: %v", err)
+ }
+ }
g, err := gapic.NewClient(ctx, s.clientOption...)
if err != nil {
return nil, err
@@ -132,30 +167,22 @@ func newGRPCStorageClient(ctx context.Context, opts ...storageOption) (storageCl
return &grpcStorageClient{
raw: g,
settings: s,
+ config: &config,
}, nil
}
func (c *grpcStorageClient) Close() error {
+ if c.settings.metricsContext != nil {
+ c.settings.metricsContext.close()
+ }
return c.raw.Close()
}
// Top-level methods.
+// GetServiceAccount is not supported in the gRPC client.
func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.GetServiceAccountRequest{
- Project: toProjectResource(project),
- }
- var resp *storagepb.ServiceAccount
- err := run(ctx, func(ctx context.Context) error {
- var err error
- resp, err = c.raw.GetServiceAccount(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return "", err
- }
- return resp.EmailAddress, err
+ return "", errMethodNotSupported
}
func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
@@ -432,16 +459,12 @@ func (c *grpcStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
MatchGlob: it.query.MatchGlob,
ReadMask: q.toFieldMask(), // a nil Query still results in a "*" FieldMask
SoftDeleted: it.query.SoftDeleted,
+ IncludeFoldersAsPrefixes: it.query.IncludeFoldersAsPrefixes,
}
if s.userProject != "" {
ctx = setUserProjectMetadata(ctx, s.userProject)
}
fetch := func(pageSize int, pageToken string) (token string, err error) {
- // IncludeFoldersAsPrefixes is not supported for gRPC
- // TODO: remove this when support is added in the proto.
- if it.query.IncludeFoldersAsPrefixes {
- return "", status.Errorf(codes.Unimplemented, "storage: IncludeFoldersAsPrefixes is not supported in gRPC")
- }
var objects []*storagepb.Object
var gitr *gapic.ObjectIterator
err = run(it.ctx, func(ctx context.Context) error {
@@ -659,6 +682,36 @@ func (c *grpcStorageClient) RestoreObject(ctx context.Context, params *restoreOb
return attrs, err
}
+func (c *grpcStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+ s := callSettings(c.settings, opts...)
+ req := &storagepb.MoveObjectRequest{
+ Bucket: bucketResourceName(globalProjectAlias, params.bucket),
+ SourceObject: params.srcObject,
+ DestinationObject: params.dstObject,
+ }
+ if err := applyCondsProto("MoveObjectDestination", defaultGen, params.dstConds, req); err != nil {
+ return nil, err
+ }
+ if err := applySourceCondsProto("MoveObjectSource", defaultGen, params.srcConds, req); err != nil {
+ return nil, err
+ }
+
+ if s.userProject != "" {
+ ctx = setUserProjectMetadata(ctx, s.userProject)
+ }
+
+ var attrs *ObjectAttrs
+ err := run(ctx, func(ctx context.Context) error {
+ res, err := c.raw.MoveObject(ctx, req, s.gax...)
+ attrs = newObjectFromProto(res)
+ return err
+ }, s.retry, s.idempotent)
+ if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
+ return nil, ErrObjectNotExist
+ }
+ return attrs, err
+}
+
// Default Object ACL methods.
func (c *grpcStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
@@ -923,7 +976,7 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
if err := applyCondsProto("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
return nil, err
}
- if err := applySourceCondsProto(req.srcObject.gen, req.srcObject.conds, call); err != nil {
+ if err := applySourceCondsProto("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil {
return nil, err
}
@@ -959,48 +1012,491 @@ func (c *grpcStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
return r, nil
}
-// bytesCodec is a grpc codec which permits receiving messages as either
-// protobuf messages, or as raw []bytes.
-type bytesCodec struct {
- encoding.Codec
+// Custom codec to be used for unmarshaling BidiReadObjectResponse messages.
+// This is used to avoid a copy of object data in proto.Unmarshal.
+type bytesCodecV2 struct {
}
-func (bytesCodec) Marshal(v any) ([]byte, error) {
+var _ encoding.CodecV2 = bytesCodecV2{}
+
+// Marshal is used to encode messages to send for bytesCodecV2. Since we are only
+// using this to send ReadObjectRequest messages we don't need to recycle buffers
+// here.
+func (bytesCodecV2) Marshal(v any) (mem.BufferSlice, error) {
vv, ok := v.(proto.Message)
if !ok {
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
}
- return proto.Marshal(vv)
+ var data mem.BufferSlice
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ return data, nil
}
-func (bytesCodec) Unmarshal(data []byte, v any) error {
+// Unmarshal is used for data received for BidiReadObjectResponse. We want to preserve
+// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
+func (bytesCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
switch v := v.(type) {
- case *[]byte:
- // If gRPC could recycle the data []byte after unmarshaling (through
- // buffer pools), we would need to make a copy here.
+ case *mem.BufferSlice:
*v = data
+ // Pick up a reference to the data so that it is not freed while decoding.
+ data.Ref()
return nil
case proto.Message:
- return proto.Unmarshal(data, v)
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ return proto.Unmarshal(buf.ReadOnlyData(), v)
default:
- return fmt.Errorf("can not unmarshal type %T", v)
+ return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v)
}
}
-func (bytesCodec) Name() string {
- // If this isn't "", then gRPC sets the content-subtype of the call to this
- // value and we get errors.
+func (bytesCodecV2) Name() string {
return ""
}
+func contextMetadataFromBidiReadObject(req *storagepb.BidiReadObjectRequest) []string {
+ if len(req.GetReadObjectSpec().GetRoutingToken()) > 0 {
+ return []string{"x-goog-request-params", fmt.Sprintf("bucket=%s&routing_token=%s", req.GetReadObjectSpec().GetBucket(), req.GetReadObjectSpec().GetRoutingToken())}
+ }
+ return []string{"x-goog-request-params", fmt.Sprintf("bucket=%s", req.GetReadObjectSpec().GetBucket())}
+}
+
+type rangeSpec struct {
+ readID int64
+ writer io.Writer
+ offset int64
+ limit int64
+ bytesWritten int64
+ callback func(int64, int64, error)
+}
+
+func (c *grpcStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (mr *MultiRangeDownloader, err error) {
+ ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewMultiRangeDownloader")
+ defer func() { trace.EndSpan(ctx, err) }()
+ s := callSettings(c.settings, opts...)
+
+ if s.userProject != "" {
+ ctx = setUserProjectMetadata(ctx, s.userProject)
+ }
+
+ b := bucketResourceName(globalProjectAlias, params.bucket)
+ object := params.object
+ r := &storagepb.BidiReadObjectSpec{
+ Bucket: b,
+ Object: object,
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
+ }
+
+ // The default is a negative value, which means latest.
+ if params.gen >= 0 {
+ r.Generation = params.gen
+ }
+
+ if params.handle != nil {
+ r.ReadHandle = &storagepb.BidiReadHandle{
+ Handle: *params.handle,
+ }
+ }
+ req := &storagepb.BidiReadObjectRequest{
+ ReadObjectSpec: r,
+ }
+
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...)
+
+ openStream := func() (*bidiReadStreamResponse, context.CancelFunc, error) {
+ if err := applyCondsProto("grpcStorageClient.BidiReadObject", params.gen, params.conds, r); err != nil {
+ return nil, nil, err
+ }
+ var stream storagepb.Storage_BidiReadObjectClient
+ var resp *storagepb.BidiReadObjectResponse
+ cc, cancel := context.WithCancel(ctx)
+ err = run(cc, func(ctx context.Context) error {
+ stream, err = c.raw.BidiReadObject(ctx, s.gax...)
+ if err != nil {
+ // A BidiReadObjectRedirectedError is only returned on the initial open in case of a redirect.
+ // It carries the routing token and read handle that should be used when reopening the read stream.
+ rpcStatus := status.Convert(err)
+ details := rpcStatus.Details()
+ for _, detail := range details {
+ if bidiError, ok := detail.(*storagepb.BidiReadObjectRedirectedError); ok {
+ r.ReadHandle = bidiError.ReadHandle
+ r.RoutingToken = bidiError.RoutingToken
+ req.ReadObjectSpec = r
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...)
+ }
+ }
+ return err
+ }
+ // If the stream opened successfully, send the first message on the stream.
+ // The first message on the stream must contain the read_object_spec.
+ err = stream.Send(req)
+ if err != nil {
+ return err
+ }
+ resp, err = stream.Recv()
+ if err != nil {
+ return err
+ }
+ return nil
+ }, s.retry, s.idempotent)
+ if err != nil {
+ // Close the stream context we just created to ensure we don't leak
+ // resources.
+ cancel()
+ return nil, nil, err
+ }
+ return &bidiReadStreamResponse{stream: stream, response: resp}, cancel, nil
+ }
+
+ // Open the stream for the first time without adding any ranges.
+ resp, cancel, err := openStream()
+ if err != nil {
+ return nil, err
+ }
+
+ // The first message was Recv'd on stream open, use it to populate the
+ // object metadata.
+ msg := resp.response
+ obj := msg.GetMetadata()
+ // This is the size of the entire object, even if only a range was requested.
+ size := obj.GetSize()
+
+ rr := &gRPCBidiReader{
+ stream: resp.stream,
+ cancel: cancel,
+ settings: s,
+ readHandle: msg.GetReadHandle().GetHandle(),
+ readID: 1,
+ reopen: openStream,
+ readSpec: r,
+ data: make(chan []rangeSpec, 100),
+ ctx: ctx,
+ closeReceiver: make(chan bool, 10),
+ closeManager: make(chan bool, 10),
+ managerRetry: make(chan bool), // create unbuffered channel for closing the streamManager goroutine.
+ receiverRetry: make(chan bool), // create unbuffered channel for closing the streamReceiver goroutine.
+ mp: make(map[int64]rangeSpec),
+ done: false,
+ activeTask: 0,
+ streamRecreation: false,
+ }
+
+ // streamManager goroutine runs in background where we send message to gcs and process response.
+ streamManager := func() {
+ var currentSpec []rangeSpec
+ for {
+ select {
+ case <-rr.ctx.Done():
+ rr.mu.Lock()
+ rr.done = true
+ rr.mu.Unlock()
+ return
+ case <-rr.managerRetry:
+ return
+ case <-rr.closeManager:
+ rr.mu.Lock()
+ if len(rr.mp) != 0 {
+ for key := range rr.mp {
+ rr.mp[key].callback(rr.mp[key].offset, rr.mp[key].limit, fmt.Errorf("stream closed early"))
+ delete(rr.mp, key)
+ }
+ }
+ rr.mu.Unlock()
+ return
+ case currentSpec = <-rr.data:
+ var readRanges []*storagepb.ReadRange
+ var err error
+ rr.mu.Lock()
+ for _, v := range currentSpec {
+ rr.mp[v.readID] = v
+ readRanges = append(readRanges, &storagepb.ReadRange{ReadOffset: v.offset, ReadLength: v.limit, ReadId: v.readID})
+ }
+ rr.mu.Unlock()
+ // We can send at most 100 ranges to GCS in a single request.
+ // In case of Add we send only one range request to GCS, but in case of a retry we can have more than 100 ranges.
+ // Hence we divide the ranges into chunks of 100.
+ // For example, with 457 ranges on the stream we will make 5 requests to GCS: [0:99], [100:199], [200:299], [300:399], [400:456]
+ requestCount := len(readRanges) / 100
+ if len(readRanges)%100 != 0 {
+ requestCount++
+ }
+ for i := 0; i < requestCount; i++ {
+ start := i * 100
+ end := (i + 1) * 100
+ if end > len(readRanges) {
+ end = len(readRanges)
+ }
+ curReq := readRanges[start:end]
+ err = rr.stream.Send(&storagepb.BidiReadObjectRequest{
+ ReadRanges: curReq,
+ })
+ if err != nil {
+ // Cancel the stream and reopen it again.
+ // If an error is thrown again, close the streamManager goroutine.
+ rr.retrier(err, "manager")
+ break
+ }
+ }
+
+ }
+ }
+ }
+
+ streamReceiver := func() {
+ var resp *storagepb.BidiReadObjectResponse
+ var err error
+ for {
+ select {
+ case <-rr.ctx.Done():
+ rr.done = true
+ return
+ case <-rr.receiverRetry:
+ return
+ case <-rr.closeReceiver:
+ return
+ default:
+ // Read the data sent for a particular range request; the range's callback
+ // is invoked to indicate that the output buffer has been filled.
+ resp, err = rr.stream.Recv()
+ if resp.GetReadHandle().GetHandle() != nil {
+ rr.readHandle = resp.GetReadHandle().GetHandle()
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ if err != nil {
+ // Cancel the stream and reopen it again.
+ // If an error is thrown again, the reader is closed.
+ rr.retrier(err, "receiver")
+ }
+
+ if err == nil {
+ rr.mu.Lock()
+ if len(rr.mp) == 0 && rr.activeTask == 0 {
+ rr.closeReceiver <- true
+ rr.closeManager <- true
+ return
+ }
+ rr.mu.Unlock()
+ arr := resp.GetObjectDataRanges()
+ for _, val := range arr {
+ id := val.GetReadRange().GetReadId()
+ rr.mu.Lock()
+ _, err = rr.mp[id].writer.Write(val.GetChecksummedData().GetContent())
+ if err != nil {
+ rr.mp[id].callback(rr.mp[id].offset, rr.mp[id].limit, err)
+ rr.activeTask--
+ delete(rr.mp, id)
+ } else {
+ rr.mp[id] = rangeSpec{
+ readID: rr.mp[id].readID,
+ writer: rr.mp[id].writer,
+ offset: rr.mp[id].offset,
+ limit: rr.mp[id].limit,
+ bytesWritten: rr.mp[id].bytesWritten + int64(len(val.GetChecksummedData().GetContent())),
+ callback: rr.mp[id].callback,
+ }
+ }
+ if val.GetRangeEnd() {
+ rr.mp[id].callback(rr.mp[id].offset, rr.mp[id].limit, nil)
+ rr.activeTask--
+ delete(rr.mp, id)
+ }
+ rr.mu.Unlock()
+ }
+ }
+
+ }
+ }
+ }
+
+ rr.retrier = func(err error, thread string) {
+ rr.mu.Lock()
+ if !rr.streamRecreation {
+ rr.streamRecreation = true
+ } else {
+ rr.mu.Unlock()
+ return
+ }
+ rr.mu.Unlock()
+ // Stop the other goroutine so that the stream recreation is synchronous.
+ if thread == "receiver" {
+ rr.managerRetry <- true
+ } else {
+ rr.receiverRetry <- true
+ }
+ err = rr.retryStream(err)
+ if err != nil {
+ rr.mu.Lock()
+ for key := range rr.mp {
+ rr.mp[key].callback(rr.mp[key].offset, rr.mp[key].limit, err)
+ delete(rr.mp, key)
+ }
+ rr.mu.Unlock()
+ rr.close()
+ } else {
+ // If stream recreation succeeded, restart the stopped
+ // goroutine, making the whole flow asynchronous again.
+ if thread == "receiver" {
+ go streamManager()
+ } else {
+ go streamReceiver()
+ }
+ }
+ rr.mu.Lock()
+ rr.streamRecreation = false
+ rr.mu.Unlock()
+ }
+
+ rr.mu.Lock()
+ rr.objectSize = size
+ rr.mu.Unlock()
+
+ go streamManager()
+ go streamReceiver()
+
+ return &MultiRangeDownloader{
+ Attrs: ReaderObjectAttrs{
+ Size: size,
+ ContentType: obj.GetContentType(),
+ ContentEncoding: obj.GetContentEncoding(),
+ CacheControl: obj.GetCacheControl(),
+ LastModified: obj.GetUpdateTime().AsTime(),
+ Metageneration: obj.GetMetageneration(),
+ Generation: obj.GetGeneration(),
+ },
+ reader: rr,
+ }, nil
+}
+
+func getActiveRange(r *gRPCBidiReader) []rangeSpec {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ var activeRange []rangeSpec
+ for k, v := range r.mp {
+ activeRange = append(activeRange, rangeSpec{
+ readID: k,
+ writer: v.writer,
+ offset: (v.offset + v.bytesWritten),
+ limit: v.limit - v.bytesWritten,
+ callback: v.callback,
+ bytesWritten: 0,
+ })
+ r.mp[k] = activeRange[len(activeRange)-1]
+ }
+ return activeRange
+}
+
+ // retryStream cancels the existing stream and reopens it again.
+func (r *gRPCBidiReader) retryStream(err error) error {
+ var shouldRetry = ShouldRetry
+ if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
+ shouldRetry = r.settings.retry.shouldRetry
+ }
+ if shouldRetry(err) {
+ // This will "close" the existing stream and immediately attempt to
+ // reopen the stream, but will backoff if further attempts are necessary.
+ // When Reopening the stream only failed readID will be added to stream.
+ return r.reopenStream(getActiveRange(r))
+ }
+ return err
+}
+
+ // reopenStream "closes" the existing stream, attempts to reopen a stream, and
+ // sets the reader's stream, cancel, and readHandle fields in the process.
+func (r *gRPCBidiReader) reopenStream(failSpec []rangeSpec) error {
+ // Close existing stream and initialize new stream with updated offset.
+ if r.cancel != nil {
+ r.cancel()
+ }
+
+ res, cancel, err := r.reopen()
+ if err != nil {
+ return err
+ }
+ r.stream = res.stream
+ r.cancel = cancel
+ r.readHandle = res.response.GetReadHandle().GetHandle()
+ if failSpec != nil {
+ r.data <- failSpec
+ }
+ return nil
+}
+
+ // add adds the current range to the stream.
+func (mr *gRPCBidiReader) add(output io.Writer, offset, limit int64, callback func(int64, int64, error)) {
+ mr.mu.Lock()
+ objectSize := mr.objectSize
+ mr.mu.Unlock()
+
+ if offset > objectSize {
+ callback(offset, limit, fmt.Errorf("offset larger than size of object: %v", objectSize))
+ return
+ }
+ if limit < 0 {
+ callback(offset, limit, fmt.Errorf("limit can't be negative"))
+ return
+ }
+ mr.mu.Lock()
+ curentID := (*mr).readID
+ (*mr).readID++
+ if !mr.done {
+ spec := rangeSpec{readID: curentID, writer: output, offset: offset, limit: limit, bytesWritten: 0, callback: callback}
+ mr.mp[curentID] = spec
+ mr.activeTask++
+ mr.data <- []rangeSpec{spec}
+ } else {
+ callback(offset, limit, fmt.Errorf("stream is closed, can't add range"))
+ }
+ mr.mu.Unlock()
+}
+
+func (mr *gRPCBidiReader) wait() {
+ mr.mu.Lock()
+ keepWaiting := len(mr.mp) != 0 && mr.activeTask != 0
+ mr.mu.Unlock()
+
+ for keepWaiting {
+ mr.mu.Lock()
+ keepWaiting = len(mr.mp) != 0 && mr.activeTask != 0
+ mr.mu.Unlock()
+ }
+}
+
+ // close notifies the stream manager goroutine that the reader has been closed, if it's still running.
+func (mr *gRPCBidiReader) close() error {
+ if mr.cancel != nil {
+ mr.cancel()
+ }
+ mr.mu.Lock()
+ mr.done = true
+ mr.activeTask = 0
+ mr.mu.Unlock()
+ mr.closeReceiver <- true
+ mr.closeManager <- true
+ return nil
+}
+
+func (mrr *gRPCBidiReader) getHandle() []byte {
+ return mrr.readHandle
+}
+
func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
+ // If bidi reads was not selected, use the legacy read object API.
+ if !c.config.grpcBidiReads {
+ return c.NewRangeReaderReadObject(ctx, params, opts...)
+ }
+
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
s := callSettings(c.settings, opts...)
s.gax = append(s.gax, gax.WithGRPCOptions(
- grpc.ForceCodec(bytesCodec{}),
+ grpc.ForceCodecV2(bytesCodecV2{}),
))
if s.userProject != "" {
@@ -1008,17 +1504,25 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
}
b := bucketResourceName(globalProjectAlias, params.bucket)
- req := &storagepb.ReadObjectRequest{
+
+ // Create a BidiReadObjectRequest.
+ spec := &storagepb.BidiReadObjectSpec{
Bucket: b,
Object: params.object,
CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
}
- // The default is a negative value, which means latest.
- if params.gen >= 0 {
- req.Generation = params.gen
+ if err := applyCondsProto("gRPCReader.NewRangeReader", params.gen, params.conds, spec); err != nil {
+ return nil, err
}
-
- var databuf []byte
+ if params.handle != nil {
+ spec.ReadHandle = &storagepb.BidiReadHandle{
+ Handle: *params.handle,
+ }
+ }
+ req := &storagepb.BidiReadObjectRequest{
+ ReadObjectSpec: spec,
+ }
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, contextMetadataFromBidiReadObject(req)...)
// Define a function that initiates a Read with offset and length, assuming
// we have already read seen bytes.
@@ -1031,34 +1535,43 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
cc, cancel := context.WithCancel(ctx)
- req.ReadOffset = params.offset + seen
+ // BidiReadObject can take multiple ranges, but we just request one in this case.
+ readRange := &storagepb.ReadRange{
+ ReadOffset: params.offset + seen,
+ ReadId: 1,
+ }
- // Only set a ReadLimit if length is greater than zero, because <= 0 means
+ // Only set a ReadLength if length is greater than zero, because <= 0 means
// to read it all.
if params.length > 0 {
- req.ReadLimit = params.length - seen
+ readRange.ReadLength = params.length - seen
}
- if err := applyCondsProto("gRPCReader.reopen", params.gen, params.conds, req); err != nil {
- cancel()
- return nil, nil, err
- }
+ req.ReadRanges = []*storagepb.ReadRange{readRange}
- var stream storagepb.Storage_ReadObjectClient
- var msg *storagepb.ReadObjectResponse
+ var stream storagepb.Storage_BidiReadObjectClient
var err error
+ var decoder *readResponseDecoder
err = run(cc, func(ctx context.Context) error {
- stream, err = c.raw.ReadObject(cc, req, s.gax...)
+ stream, err = c.raw.BidiReadObject(ctx, s.gax...)
if err != nil {
return err
}
+ if err := stream.Send(req); err != nil {
+ return err
+ }
+ // Oneshot reads can close the client->server side immediately.
+ if err := stream.CloseSend(); err != nil {
+ return err
+ }
// Receive the message into databuf as a wire-encoded message so we can
// use a custom decoder to avoid an extra copy at the protobuf layer.
- err := stream.RecvMsg(&databuf)
- // These types of errors show up on the Recv call, rather than the
- // initialization of the stream via ReadObject above.
+ databufs := mem.BufferSlice{}
+ err := stream.RecvMsg(&databufs)
+ // These types of errors show up on the RecvMsg call, rather than the
+ // initialization of the stream via BidiReadObject above.
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return ErrObjectNotExist
}
@@ -1066,33 +1579,40 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
return err
}
// Use a custom decoder that uses protobuf unmarshalling for all
- // fields except the checksummed data.
- // Subsequent receives in Read calls will skip all protobuf
- // unmarshalling and directly read the content from the gRPC []byte
- // response, since only the first call will contain other fields.
- msg, err = readFullObjectResponse(databuf)
-
+ // fields except the object data. Object data is handled separately
+ // to avoid a copy.
+ decoder = &readResponseDecoder{
+ databufs: databufs,
+ }
+ err = decoder.readFullObjectResponse()
return err
}, s.retry, s.idempotent)
if err != nil {
// Close the stream context we just created to ensure we don't leak
// resources.
cancel()
+ // Free any buffers.
+ if decoder != nil && decoder.databufs != nil {
+ decoder.databufs.Free()
+ }
return nil, nil, err
}
- return &readStreamResponse{stream, msg}, cancel, nil
+ return &readStreamResponse{
+ stream: stream,
+ decoder: decoder,
+ }, cancel, nil
}
res, cancel, err := reopen(0)
if err != nil {
return nil, err
}
-
// The first message was Recv'd on stream open, use it to populate the
- // object metadata.
- msg := res.response
+ // object metadata and read handle.
+ msg := res.decoder.msg
obj := msg.GetMetadata()
+ handle := ReadHandle(msg.GetReadHandle().GetHandle())
// This is the size of the entire object, even if only a range was requested.
size := obj.GetSize()
@@ -1101,44 +1621,57 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
wantCRC uint32
checkCRC bool
)
- if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil && params.offset == 0 && params.length < 0 {
+ if checksums := obj.GetChecksums(); checksums != nil && checksums.Crc32C != nil {
+ if params.offset == 0 && params.length < 0 {
+ checkCRC = true
+ }
wantCRC = checksums.GetCrc32C()
- checkCRC = true
}
+ startOffset := params.offset
+ if params.offset < 0 {
+ startOffset = size + params.offset
+ }
+
+ // The remaining bytes are the lesser of the requested range and all bytes
+ // after params.offset.
+ length := params.length
+ if params.length > size || params.length < 0 {
+ // if params.length < 0 (or larger than object size),
+ // all remaining bytes were requested.
+ length = size
+ }
+ remain := length - startOffset
+
+ metadata := obj.GetMetadata()
r = &Reader{
Attrs: ReaderObjectAttrs{
Size: size,
+ StartOffset: startOffset,
ContentType: obj.GetContentType(),
ContentEncoding: obj.GetContentEncoding(),
CacheControl: obj.GetCacheControl(),
LastModified: obj.GetUpdateTime().AsTime(),
Metageneration: obj.GetMetageneration(),
Generation: obj.GetGeneration(),
+ CRC32C: wantCRC,
},
+ objectMetadata: &metadata,
reader: &gRPCReader{
stream: res.stream,
reopen: reopen,
cancel: cancel,
size: size,
- // Store the content from the first Recv in the
- // client buffer for reading later.
- leftovers: msg.GetChecksummedData().GetContent(),
+ // Preserve the decoder to read out object data when Read/WriteTo is called.
+ currMsg: res.decoder,
settings: s,
zeroRange: params.length == 0,
- databuf: databuf,
wantCRC: wantCRC,
checkCRC: checkCRC,
},
checkCRC: checkCRC,
- }
-
- cr := msg.GetContentRange()
- if cr != nil {
- r.Attrs.StartOffset = cr.GetStart()
- r.remain = cr.GetEnd() - cr.GetStart()
- } else {
- r.remain = size
+ handle: &handle,
+ remain: remain,
}
// For a zero-length request, explicitly close the stream and set remaining
@@ -1152,87 +1685,66 @@ func (c *grpcStorageClient) NewRangeReader(ctx context.Context, params *newRange
}
func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
- s := callSettings(c.settings, opts...)
-
var offset int64
errorf := params.setError
- progress := params.progress
setObj := params.setObj
-
pr, pw := io.Pipe()
- gw := newGRPCWriter(c, params, pr)
- gw.settings = s
- if s.userProject != "" {
- gw.ctx = setUserProjectMetadata(gw.ctx, s.userProject)
- }
+
+ s := callSettings(c.settings, opts...)
// This function reads the data sent to the pipe and sends sets of messages
// on the gRPC client-stream as the buffer is filled.
go func() {
- defer close(params.donec)
+ err := func() error {
+ // Unless the user told us the content type, we have to determine it from
+ // the first read.
+ var r io.Reader = pr
+ if params.attrs.ContentType == "" && !params.forceEmptyContentType {
+ r, params.attrs.ContentType = gax.DetermineContentType(r)
+ }
- // Loop until there is an error or the Object has been finalized.
- for {
- // Note: This blocks until either the buffer is full or EOF is read.
- recvd, doneReading, err := gw.read()
+ var gw *gRPCWriter
+ gw, err := newGRPCWriter(c, s, params, r)
if err != nil {
- err = checkCanceled(err)
- errorf(err)
- pr.CloseWithError(err)
- return
+ return err
}
- if params.attrs.Retention != nil {
- // TO-DO: remove once ObjectRetention is available - see b/308194853
- err = status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
- errorf(err)
- pr.CloseWithError(err)
- return
- }
- // The chunk buffer is full, but there is no end in sight. This
- // means that either:
- // 1. A resumable upload will need to be used to send
- // multiple chunks, until we are done reading data. Start a
- // resumable upload if it has not already been started.
- // 2. ChunkSize of zero may also have a full buffer, but a resumable
- // session should not be initiated in this case.
- if !doneReading && gw.upid == "" && params.chunkSize != 0 {
- err = gw.startResumableUpload()
+ // Loop until there is an error or the Object has been finalized.
+ for {
+ // Note: This blocks until either the buffer is full or EOF is read.
+ recvd, doneReading, err := gw.read()
if err != nil {
- err = checkCanceled(err)
- errorf(err)
- pr.CloseWithError(err)
- return
+ return err
}
- }
- o, off, err := gw.uploadBuffer(recvd, offset, doneReading)
- if err != nil {
- err = checkCanceled(err)
- errorf(err)
- pr.CloseWithError(err)
- return
- }
+ var o *storagepb.Object
+ uploadBuff := func(ctx context.Context) error {
+ obj, err := gw.uploadBuffer(recvd, offset, doneReading)
+ o = obj
+ return err
+ }
- // At this point, the current buffer has been uploaded. For resumable
- // uploads and chunkSize = 0, capture the committed offset here in case
- // the upload was not finalized and another chunk is to be uploaded. Call
- // the progress function for resumable uploads only.
- if gw.upid != "" || gw.chunkSize == 0 {
- offset = off
- }
- if gw.upid != "" {
- progress(offset)
+ err = run(gw.ctx, uploadBuff, gw.settings.retry, s.idempotent)
+ if err != nil {
+ return err
+ }
+ offset += int64(recvd)
+
+ // When we are done reading data without errors, set the object and
+ // finish.
+ if doneReading {
+ // Build Object from server's response.
+ setObj(newObjectFromProto(o))
+ return nil
+ }
}
+ }()
- // When we are done reading data without errors, set the object and
- // finish.
- if doneReading {
- // Build Object from server's response.
- setObj(newObjectFromProto(o))
- return
- }
- }
+ // These calls are still valid if err is nil
+ err = checkCanceled(err)
+ errorf(err)
+ pr.CloseWithError(err)
+ close(params.donec)
}()
return pw, nil
@@ -1293,213 +1805,53 @@ func (c *grpcStorageClient) TestIamPermissions(ctx context.Context, resource str
return res.Permissions, nil
}
-// HMAC Key methods.
+// HMAC Key methods are not implemented in gRPC client.
func (c *grpcStorageClient) GetHMACKey(ctx context.Context, project, accessID string, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.GetHmacKeyRequest{
- AccessId: accessID,
- Project: toProjectResource(project),
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var metadata *storagepb.HmacKeyMetadata
- err := run(ctx, func(ctx context.Context) error {
- var err error
- metadata, err = c.raw.GetHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toHMACKeyFromProto(metadata), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) ListHMACKeys(ctx context.Context, project, serviceAccountEmail string, showDeletedKeys bool, opts ...storageOption) *HMACKeysIterator {
- s := callSettings(c.settings, opts...)
- req := &storagepb.ListHmacKeysRequest{
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- ShowDeletedKeys: showDeletedKeys,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
it := &HMACKeysIterator{
ctx: ctx,
- projectID: project,
- retry: s.retry,
+ projectID: "",
+ retry: nil,
}
- fetch := func(pageSize int, pageToken string) (token string, err error) {
- var hmacKeys []*storagepb.HmacKeyMetadata
- err = run(it.ctx, func(ctx context.Context) error {
- gitr := c.raw.ListHmacKeys(ctx, req, s.gax...)
- hmacKeys, token, err = gitr.InternalFetch(pageSize, pageToken)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return "", err
- }
- for _, hkmd := range hmacKeys {
- hk := toHMACKeyFromProto(hkmd)
- it.hmacKeys = append(it.hmacKeys, hk)
- }
-
- return token, nil
+ fetch := func(_ int, _ string) (token string, err error) {
+ return "", errMethodNotSupported
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
fetch,
- func() int { return len(it.hmacKeys) - it.index },
- func() interface{} {
- prev := it.hmacKeys
- it.hmacKeys = it.hmacKeys[:0]
- it.index = 0
- return prev
- })
+ func() int { return 0 },
+ func() interface{} { return nil },
+ )
return it
}
func (c *grpcStorageClient) UpdateHMACKey(ctx context.Context, project, serviceAccountEmail, accessID string, attrs *HMACKeyAttrsToUpdate, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- hk := &storagepb.HmacKeyMetadata{
- AccessId: accessID,
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- State: string(attrs.State),
- Etag: attrs.Etag,
- }
- var paths []string
- fieldMask := &fieldmaskpb.FieldMask{
- Paths: paths,
- }
- if attrs.State != "" {
- fieldMask.Paths = append(fieldMask.Paths, "state")
- }
- req := &storagepb.UpdateHmacKeyRequest{
- HmacKey: hk,
- UpdateMask: fieldMask,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var metadata *storagepb.HmacKeyMetadata
- err := run(ctx, func(ctx context.Context) error {
- var err error
- metadata, err = c.raw.UpdateHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toHMACKeyFromProto(metadata), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateHMACKey(ctx context.Context, project, serviceAccountEmail string, opts ...storageOption) (*HMACKey, error) {
- s := callSettings(c.settings, opts...)
- req := &storagepb.CreateHmacKeyRequest{
- Project: toProjectResource(project),
- ServiceAccountEmail: serviceAccountEmail,
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- var res *storagepb.CreateHmacKeyResponse
- err := run(ctx, func(ctx context.Context) error {
- var err error
- res, err = c.raw.CreateHmacKey(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- key := toHMACKeyFromProto(res.Metadata)
- key.Secret = base64.StdEncoding.EncodeToString(res.SecretKeyBytes)
-
- return key, nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteHMACKey(ctx context.Context, project string, accessID string, opts ...storageOption) error {
- s := callSettings(c.settings, opts...)
- req := &storagepb.DeleteHmacKeyRequest{
- AccessId: accessID,
- Project: toProjectResource(project),
- }
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- return run(ctx, func(ctx context.Context) error {
- return c.raw.DeleteHmacKey(ctx, req, s.gax...)
- }, s.retry, s.idempotent)
+ return errMethodNotSupported
}
-// Notification methods.
+// Notification methods are not implemented in gRPC client.
func (c *grpcStorageClient) ListNotifications(ctx context.Context, bucket string, opts ...storageOption) (n map[string]*Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.ListNotifications")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- if s.userProject != "" {
- ctx = setUserProjectMetadata(ctx, s.userProject)
- }
- req := &storagepb.ListNotificationConfigsRequest{
- Parent: bucketResourceName(globalProjectAlias, bucket),
- }
- var notifications []*storagepb.NotificationConfig
- err = run(ctx, func(ctx context.Context) error {
- gitr := c.raw.ListNotificationConfigs(ctx, req, s.gax...)
- for {
- // PageSize is not set and fallbacks to the API default pageSize of 100.
- items, nextPageToken, err := gitr.InternalFetch(int(req.GetPageSize()), req.GetPageToken())
- if err != nil {
- return err
- }
- notifications = append(notifications, items...)
- // If there are no more results, nextPageToken is empty and err is nil.
- if nextPageToken == "" {
- return err
- }
- req.PageToken = nextPageToken
- }
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
-
- return notificationsToMapFromProto(notifications), nil
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) CreateNotification(ctx context.Context, bucket string, n *Notification, opts ...storageOption) (ret *Notification, err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.CreateNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- req := &storagepb.CreateNotificationConfigRequest{
- Parent: bucketResourceName(globalProjectAlias, bucket),
- NotificationConfig: toProtoNotification(n),
- }
- var pbn *storagepb.NotificationConfig
- err = run(ctx, func(ctx context.Context) error {
- var err error
- pbn, err = c.raw.CreateNotificationConfig(ctx, req, s.gax...)
- return err
- }, s.retry, s.idempotent)
- if err != nil {
- return nil, err
- }
- return toNotificationFromProto(pbn), err
+ return nil, errMethodNotSupported
}
func (c *grpcStorageClient) DeleteNotification(ctx context.Context, bucket string, id string, opts ...storageOption) (err error) {
- ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.DeleteNotification")
- defer func() { trace.EndSpan(ctx, err) }()
-
- s := callSettings(c.settings, opts...)
- req := &storagepb.DeleteNotificationConfigRequest{Name: id}
- return run(ctx, func(ctx context.Context) error {
- return c.raw.DeleteNotificationConfig(ctx, req, s.gax...)
- }, s.retry, s.idempotent)
+ return errMethodNotSupported
}
// setUserProjectMetadata appends a project ID to the outgoing Context metadata
@@ -1512,17 +1864,46 @@ func setUserProjectMetadata(ctx context.Context, project string) context.Context
}
type readStreamResponse struct {
- stream storagepb.Storage_ReadObjectClient
- response *storagepb.ReadObjectResponse
-}
-
+ stream storagepb.Storage_BidiReadObjectClient
+ decoder *readResponseDecoder
+}
+
+type bidiReadStreamResponse struct {
+ stream storagepb.Storage_BidiReadObjectClient
+ response *storagepb.BidiReadObjectResponse
+}
+
+type gRPCBidiReader struct {
+ stream storagepb.Storage_BidiReadObjectClient
+ cancel context.CancelFunc
+ settings *settings
+ readHandle ReadHandle
+ readID int64
+ reopen func() (*bidiReadStreamResponse, context.CancelFunc, error)
+ readSpec *storagepb.BidiReadObjectSpec
+ data chan []rangeSpec
+ ctx context.Context
+ closeReceiver chan bool
+ closeManager chan bool
+ managerRetry chan bool
+ receiverRetry chan bool
+ mu sync.Mutex // protects all vars in gRPCBidiReader from concurrent access
+ mp map[int64]rangeSpec // always use the mutex when accessing the map
+ done bool // always use the mutex when accessing this variable
+ activeTask int64 // always use the mutex when accessing this variable
+ objectSize int64 // always use the mutex when accessing this variable
+ retrier func(error, string)
+ streamRecreation bool // This helps us identify if stream recreation is in progress or not. If stream recreation gets called from two goroutine then this will stop second one.
+}
+
+// gRPCReader is used by storage.Reader if the experimental option WithGRPCBidiReads is passed.
type gRPCReader struct {
seen, size int64
zeroRange bool
- stream storagepb.Storage_ReadObjectClient
+ stream storagepb.Storage_BidiReadObjectClient
reopen func(seen int64) (*readStreamResponse, context.CancelFunc, error)
leftovers []byte
- databuf []byte
+ currMsg *readResponseDecoder // decoder for the current message
cancel context.CancelFunc
settings *settings
checkCRC bool // should we check the CRC?
@@ -1565,18 +1946,21 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
}
var n int
- // Read leftovers and return what was available to conform to the Reader
+
+ // If there is data remaining in the current message, return what was
+ // available to conform to the Reader
// interface: https://pkg.go.dev/io#Reader.
- if len(r.leftovers) > 0 {
- n = copy(p, r.leftovers)
+ if !r.currMsg.done {
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(n)
- r.updateCRC(p[:n])
- r.leftovers = r.leftovers[n:]
return n, nil
}
// Attempt to Recv the next message on the stream.
- content, err := r.recv()
+ // This will update r.currMsg with the decoder for the new message.
+ err := r.recv()
if err != nil {
return 0, err
}
@@ -1588,16 +1972,11 @@ func (r *gRPCReader) Read(p []byte) (int, error) {
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
- n = copy(p[n:], content)
- leftover := len(content) - n
- if leftover > 0 {
- // Wasn't able to copy all of the data in the message, store for
- // future Read calls.
- r.leftovers = content[n:]
- }
- r.seen += int64(n)
- r.updateCRC(p[:n])
+ n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
+ r.updateCRC(b)
+ })
+ r.seen += int64(n)
return n, nil
}
@@ -1624,14 +2003,14 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// Track bytes written during before call.
var alreadySeen = r.seen
- // Write any leftovers to the stream. There will be some leftovers from the
+ // Write any already received message to the stream. There will be some leftovers from the
// original NewRangeReader call.
- if len(r.leftovers) > 0 {
- // Write() will write the entire leftovers slice unless there is an error.
- written, err := w.Write(r.leftovers)
+ if r.currMsg != nil && !r.currMsg.done {
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(written)
- r.updateCRC(r.leftovers)
- r.leftovers = nil
+ r.currMsg = nil
if err != nil {
return r.seen - alreadySeen, err
}
@@ -1642,7 +2021,7 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// Attempt to receive the next message on the stream.
// Will terminate with io.EOF once data has all come through.
// recv() handles stream reopening and retry logic so no need for retries here.
- msg, err := r.recv()
+ err := r.recv()
if err != nil {
if err == io.EOF {
// We are done; check the checksum if necessary and return.
@@ -1658,9 +2037,10 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
// present in the response here.
// TODO: Figure out if we need to support decompressive transcoding
// https://cloud.google.com/storage/docs/transcoding.
- written, err := w.Write(msg)
+ written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
+ r.updateCRC(b)
+ })
r.seen += int64(written)
- r.updateCRC(msg)
if err != nil {
return r.seen - alreadySeen, err
}
@@ -1669,12 +2049,12 @@ func (r *gRPCReader) WriteTo(w io.Writer) (int64, error) {
}
// Close cancels the read stream's context in order for it to be closed and
-// collected.
+// collected, and frees any currently in use buffers.
func (r *gRPCReader) Close() error {
if r.cancel != nil {
r.cancel()
}
- r.stream = nil
+ r.currMsg = nil
return nil
}
@@ -1689,9 +2069,10 @@ func (r *gRPCReader) Close() error {
//
// The last error received is the one that is returned, which could be from
// an attempt to reopen the stream.
-func (r *gRPCReader) recv() ([]byte, error) {
- err := r.stream.RecvMsg(&r.databuf)
+func (r *gRPCReader) recv() error {
+ databufs := mem.BufferSlice{}
+ err := r.stream.RecvMsg(&databufs)
var shouldRetry = ShouldRetry
if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
shouldRetry = r.settings.retry.shouldRetry
@@ -1700,226 +2081,496 @@ func (r *gRPCReader) recv() ([]byte, error) {
// This will "close" the existing stream and immediately attempt to
// reopen the stream, but will backoff if further attempts are necessary.
// Reopening the stream Recvs the first message, so if retrying is
- // successful, the next logical chunk will be returned.
- msg, err := r.reopenStream()
- return msg.GetChecksummedData().GetContent(), err
+ // successful, r.currMsg will be updated to include the new data.
+ return r.reopenStream()
}
if err != nil {
- return nil, err
+ return err
}
- return readObjectResponseContent(r.databuf)
+ r.currMsg = &readResponseDecoder{databufs: databufs}
+ return r.currMsg.readFullObjectResponse()
}
// ReadObjectResponse field and subfield numbers.
const (
- checksummedDataField = protowire.Number(1)
+ // Top level fields.
+ metadataField = protowire.Number(4)
+ objectRangeDataField = protowire.Number(6)
+ readHandleField = protowire.Number(7)
+ // Nested in ObjectRangeData
+ checksummedDataField = protowire.Number(1)
+ readRangeField = protowire.Number(2)
+ rangeEndField = protowire.Number(3)
+ // Nested in ObjectRangeData.ChecksummedData
checksummedDataContentField = protowire.Number(1)
checksummedDataCRC32CField = protowire.Number(2)
- objectChecksumsField = protowire.Number(2)
- contentRangeField = protowire.Number(3)
- metadataField = protowire.Number(4)
)
-// readObjectResponseContent returns the checksummed_data.content field of a
-// ReadObjectResponse message, or an error if the message is invalid.
-// This can be used on recvs of objects after the first recv, since only the
-// first message will contain non-data fields.
-func readObjectResponseContent(b []byte) ([]byte, error) {
- checksummedData, err := readProtoBytes(b, checksummedDataField)
+// readResponseDecoder is a wrapper on the raw message, used to decode one message
+// without copying object data. It also has methods to write out the resulting object
+// data to the user application.
+type readResponseDecoder struct {
+ databufs mem.BufferSlice // raw bytes of the message being processed
+ // Decoding offsets
+ off uint64 // offset in the messsage relative to the data as a whole
+ currBuf int // index of the current buffer being processed
+ currOff uint64 // offset in the current buffer
+ // Processed data
+ msg *storagepb.BidiReadObjectResponse // processed response message with all fields other than object data populated
+ dataOffsets bufferSliceOffsets // offsets of the object data in the message.
+ done bool // true if the data has been completely read.
+}
+
+type bufferSliceOffsets struct {
+ startBuf, endBuf int // indices of start and end buffers of object data in the msg
+ startOff, endOff uint64 // offsets within these buffers where the data starts and ends.
+ currBuf int // index of current buffer being read out to the user application.
+ currOff uint64 // offset of read in current buffer.
+}
+
+// peek ahead 10 bytes from the current offset in the databufs. This will return a
+// slice of the current buffer if the bytes are all in one buffer, but will copy
+// the bytes into a new buffer if the distance is split across buffers. Use this
+// to allow protowire methods to be used to parse tags & fixed values.
+// The max length of a varint tag is 10 bytes, see
+// https://protobuf.dev/programming-guides/encoding/#varints . Other int types
+// are shorter.
+func (d *readResponseDecoder) peek() []byte {
+ b := d.databufs[d.currBuf].ReadOnlyData()
+ // Check if the tag will fit in the current buffer. If not, copy the next 10
+ // bytes into a new buffer to ensure that we can read the tag correctly
+ // without it being divided between buffers.
+ tagBuf := b[d.currOff:]
+ remainingInBuf := len(tagBuf)
+ // If we have less than 10 bytes remaining and are not in the final buffer,
+ // copy up to 10 bytes ahead from the next buffer.
+ if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 {
+ tagBuf = d.copyNextBytes(10)
+ }
+ return tagBuf
+}
+
+// Copies up to next n bytes into a new buffer, or fewer if fewer bytes remain in the
+// buffers overall. Does not advance offsets.
+func (d *readResponseDecoder) copyNextBytes(n int) []byte {
+ remaining := n
+ if r := d.databufs.Len() - int(d.off); r < remaining {
+ remaining = r
+ }
+ currBuf := d.currBuf
+ currOff := d.currOff
+ var buf []byte
+ for remaining > 0 {
+ b := d.databufs[currBuf].ReadOnlyData()
+ remainingInCurr := len(b[currOff:])
+ if remainingInCurr < remaining {
+ buf = append(buf, b[currOff:]...)
+ remaining -= remainingInCurr
+ currBuf++
+ currOff = 0
+ } else {
+ buf = append(buf, b[currOff:currOff+uint64(remaining)]...)
+ remaining = 0
+ }
+ }
+ return buf
+}
+
+// Advance current buffer & byte offset in the decoding by n bytes. Returns an error if we
+// go past the end of the data.
+func (d *readResponseDecoder) advanceOffset(n uint64) error {
+ remaining := n
+ for remaining > 0 {
+ remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff
+ if remainingInCurr <= remaining {
+ remaining -= remainingInCurr
+ d.currBuf++
+ d.currOff = 0
+ } else {
+ d.currOff += remaining
+ remaining = 0
+ }
+ }
+ // If we have advanced past the end of the buffers, something went wrong.
+ if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) {
+ return errors.New("decoding: truncated message, cannot advance offset")
+ }
+ d.off += n
+ return nil
+
+}
+
+// This copies object data from the message into the buffer and returns the number of
+// bytes copied. The data offsets are incremented in the message. The updateCRC
+// function is called on the copied bytes.
+func (d *readResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0
+ }
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ n := copy(p, b)
+ updateCRC(b[:n])
+ d.dataOffsets.currOff += uint64(n)
+
+ // We've read all the data from this message. Free the underlying buffers.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff {
+ d.done = true
+ d.databufs.Free()
+ }
+ // We are at the end of the current buffer
+ if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) {
+ d.dataOffsets.currOff = 0
+ d.dataOffsets.currBuf++
+ }
+ return n
+}
+
+func (d *readResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) {
+ // For a completely empty message, just return 0
+ if len(d.databufs) == 0 {
+ return 0, nil
+ }
+ var written int64
+ for !d.done {
+ databuf := d.databufs[d.dataOffsets.currBuf]
+ startOff := d.dataOffsets.currOff
+ var b []byte
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
+ } else {
+ b = databuf.ReadOnlyData()[startOff:]
+ }
+ var n int
+ // Write all remaining data from the current buffer
+ n, err := w.Write(b)
+ written += int64(n)
+ updateCRC(b)
+ if err != nil {
+ return written, err
+ }
+ d.dataOffsets.currOff = 0
+ // We've read all the data from this message.
+ if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
+ d.done = true
+ d.databufs.Free()
+ } else {
+ d.dataOffsets.currBuf++
+ }
+ }
+ return written, nil
+}
+
+// Consume the next available tag in the input data and return the field number and type.
+// Advances the relevant offsets in the data.
+func (d *readResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf)
+ if tagLength < 0 {
+ return 0, 0, protowire.ParseError(tagLength)
+ }
+ // Update the offsets and current buffer depending on the tag length.
+ if err := d.advanceOffset(uint64(tagLength)); err != nil {
+ return 0, 0, fmt.Errorf("consuming tag: %w", err)
+ }
+ return fieldNum, fieldType, nil
+}
+
+// Consume a varint that represents the length of a bytes field. Return the length of
+// the data, and advance the offsets by the length of the varint.
+func (d *readResponseDecoder) consumeVarint() (uint64, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ dataLength, tagLength := protowire.ConsumeVarint(tagBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return dataLength, nil
+}
+
+func (d *readResponseDecoder) consumeFixed32() (uint32, error) {
+ valueBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ value, tagLength := protowire.ConsumeFixed32(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+func (d *readResponseDecoder) consumeFixed64() (uint64, error) {
+ valueBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ value, tagLength := protowire.ConsumeFixed64(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+// Consume any field values up to the end offset provided and don't return anything.
+// This is used to skip any values which are not going to be used.
+// msgEndOff is indexed in terms of the overall data across all buffers.
+func (d *readResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error {
+ // reimplement protowire.ConsumeFieldValue without the extra case for groups (which
+ // are are complicted and not a thing in proto3).
+ var err error
+ switch fieldType {
+ case protowire.VarintType:
+ _, err = d.consumeVarint()
+ case protowire.Fixed32Type:
+ _, err = d.consumeFixed32()
+ case protowire.Fixed64Type:
+ _, err = d.consumeFixed64()
+ case protowire.BytesType:
+ _, err = d.consumeBytes()
+ default:
+ return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum)
+ }
if err != nil {
- return b, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", err)
+ return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err)
}
- content, err := readProtoBytes(checksummedData, checksummedDataContentField)
+
+ return nil
+}
+
+// Consume a bytes field from the input. Returns offsets for the data in the buffer slices
+// and an error.
+func (d *readResponseDecoder) consumeBytes() (bufferSliceOffsets, error) {
+ // m is the length of the data past the tag.
+ m, err := d.consumeVarint()
if err != nil {
- return content, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", err)
+ return bufferSliceOffsets{}, fmt.Errorf("consuming bytes field: %w", err)
}
+ offsets := bufferSliceOffsets{
+ startBuf: d.currBuf,
+ startOff: d.currOff,
+ currBuf: d.currBuf,
+ currOff: d.currOff,
+ }
+
+ // Advance offsets to lengths of bytes field and capture where we end.
+ d.advanceOffset(m)
+ offsets.endBuf = d.currBuf
+ offsets.endOff = d.currOff
+ return offsets, nil
+}
- return content, nil
+// Consume a bytes field from the input and copy into a new buffer if
+// necessary (if the data is split across buffers in databuf). This can be
+// used to leverage proto.Unmarshal for small bytes fields (i.e. anything
+// except object data).
+func (d *readResponseDecoder) consumeBytesCopy() ([]byte, error) {
+ // m is the length of the bytes data.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return nil, fmt.Errorf("consuming varint: %w", err)
+ }
+ // Copy the data into a buffer and advance the offset
+ b := d.copyNextBytes(int(m))
+ if err := d.advanceOffset(m); err != nil {
+ return nil, fmt.Errorf("advancing offset: %w", err)
+ }
+ return b, nil
}
-// readFullObjectResponse returns the ReadObjectResponse that is encoded in the
+// readFullObjectResponse returns the BidiReadObjectResponse that is encoded in the
// wire-encoded message buffer b, or an error if the message is invalid.
// This must be used on the first recv of an object as it may contain all fields
-// of ReadObjectResponse, and we use or pass on those fields to the user.
+// of BidiReadObjectResponse, and we use or pass on those fields to the user.
// This function is essentially identical to proto.Unmarshal, except it aliases
// the data in the input []byte. If the proto library adds a feature to
// Unmarshal that does that, this function can be dropped.
-func readFullObjectResponse(b []byte) (*storagepb.ReadObjectResponse, error) {
- msg := &storagepb.ReadObjectResponse{}
+func (d *readResponseDecoder) readFullObjectResponse() error {
+ msg := &storagepb.BidiReadObjectResponse{}
// Loop over the entire message, extracting fields as we go. This does not
// handle field concatenation, in which the contents of a single field
// are split across multiple protobuf tags.
- off := 0
- for off < len(b) {
- // Consume the next tag. This will tell us which field is next in the
- // buffer, its type, and how much space it takes up.
- fieldNum, fieldType, fieldLength := protowire.ConsumeTag(b[off:])
- if fieldLength < 0 {
- return nil, protowire.ParseError(fieldLength)
+ for d.off < uint64(d.databufs.Len()) {
+ fieldNum, fieldType, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming next tag: %w", err)
}
- off += fieldLength
// Unmarshal the field according to its type. Only fields that are not
// nil will be present.
switch {
- case fieldNum == checksummedDataField && fieldType == protowire.BytesType:
- // The ChecksummedData field was found. Initialize the struct.
- msg.ChecksummedData = &storagepb.ChecksummedData{}
-
- // Get the bytes corresponding to the checksummed data.
- fieldContent, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData: %v", protowire.ParseError(n))
+ // This is a repeated field, so it can occur more than once. But, for now
+ // we can just take the first range per message since Reader only requests
+ // a single range.
+ // See https://protobuf.dev/programming-guides/encoding/#optional
+ // TODO: support multiple ranges once integrated with MultiRangeDownloader.
+ case fieldNum == objectRangeDataField && fieldType == protowire.BytesType:
+ // The object data field was found. Initialize the data ranges assuming
+ // exactly one range in the message.
+ msg.ObjectDataRanges = []*storagepb.ObjectRangeData{{ChecksummedData: &storagepb.ChecksummedData{}, ReadRange: &storagepb.ReadRange{}}}
+ bytesFieldLen, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("consuming bytes: %v", err)
}
- off += n
-
- // Get the nested fields. We need to do this manually as it contains
- // the object content bytes.
- contentOff := 0
- for contentOff < len(fieldContent) {
- gotNum, gotTyp, n := protowire.ConsumeTag(fieldContent[contentOff:])
- if n < 0 {
- return nil, protowire.ParseError(n)
+ var contentEndOff = d.off + bytesFieldLen
+ for d.off < contentEndOff {
+ gotNum, gotTyp, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming objectRangeData tag: %w", err)
}
- contentOff += n
switch {
- case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType:
- // Get the content bytes.
- bytes, n := protowire.ConsumeBytes(fieldContent[contentOff:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %v", protowire.ParseError(n))
+ case gotNum == checksummedDataField && gotTyp == protowire.BytesType:
+ checksummedDataFieldLen, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("consuming bytes: %v", err)
}
- msg.ChecksummedData.Content = bytes
- contentOff += n
- case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type:
- v, n := protowire.ConsumeFixed32(fieldContent[contentOff:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %v", protowire.ParseError(n))
+ var checksummedDataEndOff = d.off + checksummedDataFieldLen
+ for d.off < checksummedDataEndOff {
+ gotNum, gotTyp, err := d.consumeTag()
+ if err != nil {
+ return fmt.Errorf("consuming checksummedData tag: %w", err)
+ }
+ switch {
+ case gotNum == checksummedDataContentField && gotTyp == protowire.BytesType:
+ // Get the offsets of the content bytes.
+ d.dataOffsets, err = d.consumeBytes()
+ if err != nil {
+ return fmt.Errorf("invalid BidiReadObjectResponse.ChecksummedData.Content: %w", err)
+ }
+ case gotNum == checksummedDataCRC32CField && gotTyp == protowire.Fixed32Type:
+ v, err := d.consumeFixed32()
+ if err != nil {
+ return fmt.Errorf("invalid BidiReadObjectResponse.ChecksummedData.Crc32C: %w", err)
+ }
+ msg.ObjectDataRanges[0].ChecksummedData.Crc32C = &v
+ default:
+ err := d.consumeFieldValue(gotNum, gotTyp)
+ if err != nil {
+ return fmt.Errorf("invalid field in BidiReadObjectResponse.ChecksummedData: %w", err)
+ }
+ }
}
- msg.ChecksummedData.Crc32C = &v
- contentOff += n
- default:
- n = protowire.ConsumeFieldValue(gotNum, gotTyp, fieldContent[contentOff:])
- if n < 0 {
- return nil, protowire.ParseError(n)
+ case gotNum == readRangeField && gotTyp == protowire.BytesType:
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid ObjectDataRange.ReadRange: %v", err)
}
- contentOff += n
- }
- }
- case fieldNum == objectChecksumsField && fieldType == protowire.BytesType:
- // The field was found. Initialize the struct.
- msg.ObjectChecksums = &storagepb.ObjectChecksums{}
-
- // Get the bytes corresponding to the checksums.
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", protowire.ParseError(n))
- }
- off += n
- // Unmarshal.
- if err := proto.Unmarshal(bytes, msg.ObjectChecksums); err != nil {
- return nil, err
- }
- case fieldNum == contentRangeField && fieldType == protowire.BytesType:
- msg.ContentRange = &storagepb.ContentRange{}
-
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", protowire.ParseError(n))
- }
- off += n
+ if err := proto.Unmarshal(buf, msg.ObjectDataRanges[0].ReadRange); err != nil {
+ return err
+ }
+ case gotNum == rangeEndField && gotTyp == protowire.VarintType: // proto encodes bool as int32
+ b, err := d.consumeVarint()
+ if err != nil {
+ return fmt.Errorf("invalid ObjectDataRange.RangeEnd: %w", err)
+ }
+ msg.ObjectDataRanges[0].RangeEnd = protowire.DecodeBool(b)
+ }
- if err := proto.Unmarshal(bytes, msg.ContentRange); err != nil {
- return nil, err
}
case fieldNum == metadataField && fieldType == protowire.BytesType:
msg.Metadata = &storagepb.Object{}
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid BidiReadObjectResponse.Metadata: %v", err)
+ }
- bytes, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", protowire.ParseError(n))
+ if err := proto.Unmarshal(buf, msg.Metadata); err != nil {
+ return err
+ }
+ case fieldNum == readHandleField && fieldType == protowire.BytesType:
+ msg.ReadHandle = &storagepb.BidiReadHandle{}
+ buf, err := d.consumeBytesCopy()
+ if err != nil {
+ return fmt.Errorf("invalid BidiReadObjectResponse.ReadHandle: %v", err)
}
- off += n
- if err := proto.Unmarshal(bytes, msg.Metadata); err != nil {
- return nil, err
+ if err := proto.Unmarshal(buf, msg.ReadHandle); err != nil {
+ return err
}
default:
- fieldLength = protowire.ConsumeFieldValue(fieldNum, fieldType, b[off:])
- if fieldLength < 0 {
- return nil, fmt.Errorf("default: %v", protowire.ParseError(fieldLength))
+ err := d.consumeFieldValue(fieldNum, fieldType)
+ if err != nil {
+ return fmt.Errorf("invalid field in BidiReadObjectResponse: %w", err)
}
- off += fieldLength
}
}
+ d.msg = msg
- return msg, nil
-}
-
-// readProtoBytes returns the contents of the protobuf field with number num
-// and type bytes from a wire-encoded message. If the field cannot be found,
-// the returned slice will be nil and no error will be returned.
-//
-// It does not handle field concatenation, in which the contents of a single field
-// are split across multiple protobuf tags. Encoded data containing split fields
-// of this form is technically permissable, but uncommon.
-func readProtoBytes(b []byte, num protowire.Number) ([]byte, error) {
- off := 0
- for off < len(b) {
- gotNum, gotTyp, n := protowire.ConsumeTag(b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- off += n
- if gotNum == num && gotTyp == protowire.BytesType {
- b, n := protowire.ConsumeBytes(b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- return b, nil
- }
- n = protowire.ConsumeFieldValue(gotNum, gotTyp, b[off:])
- if n < 0 {
- return nil, protowire.ParseError(n)
- }
- off += n
- }
- return nil, nil
+ return nil
}
// reopenStream "closes" the existing stream and attempts to reopen a stream and
// sets the Reader's stream and cancelStream properties in the process.
-func (r *gRPCReader) reopenStream() (*storagepb.ReadObjectResponse, error) {
+func (r *gRPCReader) reopenStream() error {
// Close existing stream and initialize new stream with updated offset.
r.Close()
res, cancel, err := r.reopen(r.seen)
if err != nil {
- return nil, err
+ return err
}
r.stream = res.stream
+ r.currMsg = res.decoder
r.cancel = cancel
- return res.response, nil
+ return nil
}
-func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader) *gRPCWriter {
- size := params.chunkSize
+func newGRPCWriter(c *grpcStorageClient, s *settings, params *openWriterParams, r io.Reader) (*gRPCWriter, error) {
+ if params.attrs.Retention != nil {
+ // TO-DO: remove once ObjectRetention is available - see b/308194853
+ return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+ }
+
+ size := googleapi.MinUploadChunkSize
+ // A completely bufferless upload (params.chunkSize <= 0) is not possible in
+ // gRPC because the buffer must be provided to the message. Use the minimum
+ // size possible.
+ if params.chunkSize > 0 {
+ size = params.chunkSize
+ }
// Round up chunksize to nearest 256KiB
if size%googleapi.MinUploadChunkSize != 0 {
size += googleapi.MinUploadChunkSize - (size % googleapi.MinUploadChunkSize)
}
- // A completely bufferless upload is not possible as it is in JSON because
- // the buffer must be provided to the message. However use the minimum size
- // possible in this case.
- if params.chunkSize == 0 {
- size = googleapi.MinUploadChunkSize
+ if s.userProject != "" {
+ params.ctx = setUserProjectMetadata(params.ctx, s.userProject)
+ }
+
+ spec := &storagepb.WriteObjectSpec{
+ Resource: params.attrs.toProtoObject(params.bucket),
+ Appendable: proto.Bool(params.append),
+ }
+ // WriteObject doesn't support the generation condition, so use default.
+ if err := applyCondsProto("WriteObject", defaultGen, params.conds, spec); err != nil {
+ return nil, err
}
return &gRPCWriter{
@@ -1930,11 +2581,15 @@ func newGRPCWriter(c *grpcStorageClient, params *openWriterParams, r io.Reader)
bucket: params.bucket,
attrs: params.attrs,
conds: params.conds,
+ spec: spec,
encryptionKey: params.encryptionKey,
+ settings: s,
+ progress: params.progress,
sendCRC32C: params.sendCRC32C,
- chunkSize: params.chunkSize,
+ forceOneShot: params.chunkSize <= 0,
forceEmptyContentType: params.forceEmptyContentType,
- }
+ append: params.append,
+ }, nil
}
// gRPCWriter is a wrapper around the the gRPC client-stream API that manages
@@ -1949,321 +2604,327 @@ type gRPCWriter struct {
bucket string
attrs *ObjectAttrs
conds *Conditions
+ spec *storagepb.WriteObjectSpec
encryptionKey []byte
settings *settings
+ progress func(int64)
sendCRC32C bool
- chunkSize int
+ forceOneShot bool
forceEmptyContentType bool
+ append bool
+
+ streamSender gRPCBidiWriteBufferSender
+}
- // The gRPC client-stream used for sending buffers.
- stream storagepb.Storage_BidiWriteObjectClient
+func bucketContext(ctx context.Context, bucket string) context.Context {
+ hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(bucket))}
+ return gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
+}
- // The Resumable Upload ID started by a gRPC-based Writer.
- upid string
+// drainInboundStream calls stream.Recv() repeatedly until an error is returned.
+// It returns the last Resource received on the stream, or nil if no Resource
+// was returned. drainInboundStream always returns a non-nil error. io.EOF
+// indicates all messages were successfully read.
+func drainInboundStream(stream storagepb.Storage_BidiWriteObjectClient) (object *storagepb.Object, err error) {
+ for err == nil {
+ var resp *storagepb.BidiWriteObjectResponse
+ resp, err = stream.Recv()
+ // GetResource() returns nil on a nil response
+ if resp.GetResource() != nil {
+ object = resp.GetResource()
+ }
+ }
+ return object, err
}
-// startResumableUpload initializes a Resumable Upload with gRPC and sets the
-// upload ID on the Writer.
-func (w *gRPCWriter) startResumableUpload() error {
- spec, err := w.writeObjectSpec()
- if err != nil {
- return err
+func bidiWriteObjectRequest(buf []byte, offset int64, flush, finishWrite bool) *storagepb.BidiWriteObjectRequest {
+ return &storagepb.BidiWriteObjectRequest{
+ Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
+ ChecksummedData: &storagepb.ChecksummedData{
+ Content: buf,
+ },
+ },
+ WriteOffset: offset,
+ FinishWrite: finishWrite,
+ Flush: flush,
+ StateLookup: flush,
+ }
+}
+
+type gRPCBidiWriteBufferSender interface {
+ // sendBuffer implementations should upload buf, respecting flush and
+ // finishWrite. Callers must guarantee that buf is not too long to fit in a
+ // gRPC message.
+ //
+ // If flush is true, implementations must not return until the data in buf is
+ // stable. If finishWrite is true, implementations must return the object on
+ // success.
+ sendBuffer(buf []byte, offset int64, flush, finishWrite bool) (*storagepb.Object, error)
+}
+
+type gRPCOneshotBidiWriteBufferSender struct {
+ ctx context.Context
+ firstMessage *storagepb.BidiWriteObjectRequest
+ raw *gapic.Client
+ stream storagepb.Storage_BidiWriteObjectClient
+ settings *settings
+}
+
+func (w *gRPCWriter) newGRPCOneshotBidiWriteBufferSender() (*gRPCOneshotBidiWriteBufferSender, error) {
+ firstMessage := &storagepb.BidiWriteObjectRequest{
+ FirstMessage: &storagepb.BidiWriteObjectRequest_WriteObjectSpec{
+ WriteObjectSpec: w.spec,
+ },
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
+ // For a non-resumable upload, checksums must be sent in this message.
+ // TODO: Currently the checksums are only sent on the first message
+ // of the stream, but in the future, we must also support sending it
+ // on the *last* message of the stream (instead of the first).
+ ObjectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
+ }
+
+ return &gRPCOneshotBidiWriteBufferSender{
+ ctx: bucketContext(w.ctx, w.bucket),
+ firstMessage: firstMessage,
+ raw: w.c.raw,
+ settings: w.settings,
+ }, nil
+}
+
+func (s *gRPCOneshotBidiWriteBufferSender) sendBuffer(buf []byte, offset int64, flush, finishWrite bool) (obj *storagepb.Object, err error) {
+ var firstMessage *storagepb.BidiWriteObjectRequest
+ if s.stream == nil {
+ s.stream, err = s.raw.BidiWriteObject(s.ctx, s.settings.gax...)
+ if err != nil {
+ return
+ }
+ firstMessage = s.firstMessage
}
+ req := bidiWriteObjectRequest(buf, offset, flush, finishWrite)
+ if firstMessage != nil {
+ proto.Merge(req, firstMessage)
+ }
+
+ sendErr := s.stream.Send(req)
+ if sendErr != nil {
+ obj, err = drainInboundStream(s.stream)
+ s.stream = nil
+ if sendErr != io.EOF {
+ err = sendErr
+ }
+ return
+ }
+ // Oneshot uploads assume all flushes succeed
+
+ if finishWrite {
+ s.stream.CloseSend()
+ // Oneshot uploads only read from the response stream on completion or
+ // failure
+ obj, err = drainInboundStream(s.stream)
+ s.stream = nil
+ if err == io.EOF {
+ err = nil
+ }
+ }
+ return
+}
+
+type gRPCResumableBidiWriteBufferSender struct {
+ ctx context.Context
+ queryRetry *retryConfig
+ upid string
+ progress func(int64)
+ raw *gapic.Client
+ forceFirstMessage bool
+ stream storagepb.Storage_BidiWriteObjectClient
+ flushOffset int64
+ settings *settings
+}
+
+func (w *gRPCWriter) newGRPCResumableBidiWriteBufferSender() (*gRPCResumableBidiWriteBufferSender, error) {
req := &storagepb.StartResumableWriteRequest{
- WriteObjectSpec: spec,
+ WriteObjectSpec: w.spec,
CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
+ // TODO: Currently the checksums are only sent on the request to initialize
+ // the upload, but in the future, we must also support sending it
+ // on the *last* message of the stream.
+ ObjectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
}
- // TODO: Currently the checksums are only sent on the request to initialize
- // the upload, but in the future, we must also support sending it
- // on the *last* message of the stream.
- req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs)
- return run(w.ctx, func(ctx context.Context) error {
- upres, err := w.c.raw.StartResumableWrite(w.ctx, req)
- w.upid = upres.GetUploadId()
+
+ ctx := bucketContext(w.ctx, w.bucket)
+ var upid string
+ err := run(ctx, func(ctx context.Context) error {
+ upres, err := w.c.raw.StartResumableWrite(ctx, req, w.settings.gax...)
+ upid = upres.GetUploadId()
return err
}, w.settings.retry, w.settings.idempotent)
+ if err != nil {
+ return nil, err
+ }
+
+ // Set up an initial connection for the 0 offset, so we don't query state
+ // unnecessarily for the first buffer. If we fail, we'll just retry in the
+ // normal connect path.
+ stream, err := w.c.raw.BidiWriteObject(ctx, w.settings.gax...)
+ if err != nil {
+ stream = nil
+ }
+
+ return &gRPCResumableBidiWriteBufferSender{
+ ctx: ctx,
+ queryRetry: w.settings.retry,
+ upid: upid,
+ progress: w.progress,
+ raw: w.c.raw,
+ forceFirstMessage: true,
+ stream: stream,
+ settings: w.settings,
+ }, nil
}
// queryProgress is a helper that queries the status of the resumable upload
// associated with the given upload ID.
-func (w *gRPCWriter) queryProgress() (int64, error) {
+func (s *gRPCResumableBidiWriteBufferSender) queryProgress() (int64, error) {
var persistedSize int64
- err := run(w.ctx, func(ctx context.Context) error {
- q, err := w.c.raw.QueryWriteStatus(w.ctx, &storagepb.QueryWriteStatusRequest{
- UploadId: w.upid,
- })
+ err := run(s.ctx, func(ctx context.Context) error {
+ q, err := s.raw.QueryWriteStatus(ctx, &storagepb.QueryWriteStatusRequest{
+ UploadId: s.upid,
+ }, s.settings.gax...)
+ // q.GetPersistedSize() will return 0 if q is nil.
persistedSize = q.GetPersistedSize()
return err
- }, w.settings.retry, true)
+ }, s.queryRetry, true)
- // q.GetCommittedSize() will return 0 if q is nil.
return persistedSize, err
}
-// uploadBuffer uploads the buffer at the given offset using a bi-directional
-// Write stream. It will open a new stream if necessary (on the first call or
-// after resuming from failure). The resulting write offset after uploading the
-// buffer is returned, as well as well as the final Object if the upload is
-// completed.
-//
-// Returns object, persisted size, and any error that is not retriable.
-func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*storagepb.Object, int64, error) {
- var shouldRetry = ShouldRetry
- if w.settings.retry != nil && w.settings.retry.shouldRetry != nil {
- shouldRetry = w.settings.retry.shouldRetry
- }
-
- var err error
- var lastWriteOfEntireObject bool
-
- sent := 0
- writeOffset := start
-
- toWrite := w.buf[:recvd]
-
- // Send a request with as many bytes as possible.
- // Loop until all bytes are sent.
-sendBytes: // label this loop so that we can use a continue statement from a nested block
- for {
- bytesNotYetSent := recvd - sent
- remainingDataFitsInSingleReq := bytesNotYetSent <= maxPerMessageWriteSize
-
- if remainingDataFitsInSingleReq && doneReading {
- lastWriteOfEntireObject = true
+func (s *gRPCResumableBidiWriteBufferSender) sendBuffer(buf []byte, offset int64, flush, finishWrite bool) (obj *storagepb.Object, err error) {
+ reconnected := false
+ if s.stream == nil {
+ // Determine offset and reconnect
+ s.flushOffset, err = s.queryProgress()
+ if err != nil {
+ return
}
-
- // Send the maximum amount of bytes we can, unless we don't have that many.
- bytesToSendInCurrReq := maxPerMessageWriteSize
- if remainingDataFitsInSingleReq {
- bytesToSendInCurrReq = bytesNotYetSent
+ s.stream, err = s.raw.BidiWriteObject(s.ctx, s.settings.gax...)
+ if err != nil {
+ return
}
+ reconnected = true
+ }
- // Prepare chunk section for upload.
- data := toWrite[sent : sent+bytesToSendInCurrReq]
-
- req := &storagepb.BidiWriteObjectRequest{
- Data: &storagepb.BidiWriteObjectRequest_ChecksummedData{
- ChecksummedData: &storagepb.ChecksummedData{
- Content: data,
- },
- },
- WriteOffset: writeOffset,
- FinishWrite: lastWriteOfEntireObject,
- Flush: remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
- StateLookup: remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
+ // clean up buf. We'll still write the message if a flush/finishWrite was
+ // requested.
+ if offset < s.flushOffset {
+ trim := s.flushOffset - offset
+ if int64(len(buf)) <= trim {
+ trim = int64(len(buf))
}
+ buf = buf[trim:]
+ }
+ if len(buf) == 0 && !flush && !finishWrite {
+ // no need to send anything
+ return nil, nil
+ }
- // Open a new stream if necessary and set the first_message field on
- // the request. The first message on the WriteObject stream must either
- // be the Object or the Resumable Upload ID.
- if w.stream == nil {
- hds := []string{"x-goog-request-params", fmt.Sprintf("bucket=projects/_/buckets/%s", url.QueryEscape(w.bucket))}
- ctx := gax.InsertMetadataIntoOutgoingContext(w.ctx, hds...)
-
- w.stream, err = w.c.raw.BidiWriteObject(ctx)
- if err != nil {
- return nil, 0, err
- }
+ req := bidiWriteObjectRequest(buf, offset, flush, finishWrite)
+ if s.forceFirstMessage || reconnected {
+ req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: s.upid}
+ s.forceFirstMessage = false
+ }
- if w.upid != "" { // resumable upload
- req.FirstMessage = &storagepb.BidiWriteObjectRequest_UploadId{UploadId: w.upid}
- } else { // non-resumable
- spec, err := w.writeObjectSpec()
- if err != nil {
- return nil, 0, err
- }
- req.FirstMessage = &storagepb.BidiWriteObjectRequest_WriteObjectSpec{
- WriteObjectSpec: spec,
- }
- req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(w.encryptionKey)
- // For a non-resumable upload, checksums must be sent in this message.
- // TODO: Currently the checksums are only sent on the first message
- // of the stream, but in the future, we must also support sending it
- // on the *last* message of the stream (instead of the first).
- req.ObjectChecksums = toProtoChecksums(w.sendCRC32C, w.attrs)
- }
+ sendErr := s.stream.Send(req)
+ if sendErr != nil {
+ obj, err = drainInboundStream(s.stream)
+ s.stream = nil
+ if err == io.EOF {
+ // This is unexpected - we got an error on Send(), but not on Recv().
+ // Bubble up the sendErr.
+ err = sendErr
}
+ return
+ }
- err = w.stream.Send(req)
+ if finishWrite {
+ s.stream.CloseSend()
+ obj, err = drainInboundStream(s.stream)
+ s.stream = nil
if err == io.EOF {
- // err was io.EOF. The client-side of a stream only gets an EOF on Send
- // when the backend closes the stream and wants to return an error
- // status.
-
- // Receive from the stream Recv() until it returns a non-nil error
- // to receive the server's status as an error. We may get multiple
- // messages before the error due to buffering.
err = nil
- for err == nil {
- _, err = w.stream.Recv()
- }
- // Drop the stream reference as a new one will need to be created if
- // we retry.
- w.stream = nil
-
- // Retriable errors mean we should start over and attempt to
- // resend the entire buffer via a new stream.
- // If not retriable, falling through will return the error received.
- if shouldRetry(err) {
- // TODO: Add test case for failure modes of querying progress.
- writeOffset, err = w.determineOffset(start)
- if err != nil {
- return nil, 0, err
- }
- sent = int(writeOffset) - int(start)
-
- // Continue sending requests, opening a new stream and resending
- // any bytes not yet persisted as per QueryWriteStatus
- continue sendBytes
+ if obj.GetSize() > s.flushOffset {
+ s.progress(obj.GetSize())
}
}
- if err != nil {
- return nil, 0, err
- }
-
- // Update the immediate stream's sent total and the upload offset with
- // the data sent.
- sent += len(data)
- writeOffset += int64(len(data))
-
- // Not done sending data, do not attempt to commit it yet, loop around
- // and send more data.
- if recvd-sent > 0 {
- continue sendBytes
- }
+ return
+ }
- // The buffer has been uploaded and there is still more data to be
- // uploaded, but this is not a resumable upload session. Therefore,
- // don't check persisted data.
- if !lastWriteOfEntireObject && w.chunkSize == 0 {
- return nil, writeOffset, nil
+ if flush {
+ resp, err := s.stream.Recv()
+ if err != nil {
+ return nil, err
}
-
- // Done sending the data in the buffer (remainingDataFitsInSingleReq
- // should == true if we reach this code).
- // If we are done sending the whole object, close the stream and get the final
- // object. Otherwise, receive from the stream to confirm the persisted data.
- if !lastWriteOfEntireObject {
- resp, err := w.stream.Recv()
-
- // Retriable errors mean we should start over and attempt to
- // resend the entire buffer via a new stream.
- // If not retriable, falling through will return the error received
- // from closing the stream.
- if shouldRetry(err) {
- writeOffset, err = w.determineOffset(start)
- if err != nil {
- return nil, 0, err
- }
- sent = int(writeOffset) - int(start)
-
- // Drop the stream reference as a new one will need to be created.
- w.stream = nil
-
- continue sendBytes
- }
- if err != nil {
- return nil, 0, err
- }
-
- if resp.GetPersistedSize() != writeOffset {
- // Retry if not all bytes were persisted.
- writeOffset = resp.GetPersistedSize()
- sent = int(writeOffset) - int(start)
- continue sendBytes
- }
- } else {
- // If the object is done uploading, close the send stream to signal
- // to the server that we are done sending so that we can receive
- // from the stream without blocking.
- err = w.stream.CloseSend()
- if err != nil {
- // CloseSend() retries the send internally. It never returns an
- // error in the current implementation, but we check it anyway in
- // case that it does in the future.
- return nil, 0, err
- }
-
- // Stream receives do not block once send is closed, but we may not
- // receive the response with the object right away; loop until we
- // receive the object or error out.
- var obj *storagepb.Object
- for obj == nil {
- resp, err := w.stream.Recv()
- if shouldRetry(err) {
- writeOffset, err = w.determineOffset(start)
- if err != nil {
- return nil, 0, err
- }
- sent = int(writeOffset) - int(start)
- w.stream = nil
- continue sendBytes
- }
- if err != nil {
- return nil, 0, err
- }
-
- obj = resp.GetResource()
- }
-
- // Even though we received the object response, continue reading
- // until we receive a non-nil error, to ensure the stream does not
- // leak even if the context isn't cancelled. See:
- // https://pkg.go.dev/google.golang.org/grpc#ClientConn.NewStream
- for err == nil {
- _, err = w.stream.Recv()
- }
-
- return obj, writeOffset, nil
+ persistedOffset := resp.GetPersistedSize()
+ if persistedOffset > s.flushOffset {
+ s.flushOffset = persistedOffset
+ s.progress(s.flushOffset)
}
-
- return nil, writeOffset, nil
}
+ return
}
-// determineOffset either returns the offset given to it in the case of a simple
-// upload, or queries the write status in the case a resumable upload is being
-// used.
-func (w *gRPCWriter) determineOffset(offset int64) (int64, error) {
- // For a Resumable Upload, we must start from however much data
- // was committed.
- if w.upid != "" {
- committed, err := w.queryProgress()
+// uploadBuffer uploads the buffer at the given offset using a bi-directional
+// Write stream. It will open a new stream if necessary (on the first call or
+// after resuming from failure) and chunk the buffer per maxPerMessageWriteSize.
+// The final Object is returned on success if doneReading is true.
+//
+// Returns object and any error that is not retriable.
+func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (obj *storagepb.Object, err error) {
+ if w.streamSender == nil {
+ if w.append {
+ // Appendable object semantics
+ w.streamSender, err = w.newGRPCAppendBidiWriteBufferSender()
+ } else if doneReading || w.forceOneShot {
+ // One shot semantics
+ w.streamSender, err = w.newGRPCOneshotBidiWriteBufferSender()
+ } else {
+ // Resumable write semantics
+ w.streamSender, err = w.newGRPCResumableBidiWriteBufferSender()
+ }
if err != nil {
- return 0, err
+ return
}
- offset = committed
}
- return offset, nil
-}
-
-// writeObjectSpec constructs a WriteObjectSpec proto using the Writer's
-// ObjectAttrs and applies its Conditions. This is only used for gRPC.
-func (w *gRPCWriter) writeObjectSpec() (*storagepb.WriteObjectSpec, error) {
- // To avoid modifying the ObjectAttrs embeded in the calling writer, deref
- // the ObjectAttrs pointer to make a copy, then assign the desired name to
- // the attribute.
- attrs := *w.attrs
- spec := &storagepb.WriteObjectSpec{
- Resource: attrs.toProtoObject(w.bucket),
- }
- // WriteObject doesn't support the generation condition, so use default.
- if err := applyCondsProto("WriteObject", defaultGen, w.conds, spec); err != nil {
- return nil, err
+ data := w.buf[:recvd]
+ offset := start
+ // We want to go through this loop at least once, in case we have to
+ // finishWrite with an empty buffer.
+ for {
+ // Send as much as we can fit into a single gRPC message. Only flush once,
+ // when sending the very last message.
+ l := maxPerMessageWriteSize
+ flush := false
+ if len(data) <= l {
+ l = len(data)
+ flush = true
+ }
+ obj, err = w.streamSender.sendBuffer(data[:l], offset, flush, flush && doneReading)
+ if err != nil {
+ return nil, err
+ }
+ data = data[l:]
+ offset += int64(l)
+ if len(data) == 0 {
+ break
+ }
}
- return spec, nil
+ return
}
// read copies the data in the reader to the given buffer and reports how much
// data was read into the buffer and if there is no more data to read (EOF).
-// Furthermore, if the attrs.ContentType is unset, the first bytes of content
-// will be sniffed for a matching content type unless forceEmptyContentType is enabled.
func (w *gRPCWriter) read() (int, bool, error) {
- if w.attrs.ContentType == "" && !w.forceEmptyContentType {
- w.reader, w.attrs.ContentType = gax.DetermineContentType(w.reader)
- }
// Set n to -1 to start the Read loop.
var n, recvd int = -1, 0
var err error
diff --git a/vendor/cloud.google.com/go/storage/grpc_dp.go b/vendor/cloud.google.com/go/storage/grpc_dp.go
new file mode 100644
index 000000000..d34227334
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_dp.go
@@ -0,0 +1,22 @@
+//go:build !disable_grpc_modules
+
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ _ "google.golang.org/grpc/balancer/rls"
+ _ "google.golang.org/grpc/xds/googledirectpath"
+)
diff --git a/vendor/cloud.google.com/go/storage/grpc_metrics.go b/vendor/cloud.google.com/go/storage/grpc_metrics.go
new file mode 100644
index 000000000..f7bebd1de
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_metrics.go
@@ -0,0 +1,283 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ mexporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric"
+ "github.com/google/uuid"
+ "go.opentelemetry.io/contrib/detectors/gcp"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats/opentelemetry"
+)
+
+const (
+ monitoredResourceName = "storage.googleapis.com/Client"
+ metricPrefix = "storage.googleapis.com/client/"
+)
+
+// Added to help with tests
+type storageMonitoredResource struct {
+ project string
+ api string
+ location string
+ instance string
+ cloudPlatform string
+ host string
+ resource *resource.Resource
+}
+
+func (smr *storageMonitoredResource) exporter() (metric.Exporter, error) {
+ exporter, err := mexporter.New(
+ mexporter.WithProjectID(smr.project),
+ mexporter.WithMetricDescriptorTypeFormatter(metricFormatter),
+ mexporter.WithCreateServiceTimeSeries(),
+ mexporter.WithMonitoredResourceDescription(monitoredResourceName, []string{"project_id", "location", "cloud_platform", "host_id", "instance_id", "api"}),
+ )
+ if err != nil {
+ return nil, fmt.Errorf("storage: creating metrics exporter: %w", err)
+ }
+ return exporter, nil
+}
+
+func newStorageMonitoredResource(ctx context.Context, project, api string, opts ...resource.Option) (*storageMonitoredResource, error) {
+ detectedAttrs, err := resource.New(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+ smr := &storageMonitoredResource{
+ instance: uuid.New().String(),
+ api: api,
+ project: project,
+ }
+ s := detectedAttrs.Set()
+ // Attempt to use resource detector project id if project id wasn't
+ // identified using ADC as a last resort. Otherwise metrics cannot be started.
+ if p, present := s.Value("cloud.account.id"); present && smr.project == "" {
+ smr.project = p.AsString()
+ } else if !present && smr.project == "" {
+ return nil, errors.New("google cloud project is required to start client-side metrics")
+ }
+ if v, ok := s.Value("cloud.region"); ok {
+ smr.location = v.AsString()
+ } else {
+ smr.location = "global"
+ }
+ if v, ok := s.Value("cloud.platform"); ok {
+ smr.cloudPlatform = v.AsString()
+ } else {
+ smr.cloudPlatform = "unknown"
+ }
+ if v, ok := s.Value("host.id"); ok {
+ smr.host = v.AsString()
+ } else if v, ok := s.Value("faas.id"); ok {
+ smr.host = v.AsString()
+ } else {
+ smr.host = "unknown"
+ }
+ smr.resource, err = resource.New(ctx, resource.WithAttributes([]attribute.KeyValue{
+ {Key: "gcp.resource_type", Value: attribute.StringValue(monitoredResourceName)},
+ {Key: "project_id", Value: attribute.StringValue(smr.project)},
+ {Key: "api", Value: attribute.StringValue(smr.api)},
+ {Key: "instance_id", Value: attribute.StringValue(smr.instance)},
+ {Key: "location", Value: attribute.StringValue(smr.location)},
+ {Key: "cloud_platform", Value: attribute.StringValue(smr.cloudPlatform)},
+ {Key: "host_id", Value: attribute.StringValue(smr.host)},
+ }...))
+ if err != nil {
+ return nil, err
+ }
+ return smr, nil
+}
+
+type metricsContext struct {
+ // client options passed to gRPC channels
+ clientOpts []option.ClientOption
+ // instance of metric reader used by gRPC client-side metrics
+ provider *metric.MeterProvider
+ // clean func to call when closing gRPC client
+ close func()
+}
+
+type metricsConfig struct {
+ project string
+ interval time.Duration
+ customExporter *metric.Exporter
+ manualReader *metric.ManualReader // used by tests
+ disableExporter bool // used by tests disables exports
+ resourceOpts []resource.Option // used by tests
+}
+
+func newGRPCMetricContext(ctx context.Context, cfg metricsConfig) (*metricsContext, error) {
+ var exporter metric.Exporter
+ meterOpts := []metric.Option{}
+ if cfg.customExporter == nil {
+ var ropts []resource.Option
+ if cfg.resourceOpts != nil {
+ ropts = cfg.resourceOpts
+ } else {
+ ropts = []resource.Option{resource.WithDetectors(gcp.NewDetector())}
+ }
+ smr, err := newStorageMonitoredResource(ctx, cfg.project, "grpc", ropts...)
+ if err != nil {
+ return nil, err
+ }
+ exporter, err = smr.exporter()
+ if err != nil {
+ return nil, err
+ }
+ meterOpts = append(meterOpts, metric.WithResource(smr.resource))
+ } else {
+ exporter = *cfg.customExporter
+ }
+ interval := time.Minute
+ if cfg.interval > 0 {
+ interval = cfg.interval
+ }
+ meterOpts = append(meterOpts,
+ // Metric views update histogram boundaries to be relevant to GCS
+ // otherwise default OTel histogram boundaries are used.
+ metric.WithView(
+ createHistogramView("grpc.client.attempt.duration", latencyHistogramBoundaries()),
+ createHistogramView("grpc.client.attempt.rcvd_total_compressed_message_size", sizeHistogramBoundaries()),
+ createHistogramView("grpc.client.attempt.sent_total_compressed_message_size", sizeHistogramBoundaries())),
+ )
+ if cfg.manualReader != nil {
+ meterOpts = append(meterOpts, metric.WithReader(cfg.manualReader))
+ }
+ if !cfg.disableExporter {
+ meterOpts = append(meterOpts, metric.WithReader(
+ metric.NewPeriodicReader(&exporterLogSuppressor{Exporter: exporter}, metric.WithInterval(interval))))
+ }
+ provider := metric.NewMeterProvider(meterOpts...)
+ mo := opentelemetry.MetricsOptions{
+ MeterProvider: provider,
+ Metrics: stats.NewMetrics(
+ "grpc.client.attempt.started",
+ "grpc.client.attempt.duration",
+ "grpc.client.attempt.sent_total_compressed_message_size",
+ "grpc.client.attempt.rcvd_total_compressed_message_size",
+ "grpc.client.call.duration",
+ "grpc.lb.wrr.rr_fallback",
+ "grpc.lb.wrr.endpoint_weight_not_yet_usable",
+ "grpc.lb.wrr.endpoint_weight_stale",
+ "grpc.lb.wrr.endpoint_weights",
+ "grpc.lb.rls.cache_entries",
+ "grpc.lb.rls.cache_size",
+ "grpc.lb.rls.default_target_picks",
+ "grpc.lb.rls.target_picks",
+ "grpc.lb.rls.failed_picks",
+ ),
+ OptionalLabels: []string{"grpc.lb.locality"},
+ }
+ opts := []option.ClientOption{
+ option.WithGRPCDialOption(
+ opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})),
+ option.WithGRPCDialOption(
+ grpc.WithDefaultCallOptions(grpc.StaticMethodCallOption{})),
+ }
+ return &metricsContext{
+ clientOpts: opts,
+ provider: provider,
+ close: func() {
+ provider.Shutdown(ctx)
+ },
+ }, nil
+}
+
+// Silences permission errors after initial error is emitted to prevent
+// chatty logs.
+type exporterLogSuppressor struct {
+ metric.Exporter
+ emittedFailure bool
+}
+
+// Implements OTel SDK metric.Exporter interface to prevent noisy logs from
+// lack of credentials after initial failure.
+// https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric@v1.28.0#Exporter
+func (e *exporterLogSuppressor) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ if err := e.Exporter.Export(ctx, rm); err != nil && !e.emittedFailure {
+ if strings.Contains(err.Error(), "PermissionDenied") {
+ e.emittedFailure = true
+ return fmt.Errorf("gRPC metrics failed due permission issue: %w", err)
+ }
+ return err
+ }
+ return nil
+}
+
+func latencyHistogramBoundaries() []float64 {
+ boundaries := []float64{}
+ boundary := 0.0
+ increment := 0.002
+ // 2ms buckets for first 100ms, so we can have higher resolution for uploads and downloads in the 100 KiB range
+ for i := 0; i < 50; i++ {
+ boundaries = append(boundaries, boundary)
+ // increment by 2ms
+ boundary += increment
+ }
+ // For the remaining buckets do 10 10ms, 10 20ms, and so on, up until 5 minutes
+ for i := 0; i < 150 && boundary < 300; i++ {
+ boundaries = append(boundaries, boundary)
+ if i != 0 && i%10 == 0 {
+ increment *= 2
+ }
+ boundary += increment
+ }
+ return boundaries
+}
+
+func sizeHistogramBoundaries() []float64 {
+ kb := 1024.0
+ mb := 1024.0 * kb
+ gb := 1024.0 * mb
+ boundaries := []float64{}
+ boundary := 0.0
+ increment := 128 * kb
+ // 128 KiB increments up to 4MiB, then exponential growth
+ for len(boundaries) < 200 && boundary <= 16*gb {
+ boundaries = append(boundaries, boundary)
+ boundary += increment
+ if boundary >= 4*mb {
+ increment *= 2
+ }
+ }
+ return boundaries
+}
+
+func createHistogramView(name string, boundaries []float64) metric.View {
+ return metric.NewView(metric.Instrument{
+ Name: name,
+ Kind: metric.InstrumentKindHistogram,
+ }, metric.Stream{
+ Name: name,
+ Aggregation: metric.AggregationExplicitBucketHistogram{Boundaries: boundaries},
+ })
+}
+
+func metricFormatter(m metricdata.Metrics) string {
+ return metricPrefix + strings.ReplaceAll(string(m.Name), ".", "/")
+}
diff --git a/vendor/cloud.google.com/go/storage/grpc_reader.go b/vendor/cloud.google.com/go/storage/grpc_reader.go
new file mode 100644
index 000000000..e1dd39781
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_reader.go
@@ -0,0 +1,870 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+
+ "cloud.google.com/go/internal/trace"
+ "cloud.google.com/go/storage/internal/apiv2/storagepb"
+ "github.com/googleapis/gax-go/v2"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+)
+
+// Below is the legacy implementation of gRPC downloads using the ReadObject API.
+// It's used by gRPC if the experimental option WithGRPCBidiReads was not passed.
+// TODO: once BidiReadObject is in GA, remove this implementation.
+
// bytesCodecReadObject is a custom gRPC codec used for unmarshaling
// ReadObjectResponse messages. It hands raw receive buffers to a custom
// decoder so that object data is not copied by proto.Unmarshal.
type bytesCodecReadObject struct {
}

// Compile-time check that bytesCodecReadObject implements gRPC's CodecV2.
var _ encoding.CodecV2 = bytesCodecReadObject{}
+
+// Marshal is used to encode messages to send for bytesCodecReadObject. Since we are only
+// using this to send ReadObjectRequest messages we don't need to recycle buffers
+// here.
+func (bytesCodecReadObject) Marshal(v any) (mem.BufferSlice, error) {
+ vv, ok := v.(proto.Message)
+ if !ok {
+ return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
+ }
+ var data mem.BufferSlice
+ buf, err := proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ data = append(data, mem.SliceBuffer(buf))
+ return data, nil
+}
+
+// Unmarshal is used for data received for ReadObjectResponse. We want to preserve
+// the mem.BufferSlice in most cases rather than copying and calling proto.Unmarshal.
+func (bytesCodecReadObject) Unmarshal(data mem.BufferSlice, v any) error {
+ switch v := v.(type) {
+ case *mem.BufferSlice:
+ *v = data
+ // Pick up a reference to the data so that it is not freed while decoding.
+ data.Ref()
+ return nil
+ case proto.Message:
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ return proto.Unmarshal(buf.ReadOnlyData(), v)
+ default:
+ return fmt.Errorf("cannot unmarshal type %T, want proto.Message or mem.BufferSlice", v)
+ }
+}
+
// Name returns the empty string; this codec is never registered by name and
// is only installed explicitly via grpc.ForceCodecV2 (see
// NewRangeReaderReadObject).
func (bytesCodecReadObject) Name() string {
	return ""
}
+
// NewRangeReaderReadObject opens a gRPC ReadObject stream for the requested
// bucket/object (optionally restricted to a byte range) and returns a Reader
// whose attributes are populated from the first response message. The
// returned reader transparently reopens the stream from the last seen offset
// on retryable errors.
func (c *grpcStorageClient) NewRangeReaderReadObject(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.grpcStorageClient.NewRangeReaderReadObject")
	defer func() { trace.EndSpan(ctx, err) }()

	s := callSettings(c.settings, opts...)

	// Install the custom codec so RecvMsg can hand back raw buffers instead of
	// fully-unmarshaled messages.
	s.gax = append(s.gax, gax.WithGRPCOptions(
		grpc.ForceCodecV2(bytesCodecReadObject{}),
	))

	if s.userProject != "" {
		ctx = setUserProjectMetadata(ctx, s.userProject)
	}

	b := bucketResourceName(globalProjectAlias, params.bucket)
	req := &storagepb.ReadObjectRequest{
		Bucket: b,
		Object: params.object,
		CommonObjectRequestParams: toProtoCommonObjectRequestParams(params.encryptionKey),
	}
	// The default is a negative value, which means latest.
	if params.gen >= 0 {
		req.Generation = params.gen
	}

	// Define a function that initiates a Read with offset and length, assuming
	// we have already read seen bytes.
	reopen := func(seen int64) (*readStreamResponseReadObject, context.CancelFunc, error) {
		// If the context has already expired, return immediately without making
		// the call.
		if err := ctx.Err(); err != nil {
			return nil, nil, err
		}

		cc, cancel := context.WithCancel(ctx)

		req.ReadOffset = params.offset + seen

		// Only set a ReadLimit if length is greater than zero, because <= 0 means
		// to read it all.
		if params.length > 0 {
			req.ReadLimit = params.length - seen
		}

		if err := applyCondsProto("gRPCReadObjectReader.reopen", params.gen, params.conds, req); err != nil {
			cancel()
			return nil, nil, err
		}

		var stream storagepb.Storage_ReadObjectClient
		var err error
		var decoder *readObjectResponseDecoder

		err = run(cc, func(ctx context.Context) error {
			stream, err = c.raw.ReadObject(ctx, req, s.gax...)
			if err != nil {
				return err
			}

			// Receive the message into databuf as a wire-encoded message so we can
			// use a custom decoder to avoid an extra copy at the protobuf layer.
			databufs := mem.BufferSlice{}
			err := stream.RecvMsg(&databufs)
			// These types of errors show up on the Recv call, rather than the
			// initialization of the stream via ReadObject above.
			if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
				return ErrObjectNotExist
			}
			if err != nil {
				return err
			}
			// Use a custom decoder that uses protobuf unmarshalling for all
			// fields except the object data. Object data is handled separately
			// to avoid a copy.
			decoder = &readObjectResponseDecoder{
				databufs: databufs,
			}
			err = decoder.readFullObjectResponse()
			return err
		}, s.retry, s.idempotent)
		if err != nil {
			// Close the stream context we just created to ensure we don't leak
			// resources.
			cancel()
			// Free any buffers.
			if decoder != nil && decoder.databufs != nil {
				decoder.databufs.Free()
			}
			return nil, nil, err
		}

		return &readStreamResponseReadObject{stream, decoder}, cancel, nil
	}

	res, cancel, err := reopen(0)
	if err != nil {
		return nil, err
	}

	// The first message was Recv'd on stream open, use it to populate the
	// object metadata.
	msg := res.decoder.msg
	obj := msg.GetMetadata()
	// This is the size of the entire object, even if only a range was requested.
	size := obj.GetSize()

	// Only support checksums when reading an entire object, not a range.
	var (
		wantCRC uint32
		checkCRC bool
	)
	if checksums := msg.GetObjectChecksums(); checksums != nil && checksums.Crc32C != nil {
		if params.offset == 0 && params.length < 0 {
			checkCRC = true
		}
		wantCRC = checksums.GetCrc32C()
	}

	metadata := obj.GetMetadata()
	r = &Reader{
		Attrs: ReaderObjectAttrs{
			Size: size,
			ContentType: obj.GetContentType(),
			ContentEncoding: obj.GetContentEncoding(),
			CacheControl: obj.GetCacheControl(),
			LastModified: obj.GetUpdateTime().AsTime(),
			Metageneration: obj.GetMetageneration(),
			Generation: obj.GetGeneration(),
			CRC32C: wantCRC,
		},
		objectMetadata: &metadata,
		reader: &gRPCReadObjectReader{
			stream: res.stream,
			reopen: reopen,
			cancel: cancel,
			size: size,
			// Preserve the decoder to read out object data when Read/WriteTo is called.
			currMsg: res.decoder,
			settings: s,
			zeroRange: params.length == 0,
			wantCRC: wantCRC,
			checkCRC: checkCRC,
		},
		checkCRC: checkCRC,
	}

	cr := msg.GetContentRange()
	if cr != nil {
		r.Attrs.StartOffset = cr.GetStart()
		r.remain = cr.GetEnd() - cr.GetStart()
	} else {
		r.remain = size
	}

	// For a zero-length request, explicitly close the stream and set remaining
	// bytes to zero.
	if params.length == 0 {
		r.remain = 0
		r.reader.Close()
	}

	return r, nil
}
+
// readStreamResponseReadObject pairs an open ReadObject stream with the
// decoder holding the first, already-received response message.
type readStreamResponseReadObject struct {
	stream storagepb.Storage_ReadObjectClient
	decoder *readObjectResponseDecoder
}
+
// gRPCReadObjectReader streams object data from a ReadObject stream. It
// reopens the stream from the last seen offset on retryable errors and,
// when checkCRC is set, verifies the whole-object CRC32C at EOF.
type gRPCReadObjectReader struct {
	seen, size int64 // bytes handed to the caller so far, and total object size
	zeroRange bool // true when the caller requested a zero-length range
	stream storagepb.Storage_ReadObjectClient
	reopen func(seen int64) (*readStreamResponseReadObject, context.CancelFunc, error)
	leftovers []byte
	currMsg *readObjectResponseDecoder // decoder for the current message
	cancel context.CancelFunc
	settings *settings
	checkCRC bool // should we check the CRC?
	wantCRC uint32 // the CRC32c value the server sent in the header
	gotCRC uint32 // running crc
}
+
+// Update the running CRC with the data in the slice, if CRC checking was enabled.
+func (r *gRPCReadObjectReader) updateCRC(b []byte) {
+ if r.checkCRC {
+ r.gotCRC = crc32.Update(r.gotCRC, crc32cTable, b)
+ }
+}
+
+// Checks whether the CRC matches at the conclusion of a read, if CRC checking was enabled.
+func (r *gRPCReadObjectReader) runCRCCheck() error {
+ if r.checkCRC && r.gotCRC != r.wantCRC {
+ return fmt.Errorf("storage: bad CRC on read: got %d, want %d", r.gotCRC, r.wantCRC)
+ }
+ return nil
+}
+
// Read reads bytes into the user's buffer from an open gRPC stream,
// implementing io.Reader. It drains the currently-buffered message before
// receiving the next one, and returns io.EOF (after an optional CRC check)
// once all object bytes have been delivered.
func (r *gRPCReadObjectReader) Read(p []byte) (int, error) {
	// The entire object has been read by this reader, check the checksum if
	// necessary and return EOF.
	if r.size == r.seen || r.zeroRange {
		if err := r.runCRCCheck(); err != nil {
			return 0, err
		}
		return 0, io.EOF
	}

	// No stream to read from, either never initialized or Close was called.
	// Note: There is a potential concurrency issue if multiple routines are
	// using the same reader. One encounters an error and the stream is closed
	// and then reopened while the other routine attempts to read from it.
	if r.stream == nil {
		return 0, fmt.Errorf("storage: reader has been closed")
	}

	var n int

	// If there is data remaining in the current message, return what was
	// available to conform to the Reader
	// interface: https://pkg.go.dev/io#Reader.
	if !r.currMsg.done {
		n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
			r.updateCRC(b)
		})
		r.seen += int64(n)
		return n, nil
	}

	// Attempt to Recv the next message on the stream.
	// This will update r.currMsg with the decoder for the new message.
	err := r.recv()
	if err != nil {
		return 0, err
	}

	// TODO: Determine if we need to capture incremental CRC32C for this
	// chunk. The Object CRC32C checksum is captured when directed to read
	// the entire Object. If directed to read a range, we may need to
	// calculate the range's checksum for verification if the checksum is
	// present in the response here.
	// TODO: Figure out if we need to support decompressive transcoding
	// https://cloud.google.com/storage/docs/transcoding.

	n = r.currMsg.readAndUpdateCRC(p, func(b []byte) {
		r.updateCRC(b)
	})
	r.seen += int64(n)
	return n, nil
}
+
// WriteTo writes all the data requested by the Reader into w, implementing
// io.WriterTo. It first flushes any already-buffered message, then loops
// receiving messages until the stream reports io.EOF, at which point the CRC
// is checked (if enabled). Returns the number of bytes written during this
// call (not including bytes delivered by earlier Read calls).
func (r *gRPCReadObjectReader) WriteTo(w io.Writer) (int64, error) {
	// The entire object has been read by this reader, check the checksum if
	// necessary and return nil.
	if r.size == r.seen || r.zeroRange {
		if err := r.runCRCCheck(); err != nil {
			return 0, err
		}
		return 0, nil
	}

	// No stream to read from, either never initialized or Close was called.
	// Note: There is a potential concurrency issue if multiple routines are
	// using the same reader. One encounters an error and the stream is closed
	// and then reopened while the other routine attempts to read from it.
	if r.stream == nil {
		return 0, fmt.Errorf("storage: reader has been closed")
	}

	// Track bytes seen before this call so we can report only this call's count.
	var alreadySeen = r.seen

	// Write any already received message to the stream. There will be some leftovers from the
	// original NewRangeReaderReadObject call.
	if r.currMsg != nil && !r.currMsg.done {
		written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
			r.updateCRC(b)
		})
		r.seen += int64(written)
		r.currMsg = nil
		if err != nil {
			return r.seen - alreadySeen, err
		}
	}

	// Loop and receive additional messages until the entire data is written.
	for {
		// Attempt to receive the next message on the stream.
		// Will terminate with io.EOF once data has all come through.
		// recv() handles stream reopening and retry logic so no need for retries here.
		err := r.recv()
		if err != nil {
			if err == io.EOF {
				// We are done; check the checksum if necessary and return.
				err = r.runCRCCheck()
			}
			return r.seen - alreadySeen, err
		}

		// TODO: Determine if we need to capture incremental CRC32C for this
		// chunk. The Object CRC32C checksum is captured when directed to read
		// the entire Object. If directed to read a range, we may need to
		// calculate the range's checksum for verification if the checksum is
		// present in the response here.
		// TODO: Figure out if we need to support decompressive transcoding
		// https://cloud.google.com/storage/docs/transcoding.
		written, err := r.currMsg.writeToAndUpdateCRC(w, func(b []byte) {
			r.updateCRC(b)
		})
		r.seen += int64(written)
		if err != nil {
			return r.seen - alreadySeen, err
		}
	}

}
+
+// Close cancels the read stream's context in order for it to be closed and
+// collected, and frees any currently in use buffers.
+func (r *gRPCReadObjectReader) Close() error {
+ if r.cancel != nil {
+ r.cancel()
+ }
+ r.stream = nil
+ r.currMsg = nil
+ return nil
+}
+
// recv attempts to Recv the next message on the stream and extract the object
// data that it contains. In the event that a retryable error is encountered,
// the stream will be closed, reopened, and RecvMsg again.
// This will attempt to Recv until one of the following is true:
//
// * Recv is successful
// * A non-retryable error is encountered
// * The Reader's context is canceled
//
// The last error received is the one that is returned, which could be from
// an attempt to reopen the stream.
func (r *gRPCReadObjectReader) recv() error {
	databufs := mem.BufferSlice{}
	err := r.stream.RecvMsg(&databufs)

	// Use the caller-configured retry predicate if one was supplied, falling
	// back to the package default.
	var shouldRetry = ShouldRetry
	if r.settings.retry != nil && r.settings.retry.shouldRetry != nil {
		shouldRetry = r.settings.retry.shouldRetry
	}
	if err != nil && shouldRetry(err) {
		// This will "close" the existing stream and immediately attempt to
		// reopen the stream, but will backoff if further attempts are necessary.
		// Reopening the stream Recvs the first message, so if retrying is
		// successful, r.currMsg will be updated to include the new data.
		return r.reopenStream()
	}

	if err != nil {
		return err
	}

	// Success: wrap the raw buffers in a fresh decoder and parse the
	// non-data fields of the response.
	r.currMsg = &readObjectResponseDecoder{databufs: databufs}
	return r.currMsg.readFullObjectResponse()
}
+
// ReadObjectResponse field and subfield numbers, used by the hand-rolled
// wire decoder in readFullObjectResponse below.
const (
	checksummedDataFieldReadObject = protowire.Number(1) // ReadObjectResponse.checksummed_data
	checksummedDataContentFieldReadObject = protowire.Number(1) // ChecksummedData.content
	checksummedDataCRC32CFieldReadObject = protowire.Number(2) // ChecksummedData.crc32c
	objectChecksumsFieldReadObject = protowire.Number(2) // ReadObjectResponse.object_checksums
	contentRangeFieldReadObject = protowire.Number(3) // ReadObjectResponse.content_range
	metadataFieldReadObject = protowire.Number(4) // ReadObjectResponse.metadata
)
+
// readObjectResponseDecoder is a wrapper on the raw message, used to decode one message
// without copying object data. It also has methods to write out the resulting object
// data to the user application.
type readObjectResponseDecoder struct {
	databufs mem.BufferSlice // raw bytes of the message being processed
	// Decoding offsets
	off uint64 // offset in the messsage relative to the data as a whole
	currBuf int // index of the current buffer being processed
	currOff uint64 // offset in the current buffer
	// Processed data
	msg *storagepb.ReadObjectResponse // processed response message with all fields other than object data populated
	dataOffsets bufferSliceOffsetsReadObject // offsets of the object data in the message.
	done bool // true if the data has been completely read.
}
+
// bufferSliceOffsetsReadObject locates the object-data bytes within the raw
// message buffers and tracks how far the caller has consumed them.
type bufferSliceOffsetsReadObject struct {
	startBuf, endBuf int // indices of start and end buffers of object data in the msg
	startOff, endOff uint64 // offsets within these buffers where the data starts and ends.
	currBuf int // index of current buffer being read out to the user application.
	currOff uint64 // offset of read in current buffer.
}
+
+// peek ahead 10 bytes from the current offset in the databufs. This will return a
+// slice of the current buffer if the bytes are all in one buffer, but will copy
+// the bytes into a new buffer if the distance is split across buffers. Use this
+// to allow protowire methods to be used to parse tags & fixed values.
+// The max length of a varint tag is 10 bytes, see
+// https://protobuf.dev/programming-guides/encoding/#varints . Other int types
+// are shorter.
+func (d *readObjectResponseDecoder) peek() []byte {
+ b := d.databufs[d.currBuf].ReadOnlyData()
+ // Check if the tag will fit in the current buffer. If not, copy the next 10
+ // bytes into a new buffer to ensure that we can read the tag correctly
+ // without it being divided between buffers.
+ tagBuf := b[d.currOff:]
+ remainingInBuf := len(tagBuf)
+ // If we have less than 10 bytes remaining and are not in the final buffer,
+ // copy up to 10 bytes ahead from the next buffer.
+ if remainingInBuf < binary.MaxVarintLen64 && d.currBuf != len(d.databufs)-1 {
+ tagBuf = d.copyNextBytes(10)
+ }
+ return tagBuf
+}
+
// copyNextBytes copies up to the next n bytes into a new buffer, or fewer if
// fewer bytes remain in the buffers overall. Does not advance offsets.
func (d *readObjectResponseDecoder) copyNextBytes(n int) []byte {
	remaining := n
	// Clamp to the number of undecoded bytes left across all buffers.
	if r := d.databufs.Len() - int(d.off); r < remaining {
		remaining = r
	}
	currBuf := d.currBuf
	currOff := d.currOff
	var buf []byte
	for remaining > 0 {
		b := d.databufs[currBuf].ReadOnlyData()
		remainingInCurr := len(b[currOff:])
		if remainingInCurr < remaining {
			// Current buffer is exhausted; take all of it and move on.
			buf = append(buf, b[currOff:]...)
			remaining -= remainingInCurr
			currBuf++
			currOff = 0
		} else {
			// Everything we still need fits in the current buffer.
			buf = append(buf, b[currOff:currOff+uint64(remaining)]...)
			remaining = 0
		}
	}
	return buf
}
+
// advanceOffset advances the current buffer index and byte offset in the
// decoding by n bytes. Returns an error if the advance runs past the end of
// the data (truncated message).
func (d *readObjectResponseDecoder) advanceOffset(n uint64) error {
	remaining := n
	for remaining > 0 {
		remainingInCurr := uint64(d.databufs[d.currBuf].Len()) - d.currOff
		if remainingInCurr <= remaining {
			// Consume the rest of this buffer and step to the next one.
			remaining -= remainingInCurr
			d.currBuf++
			d.currOff = 0
		} else {
			d.currOff += remaining
			remaining = 0
		}
	}
	// If we have advanced past the end of the buffers, something went wrong.
	if (d.currBuf == len(d.databufs) && d.currOff > 0) || d.currBuf > len(d.databufs) {
		return errors.New("decoding: truncated message, cannot advance offset")
	}
	d.off += n
	return nil

}
+
// readAndUpdateCRC copies object data from the message into p and returns the
// number of bytes copied. The data offsets are incremented in the message.
// The updateCRC function is called on the copied bytes. When the last data
// byte has been handed out, the underlying buffers are freed and done is set.
func (d *readObjectResponseDecoder) readAndUpdateCRC(p []byte, updateCRC func([]byte)) int {
	// For a completely empty message, just return 0
	if len(d.databufs) == 0 {
		return 0
	}
	databuf := d.databufs[d.dataOffsets.currBuf]
	startOff := d.dataOffsets.currOff
	var b []byte
	if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
		// Last buffer: only the slice up to endOff is object data.
		b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
	} else {
		b = databuf.ReadOnlyData()[startOff:]
	}
	n := copy(p, b)
	// Only the bytes actually copied into p count toward the CRC.
	updateCRC(b[:n])
	d.dataOffsets.currOff += uint64(n)

	// We've read all the data from this message. Free the underlying buffers.
	if d.dataOffsets.currBuf == d.dataOffsets.endBuf && d.dataOffsets.currOff == d.dataOffsets.endOff {
		d.done = true
		d.databufs.Free()
	}
	// We are at the end of the current buffer
	if d.dataOffsets.currBuf != d.dataOffsets.endBuf && d.dataOffsets.currOff == uint64(databuf.Len()) {
		d.dataOffsets.currOff = 0
		d.dataOffsets.currBuf++
	}
	return n
}
+
// writeToAndUpdateCRC streams all remaining object data in this message to w,
// calling updateCRC on each chunk, and returns the number of bytes written.
// Buffers are freed once the message is fully consumed.
func (d *readObjectResponseDecoder) writeToAndUpdateCRC(w io.Writer, updateCRC func([]byte)) (int64, error) {
	// For a completely empty message, just return 0
	if len(d.databufs) == 0 {
		return 0, nil
	}
	var written int64
	for !d.done {
		databuf := d.databufs[d.dataOffsets.currBuf]
		startOff := d.dataOffsets.currOff
		var b []byte
		if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
			b = databuf.ReadOnlyData()[startOff:d.dataOffsets.endOff]
		} else {
			b = databuf.ReadOnlyData()[startOff:]
		}
		var n int
		// Write all remaining data from the current buffer
		n, err := w.Write(b)
		written += int64(n)
		// NOTE(review): the CRC is updated with all of b even if w.Write
		// returned a short count with an error; compare readAndUpdateCRC,
		// which only hashes the copied prefix. Harmless on the error path
		// since the read fails anyway, but worth confirming upstream.
		updateCRC(b)
		if err != nil {
			return written, err
		}
		d.dataOffsets.currOff = 0
		// We've read all the data from this message.
		if d.dataOffsets.currBuf == d.dataOffsets.endBuf {
			d.done = true
			d.databufs.Free()
		} else {
			d.dataOffsets.currBuf++
		}
	}
	return written, nil
}
+
// consumeTag consumes the next available tag in the input data and returns
// the field number and type. Advances the relevant offsets in the data.
func (d *readObjectResponseDecoder) consumeTag() (protowire.Number, protowire.Type, error) {
	tagBuf := d.peek()

	// Consume the next tag. This will tell us which field is next in the
	// buffer, its type, and how much space it takes up.
	fieldNum, fieldType, tagLength := protowire.ConsumeTag(tagBuf)
	if tagLength < 0 {
		return 0, 0, protowire.ParseError(tagLength)
	}
	// Update the offsets and current buffer depending on the tag length.
	if err := d.advanceOffset(uint64(tagLength)); err != nil {
		return 0, 0, fmt.Errorf("consuming tag: %w", err)
	}
	return fieldNum, fieldType, nil
}
+
+// Consume a varint that represents the length of a bytes field. Return the length of
+// the data, and advance the offsets by the length of the varint.
+func (d *readObjectResponseDecoder) consumeVarint() (uint64, error) {
+ tagBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ dataLength, tagLength := protowire.ConsumeVarint(tagBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return dataLength, nil
+}
+
+func (d *readObjectResponseDecoder) consumeFixed32() (uint32, error) {
+ valueBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ value, tagLength := protowire.ConsumeFixed32(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+func (d *readObjectResponseDecoder) consumeFixed64() (uint64, error) {
+ valueBuf := d.peek()
+
+ // Consume the next tag. This will tell us which field is next in the
+ // buffer, its type, and how much space it takes up.
+ value, tagLength := protowire.ConsumeFixed64(valueBuf)
+ if tagLength < 0 {
+ return 0, protowire.ParseError(tagLength)
+ }
+
+ // Update the offsets and current buffer depending on the tag length.
+ d.advanceOffset(uint64(tagLength))
+ return value, nil
+}
+
+// Consume any field values up to the end offset provided and don't return anything.
+// This is used to skip any values which are not going to be used.
+// msgEndOff is indexed in terms of the overall data across all buffers.
+func (d *readObjectResponseDecoder) consumeFieldValue(fieldNum protowire.Number, fieldType protowire.Type) error {
+ // reimplement protowire.ConsumeFieldValue without the extra case for groups (which
+ // are are complicted and not a thing in proto3).
+ var err error
+ switch fieldType {
+ case protowire.VarintType:
+ _, err = d.consumeVarint()
+ case protowire.Fixed32Type:
+ _, err = d.consumeFixed32()
+ case protowire.Fixed64Type:
+ _, err = d.consumeFixed64()
+ case protowire.BytesType:
+ _, err = d.consumeBytes()
+ default:
+ return fmt.Errorf("unknown field type %v in field %v", fieldType, fieldNum)
+ }
+ if err != nil {
+ return fmt.Errorf("consuming field %v of type %v: %w", fieldNum, fieldType, err)
+ }
+
+ return nil
+}
+
+// Consume a bytes field from the input. Returns offsets for the data in the buffer slices
+// and an error.
+func (d *readObjectResponseDecoder) consumeBytes() (bufferSliceOffsetsReadObject, error) {
+ // m is the length of the data past the tag.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return bufferSliceOffsetsReadObject{}, fmt.Errorf("consuming bytes field: %w", err)
+ }
+ offsets := bufferSliceOffsetsReadObject{
+ startBuf: d.currBuf,
+ startOff: d.currOff,
+ currBuf: d.currBuf,
+ currOff: d.currOff,
+ }
+
+ // Advance offsets to lengths of bytes field and capture where we end.
+ d.advanceOffset(m)
+ offsets.endBuf = d.currBuf
+ offsets.endOff = d.currOff
+ return offsets, nil
+}
+
+// Consume a bytes field from the input and copy into a new buffer if
+// necessary (if the data is split across buffers in databuf). This can be
+// used to leverage proto.Unmarshal for small bytes fields (i.e. anything
+// except object data).
+func (d *readObjectResponseDecoder) consumeBytesCopy() ([]byte, error) {
+ // m is the length of the bytes data.
+ m, err := d.consumeVarint()
+ if err != nil {
+ return nil, fmt.Errorf("consuming varint: %w", err)
+ }
+ // Copy the data into a buffer and advance the offset
+ b := d.copyNextBytes(int(m))
+ if err := d.advanceOffset(m); err != nil {
+ return nil, fmt.Errorf("advancing offset: %w", err)
+ }
+ return b, nil
+}
+
// readFullObjectResponse returns the ReadObjectResponse that is encoded in the
// wire-encoded message buffer b, or an error if the message is invalid.
// This must be used on the first recv of an object as it may contain all fields
// of ReadObjectResponse, and we use or pass on those fields to the user.
// This function is essentially identical to proto.Unmarshal, except it aliases
// the data in the input []byte. If the proto library adds a feature to
// Unmarshal that does that, this function can be dropped.
func (d *readObjectResponseDecoder) readFullObjectResponse() error {
	msg := &storagepb.ReadObjectResponse{}

	// Loop over the entire message, extracting fields as we go. This does not
	// handle field concatenation, in which the contents of a single field
	// are split across multiple protobuf tags.
	for d.off < uint64(d.databufs.Len()) {
		fieldNum, fieldType, err := d.consumeTag()
		if err != nil {
			return fmt.Errorf("consuming next tag: %w", err)
		}

		// Unmarshal the field according to its type. Only fields that are not
		// nil will be present.
		switch {
		case fieldNum == checksummedDataFieldReadObject && fieldType == protowire.BytesType:
			// The ChecksummedData field was found. Initialize the struct.
			msg.ChecksummedData = &storagepb.ChecksummedData{}

			bytesFieldLen, err := d.consumeVarint()
			if err != nil {
				return fmt.Errorf("consuming bytes: %v", err)
			}

			// Walk the submessage's fields until its end offset; the content
			// bytes themselves are recorded as offsets, never copied.
			var contentEndOff = d.off + bytesFieldLen
			for d.off < contentEndOff {
				gotNum, gotTyp, err := d.consumeTag()
				if err != nil {
					return fmt.Errorf("consuming checksummedData tag: %w", err)
				}

				switch {
				case gotNum == checksummedDataContentFieldReadObject && gotTyp == protowire.BytesType:
					// Get the offsets of the content bytes.
					d.dataOffsets, err = d.consumeBytes()
					if err != nil {
						return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Content: %w", err)
					}
				case gotNum == checksummedDataCRC32CFieldReadObject && gotTyp == protowire.Fixed32Type:
					v, err := d.consumeFixed32()
					if err != nil {
						return fmt.Errorf("invalid ReadObjectResponse.ChecksummedData.Crc32C: %w", err)
					}
					msg.ChecksummedData.Crc32C = &v
				default:
					err := d.consumeFieldValue(gotNum, gotTyp)
					if err != nil {
						return fmt.Errorf("invalid field in ReadObjectResponse.ChecksummedData: %w", err)
					}
				}
			}
		case fieldNum == objectChecksumsFieldReadObject && fieldType == protowire.BytesType:
			// The field was found. Initialize the struct.
			msg.ObjectChecksums = &storagepb.ObjectChecksums{}
			// Consume the bytes and copy them into a single buffer if they are split across buffers.
			buf, err := d.consumeBytesCopy()
			if err != nil {
				return fmt.Errorf("invalid ReadObjectResponse.ObjectChecksums: %v", err)
			}
			// Unmarshal.
			if err := proto.Unmarshal(buf, msg.ObjectChecksums); err != nil {
				return err
			}
		case fieldNum == contentRangeFieldReadObject && fieldType == protowire.BytesType:
			msg.ContentRange = &storagepb.ContentRange{}
			buf, err := d.consumeBytesCopy()
			if err != nil {
				return fmt.Errorf("invalid ReadObjectResponse.ContentRange: %v", err)
			}
			if err := proto.Unmarshal(buf, msg.ContentRange); err != nil {
				return err
			}
		case fieldNum == metadataFieldReadObject && fieldType == protowire.BytesType:
			msg.Metadata = &storagepb.Object{}

			buf, err := d.consumeBytesCopy()
			if err != nil {
				return fmt.Errorf("invalid ReadObjectResponse.Metadata: %v", err)
			}

			if err := proto.Unmarshal(buf, msg.Metadata); err != nil {
				return err
			}
		default:
			err := d.consumeFieldValue(fieldNum, fieldType)
			if err != nil {
				return fmt.Errorf("invalid field in ReadObjectResponse: %w", err)
			}
		}
	}
	d.msg = msg
	return nil
}
+
+// reopenStream "closes" the existing stream and attempts to reopen a stream and
+// sets the Reader's stream and cancelStream properties in the process.
+func (r *gRPCReadObjectReader) reopenStream() error {
+ // Close existing stream and initialize new stream with updated offset.
+ r.Close()
+
+ res, cancel, err := r.reopen(r.seen)
+ if err != nil {
+ return err
+ }
+ r.stream = res.stream
+ r.currMsg = res.decoder
+ r.cancel = cancel
+ return nil
+}
diff --git a/vendor/cloud.google.com/go/storage/grpc_writer.go b/vendor/cloud.google.com/go/storage/grpc_writer.go
new file mode 100644
index 000000000..9c8e8fc30
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/grpc_writer.go
@@ -0,0 +1,305 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ gapic "cloud.google.com/go/storage/internal/apiv2"
+ "cloud.google.com/go/storage/internal/apiv2/storagepb"
+ gax "github.com/googleapis/gax-go/v2"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+type gRPCAppendBidiWriteBufferSender struct {
+ ctx context.Context
+ bucket string
+ routingToken *string
+ raw *gapic.Client
+ settings *settings
+ stream storagepb.Storage_BidiWriteObjectClient
+ firstMessage *storagepb.BidiWriteObjectRequest
+ objectChecksums *storagepb.ObjectChecksums
+
+ forceFirstMessage bool
+ flushOffset int64
+
+ // Fields used to report responses from the receive side of the stream
+ // recvs is closed when the current recv goroutine is complete. recvErr is set
+ // to the result of that stream (including io.EOF to indicate success)
+ recvs <-chan *storagepb.BidiWriteObjectResponse
+ recvErr error
+}
+
+func (w *gRPCWriter) newGRPCAppendBidiWriteBufferSender() (*gRPCAppendBidiWriteBufferSender, error) {
+ s := &gRPCAppendBidiWriteBufferSender{
+ ctx: w.ctx,
+ bucket: w.spec.GetResource().GetBucket(),
+ raw: w.c.raw,
+ settings: w.c.settings,
+ firstMessage: &storagepb.BidiWriteObjectRequest{
+ FirstMessage: &storagepb.BidiWriteObjectRequest_WriteObjectSpec{
+ WriteObjectSpec: w.spec,
+ },
+ CommonObjectRequestParams: toProtoCommonObjectRequestParams(w.encryptionKey),
+ },
+ objectChecksums: toProtoChecksums(w.sendCRC32C, w.attrs),
+ forceFirstMessage: true,
+ }
+ return s, nil
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) connect() (err error) {
+ err = func() error {
+ // If this is a forced first message, we've already determined it's safe to
+ // send.
+ if s.forceFirstMessage {
+ s.forceFirstMessage = false
+ return nil
+ }
+
+ // It's always ok to reconnect if there is a handle. This is the common
+ // case.
+ if s.firstMessage.GetAppendObjectSpec().GetWriteHandle() != nil {
+ return nil
+ }
+
+ // We can also reconnect if the first message has an if_generation_match or
+ // if_metageneration_match condition. Note that negative conditions like
+ // if_generation_not_match are not necessarily safe to retry.
+ aos := s.firstMessage.GetAppendObjectSpec()
+ wos := s.firstMessage.GetWriteObjectSpec()
+
+ if aos != nil && aos.IfMetagenerationMatch != nil {
+ return nil
+ }
+
+ if wos != nil && wos.IfGenerationMatch != nil {
+ return nil
+ }
+ if wos != nil && wos.IfMetagenerationMatch != nil {
+ return nil
+ }
+
+ // Otherwise, it is not safe to reconnect.
+ return errors.New("cannot safely reconnect; no write handle or preconditions")
+ }()
+ if err != nil {
+ return err
+ }
+
+ return s.startReceiver()
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) withRequestParams(ctx context.Context) context.Context {
+ param := fmt.Sprintf("appendable=true&bucket=%s", s.bucket)
+ if s.routingToken != nil {
+ param = param + fmt.Sprintf("&routing_token=%s", *s.routingToken)
+ }
+ return gax.InsertMetadataIntoOutgoingContext(s.ctx, "x-goog-request-params", param)
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) startReceiver() (err error) {
+ s.stream, err = s.raw.BidiWriteObject(s.withRequestParams(s.ctx), s.settings.gax...)
+ if err != nil {
+ return
+ }
+
+ recvs := make(chan *storagepb.BidiWriteObjectResponse)
+ s.recvs = recvs
+ s.recvErr = nil
+ go s.receiveMessages(recvs)
+ return
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) ensureFirstMessageAppendObjectSpec() {
+ if s.firstMessage.GetWriteObjectSpec() != nil {
+ w := s.firstMessage.GetWriteObjectSpec()
+ s.firstMessage.FirstMessage = &storagepb.BidiWriteObjectRequest_AppendObjectSpec{
+ AppendObjectSpec: &storagepb.AppendObjectSpec{
+ Bucket: w.GetResource().GetBucket(),
+ Object: w.GetResource().GetName(),
+ IfMetagenerationMatch: w.IfMetagenerationMatch,
+ IfMetagenerationNotMatch: w.IfMetagenerationNotMatch,
+ },
+ }
+ }
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) maybeUpdateFirstMessage(resp *storagepb.BidiWriteObjectResponse) {
+ // Any affirmative response should switch us to an AppendObjectSpec.
+ s.ensureFirstMessageAppendObjectSpec()
+
+ if r := resp.GetResource(); r != nil {
+ aos := s.firstMessage.GetAppendObjectSpec()
+ aos.Bucket = r.GetBucket()
+ aos.Object = r.GetName()
+ aos.Generation = r.GetGeneration()
+ }
+
+ if h := resp.GetWriteHandle(); h != nil {
+ s.firstMessage.GetAppendObjectSpec().WriteHandle = h
+ }
+}
+
+type bidiWriteObjectRedirectionError struct{}
+
+func (e bidiWriteObjectRedirectionError) Error() string {
+ return "BidiWriteObjectRedirectedError"
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) handleRedirectionError(e *storagepb.BidiWriteObjectRedirectedError) bool {
+ if e.RoutingToken == nil {
+ // This shouldn't happen, but we don't want to blindly retry here. Instead,
+ // surface the error to the caller.
+ return false
+ }
+
+ if e.WriteHandle != nil {
+ // If we get back a write handle, we should use it. We can only use it
+ // on an append object spec.
+ s.ensureFirstMessageAppendObjectSpec()
+ s.firstMessage.GetAppendObjectSpec().WriteHandle = e.WriteHandle
+ // Generation is meant to only come with the WriteHandle, so ignore it
+ // otherwise.
+ if e.Generation != nil {
+ s.firstMessage.GetAppendObjectSpec().Generation = e.GetGeneration()
+ }
+ }
+
+ s.routingToken = e.RoutingToken
+ return true
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) receiveMessages(resps chan<- *storagepb.BidiWriteObjectResponse) {
+ resp, err := s.stream.Recv()
+ for err == nil {
+ s.maybeUpdateFirstMessage(resp)
+
+ if resp.WriteStatus != nil {
+ // We only get a WriteStatus if this was a solicited message (either
+ // state_lookup: true or finish_write: true). Unsolicited messages may
+ // arrive to update our handle if necessary. We don't want to block on
+ // this channel write if this was an unsolicited message.
+ resps <- resp
+ }
+
+ resp, err = s.stream.Recv()
+ }
+
+ if st, ok := status.FromError(err); ok && st.Code() == codes.Aborted {
+ for _, d := range st.Details() {
+ if e, ok := d.(*storagepb.BidiWriteObjectRedirectedError); ok {
+ // If we can handle this error, replace it with the sentinel. Otherwise,
+ // report it to the user.
+ if ok := s.handleRedirectionError(e); ok {
+ err = bidiWriteObjectRedirectionError{}
+ }
+ }
+ }
+ }
+
+ // TODO: automatically reconnect on retriable recv errors, even if there are
+ // no sends occurring.
+ s.recvErr = err
+ close(resps)
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) sendOnConnectedStream(buf []byte, offset int64, flush, finishWrite, sendFirstMessage bool) (obj *storagepb.Object, err error) {
+ req := bidiWriteObjectRequest(buf, offset, flush, finishWrite)
+ if finishWrite {
+ // appendable objects pass checksums on the last message only
+ req.ObjectChecksums = s.objectChecksums
+ }
+ if sendFirstMessage {
+ proto.Merge(req, s.firstMessage)
+ }
+
+ if err = s.stream.Send(req); err != nil {
+ return nil, err
+ }
+
+ if finishWrite {
+ s.stream.CloseSend()
+ for resp := range s.recvs {
+ if resp.GetResource() != nil {
+ obj = resp.GetResource()
+ }
+ }
+ if s.recvErr != io.EOF {
+ return nil, s.recvErr
+ }
+ return
+ }
+
+ if flush {
+ // We don't necessarily expect multiple responses for a single flush, but
+ // this allows the server to send multiple responses if it wants to.
+ for s.flushOffset < offset+int64(len(buf)) {
+ resp, ok := <-s.recvs
+ if !ok {
+ return nil, s.recvErr
+ }
+ pSize := resp.GetPersistedSize()
+ rSize := resp.GetResource().GetSize()
+ if s.flushOffset < pSize {
+ s.flushOffset = pSize
+ }
+ if s.flushOffset < rSize {
+ s.flushOffset = rSize
+ }
+ }
+ }
+
+ return
+}
+
+func (s *gRPCAppendBidiWriteBufferSender) sendBuffer(buf []byte, offset int64, flush, finishWrite bool) (obj *storagepb.Object, err error) {
+ for {
+ sendFirstMessage := false
+ if s.stream == nil {
+ sendFirstMessage = true
+ if err = s.connect(); err != nil {
+ return
+ }
+ }
+
+ obj, err = s.sendOnConnectedStream(buf, offset, flush, finishWrite, sendFirstMessage)
+ if err == nil {
+ return
+ }
+
+ // await recv stream termination
+ for range s.recvs {
+ }
+ if s.recvErr != io.EOF {
+ err = s.recvErr
+ }
+ s.stream = nil
+
+ // Retry transparently on a redirection error
+ if _, ok := err.(bidiWriteObjectRedirectionError); ok {
+ s.forceFirstMessage = true
+ continue
+ }
+
+ return
+ }
+}
diff --git a/vendor/cloud.google.com/go/storage/hmac.go b/vendor/cloud.google.com/go/storage/hmac.go
index 1b9fbe9dd..2387fd33c 100644
--- a/vendor/cloud.google.com/go/storage/hmac.go
+++ b/vendor/cloud.google.com/go/storage/hmac.go
@@ -20,7 +20,6 @@ import (
"fmt"
"time"
- "cloud.google.com/go/storage/internal/apiv2/storagepb"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"
)
@@ -103,6 +102,7 @@ func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
//
// Options such as UserProjectForHMACKeys can be used to set the
// userProject to be billed against for operations.
+// Note: gRPC is not supported.
func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMACKey, error) {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -118,6 +118,7 @@ func (hkh *HMACKeyHandle) Get(ctx context.Context, opts ...HMACKeyOption) (*HMAC
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
// Only inactive HMAC keys can be deleted.
// After deletion, a key cannot be used to authenticate requests.
+// Note: gRPC is not supported.
func (hkh *HMACKeyHandle) Delete(ctx context.Context, opts ...HMACKeyOption) error {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -158,23 +159,8 @@ func toHMACKeyFromRaw(hk *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, erro
return hmKey, nil
}
-func toHMACKeyFromProto(pbmd *storagepb.HmacKeyMetadata) *HMACKey {
- if pbmd == nil {
- return nil
- }
-
- return &HMACKey{
- AccessID: pbmd.GetAccessId(),
- ID: pbmd.GetId(),
- State: HMACState(pbmd.GetState()),
- ProjectID: pbmd.GetProject(),
- CreatedTime: convertProtoTime(pbmd.GetCreateTime()),
- UpdatedTime: convertProtoTime(pbmd.GetUpdateTime()),
- ServiceAccountEmail: pbmd.GetServiceAccountEmail(),
- }
-}
-
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
+// Note: gRPC is not supported.
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string, opts ...HMACKeyOption) (*HMACKey, error) {
if projectID == "" {
return nil, errors.New("storage: expecting a non-blank projectID")
@@ -203,6 +189,7 @@ type HMACKeyAttrsToUpdate struct {
}
// Update mutates the HMACKey referred to by accessID.
+// Note: gRPC is not supported.
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate, opts ...HMACKeyOption) (*HMACKey, error) {
if au.State != Active && au.State != Inactive {
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
@@ -237,6 +224,7 @@ type HMACKeysIterator struct {
// ListHMACKeys returns an iterator for listing HMACKeys.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
+// Note: gRPC is not supported.
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
desc := new(hmacKeyDesc)
for _, opt := range opts {
@@ -272,7 +260,6 @@ func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string,
// TODO: Remove fetch method upon integration. This method is internalized into
// httpStorageClient.ListHMACKeys() as it is the only caller.
call := it.raw.List(it.projectID)
- setClientHeader(call.Header())
if pageToken != "" {
call = call.PageToken(pageToken)
}
diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go
index e01ae9c42..61b20555f 100644
--- a/vendor/cloud.google.com/go/storage/http_client.go
+++ b/vendor/cloud.google.com/go/storage/http_client.go
@@ -22,6 +22,7 @@ import (
"hash/crc32"
"io"
"io/ioutil"
+ "log"
"net/http"
"net/url"
"os"
@@ -47,13 +48,14 @@ import (
// httpStorageClient is the HTTP-JSON API implementation of the transport-agnostic
// storageClient interface.
type httpStorageClient struct {
- creds *google.Credentials
- hc *http.Client
- xmlHost string
- raw *raw.Service
- scheme string
- settings *settings
- config *storageConfig
+ creds *google.Credentials
+ hc *http.Client
+ xmlHost string
+ raw *raw.Service
+ scheme string
+ settings *settings
+ config *storageConfig
+ dynamicReadReqStallTimeout *bucketDelayManager
}
// newHTTPStorageClient initializes a new storageClient that uses the HTTP-JSON
@@ -128,14 +130,29 @@ func newHTTPStorageClient(ctx context.Context, opts ...storageOption) (storageCl
return nil, fmt.Errorf("supplied endpoint %q is not valid: %w", ep, err)
}
+ var bd *bucketDelayManager
+ if config.readStallTimeoutConfig != nil {
+ drrstConfig := config.readStallTimeoutConfig
+ bd, err = newBucketDelayManager(
+ drrstConfig.TargetPercentile,
+ getDynamicReadReqIncreaseRateFromEnv(),
+ getDynamicReadReqInitialTimeoutSecFromEnv(drrstConfig.Min),
+ drrstConfig.Min,
+ defaultDynamicReqdReqMaxTimeout)
+ if err != nil {
+ return nil, fmt.Errorf("creating dynamic-delay: %w", err)
+ }
+ }
+
return &httpStorageClient{
- creds: creds,
- hc: hc,
- xmlHost: u.Host,
- raw: rawService,
- scheme: u.Scheme,
- settings: s,
- config: &config,
+ creds: creds,
+ hc: hc,
+ xmlHost: u.Host,
+ raw: rawService,
+ scheme: u.Scheme,
+ settings: s,
+ config: &config,
+ dynamicReadReqStallTimeout: bd,
}, nil
}
@@ -176,7 +193,6 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st
bkt.Location = "US"
}
req := c.raw.Buckets.Insert(project, bkt)
- setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
@@ -207,7 +223,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt
fetch := func(pageSize int, pageToken string) (token string, err error) {
req := c.raw.Buckets.List(it.projectID)
- setClientHeader(req.Header())
req.Projection("full")
req.Prefix(it.Prefix)
req.PageToken(pageToken)
@@ -245,7 +260,6 @@ func (c *httpStorageClient) ListBuckets(ctx context.Context, project string, opt
func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) error {
s := callSettings(c.settings, opts...)
req := c.raw.Buckets.Delete(bucket)
- setClientHeader(req.Header())
if err := applyBucketConds("httpStorageClient.DeleteBucket", conds, req); err != nil {
return err
}
@@ -259,7 +273,6 @@ func (c *httpStorageClient) DeleteBucket(ctx context.Context, bucket string, con
func (c *httpStorageClient) GetBucket(ctx context.Context, bucket string, conds *BucketConditions, opts ...storageOption) (*BucketAttrs, error) {
s := callSettings(c.settings, opts...)
req := c.raw.Buckets.Get(bucket).Projection("full")
- setClientHeader(req.Header())
err := applyBucketConds("httpStorageClient.GetBucket", conds, req)
if err != nil {
return nil, err
@@ -287,7 +300,6 @@ func (c *httpStorageClient) UpdateBucket(ctx context.Context, bucket string, uat
s := callSettings(c.settings, opts...)
rb := uattrs.toRawBucket()
req := c.raw.Buckets.Patch(bucket, rb).Projection("full")
- setClientHeader(req.Header())
err := applyBucketConds("httpStorageClient.UpdateBucket", conds, req)
if err != nil {
return nil, err
@@ -340,7 +352,6 @@ func (c *httpStorageClient) ListObjects(ctx context.Context, bucket string, q *Q
if it.query.SoftDeleted {
req.SoftDeleted(it.query.SoftDeleted)
}
- setClientHeader(req.Header())
projection := it.query.Projection
if projection == ProjectionDefault {
projection = ProjectionFull
@@ -581,6 +592,31 @@ func (c *httpStorageClient) RestoreObject(ctx context.Context, params *restoreOb
return newObject(obj), err
}
+func (c *httpStorageClient) MoveObject(ctx context.Context, params *moveObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+ s := callSettings(c.settings, opts...)
+ req := c.raw.Objects.Move(params.bucket, params.srcObject, params.dstObject).Context(ctx)
+ if err := applyConds("MoveObjectDestination", defaultGen, params.dstConds, req); err != nil {
+ return nil, err
+ }
+ if err := applySourceConds("MoveObjectSource", defaultGen, params.srcConds, req); err != nil {
+ return nil, err
+ }
+ if s.userProject != "" {
+ req.UserProject(s.userProject)
+ }
+ if err := setEncryptionHeaders(req.Header(), params.encryptionKey, false); err != nil {
+ return nil, err
+ }
+ var obj *raw.Object
+ var err error
+ err = run(ctx, func(ctx context.Context) error { obj, err = req.Context(ctx).Do(); return err }, s.retry, s.idempotent)
+ var e *googleapi.Error
+ if ok := errors.As(err, &e); ok && e.Code == http.StatusNotFound {
+ return nil, ErrObjectNotExist
+ }
+ return newObject(obj), err
+}
+
// Default Object ACL methods.
func (c *httpStorageClient) DeleteDefaultObjectACL(ctx context.Context, bucket string, entity ACLEntity, opts ...storageOption) error {
@@ -666,7 +702,7 @@ func (c *httpStorageClient) UpdateBucketACL(ctx context.Context, bucket string,
}, s.retry, s.idempotent)
}
-// configureACLCall sets the context, user project and headers on the apiary library call.
+// configureACLCall sets the context and user project on the apiary library call.
// This will panic if the call does not have the correct methods.
func configureACLCall(ctx context.Context, userProject string, call interface{ Header() http.Header }) {
vc := reflect.ValueOf(call)
@@ -674,7 +710,6 @@ func configureACLCall(ctx context.Context, userProject string, call interface{ H
if userProject != "" {
vc.MethodByName("UserProject").Call([]reflect.Value{reflect.ValueOf(userProject)})
}
- setClientHeader(call.Header())
}
// Object ACL methods.
@@ -760,7 +795,6 @@ func (c *httpStorageClient) ComposeObject(ctx context.Context, req *composeObjec
return nil, err
}
var obj *raw.Object
- setClientHeader(call.Header())
var err error
retryCall := func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }
@@ -788,7 +822,7 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
if err := applyConds("Copy destination", defaultGen, req.dstObject.conds, call); err != nil {
return nil, err
}
- if err := applySourceConds(req.srcObject.gen, req.srcObject.conds, call); err != nil {
+ if err := applySourceConds("Copy source", req.srcObject.gen, req.srcObject.conds, call); err != nil {
return nil, err
}
if s.userProject != "" {
@@ -809,7 +843,6 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
var res *raw.RewriteResponse
var err error
- setClientHeader(call.Header())
retryCall := func(ctx context.Context) error { res, err = call.Context(ctx).Do(); return err }
@@ -828,6 +861,11 @@ func (c *httpStorageClient) RewriteObject(ctx context.Context, req *rewriteObjec
return r, nil
}
+// NewMultiRangeDownloader is not supported by http client.
+func (c *httpStorageClient) NewMultiRangeDownloader(ctx context.Context, params *newMultiRangeDownloaderParams, opts ...storageOption) (mr *MultiRangeDownloader, err error) {
+ return nil, errMethodNotSupported
+}
+
func (c *httpStorageClient) NewRangeReader(ctx context.Context, params *newRangeReaderParams, opts ...storageOption) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.httpStorageClient.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
@@ -864,17 +902,51 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
return nil, err
}
- // Set custom headers passed in via the context. This is only required for XML;
- // for gRPC & JSON this is handled in the GAPIC and Apiary layers respectively.
- ctxHeaders := callctx.HeadersFromContext(ctx)
- for k, vals := range ctxHeaders {
- for _, v := range vals {
- req.Header.Add(k, v)
- }
- }
-
reopen := readerReopen(ctx, req.Header, params, s,
- func(ctx context.Context) (*http.Response, error) { return c.hc.Do(req.WithContext(ctx)) },
+ func(ctx context.Context) (*http.Response, error) {
+ setHeadersFromCtx(ctx, req.Header)
+
+ if c.dynamicReadReqStallTimeout == nil {
+ return c.hc.Do(req.WithContext(ctx))
+ }
+
+ cancelCtx, cancel := context.WithCancel(ctx)
+ var (
+ res *http.Response
+ err error
+ )
+
+ done := make(chan bool)
+ go func() {
+ reqStartTime := time.Now()
+ res, err = c.hc.Do(req.WithContext(cancelCtx))
+ if err == nil {
+ reqLatency := time.Since(reqStartTime)
+ c.dynamicReadReqStallTimeout.update(params.bucket, reqLatency)
+ } else if errors.Is(err, context.Canceled) {
+ // context.Canceled means operation took more than current dynamicTimeout,
+ // hence should be increased.
+ c.dynamicReadReqStallTimeout.increase(params.bucket)
+ }
+ done <- true
+ }()
+
+ // Wait until stall timeout or request is successful.
+ stallTimeout := c.dynamicReadReqStallTimeout.getValue(params.bucket)
+ timer := time.After(stallTimeout)
+ select {
+ case <-timer:
+ log.Printf("stalled read-req (%p) cancelled after %fs", req, stallTimeout.Seconds())
+ cancel()
+ err = context.DeadlineExceeded
+ if res != nil && res.Body != nil {
+ res.Body.Close()
+ }
+ case <-done:
+ cancel = nil
+ }
+ return res, err
+ },
func() error { return setConditionsHeaders(req.Header, params.conds) },
func() { req.URL.RawQuery = fmt.Sprintf("generation=%d", params.gen) })
@@ -888,7 +960,6 @@ func (c *httpStorageClient) newRangeReaderXML(ctx context.Context, params *newRa
func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newRangeReaderParams, s *settings) (r *Reader, err error) {
call := c.raw.Objects.Get(params.bucket, params.object)
- setClientHeader(call.Header())
call.Projection("full")
if s.userProject != "" {
@@ -911,6 +982,10 @@ func (c *httpStorageClient) newRangeReaderJSON(ctx context.Context, params *newR
}
func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storageOption) (*io.PipeWriter, error) {
+ if params.append {
+ return nil, errors.New("storage: append not supported on HTTP Client; use gRPC")
+ }
+
s := callSettings(c.settings, opts...)
errorf := params.setError
setObj := params.setObj
@@ -926,6 +1001,9 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
if params.chunkRetryDeadline != 0 {
mediaOpts = append(mediaOpts, googleapi.ChunkRetryDeadline(params.chunkRetryDeadline))
}
+ if params.chunkTransferTimeout != 0 {
+ mediaOpts = append(mediaOpts, googleapi.ChunkTransferTimeout(params.chunkTransferTimeout))
+ }
pr, pw := io.Pipe()
@@ -1004,7 +1082,6 @@ func (c *httpStorageClient) OpenWriter(params *openWriterParams, opts ...storage
func (c *httpStorageClient) GetIamPolicy(ctx context.Context, resource string, version int32, opts ...storageOption) (*iampb.Policy, error) {
s := callSettings(c.settings, opts...)
call := c.raw.Buckets.GetIamPolicy(resource).OptionsRequestedPolicyVersion(int64(version))
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -1025,7 +1102,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p
rp := iamToStoragePolicy(policy)
call := c.raw.Buckets.SetIamPolicy(resource, rp)
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -1039,7 +1115,6 @@ func (c *httpStorageClient) SetIamPolicy(ctx context.Context, resource string, p
func (c *httpStorageClient) TestIamPermissions(ctx context.Context, resource string, permissions []string, opts ...storageOption) ([]string, error) {
s := callSettings(c.settings, opts...)
call := c.raw.Buckets.TestIamPermissions(resource, permissions)
- setClientHeader(call.Header())
if s.userProject != "" {
call.UserProject(s.userProject)
}
@@ -1088,7 +1163,6 @@ func (c *httpStorageClient) ListHMACKeys(ctx context.Context, project, serviceAc
}
fetch := func(pageSize int, pageToken string) (token string, err error) {
call := c.raw.Projects.HmacKeys.List(project)
- setClientHeader(call.Header())
if pageToken != "" {
call = call.PageToken(pageToken)
}
@@ -1435,18 +1509,20 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
}
} else {
size = res.ContentLength
- // Check the CRC iff all of the following hold:
- // - We asked for content (length != 0).
- // - We got all the content (status != PartialContent).
- // - The server sent a CRC header.
- // - The Go http stack did not uncompress the file.
- // - We were not served compressed data that was uncompressed on download.
- // The problem with the last two cases is that the CRC will not match -- GCS
- // computes it on the compressed contents, but we compute it on the
- // uncompressed contents.
- if params.length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
- crc, checkCRC = parseCRC32c(res)
- }
+ }
+
+ // Check the CRC iff all of the following hold:
+ // - We asked for content (length != 0).
+ // - We got all the content (status != PartialContent).
+ // - The server sent a CRC header.
+ // - The Go http stack did not uncompress the file.
+ // - We were not served compressed data that was uncompressed on download.
+ // The problem with the last two cases is that the CRC will not match -- GCS
+ // computes it on the compressed contents, but we compute it on the
+ // uncompressed contents.
+ crc, checkCRC = parseCRC32c(res)
+ if params.length == 0 || res.StatusCode == http.StatusPartialContent || res.Uncompressed || uncompressedByServer(res) {
+ checkCRC = false
}
remain := res.ContentLength
@@ -1474,6 +1550,14 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
}
}
+ metadata := map[string]string{}
+ for key, values := range res.Header {
+ if len(values) > 0 && strings.HasPrefix(key, "X-Goog-Meta-") {
+ key := key[len("X-Goog-Meta-"):]
+ metadata[key] = values[0]
+ }
+ }
+
attrs := ReaderObjectAttrs{
Size: size,
ContentType: res.Header.Get("Content-Type"),
@@ -1483,12 +1567,15 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
StartOffset: startOffset,
Generation: params.gen,
Metageneration: metaGen,
+ CRC32C: crc,
+ Decompressed: res.Uncompressed || uncompressedByServer(res),
}
return &Reader{
- Attrs: attrs,
- size: size,
- remain: remain,
- checkCRC: checkCRC,
+ Attrs: attrs,
+ objectMetadata: &metadata,
+ size: size,
+ remain: remain,
+ checkCRC: checkCRC,
reader: &httpReader{
reopen: reopen,
body: body,
@@ -1497,3 +1584,30 @@ func parseReadResponse(res *http.Response, params *newRangeReaderParams, reopen
},
}, nil
}
+
+// setHeadersFromCtx sets custom headers passed in via the context on the header,
+// replacing any header with the same key (which avoids duplicating invocation headers).
+// This is only required for XML; for gRPC & JSON requests this is handled in
+// the GAPIC and Apiary layers respectively.
+func setHeadersFromCtx(ctx context.Context, header http.Header) {
+ ctxHeaders := callctx.HeadersFromContext(ctx)
+ for k, vals := range ctxHeaders {
+ // Merge x-goog-api-client values into a single space-separated value.
+ if strings.EqualFold(k, xGoogHeaderKey) {
+ alreadySetValues := header.Values(xGoogHeaderKey)
+ vals = append(vals, alreadySetValues...)
+
+ if len(vals) > 0 {
+ xGoogHeader := vals[0]
+ for _, v := range vals[1:] {
+ xGoogHeader = strings.Join([]string{xGoogHeader, v}, " ")
+ }
+ header.Set(k, xGoogHeader)
+ }
+ } else {
+ for _, v := range vals {
+ header.Set(k, v)
+ }
+ }
+ }
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go
index 415b2b585..03c3f8c17 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -41,7 +41,7 @@ type BucketIterator struct {
InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Bucket, nextPageToken string, err error)
}
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
@@ -68,100 +68,6 @@ func (it *BucketIterator) takeBuf() interface{} {
return b
}
-// HmacKeyMetadataIterator manages a stream of *storagepb.HmacKeyMetadata.
-type HmacKeyMetadataIterator struct {
- items []*storagepb.HmacKeyMetadata
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*storagepb.HmacKeyMetadata, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *HmacKeyMetadataIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *HmacKeyMetadataIterator) Next() (*storagepb.HmacKeyMetadata, error) {
- var item *storagepb.HmacKeyMetadata
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *HmacKeyMetadataIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *HmacKeyMetadataIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
-// NotificationConfigIterator manages a stream of *storagepb.NotificationConfig.
-type NotificationConfigIterator struct {
- items []*storagepb.NotificationConfig
- pageInfo *iterator.PageInfo
- nextFunc func() error
-
- // Response is the raw response for the current page.
- // It must be cast to the RPC response type.
- // Calling Next() or InternalFetch() updates this value.
- Response interface{}
-
- // InternalFetch is for use by the Google Cloud Libraries only.
- // It is not part of the stable interface of this package.
- //
- // InternalFetch returns results from a single call to the underlying RPC.
- // The number of results is no greater than pageSize.
- // If there are no more results, nextPageToken is empty and err is nil.
- InternalFetch func(pageSize int, pageToken string) (results []*storagepb.NotificationConfig, nextPageToken string, err error)
-}
-
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
-func (it *NotificationConfigIterator) PageInfo() *iterator.PageInfo {
- return it.pageInfo
-}
-
-// Next returns the next result. Its second return value is iterator.Done if there are no more
-// results. Once Next returns Done, all subsequent calls will return Done.
-func (it *NotificationConfigIterator) Next() (*storagepb.NotificationConfig, error) {
- var item *storagepb.NotificationConfig
- if err := it.nextFunc(); err != nil {
- return item, err
- }
- item = it.items[0]
- it.items = it.items[1:]
- return item, nil
-}
-
-func (it *NotificationConfigIterator) bufLen() int {
- return len(it.items)
-}
-
-func (it *NotificationConfigIterator) takeBuf() interface{} {
- b := it.items
- it.items = nil
- return b
-}
-
// ObjectIterator manages a stream of *storagepb.Object.
type ObjectIterator struct {
items []*storagepb.Object
@@ -182,7 +88,7 @@ type ObjectIterator struct {
InternalFetch func(pageSize int, pageToken string) (results []*storagepb.Object, nextPageToken string, err error)
}
-// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
+// PageInfo supports pagination. See the [google.golang.org/api/iterator] package for details.
func (it *ObjectIterator) PageInfo() *iterator.PageInfo {
return it.pageInfo
}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
new file mode 100644
index 000000000..a51532f60
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/auxiliary_go123.go
@@ -0,0 +1,38 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+//go:build go1.23
+
+package storage
+
+import (
+ "iter"
+
+ storagepb "cloud.google.com/go/storage/internal/apiv2/storagepb"
+ "github.com/googleapis/gax-go/v2/iterator"
+)
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *BucketIterator) All() iter.Seq2[*storagepb.Bucket, error] {
+ return iterator.RangeAdapter(it.Next)
+}
+
+// All returns an iterator. If an error is returned by the iterator, the
+// iterator will stop after that iteration.
+func (it *ObjectIterator) All() iter.Seq2[*storagepb.Object, error] {
+ return iterator.RangeAdapter(it.Next)
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
index 5e2a8f0ad..502fa5678 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/doc.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,19 +17,15 @@
// Package storage is an auto-generated package for the
// Cloud Storage API.
//
-// Stop. This folder is likely not what you are looking for. This folder
-// contains protocol buffer definitions for an API only accessible to select
-// customers. Customers not participating should not depend on this file.
-// Please contact Google Cloud sales if you are interested. Unless told
-// otherwise by a Google Cloud representative, do not use or otherwise rely
-// on any of the contents of this folder. If you would like to use Cloud
-// Storage, please consult our official documentation (at
+// This folder contains protocol buffer definitions for an API only
+// accessible to select customers. Customers not participating should not
+// depend on this file. Please contact Google Cloud sales if you are
+// interested. Unless told otherwise by a Google Cloud representative, do not
+// use or otherwise rely on any of the contents of this folder. If you would
+// like to use Cloud Storage, please consult our official documentation (at
// https://cloud.google.com/storage/docs/apis) for details on our XML and
// JSON APIs, or else consider one of our client libraries (at
-// https://cloud.google.com/storage/docs/reference/libraries). This API
-// defined in this folder is unreleased and may shut off, break, or fail at
-// any time for any users who are not registered as a part of a private
-// preview program.
+// https://cloud.google.com/storage/docs/reference/libraries).
//
// # General documentation
//
@@ -47,6 +43,7 @@
//
// To get started with this package, create a client.
//
+// // go get cloud.google.com/go/storage/internal/apiv2@latest
// ctx := context.Background()
// // This snippet has been automatically generated and should be regarded as a code template only.
// // It will require modifications to work:
@@ -65,25 +62,14 @@
//
// # Using the Client
//
-// The following is an example of making an API call with the newly created client.
+// The following is an example of making an API call with the newly created client, mentioned above.
//
-// ctx := context.Background()
-// // This snippet has been automatically generated and should be regarded as a code template only.
-// // It will require modifications to work:
-// // - It may require correct/in-range values for request initialization.
-// // - It may require specifying regional endpoints when creating the service client as shown in:
-// // https://pkg.go.dev/cloud.google.com/go#hdr-Client_Options
-// c, err := storage.NewClient(ctx)
-// if err != nil {
-// // TODO: Handle error.
-// }
-// defer c.Close()
-// stream, err := c.BidiWriteObject(ctx)
+// stream, err := c.BidiReadObject(ctx)
// if err != nil {
// // TODO: Handle error.
// }
// go func() {
-// reqs := []*storagepb.BidiWriteObjectRequest{
+// reqs := []*storagepb.BidiReadObjectRequest{
// // TODO: Create requests.
// }
// for _, req := range reqs {
@@ -119,34 +105,3 @@
// [Debugging Client Libraries]: https://pkg.go.dev/cloud.google.com/go#hdr-Debugging
// [Inspecting errors]: https://pkg.go.dev/cloud.google.com/go#hdr-Inspecting_errors
package storage // import "cloud.google.com/go/storage/internal/apiv2"
-
-import (
- "context"
-
- "google.golang.org/api/option"
-)
-
-// For more information on implementing a client constructor hook, see
-// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
-type clientHookParams struct{}
-type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
-
-var versionClient string
-
-func getVersionClient() string {
- if versionClient == "" {
- return "UNKNOWN"
- }
- return versionClient
-}
-
-// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
-func DefaultAuthScopes() []string {
- return []string{
- "https://www.googleapis.com/auth/cloud-platform",
- "https://www.googleapis.com/auth/cloud-platform.read-only",
- "https://www.googleapis.com/auth/devstorage.full_control",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/devstorage.read_write",
- }
-}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
index 56256bb2c..7e4d99ec9 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/gapic_metadata.json
@@ -10,6 +10,11 @@
"grpc": {
"libraryClient": "Client",
"rpcs": {
+ "BidiReadObject": {
+ "methods": [
+ "BidiReadObject"
+ ]
+ },
"BidiWriteObject": {
"methods": [
"BidiWriteObject"
@@ -30,31 +35,11 @@
"CreateBucket"
]
},
- "CreateHmacKey": {
- "methods": [
- "CreateHmacKey"
- ]
- },
- "CreateNotificationConfig": {
- "methods": [
- "CreateNotificationConfig"
- ]
- },
"DeleteBucket": {
"methods": [
"DeleteBucket"
]
},
- "DeleteHmacKey": {
- "methods": [
- "DeleteHmacKey"
- ]
- },
- "DeleteNotificationConfig": {
- "methods": [
- "DeleteNotificationConfig"
- ]
- },
"DeleteObject": {
"methods": [
"DeleteObject"
@@ -65,46 +50,21 @@
"GetBucket"
]
},
- "GetHmacKey": {
- "methods": [
- "GetHmacKey"
- ]
- },
"GetIamPolicy": {
"methods": [
"GetIamPolicy"
]
},
- "GetNotificationConfig": {
- "methods": [
- "GetNotificationConfig"
- ]
- },
"GetObject": {
"methods": [
"GetObject"
]
},
- "GetServiceAccount": {
- "methods": [
- "GetServiceAccount"
- ]
- },
"ListBuckets": {
"methods": [
"ListBuckets"
]
},
- "ListHmacKeys": {
- "methods": [
- "ListHmacKeys"
- ]
- },
- "ListNotificationConfigs": {
- "methods": [
- "ListNotificationConfigs"
- ]
- },
"ListObjects": {
"methods": [
"ListObjects"
@@ -115,6 +75,11 @@
"LockBucketRetentionPolicy"
]
},
+ "MoveObject": {
+ "methods": [
+ "MoveObject"
+ ]
+ },
"QueryWriteStatus": {
"methods": [
"QueryWriteStatus"
@@ -155,11 +120,6 @@
"UpdateBucket"
]
},
- "UpdateHmacKey": {
- "methods": [
- "UpdateHmacKey"
- ]
- },
"UpdateObject": {
"methods": [
"UpdateObject"
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go b/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go
new file mode 100644
index 000000000..0de9b31f6
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/helpers.go
@@ -0,0 +1,65 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go_gapic. DO NOT EDIT.
+
+package storage
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/googleapis/gax-go/v2/internallog/grpclog"
+ "google.golang.org/api/option"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/proto"
+)
+
+const serviceName = "storage.googleapis.com"
+
+// For more information on implementing a client constructor hook, see
+// https://github.com/googleapis/google-cloud-go/wiki/Customizing-constructors.
+type clientHookParams struct{}
+type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error)
+
+var versionClient string
+
+func getVersionClient() string {
+ if versionClient == "" {
+ return "UNKNOWN"
+ }
+ return versionClient
+}
+
+// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
+func DefaultAuthScopes() []string {
+ return []string{
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write",
+ }
+}
+
+func executeRPC[I proto.Message, O proto.Message](ctx context.Context, fn func(context.Context, I, ...grpc.CallOption) (O, error), req I, opts []grpc.CallOption, logger *slog.Logger, rpc string) (O, error) {
+ var zero O
+ logger.DebugContext(ctx, "api request", "serviceName", serviceName, "rpcName", rpc, "request", grpclog.ProtoMessageRequest(ctx, req))
+ resp, err := fn(ctx, req, opts...)
+ if err != nil {
+ return zero, err
+ }
+ logger.DebugContext(ctx, "api response", "serviceName", serviceName, "rpcName", rpc, "response", grpclog.ProtoMessageResponse(resp))
+ return resp, err
+}
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
index 47300d7a1..4a50254d8 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storage_client.go
@@ -1,4 +1,4 @@
-// Copyright 2024 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@ package storage
import (
"context"
"fmt"
+ "log/slog"
"math"
"net/url"
"regexp"
@@ -50,16 +51,13 @@ type CallOptions struct {
SetIamPolicy []gax.CallOption
TestIamPermissions []gax.CallOption
UpdateBucket []gax.CallOption
- DeleteNotificationConfig []gax.CallOption
- GetNotificationConfig []gax.CallOption
- CreateNotificationConfig []gax.CallOption
- ListNotificationConfigs []gax.CallOption
ComposeObject []gax.CallOption
DeleteObject []gax.CallOption
RestoreObject []gax.CallOption
CancelResumableWrite []gax.CallOption
GetObject []gax.CallOption
ReadObject []gax.CallOption
+ BidiReadObject []gax.CallOption
UpdateObject []gax.CallOption
WriteObject []gax.CallOption
BidiWriteObject []gax.CallOption
@@ -67,12 +65,7 @@ type CallOptions struct {
RewriteObject []gax.CallOption
StartResumableWrite []gax.CallOption
QueryWriteStatus []gax.CallOption
- GetServiceAccount []gax.CallOption
- CreateHmacKey []gax.CallOption
- DeleteHmacKey []gax.CallOption
- GetHmacKey []gax.CallOption
- ListHmacKeys []gax.CallOption
- UpdateHmacKey []gax.CallOption
+ MoveObject []gax.CallOption
}
func defaultGRPCClientOptions() []option.ClientOption {
@@ -84,6 +77,7 @@ func defaultGRPCClientOptions() []option.ClientOption {
internaloption.WithDefaultAudience("https://storage.googleapis.com/"),
internaloption.WithDefaultScopes(DefaultAuthScopes()...),
internaloption.EnableJwtWithScope(),
+ internaloption.EnableNewAuthLibrary(),
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(
grpc.MaxCallRecvMsgSize(math.MaxInt32))),
}
@@ -208,46 +202,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteNotificationConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- GetNotificationConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- CreateNotificationConfig: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- ListNotificationConfigs: []gax.CallOption{
+ ComposeObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -260,7 +215,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ComposeObject: []gax.CallOption{
+ DeleteObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -273,7 +228,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- DeleteObject: []gax.CallOption{
+ RestoreObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -286,7 +241,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- RestoreObject: []gax.CallOption{
+ CancelResumableWrite: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -299,7 +254,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- CancelResumableWrite: []gax.CallOption{
+ GetObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -312,8 +267,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetObject: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
+ ReadObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -325,7 +279,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- ReadObject: []gax.CallOption{
+ BidiReadObject: []gax.CallOption{
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
@@ -426,72 +380,7 @@ func defaultCallOptions() *CallOptions {
})
}),
},
- GetServiceAccount: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- CreateHmacKey: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- DeleteHmacKey: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- GetHmacKey: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- ListHmacKeys: []gax.CallOption{
- gax.WithTimeout(60000 * time.Millisecond),
- gax.WithRetry(func() gax.Retryer {
- return gax.OnCodes([]codes.Code{
- codes.DeadlineExceeded,
- codes.Unavailable,
- }, gax.Backoff{
- Initial: 1000 * time.Millisecond,
- Max: 60000 * time.Millisecond,
- Multiplier: 2.00,
- })
- }),
- },
- UpdateHmacKey: []gax.CallOption{
+ MoveObject: []gax.CallOption{
gax.WithTimeout(60000 * time.Millisecond),
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
@@ -521,16 +410,13 @@ type internalClient interface {
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest, ...gax.CallOption) (*iampb.Policy, error)
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest, ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error)
UpdateBucket(context.Context, *storagepb.UpdateBucketRequest, ...gax.CallOption) (*storagepb.Bucket, error)
- DeleteNotificationConfig(context.Context, *storagepb.DeleteNotificationConfigRequest, ...gax.CallOption) error
- GetNotificationConfig(context.Context, *storagepb.GetNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
- CreateNotificationConfig(context.Context, *storagepb.CreateNotificationConfigRequest, ...gax.CallOption) (*storagepb.NotificationConfig, error)
- ListNotificationConfigs(context.Context, *storagepb.ListNotificationConfigsRequest, ...gax.CallOption) *NotificationConfigIterator
ComposeObject(context.Context, *storagepb.ComposeObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
DeleteObject(context.Context, *storagepb.DeleteObjectRequest, ...gax.CallOption) error
RestoreObject(context.Context, *storagepb.RestoreObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
CancelResumableWrite(context.Context, *storagepb.CancelResumableWriteRequest, ...gax.CallOption) (*storagepb.CancelResumableWriteResponse, error)
GetObject(context.Context, *storagepb.GetObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
ReadObject(context.Context, *storagepb.ReadObjectRequest, ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error)
+ BidiReadObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error)
UpdateObject(context.Context, *storagepb.UpdateObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
WriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error)
BidiWriteObject(context.Context, ...gax.CallOption) (storagepb.Storage_BidiWriteObjectClient, error)
@@ -538,12 +424,7 @@ type internalClient interface {
RewriteObject(context.Context, *storagepb.RewriteObjectRequest, ...gax.CallOption) (*storagepb.RewriteResponse, error)
StartResumableWrite(context.Context, *storagepb.StartResumableWriteRequest, ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error)
QueryWriteStatus(context.Context, *storagepb.QueryWriteStatusRequest, ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error)
- GetServiceAccount(context.Context, *storagepb.GetServiceAccountRequest, ...gax.CallOption) (*storagepb.ServiceAccount, error)
- CreateHmacKey(context.Context, *storagepb.CreateHmacKeyRequest, ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error)
- DeleteHmacKey(context.Context, *storagepb.DeleteHmacKeyRequest, ...gax.CallOption) error
- GetHmacKey(context.Context, *storagepb.GetHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error)
- ListHmacKeys(context.Context, *storagepb.ListHmacKeysRequest, ...gax.CallOption) *HmacKeyMetadataIterator
- UpdateHmacKey(context.Context, *storagepb.UpdateHmacKeyRequest, ...gax.CallOption) (*storagepb.HmacKeyMetadata, error)
+ MoveObject(context.Context, *storagepb.MoveObjectRequest, ...gax.CallOption) (*storagepb.Object, error)
}
// Client is a client for interacting with Cloud Storage API.
@@ -641,11 +522,13 @@ func (c *Client) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyReques
return c.internalClient.SetIamPolicy(ctx, req, opts...)
}
-// TestIamPermissions tests a set of permissions on the given bucket or object to see which, if
-// any, are held by the caller.
+// TestIamPermissions tests a set of permissions on the given bucket, object, or managed folder
+// to see which, if any, are held by the caller.
// The resource field in the request should be
-// projects/_/buckets/{bucket} for a bucket or
-// projects/_/buckets/{bucket}/objects/{object} for an object.
+// projects/_/buckets/{bucket} for a bucket,
+// projects/_/buckets/{bucket}/objects/{object} for an object, or
+// projects/_/buckets/{bucket}/managedFolders/{managedFolder}
+// for a managed folder.
func (c *Client) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest, opts ...gax.CallOption) (*iampb.TestIamPermissionsResponse, error) {
return c.internalClient.TestIamPermissions(ctx, req, opts...)
}
@@ -655,41 +538,32 @@ func (c *Client) UpdateBucket(ctx context.Context, req *storagepb.UpdateBucketRe
return c.internalClient.UpdateBucket(ctx, req, opts...)
}
-// DeleteNotificationConfig permanently deletes a NotificationConfig.
-func (c *Client) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteNotificationConfig(ctx, req, opts...)
-}
-
-// GetNotificationConfig view a NotificationConfig.
-func (c *Client) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- return c.internalClient.GetNotificationConfig(ctx, req, opts...)
-}
-
-// CreateNotificationConfig creates a NotificationConfig for a given bucket.
-// These NotificationConfigs, when triggered, publish messages to the
-// specified Pub/Sub topics. See
-// https://cloud.google.com/storage/docs/pubsub-notifications (at https://cloud.google.com/storage/docs/pubsub-notifications).
-func (c *Client) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- return c.internalClient.CreateNotificationConfig(ctx, req, opts...)
-}
-
-// ListNotificationConfigs retrieves a list of NotificationConfigs for a given bucket.
-func (c *Client) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
- return c.internalClient.ListNotificationConfigs(ctx, req, opts...)
-}
-
// ComposeObject concatenates a list of existing objects into a new object in the same
// bucket.
func (c *Client) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
return c.internalClient.ComposeObject(ctx, req, opts...)
}
-// DeleteObject deletes an object and its metadata.
+// DeleteObject deletes an object and its metadata. Deletions are permanent if versioning
+// is not enabled for the bucket, or if the generation parameter is used, or
+// if soft delete (at https://cloud.google.com/storage/docs/soft-delete) is not
+// enabled for the bucket.
+// When this API is used to delete an object from a bucket that has soft
+// delete policy enabled, the object becomes soft deleted, and the
+// softDeleteTime and hardDeleteTime properties are set on the object.
+// This API cannot be used to permanently delete soft-deleted objects.
+// Soft-deleted objects are permanently deleted according to their
+// hardDeleteTime.
//
-// Deletions are normally permanent when versioning is disabled or whenever
-// the generation parameter is used. However, if soft delete is enabled for
-// the bucket, deleted objects can be restored using RestoreObject until the
-// soft delete retention period has passed.
+// You can use the [RestoreObject][google.storage.v2.Storage.RestoreObject]
+// API to restore soft-deleted objects until the soft delete retention period
+// has passed.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.delete
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
func (c *Client) DeleteObject(ctx context.Context, req *storagepb.DeleteObjectRequest, opts ...gax.CallOption) error {
return c.internalClient.DeleteObject(ctx, req, opts...)
}
@@ -711,16 +585,52 @@ func (c *Client) CancelResumableWrite(ctx context.Context, req *storagepb.Cancel
return c.internalClient.CancelResumableWrite(ctx, req, opts...)
}
-// GetObject retrieves an object’s metadata.
+// GetObject retrieves object metadata.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.get
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket. To return object ACLs, the authenticated user must also have
+// the storage.objects.getIamPolicy permission.
func (c *Client) GetObject(ctx context.Context, req *storagepb.GetObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
return c.internalClient.GetObject(ctx, req, opts...)
}
-// ReadObject reads an object’s data.
+// ReadObject retrieves object data.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.get
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
func (c *Client) ReadObject(ctx context.Context, req *storagepb.ReadObjectRequest, opts ...gax.CallOption) (storagepb.Storage_ReadObjectClient, error) {
return c.internalClient.ReadObject(ctx, req, opts...)
}
+// BidiReadObject reads an object’s data.
+//
+// This is a bi-directional API with the added support for reading multiple
+// ranges within one stream both within and across multiple messages.
+// If the server encountered an error for any of the inputs, the stream will
+// be closed with the relevant error code.
+// Because the API allows for multiple outstanding requests, when the stream
+// is closed the error response will contain a BidiReadObjectRangesError proto
+// in the error extension describing the error for each outstanding read_id.
+//
+// IAM Permissions:
+//
+// # Requires storage.objects.get
+//
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
+//
+// This API is currently in preview and is not yet available for general
+// use.
+func (c *Client) BidiReadObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error) {
+ return c.internalClient.BidiReadObject(ctx, opts...)
+}
+
// UpdateObject updates an object’s metadata.
// Equivalent to JSON API’s storage.objects.patch.
func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
@@ -790,6 +700,12 @@ func (c *Client) UpdateObject(ctx context.Context, req *storagepb.UpdateObjectRe
// Alternatively, the BidiWriteObject operation may be used to write an
// object with controls over flushing and the ability to fetch the ability to
// determine the current persisted size.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.create
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
func (c *Client) WriteObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_WriteObjectClient, error) {
return c.internalClient.WriteObject(ctx, opts...)
}
@@ -814,6 +730,13 @@ func (c *Client) BidiWriteObject(ctx context.Context, opts ...gax.CallOption) (s
}
// ListObjects retrieves a list of objects matching the criteria.
+//
+// IAM Permissions:
+//
+// The authenticated user requires storage.objects.list
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions)
+// to use this method. To return object ACLs, the authenticated user must also
+// have the storage.objects.getIamPolicy permission.
func (c *Client) ListObjects(ctx context.Context, req *storagepb.ListObjectsRequest, opts ...gax.CallOption) *ObjectIterator {
return c.internalClient.ListObjects(ctx, req, opts...)
}
@@ -824,58 +747,47 @@ func (c *Client) RewriteObject(ctx context.Context, req *storagepb.RewriteObject
return c.internalClient.RewriteObject(ctx, req, opts...)
}
-// StartResumableWrite starts a resumable write. How long the write operation remains valid, and
-// what happens when the write operation becomes invalid, are
-// service-dependent.
+// StartResumableWrite starts a resumable write operation. This
+// method is part of the Resumable
+// upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature.
+// This allows you to upload large objects in multiple chunks, which is more
+// resilient to network interruptions than a single upload. The validity
+// duration of the write operation, and the consequences of it becoming
+// invalid, are service-dependent.
+//
+// IAM Permissions:
+//
+// Requires storage.objects.create
+// IAM permission (at https://cloud.google.com/iam/docs/overview#permissions) on
+// the bucket.
func (c *Client) StartResumableWrite(ctx context.Context, req *storagepb.StartResumableWriteRequest, opts ...gax.CallOption) (*storagepb.StartResumableWriteResponse, error) {
return c.internalClient.StartResumableWrite(ctx, req, opts...)
}
-// QueryWriteStatus determines the persisted_size for an object that is being written, which
-// can then be used as the write_offset for the next Write() call.
+// QueryWriteStatus determines the persisted_size of an object that is being written. This
+// method is part of the resumable
+// upload (at https://cloud.google.com/storage/docs/resumable-uploads) feature.
+// The returned value is the size of the object that has been persisted so
+// far. The value can be used as the write_offset for the next Write()
+// call.
//
-// If the object does not exist (i.e., the object has been deleted, or the
-// first Write() has not yet reached the service), this method returns the
+// If the object does not exist, meaning if it was deleted, or the
+// first Write() has not yet reached the service, this method returns the
// error NOT_FOUND.
//
-// The client may call QueryWriteStatus() at any time to determine how
-// much data has been processed for this object. This is useful if the
-// client is buffering data and needs to know which data can be safely
-// evicted. For any sequence of QueryWriteStatus() calls for a given
-// object name, the sequence of returned persisted_size values will be
+// This method is useful for clients that buffer data and need to know which
+// data can be safely evicted. The client can call QueryWriteStatus() at any
+// time to determine how much data has been logged for this object.
+// For any sequence of QueryWriteStatus() calls for a given
+// object name, the sequence of returned persisted_size values are
// non-decreasing.
func (c *Client) QueryWriteStatus(ctx context.Context, req *storagepb.QueryWriteStatusRequest, opts ...gax.CallOption) (*storagepb.QueryWriteStatusResponse, error) {
return c.internalClient.QueryWriteStatus(ctx, req, opts...)
}
-// GetServiceAccount retrieves the name of a project’s Google Cloud Storage service account.
-func (c *Client) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
- return c.internalClient.GetServiceAccount(ctx, req, opts...)
-}
-
-// CreateHmacKey creates a new HMAC key for the given service account.
-func (c *Client) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
- return c.internalClient.CreateHmacKey(ctx, req, opts...)
-}
-
-// DeleteHmacKey deletes a given HMAC key. Key must be in an INACTIVE state.
-func (c *Client) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
- return c.internalClient.DeleteHmacKey(ctx, req, opts...)
-}
-
-// GetHmacKey gets an existing HMAC key metadata for the given id.
-func (c *Client) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
- return c.internalClient.GetHmacKey(ctx, req, opts...)
-}
-
-// ListHmacKeys lists HMAC keys under a given project with the additional filters provided.
-func (c *Client) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
- return c.internalClient.ListHmacKeys(ctx, req, opts...)
-}
-
-// UpdateHmacKey updates a given HMAC key state between ACTIVE and INACTIVE.
-func (c *Client) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
- return c.internalClient.UpdateHmacKey(ctx, req, opts...)
+// MoveObject moves the source object to the destination object in the same bucket.
+func (c *Client) MoveObject(ctx context.Context, req *storagepb.MoveObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
+ return c.internalClient.MoveObject(ctx, req, opts...)
}
// gRPCClient is a client for interacting with Cloud Storage API over gRPC transport.
@@ -893,6 +805,8 @@ type gRPCClient struct {
// The x-goog-* metadata to be sent with each request.
xGoogHeaders []string
+
+ logger *slog.Logger
}
// NewClient creates a new storage client based on gRPC.
@@ -940,6 +854,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
connPool: connPool,
client: storagepb.NewStorageClient(connPool),
CallOptions: &client.CallOptions,
+ logger: internaloption.GetLogger(opts),
}
c.setGoogleClientInfo()
@@ -962,7 +877,9 @@ func (c *gRPCClient) Connection() *grpc.ClientConn {
func (c *gRPCClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", gax.GoVersion}, keyval...)
kv = append(kv, "gapic", getVersionClient(), "gax", gax.Version, "grpc", grpc.Version)
- c.xGoogHeaders = []string{"x-goog-api-client", gax.XGoogHeader(kv...)}
+ c.xGoogHeaders = []string{
+ "x-goog-api-client", gax.XGoogHeader(kv...),
+ }
}
// Close closes the connection to the API service. The user should invoke this when
@@ -988,7 +905,7 @@ func (c *gRPCClient) DeleteBucket(ctx context.Context, req *storagepb.DeleteBuck
opts = append((*c.CallOptions).DeleteBucket[0:len((*c.CallOptions).DeleteBucket):len((*c.CallOptions).DeleteBucket)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- _, err = c.client.DeleteBucket(ctx, req, settings.GRPC...)
+ _, err = executeRPC(ctx, c.client.DeleteBucket, req, settings.GRPC, c.logger, "DeleteBucket")
return err
}, opts...)
return err
@@ -1012,7 +929,7 @@ func (c *gRPCClient) GetBucket(ctx context.Context, req *storagepb.GetBucketRequ
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.GetBucket(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.GetBucket, req, settings.GRPC, c.logger, "GetBucket")
return err
}, opts...)
if err != nil {
@@ -1042,7 +959,7 @@ func (c *gRPCClient) CreateBucket(ctx context.Context, req *storagepb.CreateBuck
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.CreateBucket(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.CreateBucket, req, settings.GRPC, c.logger, "CreateBucket")
return err
}, opts...)
if err != nil {
@@ -1080,7 +997,7 @@ func (c *gRPCClient) ListBuckets(ctx context.Context, req *storagepb.ListBuckets
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.ListBuckets(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.ListBuckets, req, settings.GRPC, c.logger, "ListBuckets")
return err
}, opts...)
if err != nil {
@@ -1124,7 +1041,7 @@ func (c *gRPCClient) LockBucketRetentionPolicy(ctx context.Context, req *storage
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.LockBucketRetentionPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.LockBucketRetentionPolicy, req, settings.GRPC, c.logger, "LockBucketRetentionPolicy")
return err
}, opts...)
if err != nil {
@@ -1151,7 +1068,7 @@ func (c *gRPCClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRe
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.GetIamPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.GetIamPolicy, req, settings.GRPC, c.logger, "GetIamPolicy")
return err
}, opts...)
if err != nil {
@@ -1178,7 +1095,7 @@ func (c *gRPCClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRe
var resp *iampb.Policy
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.SetIamPolicy(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.SetIamPolicy, req, settings.GRPC, c.logger, "SetIamPolicy")
return err
}, opts...)
if err != nil {
@@ -1196,6 +1113,9 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/objects(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
}
+ if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)/managedFolders(?:/.*)?"); reg.MatchString(req.GetResource()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetResource())[1])
+ }
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
}
@@ -1208,7 +1128,7 @@ func (c *gRPCClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamP
var resp *iampb.TestIamPermissionsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.TestIamPermissions(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.TestIamPermissions, req, settings.GRPC, c.logger, "TestIamPermissions")
return err
}, opts...)
if err != nil {
@@ -1235,57 +1155,7 @@ func (c *gRPCClient) UpdateBucket(ctx context.Context, req *storagepb.UpdateBuck
var resp *storagepb.Bucket
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.UpdateBucket(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) DeleteNotificationConfig(ctx context.Context, req *storagepb.DeleteNotificationConfigRequest, opts ...gax.CallOption) error {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteNotificationConfig[0:len((*c.CallOptions).DeleteNotificationConfig):len((*c.CallOptions).DeleteNotificationConfig)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.client.DeleteNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.GetNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?Pprojects/[^/]+/buckets/[^/]+)(?:/.*)?"); reg.MatchString(req.GetName()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetName())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetNotificationConfig[0:len((*c.CallOptions).GetNotificationConfig):len((*c.CallOptions).GetNotificationConfig)], opts...)
- var resp *storagepb.NotificationConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetNotificationConfig(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.UpdateBucket, req, settings.GRPC, c.logger, "UpdateBucket")
return err
}, opts...)
if err != nil {
@@ -1294,88 +1164,6 @@ func (c *gRPCClient) GetNotificationConfig(ctx context.Context, req *storagepb.G
return resp, nil
}
-func (c *gRPCClient) CreateNotificationConfig(ctx context.Context, req *storagepb.CreateNotificationConfigRequest, opts ...gax.CallOption) (*storagepb.NotificationConfig, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateNotificationConfig[0:len((*c.CallOptions).CreateNotificationConfig):len((*c.CallOptions).CreateNotificationConfig)], opts...)
- var resp *storagepb.NotificationConfig
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.CreateNotificationConfig(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) ListNotificationConfigs(ctx context.Context, req *storagepb.ListNotificationConfigsRequest, opts ...gax.CallOption) *NotificationConfigIterator {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetParent()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])) > 0 {
- routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetParent())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListNotificationConfigs[0:len((*c.CallOptions).ListNotificationConfigs):len((*c.CallOptions).ListNotificationConfigs)], opts...)
- it := &NotificationConfigIterator{}
- req = proto.Clone(req).(*storagepb.ListNotificationConfigsRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.NotificationConfig, string, error) {
- resp := &storagepb.ListNotificationConfigsResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ListNotificationConfigs(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetNotificationConfigs(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
routingHeaders := ""
routingHeadersMap := make(map[string]string)
@@ -1394,7 +1182,7 @@ func (c *gRPCClient) ComposeObject(ctx context.Context, req *storagepb.ComposeOb
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.ComposeObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.ComposeObject, req, settings.GRPC, c.logger, "ComposeObject")
return err
}, opts...)
if err != nil {
@@ -1420,7 +1208,7 @@ func (c *gRPCClient) DeleteObject(ctx context.Context, req *storagepb.DeleteObje
opts = append((*c.CallOptions).DeleteObject[0:len((*c.CallOptions).DeleteObject):len((*c.CallOptions).DeleteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- _, err = c.client.DeleteObject(ctx, req, settings.GRPC...)
+ _, err = executeRPC(ctx, c.client.DeleteObject, req, settings.GRPC, c.logger, "DeleteObject")
return err
}, opts...)
return err
@@ -1444,7 +1232,7 @@ func (c *gRPCClient) RestoreObject(ctx context.Context, req *storagepb.RestoreOb
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.RestoreObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.RestoreObject, req, settings.GRPC, c.logger, "RestoreObject")
return err
}, opts...)
if err != nil {
@@ -1471,7 +1259,7 @@ func (c *gRPCClient) CancelResumableWrite(ctx context.Context, req *storagepb.Ca
var resp *storagepb.CancelResumableWriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.CancelResumableWrite(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.CancelResumableWrite, req, settings.GRPC, c.logger, "CancelResumableWrite")
return err
}, opts...)
if err != nil {
@@ -1498,7 +1286,7 @@ func (c *gRPCClient) GetObject(ctx context.Context, req *storagepb.GetObjectRequ
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.GetObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.GetObject, req, settings.GRPC, c.logger, "GetObject")
return err
}, opts...)
if err != nil {
@@ -1525,7 +1313,26 @@ func (c *gRPCClient) ReadObject(ctx context.Context, req *storagepb.ReadObjectRe
var resp storagepb.Storage_ReadObjectClient
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
+ c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "ReadObject")
resp, err = c.client.ReadObject(ctx, req, settings.GRPC...)
+ c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "ReadObject")
+ return err
+ }, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return resp, nil
+}
+
+func (c *gRPCClient) BidiReadObject(ctx context.Context, opts ...gax.CallOption) (storagepb.Storage_BidiReadObjectClient, error) {
+ ctx = gax.InsertMetadataIntoOutgoingContext(ctx, c.xGoogHeaders...)
+ var resp storagepb.Storage_BidiReadObjectClient
+ opts = append((*c.CallOptions).BidiReadObject[0:len((*c.CallOptions).BidiReadObject):len((*c.CallOptions).BidiReadObject)], opts...)
+ err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
+ var err error
+ c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "BidiReadObject")
+ resp, err = c.client.BidiReadObject(ctx, settings.GRPC...)
+ c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "BidiReadObject")
return err
}, opts...)
if err != nil {
@@ -1552,7 +1359,7 @@ func (c *gRPCClient) UpdateObject(ctx context.Context, req *storagepb.UpdateObje
var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.UpdateObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.UpdateObject, req, settings.GRPC, c.logger, "UpdateObject")
return err
}, opts...)
if err != nil {
@@ -1567,7 +1374,9 @@ func (c *gRPCClient) WriteObject(ctx context.Context, opts ...gax.CallOption) (s
opts = append((*c.CallOptions).WriteObject[0:len((*c.CallOptions).WriteObject):len((*c.CallOptions).WriteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
+ c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "WriteObject")
resp, err = c.client.WriteObject(ctx, settings.GRPC...)
+ c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "WriteObject")
return err
}, opts...)
if err != nil {
@@ -1582,7 +1391,9 @@ func (c *gRPCClient) BidiWriteObject(ctx context.Context, opts ...gax.CallOption
opts = append((*c.CallOptions).BidiWriteObject[0:len((*c.CallOptions).BidiWriteObject):len((*c.CallOptions).BidiWriteObject)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
+ c.logger.DebugContext(ctx, "api streaming client request", "serviceName", serviceName, "rpcName", "BidiWriteObject")
resp, err = c.client.BidiWriteObject(ctx, settings.GRPC...)
+ c.logger.DebugContext(ctx, "api streaming client response", "serviceName", serviceName, "rpcName", "BidiWriteObject")
return err
}, opts...)
if err != nil {
@@ -1620,7 +1431,7 @@ func (c *gRPCClient) ListObjects(ctx context.Context, req *storagepb.ListObjects
}
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.ListObjects(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.ListObjects, req, settings.GRPC, c.logger, "ListObjects")
return err
}, opts...)
if err != nil {
@@ -1667,7 +1478,7 @@ func (c *gRPCClient) RewriteObject(ctx context.Context, req *storagepb.RewriteOb
var resp *storagepb.RewriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.RewriteObject(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.RewriteObject, req, settings.GRPC, c.logger, "RewriteObject")
return err
}, opts...)
if err != nil {
@@ -1694,7 +1505,7 @@ func (c *gRPCClient) StartResumableWrite(ctx context.Context, req *storagepb.Sta
var resp *storagepb.StartResumableWriteResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.StartResumableWrite(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.StartResumableWrite, req, settings.GRPC, c.logger, "StartResumableWrite")
return err
}, opts...)
if err != nil {
@@ -1721,111 +1532,7 @@ func (c *gRPCClient) QueryWriteStatus(ctx context.Context, req *storagepb.QueryW
var resp *storagepb.QueryWriteStatusResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.QueryWriteStatus(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) GetServiceAccount(ctx context.Context, req *storagepb.GetServiceAccountRequest, opts ...gax.CallOption) (*storagepb.ServiceAccount, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetServiceAccount[0:len((*c.CallOptions).GetServiceAccount):len((*c.CallOptions).GetServiceAccount)], opts...)
- var resp *storagepb.ServiceAccount
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetServiceAccount(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) CreateHmacKey(ctx context.Context, req *storagepb.CreateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.CreateHmacKeyResponse, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).CreateHmacKey[0:len((*c.CallOptions).CreateHmacKey):len((*c.CallOptions).CreateHmacKey)], opts...)
- var resp *storagepb.CreateHmacKeyResponse
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.CreateHmacKey(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, err
- }
- return resp, nil
-}
-
-func (c *gRPCClient) DeleteHmacKey(ctx context.Context, req *storagepb.DeleteHmacKeyRequest, opts ...gax.CallOption) error {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).DeleteHmacKey[0:len((*c.CallOptions).DeleteHmacKey):len((*c.CallOptions).DeleteHmacKey)], opts...)
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- _, err = c.client.DeleteHmacKey(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- return err
-}
-
-func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).GetHmacKey[0:len((*c.CallOptions).GetHmacKey):len((*c.CallOptions).GetHmacKey)], opts...)
- var resp *storagepb.HmacKeyMetadata
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.GetHmacKey(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.QueryWriteStatus, req, settings.GRPC, c.logger, "QueryWriteStatus")
return err
}, opts...)
if err != nil {
@@ -1834,66 +1541,11 @@ func (c *gRPCClient) GetHmacKey(ctx context.Context, req *storagepb.GetHmacKeyRe
return resp, nil
}
-func (c *gRPCClient) ListHmacKeys(ctx context.Context, req *storagepb.ListHmacKeysRequest, opts ...gax.CallOption) *HmacKeyMetadataIterator {
- routingHeaders := ""
- routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(.*)"); reg.MatchString(req.GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetProject())[1])
- }
- for headerName, headerValue := range routingHeadersMap {
- routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
- }
- routingHeaders = strings.TrimSuffix(routingHeaders, "&")
- hds := []string{"x-goog-request-params", routingHeaders}
-
- hds = append(c.xGoogHeaders, hds...)
- ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).ListHmacKeys[0:len((*c.CallOptions).ListHmacKeys):len((*c.CallOptions).ListHmacKeys)], opts...)
- it := &HmacKeyMetadataIterator{}
- req = proto.Clone(req).(*storagepb.ListHmacKeysRequest)
- it.InternalFetch = func(pageSize int, pageToken string) ([]*storagepb.HmacKeyMetadata, string, error) {
- resp := &storagepb.ListHmacKeysResponse{}
- if pageToken != "" {
- req.PageToken = pageToken
- }
- if pageSize > math.MaxInt32 {
- req.PageSize = math.MaxInt32
- } else if pageSize != 0 {
- req.PageSize = int32(pageSize)
- }
- err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
- var err error
- resp, err = c.client.ListHmacKeys(ctx, req, settings.GRPC...)
- return err
- }, opts...)
- if err != nil {
- return nil, "", err
- }
-
- it.Response = resp
- return resp.GetHmacKeys(), resp.GetNextPageToken(), nil
- }
- fetch := func(pageSize int, pageToken string) (string, error) {
- items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
- if err != nil {
- return "", err
- }
- it.items = append(it.items, items...)
- return nextPageToken, nil
- }
-
- it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
- it.pageInfo.MaxSize = int(req.GetPageSize())
- it.pageInfo.Token = req.GetPageToken()
-
- return it
-}
-
-func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHmacKeyRequest, opts ...gax.CallOption) (*storagepb.HmacKeyMetadata, error) {
+func (c *gRPCClient) MoveObject(ctx context.Context, req *storagepb.MoveObjectRequest, opts ...gax.CallOption) (*storagepb.Object, error) {
routingHeaders := ""
routingHeadersMap := make(map[string]string)
- if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetHmacKey().GetProject()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])) > 0 {
- routingHeadersMap["project"] = url.QueryEscape(reg.FindStringSubmatch(req.GetHmacKey().GetProject())[1])
+ if reg := regexp.MustCompile("(?P.*)"); reg.MatchString(req.GetBucket()) && len(url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])) > 0 {
+ routingHeadersMap["bucket"] = url.QueryEscape(reg.FindStringSubmatch(req.GetBucket())[1])
}
for headerName, headerValue := range routingHeadersMap {
routingHeaders = fmt.Sprintf("%s%s=%s&", routingHeaders, headerName, headerValue)
@@ -1903,11 +1555,11 @@ func (c *gRPCClient) UpdateHmacKey(ctx context.Context, req *storagepb.UpdateHma
hds = append(c.xGoogHeaders, hds...)
ctx = gax.InsertMetadataIntoOutgoingContext(ctx, hds...)
- opts = append((*c.CallOptions).UpdateHmacKey[0:len((*c.CallOptions).UpdateHmacKey):len((*c.CallOptions).UpdateHmacKey)], opts...)
- var resp *storagepb.HmacKeyMetadata
+ opts = append((*c.CallOptions).MoveObject[0:len((*c.CallOptions).MoveObject):len((*c.CallOptions).MoveObject)], opts...)
+ var resp *storagepb.Object
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
- resp, err = c.client.UpdateHmacKey(ctx, req, settings.GRPC...)
+ resp, err = executeRPC(ctx, c.client.MoveObject, req, settings.GRPC, c.logger, "MoveObject")
return err
}, opts...)
if err != nil {
diff --git a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
index b63d664e5..7f286f354 100644
--- a/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
+++ b/vendor/cloud.google.com/go/storage/internal/apiv2/storagepb/storage.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
+// protoc-gen-go v1.35.2
// protoc v4.25.3
// source: google/storage/v2/storage.proto
@@ -27,10 +27,11 @@ import (
iampb "cloud.google.com/go/iam/apiv1/iampb"
_ "google.golang.org/genproto/googleapis/api/annotations"
+ status "google.golang.org/genproto/googleapis/rpc/status"
date "google.golang.org/genproto/googleapis/type/date"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
+ status1 "google.golang.org/grpc/status"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
@@ -197,11 +198,9 @@ type DeleteBucketRequest struct {
func (x *DeleteBucketRequest) Reset() {
*x = DeleteBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteBucketRequest) String() string {
@@ -212,7 +211,7 @@ func (*DeleteBucketRequest) ProtoMessage() {}
func (x *DeleteBucketRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -270,11 +269,9 @@ type GetBucketRequest struct {
func (x *GetBucketRequest) Reset() {
*x = GetBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetBucketRequest) String() string {
@@ -285,7 +282,7 @@ func (*GetBucketRequest) ProtoMessage() {}
func (x *GetBucketRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -360,11 +357,9 @@ type CreateBucketRequest struct {
func (x *CreateBucketRequest) Reset() {
*x = CreateBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CreateBucketRequest) String() string {
@@ -375,7 +370,7 @@ func (*CreateBucketRequest) ProtoMessage() {}
func (x *CreateBucketRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -452,11 +447,9 @@ type ListBucketsRequest struct {
func (x *ListBucketsRequest) Reset() {
*x = ListBucketsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListBucketsRequest) String() string {
@@ -467,7 +460,7 @@ func (*ListBucketsRequest) ProtoMessage() {}
func (x *ListBucketsRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -532,11 +525,9 @@ type ListBucketsResponse struct {
func (x *ListBucketsResponse) Reset() {
*x = ListBucketsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListBucketsResponse) String() string {
@@ -547,7 +538,7 @@ func (*ListBucketsResponse) ProtoMessage() {}
func (x *ListBucketsResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -591,11 +582,9 @@ type LockBucketRetentionPolicyRequest struct {
func (x *LockBucketRetentionPolicyRequest) Reset() {
*x = LockBucketRetentionPolicyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *LockBucketRetentionPolicyRequest) String() string {
@@ -606,7 +595,7 @@ func (*LockBucketRetentionPolicyRequest) ProtoMessage() {}
func (x *LockBucketRetentionPolicyRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -671,11 +660,9 @@ type UpdateBucketRequest struct {
func (x *UpdateBucketRequest) Reset() {
*x = UpdateBucketRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UpdateBucketRequest) String() string {
@@ -686,7 +673,7 @@ func (*UpdateBucketRequest) ProtoMessage() {}
func (x *UpdateBucketRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -743,296 +730,6 @@ func (x *UpdateBucketRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
return nil
}
-// Request message for DeleteNotificationConfig.
-type DeleteNotificationConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The parent bucket of the NotificationConfig.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *DeleteNotificationConfigRequest) Reset() {
- *x = DeleteNotificationConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *DeleteNotificationConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*DeleteNotificationConfigRequest) ProtoMessage() {}
-
-func (x *DeleteNotificationConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use DeleteNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*DeleteNotificationConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *DeleteNotificationConfigRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Request message for GetNotificationConfig.
-type GetNotificationConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The parent bucket of the NotificationConfig.
- // Format:
- // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-}
-
-func (x *GetNotificationConfigRequest) Reset() {
- *x = GetNotificationConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetNotificationConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetNotificationConfigRequest) ProtoMessage() {}
-
-func (x *GetNotificationConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*GetNotificationConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *GetNotificationConfigRequest) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-// Request message for CreateNotificationConfig.
-type CreateNotificationConfigRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The bucket to which this NotificationConfig belongs.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Required. Properties of the NotificationConfig to be inserted.
- NotificationConfig *NotificationConfig `protobuf:"bytes,2,opt,name=notification_config,json=notificationConfig,proto3" json:"notification_config,omitempty"`
-}
-
-func (x *CreateNotificationConfigRequest) Reset() {
- *x = CreateNotificationConfigRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *CreateNotificationConfigRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateNotificationConfigRequest) ProtoMessage() {}
-
-func (x *CreateNotificationConfigRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateNotificationConfigRequest.ProtoReflect.Descriptor instead.
-func (*CreateNotificationConfigRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *CreateNotificationConfigRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *CreateNotificationConfigRequest) GetNotificationConfig() *NotificationConfig {
- if x != nil {
- return x.NotificationConfig
- }
- return nil
-}
-
-// Request message for ListNotifications.
-type ListNotificationConfigsRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Name of a Google Cloud Storage bucket.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // The maximum number of NotificationConfigs to return. The service may
- // return fewer than this value. The default value is 100. Specifying a value
- // above 100 will result in a page_size of 100.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A page token, received from a previous `ListNotificationConfigs` call.
- // Provide this to retrieve the subsequent page.
- //
- // When paginating, all other parameters provided to `ListNotificationConfigs`
- // must match the call that provided the page token.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
-}
-
-func (x *ListNotificationConfigsRequest) Reset() {
- *x = ListNotificationConfigsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListNotificationConfigsRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListNotificationConfigsRequest) ProtoMessage() {}
-
-func (x *ListNotificationConfigsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListNotificationConfigsRequest.ProtoReflect.Descriptor instead.
-func (*ListNotificationConfigsRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *ListNotificationConfigsRequest) GetParent() string {
- if x != nil {
- return x.Parent
- }
- return ""
-}
-
-func (x *ListNotificationConfigsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListNotificationConfigsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
-}
-
-// The result of a call to ListNotificationConfigs
-type ListNotificationConfigsResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The list of items.
- NotificationConfigs []*NotificationConfig `protobuf:"bytes,1,rep,name=notification_configs,json=notificationConfigs,proto3" json:"notification_configs,omitempty"`
- // A token, which can be sent as `page_token` to retrieve the next page.
- // If this field is omitted, there are no subsequent pages.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
-}
-
-func (x *ListNotificationConfigsResponse) Reset() {
- *x = ListNotificationConfigsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ListNotificationConfigsResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ListNotificationConfigsResponse) ProtoMessage() {}
-
-func (x *ListNotificationConfigsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ListNotificationConfigsResponse.ProtoReflect.Descriptor instead.
-func (*ListNotificationConfigsResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *ListNotificationConfigsResponse) GetNotificationConfigs() []*NotificationConfig {
- if x != nil {
- return x.NotificationConfigs
- }
- return nil
-}
-
-func (x *ListNotificationConfigsResponse) GetNextPageToken() string {
- if x != nil {
- return x.NextPageToken
- }
- return ""
-}
-
// Request message for ComposeObject.
type ComposeObjectRequest struct {
state protoimpl.MessageState
@@ -1068,11 +765,9 @@ type ComposeObjectRequest struct {
func (x *ComposeObjectRequest) Reset() {
*x = ComposeObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ComposeObjectRequest) String() string {
@@ -1082,8 +777,8 @@ func (x *ComposeObjectRequest) String() string {
func (*ComposeObjectRequest) ProtoMessage() {}
func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[7]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1095,7 +790,7 @@ func (x *ComposeObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ComposeObjectRequest.ProtoReflect.Descriptor instead.
func (*ComposeObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7}
}
func (x *ComposeObjectRequest) GetDestination() *Object {
@@ -1191,11 +886,9 @@ type DeleteObjectRequest struct {
func (x *DeleteObjectRequest) Reset() {
*x = DeleteObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DeleteObjectRequest) String() string {
@@ -1205,8 +898,8 @@ func (x *DeleteObjectRequest) String() string {
func (*DeleteObjectRequest) ProtoMessage() {}
func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[8]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1218,7 +911,7 @@ func (x *DeleteObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use DeleteObjectRequest.ProtoReflect.Descriptor instead.
func (*DeleteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{8}
}
func (x *DeleteObjectRequest) GetBucket() string {
@@ -1290,6 +983,12 @@ type RestoreObjectRequest struct {
Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
// Required. The specific revision of the object to restore.
Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Optional. Restore token used to differentiate soft-deleted objects with the
+ // same name and generation. Only applicable for hierarchical namespace
+ // buckets. This parameter is optional, and is only required in the rare case
+ // when there are multiple soft-deleted objects with the same name and
+ // generation.
+ RestoreToken string `protobuf:"bytes,11,opt,name=restore_token,json=restoreToken,proto3" json:"restore_token,omitempty"`
// Makes the operation conditional on whether the object's current generation
// matches the given value. Setting to 0 makes the operation succeed only if
// there are no live versions of the object.
@@ -1315,11 +1014,9 @@ type RestoreObjectRequest struct {
func (x *RestoreObjectRequest) Reset() {
*x = RestoreObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *RestoreObjectRequest) String() string {
@@ -1329,8 +1026,8 @@ func (x *RestoreObjectRequest) String() string {
func (*RestoreObjectRequest) ProtoMessage() {}
func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[9]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1342,7 +1039,7 @@ func (x *RestoreObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use RestoreObjectRequest.ProtoReflect.Descriptor instead.
func (*RestoreObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{9}
}
func (x *RestoreObjectRequest) GetBucket() string {
@@ -1366,6 +1063,13 @@ func (x *RestoreObjectRequest) GetGeneration() int64 {
return 0
}
+func (x *RestoreObjectRequest) GetRestoreToken() string {
+ if x != nil {
+ return x.RestoreToken
+ }
+ return ""
+}
+
func (x *RestoreObjectRequest) GetIfGenerationMatch() int64 {
if x != nil && x.IfGenerationMatch != nil {
return *x.IfGenerationMatch
@@ -1422,11 +1126,9 @@ type CancelResumableWriteRequest struct {
func (x *CancelResumableWriteRequest) Reset() {
*x = CancelResumableWriteRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CancelResumableWriteRequest) String() string {
@@ -1436,8 +1138,8 @@ func (x *CancelResumableWriteRequest) String() string {
func (*CancelResumableWriteRequest) ProtoMessage() {}
func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[10]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1449,7 +1151,7 @@ func (x *CancelResumableWriteRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use CancelResumableWriteRequest.ProtoReflect.Descriptor instead.
func (*CancelResumableWriteRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{10}
}
func (x *CancelResumableWriteRequest) GetUploadId() string {
@@ -1469,11 +1171,9 @@ type CancelResumableWriteResponse struct {
func (x *CancelResumableWriteResponse) Reset() {
*x = CancelResumableWriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CancelResumableWriteResponse) String() string {
@@ -1483,8 +1183,8 @@ func (x *CancelResumableWriteResponse) String() string {
func (*CancelResumableWriteResponse) ProtoMessage() {}
func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[11]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1496,7 +1196,7 @@ func (x *CancelResumableWriteResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use CancelResumableWriteResponse.ProtoReflect.Descriptor instead.
func (*CancelResumableWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{11}
}
// Request message for ReadObject.
@@ -1557,11 +1257,9 @@ type ReadObjectRequest struct {
func (x *ReadObjectRequest) Reset() {
*x = ReadObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadObjectRequest) String() string {
@@ -1571,8 +1269,8 @@ func (x *ReadObjectRequest) String() string {
func (*ReadObjectRequest) ProtoMessage() {}
func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[12]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1584,7 +1282,7 @@ func (x *ReadObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadObjectRequest.ProtoReflect.Descriptor instead.
func (*ReadObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12}
}
func (x *ReadObjectRequest) GetBucket() string {
@@ -1701,15 +1399,19 @@ type GetObjectRequest struct {
// metadata.owner.
// * may be used to mean "all fields".
ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,10,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ // Optional. Restore token used to differentiate soft-deleted objects with the
+ // same name and generation. Only applicable for hierarchical namespace
+ // buckets and if soft_deleted is set to true. This parameter is optional, and
+ // is only required in the rare case when there are multiple soft-deleted
+ // objects with the same name and generation.
+ RestoreToken string `protobuf:"bytes,12,opt,name=restore_token,json=restoreToken,proto3" json:"restore_token,omitempty"`
}
func (x *GetObjectRequest) Reset() {
*x = GetObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GetObjectRequest) String() string {
@@ -1719,8 +1421,8 @@ func (x *GetObjectRequest) String() string {
func (*GetObjectRequest) ProtoMessage() {}
func (x *GetObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[13]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1732,7 +1434,7 @@ func (x *GetObjectRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use GetObjectRequest.ProtoReflect.Descriptor instead.
func (*GetObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{13}
}
func (x *GetObjectRequest) GetBucket() string {
@@ -1805,6 +1507,13 @@ func (x *GetObjectRequest) GetReadMask() *fieldmaskpb.FieldMask {
return nil
}
+func (x *GetObjectRequest) GetRestoreToken() string {
+ if x != nil {
+ return x.RestoreToken
+ }
+ return ""
+}
+
// Response message for ReadObject.
type ReadObjectResponse struct {
state protoimpl.MessageState
@@ -1831,11 +1540,9 @@ type ReadObjectResponse struct {
func (x *ReadObjectResponse) Reset() {
*x = ReadObjectResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ReadObjectResponse) String() string {
@@ -1845,8 +1552,8 @@ func (x *ReadObjectResponse) String() string {
func (*ReadObjectResponse) ProtoMessage() {}
func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[14]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1858,7 +1565,7 @@ func (x *ReadObjectResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ReadObjectResponse.ProtoReflect.Descriptor instead.
func (*ReadObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{14}
}
func (x *ReadObjectResponse) GetChecksummedData() *ChecksummedData {
@@ -1889,61 +1596,72 @@ func (x *ReadObjectResponse) GetMetadata() *Object {
return nil
}
-// Describes an attempt to insert an object, possibly over multiple requests.
-type WriteObjectSpec struct {
+// Describes the object to read in a BidiReadObject request.
+type BidiReadObjectSpec struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Destination object, including its name and its metadata.
- Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
- // Apply a predefined set of access controls to this object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
- // Makes the operation conditional on whether the object's current
- // generation matches the given value. Setting to 0 makes the operation
- // succeed only if there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live
- // generation does not match the given value. If no live object exists, the
- // precondition fails. Setting to 0 makes the operation succeed only if
- // there is a live version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Required. The name of the bucket containing the object to read.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the object to read.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // If present, selects a specific revision of this object (as opposed
+ // to the latest version, the default).
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ IfMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
// Makes the operation conditional on whether the object's current
// metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // The expected final object size being uploaded.
- // If this value is set, closing the stream after writing fewer or more than
- // `object_size` bytes will result in an OUT_OF_RANGE error.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // Mask specifying which fields to read.
+ // The checksummed_data field and its children will always be present.
+ // If no mask is specified, will default to all fields except metadata.owner
+ // and metadata.acl.
+ // * may be used to mean "all fields".
+ // As per https://google.aip.dev/161, this field is deprecated.
+ // As an alternative, grpc metadata can be used:
+ // https://cloud.google.com/apis/docs/system-parameters#definitions
//
- // This situation is considered a client error, and if such an error occurs
- // you must start the upload over from scratch, this time sending the correct
- // number of bytes.
- ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"`
+ // Deprecated: Marked as deprecated in google/storage/v2/storage.proto.
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,12,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ // The client can optionally set this field. The read handle is an optimized
+ // way of creating new streams. Read handles are generated and periodically
+ // refreshed from prior reads.
+ ReadHandle *BidiReadHandle `protobuf:"bytes,13,opt,name=read_handle,json=readHandle,proto3,oneof" json:"read_handle,omitempty"`
+ // The routing token that influences request routing for the stream. Must be
+ // provided if a BidiReadObjectRedirectedError is returned.
+ RoutingToken *string `protobuf:"bytes,14,opt,name=routing_token,json=routingToken,proto3,oneof" json:"routing_token,omitempty"`
}
-func (x *WriteObjectSpec) Reset() {
- *x = WriteObjectSpec{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiReadObjectSpec) Reset() {
+ *x = BidiReadObjectSpec{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *WriteObjectSpec) String() string {
+func (x *BidiReadObjectSpec) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectSpec) ProtoMessage() {}
+func (*BidiReadObjectSpec) ProtoMessage() {}
-func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *BidiReadObjectSpec) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[15]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1953,127 +1671,123 @@ func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead.
-func (*WriteObjectSpec) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20}
+// Deprecated: Use BidiReadObjectSpec.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectSpec) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{15}
}
-func (x *WriteObjectSpec) GetResource() *Object {
+func (x *BidiReadObjectSpec) GetBucket() string {
if x != nil {
- return x.Resource
+ return x.Bucket
}
- return nil
+ return ""
}
-func (x *WriteObjectSpec) GetPredefinedAcl() string {
+func (x *BidiReadObjectSpec) GetObject() string {
if x != nil {
- return x.PredefinedAcl
+ return x.Object
}
return ""
}
-func (x *WriteObjectSpec) GetIfGenerationMatch() int64 {
+func (x *BidiReadObjectSpec) GetGeneration() int64 {
+ if x != nil {
+ return x.Generation
+ }
+ return 0
+}
+
+func (x *BidiReadObjectSpec) GetIfGenerationMatch() int64 {
if x != nil && x.IfGenerationMatch != nil {
return *x.IfGenerationMatch
}
return 0
}
-func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 {
+func (x *BidiReadObjectSpec) GetIfGenerationNotMatch() int64 {
if x != nil && x.IfGenerationNotMatch != nil {
return *x.IfGenerationNotMatch
}
return 0
}
-func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 {
+func (x *BidiReadObjectSpec) GetIfMetagenerationMatch() int64 {
if x != nil && x.IfMetagenerationMatch != nil {
return *x.IfMetagenerationMatch
}
return 0
}
-func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 {
+func (x *BidiReadObjectSpec) GetIfMetagenerationNotMatch() int64 {
if x != nil && x.IfMetagenerationNotMatch != nil {
return *x.IfMetagenerationNotMatch
}
return 0
}
-func (x *WriteObjectSpec) GetObjectSize() int64 {
- if x != nil && x.ObjectSize != nil {
- return *x.ObjectSize
+func (x *BidiReadObjectSpec) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
}
- return 0
+ return nil
}
-// Request message for WriteObject.
-type WriteObjectRequest struct {
+// Deprecated: Marked as deprecated in google/storage/v2/storage.proto.
+func (x *BidiReadObjectSpec) GetReadMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.ReadMask
+ }
+ return nil
+}
+
+func (x *BidiReadObjectSpec) GetReadHandle() *BidiReadHandle {
+ if x != nil {
+ return x.ReadHandle
+ }
+ return nil
+}
+
+func (x *BidiReadObjectSpec) GetRoutingToken() string {
+ if x != nil && x.RoutingToken != nil {
+ return *x.RoutingToken
+ }
+ return ""
+}
+
+// Request message for BidiReadObject.
+type BidiReadObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The first message of each stream should set one of the following.
- //
- // Types that are assignable to FirstMessage:
- //
- // *WriteObjectRequest_UploadId
- // *WriteObjectRequest_WriteObjectSpec
- FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
- // Required. The offset from the beginning of the object at which the data
- // should be written.
- //
- // In the first `WriteObjectRequest` of a `WriteObject()` action, it
- // indicates the initial offset for the `Write()` call. The value **must** be
- // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
- // return (0 if this is the first write to the object).
- //
- // On subsequent calls, this value **must** be no larger than the sum of the
- // first `write_offset` and the sizes of all `data` chunks sent previously on
- // this stream.
- //
- // An incorrect value will cause an error.
- WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
- // A portion of the data for the object.
- //
- // Types that are assignable to Data:
- //
- // *WriteObjectRequest_ChecksummedData
- Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
- // Checksums for the complete object. If the checksums computed by the service
- // don't match the specified checksums the call will fail. May only be
- // provided in the first or last request (either with first_message, or
- // finish_write set).
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
- // If `true`, this indicates that the write is complete. Sending any
- // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
- // will cause an error.
- // For a non-resumable write (where the upload_id was not set in the first
- // message), it is an error not to set this field in the final message of the
- // stream.
- FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // The first message of each stream should set this field. If this is not
+ // the first message, an error will be returned. Describes the object to read.
+ ReadObjectSpec *BidiReadObjectSpec `protobuf:"bytes,1,opt,name=read_object_spec,json=readObjectSpec,proto3" json:"read_object_spec,omitempty"`
+ // Provides a list of 0 or more (up to 100) ranges to read. If a single range
+ // is large enough to require multiple responses, they are guaranteed to be
+ // delivered in increasing offset order. There are no ordering guarantees
+ // across ranges. When no ranges are provided, the response message will not
+ // include ObjectRangeData. For full object downloads, the offset and size can
+ // be set to 0.
+ ReadRanges []*ReadRange `protobuf:"bytes,8,rep,name=read_ranges,json=readRanges,proto3" json:"read_ranges,omitempty"`
}
-func (x *WriteObjectRequest) Reset() {
- *x = WriteObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiReadObjectRequest) Reset() {
+ *x = BidiReadObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *WriteObjectRequest) String() string {
+func (x *BidiReadObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectRequest) ProtoMessage() {}
+func (*BidiReadObjectRequest) ProtoMessage() {}
-func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *BidiReadObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[16]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2083,139 +1797,192 @@ func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*WriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21}
+// Deprecated: Use BidiReadObjectRequest.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{16}
}
-func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage {
- if m != nil {
- return m.FirstMessage
+func (x *BidiReadObjectRequest) GetReadObjectSpec() *BidiReadObjectSpec {
+ if x != nil {
+ return x.ReadObjectSpec
}
return nil
}
-func (x *WriteObjectRequest) GetUploadId() string {
- if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok {
- return x.UploadId
+func (x *BidiReadObjectRequest) GetReadRanges() []*ReadRange {
+ if x != nil {
+ return x.ReadRanges
}
- return ""
+ return nil
}
-func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
- if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok {
- return x.WriteObjectSpec
- }
- return nil
+// Response message for BidiReadObject.
+type BidiReadObjectResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A portion of the object's data. The service **may** leave data
+ // empty for any given ReadResponse. This enables the service to inform the
+ // client that the request is still live while it is running an operation to
+ // generate more data.
+ // The service **may** pipeline multiple responses belonging to different read
+ // requests. Each ObjectRangeData entry will have a read_id
+ // set to the same value as the corresponding source read request.
+ ObjectDataRanges []*ObjectRangeData `protobuf:"bytes,6,rep,name=object_data_ranges,json=objectDataRanges,proto3" json:"object_data_ranges,omitempty"`
+ // Metadata of the object whose media is being returned.
+ // Only populated in the first response in the stream and not populated when
+ // the stream is opened with a read handle.
+ Metadata *Object `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ // This field will be periodically refreshed, however it may not be set in
+ // every response. It allows the client to more efficiently open subsequent
+ // bidirectional streams to the same object.
+ ReadHandle *BidiReadHandle `protobuf:"bytes,7,opt,name=read_handle,json=readHandle,proto3" json:"read_handle,omitempty"`
}
-func (x *WriteObjectRequest) GetWriteOffset() int64 {
+func (x *BidiReadObjectResponse) Reset() {
+ *x = BidiReadObjectResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BidiReadObjectResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BidiReadObjectResponse) ProtoMessage() {}
+
+func (x *BidiReadObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[17]
if x != nil {
- return x.WriteOffset
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return 0
+ return mi.MessageOf(x)
}
-func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data {
- if m != nil {
- return m.Data
+// Deprecated: Use BidiReadObjectResponse.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{17}
+}
+
+func (x *BidiReadObjectResponse) GetObjectDataRanges() []*ObjectRangeData {
+ if x != nil {
+ return x.ObjectDataRanges
}
return nil
}
-func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData {
- if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok {
- return x.ChecksummedData
- }
- return nil
-}
-
-func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *BidiReadObjectResponse) GetMetadata() *Object {
if x != nil {
- return x.ObjectChecksums
+ return x.Metadata
}
return nil
}
-func (x *WriteObjectRequest) GetFinishWrite() bool {
- if x != nil {
- return x.FinishWrite
- }
- return false
-}
-
-func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *BidiReadObjectResponse) GetReadHandle() *BidiReadHandle {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.ReadHandle
}
return nil
}
-type isWriteObjectRequest_FirstMessage interface {
- isWriteObjectRequest_FirstMessage()
+// Error proto containing details for a redirected read. This error is only
+// returned on initial open in case of a redirect.
+type BidiReadObjectRedirectedError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The read handle for the redirected read. The client can use this for the
+ // subsequent open.
+ ReadHandle *BidiReadHandle `protobuf:"bytes,1,opt,name=read_handle,json=readHandle,proto3" json:"read_handle,omitempty"`
+ // The routing token that should be used when reopening the read stream.
+ RoutingToken *string `protobuf:"bytes,2,opt,name=routing_token,json=routingToken,proto3,oneof" json:"routing_token,omitempty"`
}
-type WriteObjectRequest_UploadId struct {
- // For resumable uploads. This should be the `upload_id` returned from a
- // call to `StartResumableWriteResponse`.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
+func (x *BidiReadObjectRedirectedError) Reset() {
+ *x = BidiReadObjectRedirectedError{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-type WriteObjectRequest_WriteObjectSpec struct {
- // For non-resumable uploads. Describes the overall upload, including the
- // destination bucket and object name, preconditions, etc.
- WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
+func (x *BidiReadObjectRedirectedError) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {}
+func (*BidiReadObjectRedirectedError) ProtoMessage() {}
-func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {}
+func (x *BidiReadObjectRedirectedError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[18]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
-type isWriteObjectRequest_Data interface {
- isWriteObjectRequest_Data()
+// Deprecated: Use BidiReadObjectRedirectedError.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectRedirectedError) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{18}
}
-type WriteObjectRequest_ChecksummedData struct {
- // The data to insert. If a crc32c checksum is provided that doesn't match
- // the checksum computed by the service, the request will fail.
- ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
+func (x *BidiReadObjectRedirectedError) GetReadHandle() *BidiReadHandle {
+ if x != nil {
+ return x.ReadHandle
+ }
+ return nil
}
-func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {}
+func (x *BidiReadObjectRedirectedError) GetRoutingToken() string {
+ if x != nil && x.RoutingToken != nil {
+ return *x.RoutingToken
+ }
+ return ""
+}
-// Response message for WriteObject.
-type WriteObjectResponse struct {
+// Error proto containing details for a redirected write. This error is only
+// returned on initial open in case of a redirect.
+type BidiWriteObjectRedirectedError struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The response will set one of the following.
- //
- // Types that are assignable to WriteStatus:
- //
- // *WriteObjectResponse_PersistedSize
- // *WriteObjectResponse_Resource
- WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // The routing token that should be used when reopening the write stream.
+ RoutingToken *string `protobuf:"bytes,1,opt,name=routing_token,json=routingToken,proto3,oneof" json:"routing_token,omitempty"`
+ // Opaque value describing a previous write.
+ WriteHandle *BidiWriteHandle `protobuf:"bytes,2,opt,name=write_handle,json=writeHandle,proto3,oneof" json:"write_handle,omitempty"`
+ // The generation of the object that triggered the redirect.
+ // Note that if this error was returned as part of an appendable object
+ // create, this object generation is now successfully created and
+ // append_object_spec should be used when reconnecting.
+ Generation *int64 `protobuf:"varint,3,opt,name=generation,proto3,oneof" json:"generation,omitempty"`
}
-func (x *WriteObjectResponse) Reset() {
- *x = WriteObjectResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiWriteObjectRedirectedError) Reset() {
+ *x = BidiWriteObjectRedirectedError{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *WriteObjectResponse) String() string {
+func (x *BidiWriteObjectRedirectedError) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*WriteObjectResponse) ProtoMessage() {}
+func (*BidiWriteObjectRedirectedError) ProtoMessage() {}
-func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *BidiWriteObjectRedirectedError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[19]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2225,135 +1992,59 @@ func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead.
-func (*WriteObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22}
-}
-
-func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus {
- if m != nil {
- return m.WriteStatus
- }
- return nil
+// Deprecated: Use BidiWriteObjectRedirectedError.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectRedirectedError) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{19}
}
-func (x *WriteObjectResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok {
- return x.PersistedSize
+func (x *BidiWriteObjectRedirectedError) GetRoutingToken() string {
+ if x != nil && x.RoutingToken != nil {
+ return *x.RoutingToken
}
- return 0
+ return ""
}
-func (x *WriteObjectResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok {
- return x.Resource
+func (x *BidiWriteObjectRedirectedError) GetWriteHandle() *BidiWriteHandle {
+ if x != nil {
+ return x.WriteHandle
}
return nil
}
-type isWriteObjectResponse_WriteStatus interface {
- isWriteObjectResponse_WriteStatus()
-}
-
-type WriteObjectResponse_PersistedSize struct {
- // The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. Only set if the upload has not finalized.
- PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
-}
-
-type WriteObjectResponse_Resource struct {
- // A resource containing the metadata for the uploaded object. Only set if
- // the upload has finalized.
- Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
+func (x *BidiWriteObjectRedirectedError) GetGeneration() int64 {
+ if x != nil && x.Generation != nil {
+ return *x.Generation
+ }
+ return 0
}
-func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {}
-
-func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {}
-
-// Request message for BidiWriteObject.
-type BidiWriteObjectRequest struct {
+// Error extension proto containing details for all outstanding reads on the
+// failed stream
+type BidiReadObjectError struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The first message of each stream should set one of the following.
- //
- // Types that are assignable to FirstMessage:
- //
- // *BidiWriteObjectRequest_UploadId
- // *BidiWriteObjectRequest_WriteObjectSpec
- FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
- // Required. The offset from the beginning of the object at which the data
- // should be written.
- //
- // In the first `WriteObjectRequest` of a `WriteObject()` action, it
- // indicates the initial offset for the `Write()` call. The value **must** be
- // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
- // return (0 if this is the first write to the object).
- //
- // On subsequent calls, this value **must** be no larger than the sum of the
- // first `write_offset` and the sizes of all `data` chunks sent previously on
- // this stream.
- //
- // An invalid value will cause an error.
- WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
- // A portion of the data for the object.
- //
- // Types that are assignable to Data:
- //
- // *BidiWriteObjectRequest_ChecksummedData
- Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
- // Checksums for the complete object. If the checksums computed by the service
- // don't match the specified checksums the call will fail. May only be
- // provided in the first or last request (either with first_message, or
- // finish_write set).
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
- // For each BidiWriteObjectRequest where state_lookup is `true` or the client
- // closes the stream, the service will send a BidiWriteObjectResponse
- // containing the current persisted size. The persisted size sent in responses
- // covers all the bytes the server has persisted thus far and can be used to
- // decide what data is safe for the client to drop. Note that the object's
- // current size reported by the BidiWriteObjectResponse may lag behind the
- // number of bytes written by the client. This field is ignored if
- // `finish_write` is set to true.
- StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"`
- // Persists data written on the stream, up to and including the current
- // message, to permanent storage. This option should be used sparingly as it
- // may reduce performance. Ongoing writes will periodically be persisted on
- // the server even when `flush` is not set. This field is ignored if
- // `finish_write` is set to true since there's no need to checkpoint or flush
- // if this message completes the write.
- Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"`
- // If `true`, this indicates that the write is complete. Sending any
- // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
- // will cause an error.
- // For a non-resumable write (where the upload_id was not set in the first
- // message), it is an error not to set this field in the final message of the
- // stream.
- FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // The error code for each outstanding read_range
+ ReadRangeErrors []*ReadRangeError `protobuf:"bytes,1,rep,name=read_range_errors,json=readRangeErrors,proto3" json:"read_range_errors,omitempty"`
}
-func (x *BidiWriteObjectRequest) Reset() {
- *x = BidiWriteObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiReadObjectError) Reset() {
+ *x = BidiReadObjectError{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *BidiWriteObjectRequest) String() string {
+func (x *BidiReadObjectError) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*BidiWriteObjectRequest) ProtoMessage() {}
+func (*BidiReadObjectError) ProtoMessage() {}
-func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *BidiReadObjectError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[20]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2363,153 +2054,191 @@ func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23}
+// Deprecated: Use BidiReadObjectError.ProtoReflect.Descriptor instead.
+func (*BidiReadObjectError) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{20}
}
-func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage {
- if m != nil {
- return m.FirstMessage
+func (x *BidiReadObjectError) GetReadRangeErrors() []*ReadRangeError {
+ if x != nil {
+ return x.ReadRangeErrors
}
return nil
}
-func (x *BidiWriteObjectRequest) GetUploadId() string {
- if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok {
- return x.UploadId
- }
- return ""
-}
+// Error extension proto containing details for a single range read
+type ReadRangeError struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
- if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok {
- return x.WriteObjectSpec
- }
- return nil
+ // The id of the corresponding read_range
+ ReadId int64 `protobuf:"varint,1,opt,name=read_id,json=readId,proto3" json:"read_id,omitempty"`
+ // The status which should be an enum value of [google.rpc.Code].
+ Status *status.Status `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
}
-func (x *BidiWriteObjectRequest) GetWriteOffset() int64 {
- if x != nil {
- return x.WriteOffset
- }
- return 0
+func (x *ReadRangeError) Reset() {
+ *x = ReadRangeError{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data {
- if m != nil {
- return m.Data
- }
- return nil
+func (x *ReadRangeError) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData {
- if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok {
- return x.ChecksummedData
- }
- return nil
-}
+func (*ReadRangeError) ProtoMessage() {}
-func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *ReadRangeError) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[21]
if x != nil {
- return x.ObjectChecksums
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return nil
+ return mi.MessageOf(x)
}
-func (x *BidiWriteObjectRequest) GetStateLookup() bool {
- if x != nil {
- return x.StateLookup
- }
- return false
+// Deprecated: Use ReadRangeError.ProtoReflect.Descriptor instead.
+func (*ReadRangeError) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{21}
}
-func (x *BidiWriteObjectRequest) GetFlush() bool {
+func (x *ReadRangeError) GetReadId() int64 {
if x != nil {
- return x.Flush
+ return x.ReadId
}
- return false
+ return 0
}
-func (x *BidiWriteObjectRequest) GetFinishWrite() bool {
+func (x *ReadRangeError) GetStatus() *status.Status {
if x != nil {
- return x.FinishWrite
+ return x.Status
}
- return false
+ return nil
}
-func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
-}
+// Describes a range of bytes to read in a BidiReadObjectRanges request.
+type ReadRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-type isBidiWriteObjectRequest_FirstMessage interface {
- isBidiWriteObjectRequest_FirstMessage()
+ // Required. The offset for the first byte to return in the read, relative to
+ // the start of the object.
+ //
+ // A negative read_offset value will be interpreted as the number of bytes
+ // back from the end of the object to be returned. For example, if an object's
+ // length is 15 bytes, a ReadObjectRequest with read_offset = -5 and
+ // read_length = 3 would return bytes 10 through 12 of the object. Requesting
+ // a negative offset with magnitude larger than the size of the object will
+ // return the entire object. A read_offset larger than the size of the object
+ // will result in an OutOfRange error.
+ ReadOffset int64 `protobuf:"varint,1,opt,name=read_offset,json=readOffset,proto3" json:"read_offset,omitempty"`
+ // Optional. The maximum number of data bytes the server is allowed to return
+ // across all response messages with the same read_id. A read_length of zero
+ // indicates to read until the resource end, and a negative read_length will
+ // cause an error. If the stream returns fewer bytes than allowed by the
+ // read_length and no error occurred, the stream includes all data from the
+ // read_offset to the resource end.
+ ReadLength int64 `protobuf:"varint,2,opt,name=read_length,json=readLength,proto3" json:"read_length,omitempty"`
+ // Required. Read identifier provided by the client. When the client issues
+ // more than one outstanding ReadRange on the same stream, responses can be
+ // mapped back to their corresponding requests using this value. Clients must
+ // ensure that all outstanding requests have different read_id values. The
+ // server may close the stream with an error if this condition is not met.
+ ReadId int64 `protobuf:"varint,3,opt,name=read_id,json=readId,proto3" json:"read_id,omitempty"`
+}
+
+func (x *ReadRange) Reset() {
+ *x = ReadRange{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-type BidiWriteObjectRequest_UploadId struct {
- // For resumable uploads. This should be the `upload_id` returned from a
- // call to `StartResumableWriteResponse`.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
+func (x *ReadRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type BidiWriteObjectRequest_WriteObjectSpec struct {
- // For non-resumable uploads. Describes the overall upload, including the
- // destination bucket and object name, preconditions, etc.
- WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
-}
+func (*ReadRange) ProtoMessage() {}
-func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {}
+func (x *ReadRange) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[22]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
-func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
+// Deprecated: Use ReadRange.ProtoReflect.Descriptor instead.
+func (*ReadRange) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{22}
+}
-type isBidiWriteObjectRequest_Data interface {
- isBidiWriteObjectRequest_Data()
+func (x *ReadRange) GetReadOffset() int64 {
+ if x != nil {
+ return x.ReadOffset
+ }
+ return 0
}
-type BidiWriteObjectRequest_ChecksummedData struct {
- // The data to insert. If a crc32c checksum is provided that doesn't match
- // the checksum computed by the service, the request will fail.
- ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
+func (x *ReadRange) GetReadLength() int64 {
+ if x != nil {
+ return x.ReadLength
+ }
+ return 0
}
-func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {}
+func (x *ReadRange) GetReadId() int64 {
+ if x != nil {
+ return x.ReadId
+ }
+ return 0
+}
-// Response message for BidiWriteObject.
-type BidiWriteObjectResponse struct {
+// Contains data and metadata for a range of an object.
+type ObjectRangeData struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The response will set one of the following.
- //
- // Types that are assignable to WriteStatus:
- //
- // *BidiWriteObjectResponse_PersistedSize
- // *BidiWriteObjectResponse_Resource
- WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
-}
-
-func (x *BidiWriteObjectResponse) Reset() {
- *x = BidiWriteObjectResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ // A portion of the data for the object.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,1,opt,name=checksummed_data,json=checksummedData,proto3" json:"checksummed_data,omitempty"`
+ // The ReadRange describes the content being returned with read_id set to the
+ // corresponding ReadObjectRequest in the stream. Multiple ObjectRangeData
+ // messages may have the same read_id but increasing offsets.
+ // ReadObjectResponse messages with the same read_id are guaranteed to be
+ // delivered in increasing offset order.
+ ReadRange *ReadRange `protobuf:"bytes,2,opt,name=read_range,json=readRange,proto3" json:"read_range,omitempty"`
+ // If set, indicates there are no more bytes to read for the given ReadRange.
+ RangeEnd bool `protobuf:"varint,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+}
+
+func (x *ObjectRangeData) Reset() {
+ *x = ObjectRangeData{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *BidiWriteObjectResponse) String() string {
+func (x *ObjectRangeData) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*BidiWriteObjectResponse) ProtoMessage() {}
+func (*ObjectRangeData) ProtoMessage() {}
-func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *ObjectRangeData) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[23]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2519,132 +2248,60 @@ func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
-func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24}
+// Deprecated: Use ObjectRangeData.ProtoReflect.Descriptor instead.
+func (*ObjectRangeData) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{23}
}
-func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus {
- if m != nil {
- return m.WriteStatus
+func (x *ObjectRangeData) GetChecksummedData() *ChecksummedData {
+ if x != nil {
+ return x.ChecksummedData
}
return nil
}
-func (x *BidiWriteObjectResponse) GetPersistedSize() int64 {
- if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok {
- return x.PersistedSize
- }
- return 0
-}
-
-func (x *BidiWriteObjectResponse) GetResource() *Object {
- if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok {
- return x.Resource
+func (x *ObjectRangeData) GetReadRange() *ReadRange {
+ if x != nil {
+ return x.ReadRange
}
return nil
}
-type isBidiWriteObjectResponse_WriteStatus interface {
- isBidiWriteObjectResponse_WriteStatus()
-}
-
-type BidiWriteObjectResponse_PersistedSize struct {
- // The total number of bytes that have been processed for the given object
- // from all `WriteObject` calls. Only set if the upload has not finalized.
- PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
-}
-
-type BidiWriteObjectResponse_Resource struct {
- // A resource containing the metadata for the uploaded object. Only set if
- // the upload has finalized.
- Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
+func (x *ObjectRangeData) GetRangeEnd() bool {
+ if x != nil {
+ return x.RangeEnd
+ }
+ return false
}
-func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {}
-
-func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {}
-
-// Request message for ListObjects.
-type ListObjectsRequest struct {
+// BidiReadHandle contains a handle from a previous BiDiReadObject
+// invocation. The client can use this instead of BidiReadObjectSpec as an
+// optimized way of opening subsequent bidirectional streams to the same object.
+type BidiReadHandle struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. Name of the bucket in which to look for objects.
- Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
- // Maximum number of `items` plus `prefixes` to return
- // in a single page of responses. As duplicate `prefixes` are
- // omitted, fewer total results may be returned than requested. The service
- // will use this parameter or 1,000 items, whichever is smaller.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A previously-returned page token representing part of the larger set of
- // results to view.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // If set, returns results in a directory-like mode. `items` will contain
- // only objects whose names, aside from the `prefix`, do not
- // contain `delimiter`. Objects whose names, aside from the
- // `prefix`, contain `delimiter` will have their name,
- // truncated after the `delimiter`, returned in
- // `prefixes`. Duplicate `prefixes` are omitted.
- Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
- // If true, objects that end in exactly one instance of `delimiter`
- // will have their metadata included in `items` in addition to
- // `prefixes`.
- IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"`
- // Filter results to objects whose names begin with this prefix.
- Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
- // If `true`, lists all versions of an object as distinct results.
- // For more information, see
- // [Object
- // Versioning](https://cloud.google.com/storage/docs/object-versioning).
- Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"`
- // Mask specifying which fields to read from each result.
- // If no mask is specified, will default to all fields except items.acl and
- // items.owner.
- // * may be used to mean "all fields".
- ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
- // Optional. Filter results to objects whose names are lexicographically equal
- // to or after lexicographic_start. If lexicographic_end is also set, the
- // objects listed have names between lexicographic_start (inclusive) and
- // lexicographic_end (exclusive).
- LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"`
- // Optional. Filter results to objects whose names are lexicographically
- // before lexicographic_end. If lexicographic_start is also set, the objects
- // listed have names between lexicographic_start (inclusive) and
- // lexicographic_end (exclusive).
- LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"`
- // Optional. If true, only list all soft-deleted versions of the object.
- // Soft delete policy is required to set this option.
- SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"`
- // Optional. If true, will also include folders and managed folders (besides
- // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'.
- IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"`
- // Optional. Filter results to objects and prefixes that match this glob
- // pattern. See [List Objects Using
- // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
- // for the full syntax.
- MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
+ // Required. Opaque value describing a previous read.
+ Handle []byte `protobuf:"bytes,1,opt,name=handle,proto3" json:"handle,omitempty"`
}
-func (x *ListObjectsRequest) Reset() {
- *x = ListObjectsRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *BidiReadHandle) Reset() {
+ *x = BidiReadHandle{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *ListObjectsRequest) String() string {
+func (x *BidiReadHandle) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListObjectsRequest) ProtoMessage() {}
+func (*BidiReadHandle) ProtoMessage() {}
-func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *BidiReadHandle) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[24]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2654,80 +2311,1035 @@ func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
-func (*ListObjectsRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25}
+// Deprecated: Use BidiReadHandle.ProtoReflect.Descriptor instead.
+func (*BidiReadHandle) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{24}
}
-func (x *ListObjectsRequest) GetParent() string {
+func (x *BidiReadHandle) GetHandle() []byte {
if x != nil {
- return x.Parent
+ return x.Handle
}
- return ""
+ return nil
}
-func (x *ListObjectsRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
+// BidiWriteHandle contains a handle from a previous BidiWriteObject
+// invocation. The client can use this as an optimized way of opening subsequent
+// bidirectional streams to the same object.
+type BidiWriteHandle struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (x *ListObjectsRequest) GetPageToken() string {
- if x != nil {
- return x.PageToken
- }
- return ""
+ // Required. Opaque value describing a previous write.
+ Handle []byte `protobuf:"bytes,1,opt,name=handle,proto3" json:"handle,omitempty"`
}
-func (x *ListObjectsRequest) GetDelimiter() string {
- if x != nil {
- return x.Delimiter
- }
- return ""
+func (x *BidiWriteHandle) Reset() {
+ *x = BidiWriteHandle{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool {
- if x != nil {
- return x.IncludeTrailingDelimiter
- }
- return false
+func (x *BidiWriteHandle) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-func (x *ListObjectsRequest) GetPrefix() string {
+func (*BidiWriteHandle) ProtoMessage() {}
+
+func (x *BidiWriteHandle) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[25]
if x != nil {
- return x.Prefix
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return ""
+ return mi.MessageOf(x)
}
-func (x *ListObjectsRequest) GetVersions() bool {
- if x != nil {
- return x.Versions
- }
- return false
+// Deprecated: Use BidiWriteHandle.ProtoReflect.Descriptor instead.
+func (*BidiWriteHandle) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{25}
}
-func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
+func (x *BidiWriteHandle) GetHandle() []byte {
if x != nil {
- return x.ReadMask
+ return x.Handle
}
return nil
}
-func (x *ListObjectsRequest) GetLexicographicStart() string {
- if x != nil {
- return x.LexicographicStart
- }
- return ""
-}
+// Describes an attempt to insert an object, possibly over multiple requests.
+type WriteObjectSpec struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
-func (x *ListObjectsRequest) GetLexicographicEnd() string {
- if x != nil {
- return x.LexicographicEnd
- }
- return ""
-}
+ // Required. Destination object, including its name and its metadata.
+ Resource *Object `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ // Apply a predefined set of access controls to this object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ PredefinedAcl string `protobuf:"bytes,7,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // generation matches the given value. Setting to 0 makes the operation
+ // succeed only if there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,3,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live
+ // generation does not match the given value. If no live object exists, the
+ // precondition fails. Setting to 0 makes the operation succeed only if
+ // there is a live version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,4,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,6,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // The expected final object size being uploaded.
+ // If this value is set, closing the stream after writing fewer or more than
+ // `object_size` bytes will result in an OUT_OF_RANGE error.
+ //
+ // This situation is considered a client error, and if such an error occurs
+ // you must start the upload over from scratch, this time sending the correct
+ // number of bytes.
+ ObjectSize *int64 `protobuf:"varint,8,opt,name=object_size,json=objectSize,proto3,oneof" json:"object_size,omitempty"`
+ // If true, the object will be created in appendable mode.
+ // This field may only be set when using BidiWriteObject.
+ Appendable *bool `protobuf:"varint,9,opt,name=appendable,proto3,oneof" json:"appendable,omitempty"`
+}
+
+func (x *WriteObjectSpec) Reset() {
+ *x = WriteObjectSpec{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WriteObjectSpec) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteObjectSpec) ProtoMessage() {}
+
+func (x *WriteObjectSpec) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[26]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteObjectSpec.ProtoReflect.Descriptor instead.
+func (*WriteObjectSpec) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26}
+}
+
+func (x *WriteObjectSpec) GetResource() *Object {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *WriteObjectSpec) GetPredefinedAcl() string {
+ if x != nil {
+ return x.PredefinedAcl
+ }
+ return ""
+}
+
+func (x *WriteObjectSpec) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
+ }
+ return 0
+}
+
+func (x *WriteObjectSpec) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
+}
+
+func (x *WriteObjectSpec) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *WriteObjectSpec) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *WriteObjectSpec) GetObjectSize() int64 {
+ if x != nil && x.ObjectSize != nil {
+ return *x.ObjectSize
+ }
+ return 0
+}
+
+func (x *WriteObjectSpec) GetAppendable() bool {
+ if x != nil && x.Appendable != nil {
+ return *x.Appendable
+ }
+ return false
+}
+
+// Request message for WriteObject.
+type WriteObjectRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The first message of each stream should set one of the following.
+ //
+ // Types that are assignable to FirstMessage:
+ //
+ // *WriteObjectRequest_UploadId
+ // *WriteObjectRequest_WriteObjectSpec
+ FirstMessage isWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
+ // Required. The offset from the beginning of the object at which the data
+ // should be written.
+ //
+ // In the first `WriteObjectRequest` of a `WriteObject()` action, it
+ // indicates the initial offset for the `Write()` call. The value **must** be
+ // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+ // return (0 if this is the first write to the object).
+ //
+ // On subsequent calls, this value **must** be no larger than the sum of the
+ // first `write_offset` and the sizes of all `data` chunks sent previously on
+ // this stream.
+ //
+ // An incorrect value will cause an error.
+ WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
+ // A portion of the data for the object.
+ //
+ // Types that are assignable to Data:
+ //
+ // *WriteObjectRequest_ChecksummedData
+ Data isWriteObjectRequest_Data `protobuf_oneof:"data"`
+ // Checksums for the complete object. If the checksums computed by the service
+ // don't match the specified checksums the call will fail. May only be
+ // provided in the first or last request (either with first_message, or
+ // finish_write set).
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+ // will cause an error.
+ // For a non-resumable write (where the upload_id was not set in the first
+ // message), it is an error not to set this field in the final message of the
+ // stream.
+ FinishWrite bool `protobuf:"varint,7,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+}
+
+func (x *WriteObjectRequest) Reset() {
+ *x = WriteObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WriteObjectRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteObjectRequest) ProtoMessage() {}
+
+func (x *WriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*WriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27}
+}
+
+func (m *WriteObjectRequest) GetFirstMessage() isWriteObjectRequest_FirstMessage {
+ if m != nil {
+ return m.FirstMessage
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetUploadId() string {
+ if x, ok := x.GetFirstMessage().(*WriteObjectRequest_UploadId); ok {
+ return x.UploadId
+ }
+ return ""
+}
+
+func (x *WriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
+ if x, ok := x.GetFirstMessage().(*WriteObjectRequest_WriteObjectSpec); ok {
+ return x.WriteObjectSpec
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetWriteOffset() int64 {
+ if x != nil {
+ return x.WriteOffset
+ }
+ return 0
+}
+
+func (m *WriteObjectRequest) GetData() isWriteObjectRequest_Data {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetChecksummedData() *ChecksummedData {
+ if x, ok := x.GetData().(*WriteObjectRequest_ChecksummedData); ok {
+ return x.ChecksummedData
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
+ }
+ return nil
+}
+
+func (x *WriteObjectRequest) GetFinishWrite() bool {
+ if x != nil {
+ return x.FinishWrite
+ }
+ return false
+}
+
+func (x *WriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
+
+type isWriteObjectRequest_FirstMessage interface {
+ isWriteObjectRequest_FirstMessage()
+}
+
+type WriteObjectRequest_UploadId struct {
+ // For resumable uploads. This should be the `upload_id` returned from a
+ // call to `StartResumableWriteResponse`.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
+}
+
+type WriteObjectRequest_WriteObjectSpec struct {
+ // For non-resumable uploads. Describes the overall upload, including the
+ // destination bucket and object name, preconditions, etc.
+ WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
+}
+
+func (*WriteObjectRequest_UploadId) isWriteObjectRequest_FirstMessage() {}
+
+func (*WriteObjectRequest_WriteObjectSpec) isWriteObjectRequest_FirstMessage() {}
+
+type isWriteObjectRequest_Data interface {
+ isWriteObjectRequest_Data()
+}
+
+type WriteObjectRequest_ChecksummedData struct {
+ // The data to insert. If a crc32c checksum is provided that doesn't match
+ // the checksum computed by the service, the request will fail.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
+}
+
+func (*WriteObjectRequest_ChecksummedData) isWriteObjectRequest_Data() {}
+
+// Response message for WriteObject.
+type WriteObjectResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The response will set one of the following.
+ //
+ // Types that are assignable to WriteStatus:
+ //
+ // *WriteObjectResponse_PersistedSize
+ // *WriteObjectResponse_Resource
+ WriteStatus isWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+}
+
+func (x *WriteObjectResponse) Reset() {
+ *x = WriteObjectResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *WriteObjectResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*WriteObjectResponse) ProtoMessage() {}
+
+func (x *WriteObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[28]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use WriteObjectResponse.ProtoReflect.Descriptor instead.
+func (*WriteObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28}
+}
+
+func (m *WriteObjectResponse) GetWriteStatus() isWriteObjectResponse_WriteStatus {
+ if m != nil {
+ return m.WriteStatus
+ }
+ return nil
+}
+
+func (x *WriteObjectResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*WriteObjectResponse_PersistedSize); ok {
+ return x.PersistedSize
+ }
+ return 0
+}
+
+func (x *WriteObjectResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*WriteObjectResponse_Resource); ok {
+ return x.Resource
+ }
+ return nil
+}
+
+type isWriteObjectResponse_WriteStatus interface {
+ isWriteObjectResponse_WriteStatus()
+}
+
+type WriteObjectResponse_PersistedSize struct {
+ // The total number of bytes that have been processed for the given object
+ // from all `WriteObject` calls. Only set if the upload has not finalized.
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
+}
+
+type WriteObjectResponse_Resource struct {
+ // A resource containing the metadata for the uploaded object. Only set if
+ // the upload has finalized.
+ Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
+}
+
+func (*WriteObjectResponse_PersistedSize) isWriteObjectResponse_WriteStatus() {}
+
+func (*WriteObjectResponse_Resource) isWriteObjectResponse_WriteStatus() {}
+
+// Describes an attempt to append to an object, possibly over multiple requests.
+type AppendObjectSpec struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. The name of the bucket containing the object to write.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. The name of the object to open for writing.
+ Object string `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"`
+ // Required. The generation number of the object to open for writing.
+ Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // An optional routing token that influences request routing for the stream.
+ // Must be provided if a BidiWriteObjectRedirectedError is returned.
+ RoutingToken *string `protobuf:"bytes,6,opt,name=routing_token,json=routingToken,proto3,oneof" json:"routing_token,omitempty"`
+ // An optional write handle returned from a previous BidiWriteObjectResponse
+ // message or a BidiWriteObjectRedirectedError error.
+ WriteHandle *BidiWriteHandle `protobuf:"bytes,7,opt,name=write_handle,json=writeHandle,proto3,oneof" json:"write_handle,omitempty"`
+}
+
+func (x *AppendObjectSpec) Reset() {
+ *x = AppendObjectSpec{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *AppendObjectSpec) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AppendObjectSpec) ProtoMessage() {}
+
+func (x *AppendObjectSpec) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[29]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AppendObjectSpec.ProtoReflect.Descriptor instead.
+func (*AppendObjectSpec) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29}
+}
+
+func (x *AppendObjectSpec) GetBucket() string {
+ if x != nil {
+ return x.Bucket
+ }
+ return ""
+}
+
+func (x *AppendObjectSpec) GetObject() string {
+ if x != nil {
+ return x.Object
+ }
+ return ""
+}
+
+func (x *AppendObjectSpec) GetGeneration() int64 {
+ if x != nil {
+ return x.Generation
+ }
+ return 0
+}
+
+func (x *AppendObjectSpec) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *AppendObjectSpec) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *AppendObjectSpec) GetRoutingToken() string {
+ if x != nil && x.RoutingToken != nil {
+ return *x.RoutingToken
+ }
+ return ""
+}
+
+func (x *AppendObjectSpec) GetWriteHandle() *BidiWriteHandle {
+ if x != nil {
+ return x.WriteHandle
+ }
+ return nil
+}
+
+// Request message for BidiWriteObject.
+type BidiWriteObjectRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The first message of each stream should set one of the following.
+ //
+ // Types that are assignable to FirstMessage:
+ //
+ // *BidiWriteObjectRequest_UploadId
+ // *BidiWriteObjectRequest_WriteObjectSpec
+ // *BidiWriteObjectRequest_AppendObjectSpec
+ FirstMessage isBidiWriteObjectRequest_FirstMessage `protobuf_oneof:"first_message"`
+ // Required. The offset from the beginning of the object at which the data
+ // should be written.
+ //
+ // In the first `WriteObjectRequest` of a `WriteObject()` action, it
+ // indicates the initial offset for the `Write()` call. The value **must** be
+ // equal to the `persisted_size` that a call to `QueryWriteStatus()` would
+ // return (0 if this is the first write to the object).
+ //
+ // On subsequent calls, this value **must** be no larger than the sum of the
+ // first `write_offset` and the sizes of all `data` chunks sent previously on
+ // this stream.
+ //
+ // An invalid value will cause an error.
+ WriteOffset int64 `protobuf:"varint,3,opt,name=write_offset,json=writeOffset,proto3" json:"write_offset,omitempty"`
+ // A portion of the data for the object.
+ //
+ // Types that are assignable to Data:
+ //
+ // *BidiWriteObjectRequest_ChecksummedData
+ Data isBidiWriteObjectRequest_Data `protobuf_oneof:"data"`
+ // Checksums for the complete object. If the checksums computed by the service
+ // don't match the specified checksums the call will fail. May only be
+ // provided in the first request or the
+ // last request (with finish_write set).
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,6,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
+ // For each BidiWriteObjectRequest where state_lookup is `true` or the client
+ // closes the stream, the service will send a BidiWriteObjectResponse
+ // containing the current persisted size. The persisted size sent in responses
+ // covers all the bytes the server has persisted thus far and can be used to
+ // decide what data is safe for the client to drop. Note that the object's
+ // current size reported by the BidiWriteObjectResponse may lag behind the
+ // number of bytes written by the client. This field is ignored if
+ // `finish_write` is set to true.
+ StateLookup bool `protobuf:"varint,7,opt,name=state_lookup,json=stateLookup,proto3" json:"state_lookup,omitempty"`
+ // Persists data written on the stream, up to and including the current
+ // message, to permanent storage. This option should be used sparingly as it
+ // may reduce performance. Ongoing writes will periodically be persisted on
+ // the server even when `flush` is not set. This field is ignored if
+ // `finish_write` is set to true since there's no need to checkpoint or flush
+ // if this message completes the write.
+ Flush bool `protobuf:"varint,8,opt,name=flush,proto3" json:"flush,omitempty"`
+ // If `true`, this indicates that the write is complete. Sending any
+ // `WriteObjectRequest`s subsequent to one in which `finish_write` is `true`
+ // will cause an error.
+ // For a non-resumable write (where the upload_id was not set in the first
+ // message), it is an error not to set this field in the final message of the
+ // stream.
+ FinishWrite bool `protobuf:"varint,9,opt,name=finish_write,json=finishWrite,proto3" json:"finish_write,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,10,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+}
+
+func (x *BidiWriteObjectRequest) Reset() {
+ *x = BidiWriteObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BidiWriteObjectRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BidiWriteObjectRequest) ProtoMessage() {}
+
+func (x *BidiWriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[30]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BidiWriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30}
+}
+
+func (m *BidiWriteObjectRequest) GetFirstMessage() isBidiWriteObjectRequest_FirstMessage {
+ if m != nil {
+ return m.FirstMessage
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetUploadId() string {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_UploadId); ok {
+ return x.UploadId
+ }
+ return ""
+}
+
+func (x *BidiWriteObjectRequest) GetWriteObjectSpec() *WriteObjectSpec {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_WriteObjectSpec); ok {
+ return x.WriteObjectSpec
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetAppendObjectSpec() *AppendObjectSpec {
+ if x, ok := x.GetFirstMessage().(*BidiWriteObjectRequest_AppendObjectSpec); ok {
+ return x.AppendObjectSpec
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetWriteOffset() int64 {
+ if x != nil {
+ return x.WriteOffset
+ }
+ return 0
+}
+
+func (m *BidiWriteObjectRequest) GetData() isBidiWriteObjectRequest_Data {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetChecksummedData() *ChecksummedData {
+ if x, ok := x.GetData().(*BidiWriteObjectRequest_ChecksummedData); ok {
+ return x.ChecksummedData
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
+ if x != nil {
+ return x.ObjectChecksums
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectRequest) GetStateLookup() bool {
+ if x != nil {
+ return x.StateLookup
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetFlush() bool {
+ if x != nil {
+ return x.Flush
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetFinishWrite() bool {
+ if x != nil {
+ return x.FinishWrite
+ }
+ return false
+}
+
+func (x *BidiWriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
+
+type isBidiWriteObjectRequest_FirstMessage interface {
+ isBidiWriteObjectRequest_FirstMessage()
+}
+
+type BidiWriteObjectRequest_UploadId struct {
+ // For resumable uploads. This should be the `upload_id` returned from a
+ // call to `StartResumableWriteResponse`.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3,oneof"`
+}
+
+type BidiWriteObjectRequest_WriteObjectSpec struct {
+ // For non-resumable uploads. Describes the overall upload, including the
+ // destination bucket and object name, preconditions, etc.
+ WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,2,opt,name=write_object_spec,json=writeObjectSpec,proto3,oneof"`
+}
+
+type BidiWriteObjectRequest_AppendObjectSpec struct {
+ // For appendable uploads. Describes the object to append to.
+ AppendObjectSpec *AppendObjectSpec `protobuf:"bytes,11,opt,name=append_object_spec,json=appendObjectSpec,proto3,oneof"`
+}
+
+func (*BidiWriteObjectRequest_UploadId) isBidiWriteObjectRequest_FirstMessage() {}
+
+func (*BidiWriteObjectRequest_WriteObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
+
+func (*BidiWriteObjectRequest_AppendObjectSpec) isBidiWriteObjectRequest_FirstMessage() {}
+
+type isBidiWriteObjectRequest_Data interface {
+ isBidiWriteObjectRequest_Data()
+}
+
+type BidiWriteObjectRequest_ChecksummedData struct {
+ // The data to insert. If a crc32c checksum is provided that doesn't match
+ // the checksum computed by the service, the request will fail.
+ ChecksummedData *ChecksummedData `protobuf:"bytes,4,opt,name=checksummed_data,json=checksummedData,proto3,oneof"`
+}
+
+func (*BidiWriteObjectRequest_ChecksummedData) isBidiWriteObjectRequest_Data() {}
+
+// Response message for BidiWriteObject.
+type BidiWriteObjectResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The response will set one of the following.
+ //
+ // Types that are assignable to WriteStatus:
+ //
+ // *BidiWriteObjectResponse_PersistedSize
+ // *BidiWriteObjectResponse_Resource
+ WriteStatus isBidiWriteObjectResponse_WriteStatus `protobuf_oneof:"write_status"`
+ // An optional write handle that will periodically be present in response
+ // messages. Clients should save it for later use in establishing a new stream
+ // if a connection is interrupted.
+ WriteHandle *BidiWriteHandle `protobuf:"bytes,3,opt,name=write_handle,json=writeHandle,proto3,oneof" json:"write_handle,omitempty"`
+}
+
+func (x *BidiWriteObjectResponse) Reset() {
+ *x = BidiWriteObjectResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *BidiWriteObjectResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BidiWriteObjectResponse) ProtoMessage() {}
+
+func (x *BidiWriteObjectResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[31]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BidiWriteObjectResponse.ProtoReflect.Descriptor instead.
+func (*BidiWriteObjectResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31}
+}
+
+func (m *BidiWriteObjectResponse) GetWriteStatus() isBidiWriteObjectResponse_WriteStatus {
+ if m != nil {
+ return m.WriteStatus
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectResponse) GetPersistedSize() int64 {
+ if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_PersistedSize); ok {
+ return x.PersistedSize
+ }
+ return 0
+}
+
+func (x *BidiWriteObjectResponse) GetResource() *Object {
+ if x, ok := x.GetWriteStatus().(*BidiWriteObjectResponse_Resource); ok {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *BidiWriteObjectResponse) GetWriteHandle() *BidiWriteHandle {
+ if x != nil {
+ return x.WriteHandle
+ }
+ return nil
+}
+
+type isBidiWriteObjectResponse_WriteStatus interface {
+ isBidiWriteObjectResponse_WriteStatus()
+}
+
+type BidiWriteObjectResponse_PersistedSize struct {
+ // The total number of bytes that have been processed for the given object
+ // from all `WriteObject` calls. Only set if the upload has not finalized.
+ PersistedSize int64 `protobuf:"varint,1,opt,name=persisted_size,json=persistedSize,proto3,oneof"`
+}
+
+type BidiWriteObjectResponse_Resource struct {
+ // A resource containing the metadata for the uploaded object. Only set if
+ // the upload has finalized.
+ Resource *Object `protobuf:"bytes,2,opt,name=resource,proto3,oneof"`
+}
+
+func (*BidiWriteObjectResponse_PersistedSize) isBidiWriteObjectResponse_WriteStatus() {}
+
+func (*BidiWriteObjectResponse_Resource) isBidiWriteObjectResponse_WriteStatus() {}
+
+// Request message for ListObjects.
+type ListObjectsRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Required. Name of the bucket in which to look for objects.
+ Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
+ // Maximum number of `items` plus `prefixes` to return
+ // in a single page of responses. As duplicate `prefixes` are
+ // omitted, fewer total results may be returned than requested. The service
+ // will use this parameter or 1,000 items, whichever is smaller.
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
+ // A previously-returned page token representing part of the larger set of
+ // results to view.
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
+ // If set, returns results in a directory-like mode. `items` will contain
+ // only objects whose names, aside from the `prefix`, do not
+ // contain `delimiter`. Objects whose names, aside from the
+ // `prefix`, contain `delimiter` will have their name,
+ // truncated after the `delimiter`, returned in
+ // `prefixes`. Duplicate `prefixes` are omitted.
+ Delimiter string `protobuf:"bytes,4,opt,name=delimiter,proto3" json:"delimiter,omitempty"`
+ // If true, objects that end in exactly one instance of `delimiter`
+ // will have their metadata included in `items` in addition to
+ // `prefixes`.
+ IncludeTrailingDelimiter bool `protobuf:"varint,5,opt,name=include_trailing_delimiter,json=includeTrailingDelimiter,proto3" json:"include_trailing_delimiter,omitempty"`
+ // Filter results to objects whose names begin with this prefix.
+ Prefix string `protobuf:"bytes,6,opt,name=prefix,proto3" json:"prefix,omitempty"`
+ // If `true`, lists all versions of an object as distinct results.
+ // For more information, see
+ // [Object
+ // Versioning](https://cloud.google.com/storage/docs/object-versioning).
+ Versions bool `protobuf:"varint,7,opt,name=versions,proto3" json:"versions,omitempty"`
+ // Mask specifying which fields to read from each result.
+ // If no mask is specified, will default to all fields except items.acl and
+ // items.owner.
+ // * may be used to mean "all fields".
+ ReadMask *fieldmaskpb.FieldMask `protobuf:"bytes,8,opt,name=read_mask,json=readMask,proto3,oneof" json:"read_mask,omitempty"`
+ // Optional. Filter results to objects whose names are lexicographically equal
+ // to or after lexicographic_start. If lexicographic_end is also set, the
+ // objects listed have names between lexicographic_start (inclusive) and
+ // lexicographic_end (exclusive).
+ LexicographicStart string `protobuf:"bytes,10,opt,name=lexicographic_start,json=lexicographicStart,proto3" json:"lexicographic_start,omitempty"`
+ // Optional. Filter results to objects whose names are lexicographically
+ // before lexicographic_end. If lexicographic_start is also set, the objects
+ // listed have names between lexicographic_start (inclusive) and
+ // lexicographic_end (exclusive).
+ LexicographicEnd string `protobuf:"bytes,11,opt,name=lexicographic_end,json=lexicographicEnd,proto3" json:"lexicographic_end,omitempty"`
+ // Optional. If true, only list all soft-deleted versions of the object.
+ // Soft delete policy is required to set this option.
+ SoftDeleted bool `protobuf:"varint,12,opt,name=soft_deleted,json=softDeleted,proto3" json:"soft_deleted,omitempty"`
+ // Optional. If true, will also include folders and managed folders (besides
+ // objects) in the returned `prefixes`. Requires `delimiter` to be set to '/'.
+ IncludeFoldersAsPrefixes bool `protobuf:"varint,13,opt,name=include_folders_as_prefixes,json=includeFoldersAsPrefixes,proto3" json:"include_folders_as_prefixes,omitempty"`
+ // Optional. Filter results to objects and prefixes that match this glob
+ // pattern. See [List Objects Using
+ // Glob](https://cloud.google.com/storage/docs/json_api/v1/objects/list#list-objects-and-prefixes-using-glob)
+ // for the full syntax.
+ MatchGlob string `protobuf:"bytes,14,opt,name=match_glob,json=matchGlob,proto3" json:"match_glob,omitempty"`
+}
+
+func (x *ListObjectsRequest) Reset() {
+ *x = ListObjectsRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListObjectsRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListObjectsRequest) ProtoMessage() {}
+
+func (x *ListObjectsRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[32]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListObjectsRequest.ProtoReflect.Descriptor instead.
+func (*ListObjectsRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32}
+}
+
+func (x *ListObjectsRequest) GetParent() string {
+ if x != nil {
+ return x.Parent
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetPageSize() int32 {
+ if x != nil {
+ return x.PageSize
+ }
+ return 0
+}
+
+func (x *ListObjectsRequest) GetPageToken() string {
+ if x != nil {
+ return x.PageToken
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetDelimiter() string {
+ if x != nil {
+ return x.Delimiter
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetIncludeTrailingDelimiter() bool {
+ if x != nil {
+ return x.IncludeTrailingDelimiter
+ }
+ return false
+}
+
+func (x *ListObjectsRequest) GetPrefix() string {
+ if x != nil {
+ return x.Prefix
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetVersions() bool {
+ if x != nil {
+ return x.Versions
+ }
+ return false
+}
+
+func (x *ListObjectsRequest) GetReadMask() *fieldmaskpb.FieldMask {
+ if x != nil {
+ return x.ReadMask
+ }
+ return nil
+}
+
+func (x *ListObjectsRequest) GetLexicographicStart() string {
+ if x != nil {
+ return x.LexicographicStart
+ }
+ return ""
+}
+
+func (x *ListObjectsRequest) GetLexicographicEnd() string {
+ if x != nil {
+ return x.LexicographicEnd
+ }
+ return ""
+}
func (x *ListObjectsRequest) GetSoftDeleted() bool {
if x != nil {
@@ -2765,11 +3377,9 @@ type QueryWriteStatusRequest struct {
func (x *QueryWriteStatusRequest) Reset() {
*x = QueryWriteStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *QueryWriteStatusRequest) String() string {
@@ -2779,8 +3389,8 @@ func (x *QueryWriteStatusRequest) String() string {
func (*QueryWriteStatusRequest) ProtoMessage() {}
func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[33]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2792,7 +3402,7 @@ func (x *QueryWriteStatusRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use QueryWriteStatusRequest.ProtoReflect.Descriptor instead.
func (*QueryWriteStatusRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{26}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33}
}
func (x *QueryWriteStatusRequest) GetUploadId() string {
@@ -2826,11 +3436,9 @@ type QueryWriteStatusResponse struct {
func (x *QueryWriteStatusResponse) Reset() {
*x = QueryWriteStatusResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[34]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *QueryWriteStatusResponse) String() string {
@@ -2840,8 +3448,8 @@ func (x *QueryWriteStatusResponse) String() string {
func (*QueryWriteStatusResponse) ProtoMessage() {}
func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[34]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2853,7 +3461,7 @@ func (x *QueryWriteStatusResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use QueryWriteStatusResponse.ProtoReflect.Descriptor instead.
func (*QueryWriteStatusResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{27}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34}
}
func (m *QueryWriteStatusResponse) GetWriteStatus() isQueryWriteStatusResponse_WriteStatus {
@@ -3004,244 +3612,26 @@ type RewriteObjectRequest struct {
// A set of parameters common to Storage API requests concerning an object.
CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,19,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
// The checksums of the complete object. This will be used to validate the
- // destination object after rewriting.
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,29,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
-}
-
-func (x *RewriteObjectRequest) Reset() {
- *x = RewriteObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *RewriteObjectRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*RewriteObjectRequest) ProtoMessage() {}
-
-func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead.
-func (*RewriteObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{28}
-}
-
-func (x *RewriteObjectRequest) GetDestinationName() string {
- if x != nil {
- return x.DestinationName
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetDestinationBucket() string {
- if x != nil {
- return x.DestinationBucket
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetDestinationKmsKey() string {
- if x != nil {
- return x.DestinationKmsKey
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetDestination() *Object {
- if x != nil {
- return x.Destination
- }
- return nil
-}
-
-func (x *RewriteObjectRequest) GetSourceBucket() string {
- if x != nil {
- return x.SourceBucket
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetSourceObject() string {
- if x != nil {
- return x.SourceObject
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetSourceGeneration() int64 {
- if x != nil {
- return x.SourceGeneration
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetRewriteToken() string {
- if x != nil {
- return x.RewriteToken
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string {
- if x != nil {
- return x.DestinationPredefinedAcl
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 {
- if x != nil && x.IfSourceGenerationMatch != nil {
- return *x.IfSourceGenerationMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 {
- if x != nil && x.IfSourceGenerationNotMatch != nil {
- return *x.IfSourceGenerationNotMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 {
- if x != nil && x.IfSourceMetagenerationMatch != nil {
- return *x.IfSourceMetagenerationMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 {
- if x != nil && x.IfSourceMetagenerationNotMatch != nil {
- return *x.IfSourceMetagenerationNotMatch
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 {
- if x != nil {
- return x.MaxBytesRewrittenPerCall
- }
- return 0
-}
-
-func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string {
- if x != nil {
- return x.CopySourceEncryptionAlgorithm
- }
- return ""
-}
-
-func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte {
- if x != nil {
- return x.CopySourceEncryptionKeyBytes
- }
- return nil
-}
-
-func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte {
- if x != nil {
- return x.CopySourceEncryptionKeySha256Bytes
- }
- return nil
-}
-
-func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
- if x != nil {
- return x.CommonObjectRequestParams
- }
- return nil
-}
-
-func (x *RewriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
- if x != nil {
- return x.ObjectChecksums
- }
- return nil
-}
-
-// A rewrite response.
-type RewriteResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The total bytes written so far, which can be used to provide a waiting user
- // with a progress indicator. This property is always present in the response.
- TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"`
- // The total size of the object being copied in bytes. This property is always
- // present in the response.
- ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"`
- // `true` if the copy is finished; otherwise, `false` if
- // the copy is in progress. This property is always present in the response.
- Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
- // A token to use in subsequent requests to continue copying data. This token
- // is present in the response only when there is more data to copy.
- RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"`
- // A resource containing the metadata for the copied-to object. This property
- // is present in the response only when copying completes.
- Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"`
+ // destination object after rewriting.
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,29,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
}
-func (x *RewriteResponse) Reset() {
- *x = RewriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *RewriteObjectRequest) Reset() {
+ *x = RewriteObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[35]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *RewriteResponse) String() string {
+func (x *RewriteObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*RewriteResponse) ProtoMessage() {}
+func (*RewriteObjectRequest) ProtoMessage() {}
-func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *RewriteObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[35]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3251,381 +3641,211 @@ func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead.
-func (*RewriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{29}
-}
-
-func (x *RewriteResponse) GetTotalBytesRewritten() int64 {
- if x != nil {
- return x.TotalBytesRewritten
- }
- return 0
+// Deprecated: Use RewriteObjectRequest.ProtoReflect.Descriptor instead.
+func (*RewriteObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35}
}
-func (x *RewriteResponse) GetObjectSize() int64 {
+func (x *RewriteObjectRequest) GetDestinationName() string {
if x != nil {
- return x.ObjectSize
+ return x.DestinationName
}
- return 0
+ return ""
}
-func (x *RewriteResponse) GetDone() bool {
+func (x *RewriteObjectRequest) GetDestinationBucket() string {
if x != nil {
- return x.Done
+ return x.DestinationBucket
}
- return false
+ return ""
}
-func (x *RewriteResponse) GetRewriteToken() string {
+func (x *RewriteObjectRequest) GetDestinationKmsKey() string {
if x != nil {
- return x.RewriteToken
+ return x.DestinationKmsKey
}
return ""
}
-func (x *RewriteResponse) GetResource() *Object {
+func (x *RewriteObjectRequest) GetDestination() *Object {
if x != nil {
- return x.Resource
+ return x.Destination
}
return nil
}
-// Request message StartResumableWrite.
-type StartResumableWriteRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The destination bucket, object, and metadata, as well as any
- // preconditions.
- WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
- // The checksums of the complete object. This will be used to validate the
- // uploaded object. For each upload, object_checksums can be provided with
- // either StartResumableWriteRequest or the WriteObjectRequest with
- // finish_write set to `true`.
- ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
-}
-
-func (x *StartResumableWriteRequest) Reset() {
- *x = StartResumableWriteRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *StartResumableWriteRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StartResumableWriteRequest) ProtoMessage() {}
-
-func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead.
-func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{30}
-}
-
-func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec {
+func (x *RewriteObjectRequest) GetSourceBucket() string {
if x != nil {
- return x.WriteObjectSpec
+ return x.SourceBucket
}
- return nil
+ return ""
}
-func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *RewriteObjectRequest) GetSourceObject() string {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.SourceObject
}
- return nil
+ return ""
}
-func (x *StartResumableWriteRequest) GetObjectChecksums() *ObjectChecksums {
+func (x *RewriteObjectRequest) GetSourceGeneration() int64 {
if x != nil {
- return x.ObjectChecksums
- }
- return nil
-}
-
-// Response object for `StartResumableWrite`.
-type StartResumableWriteResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The upload_id of the newly started resumable write operation. This
- // value should be copied into the `WriteObjectRequest.upload_id` field.
- UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
-}
-
-func (x *StartResumableWriteResponse) Reset() {
- *x = StartResumableWriteResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+ return x.SourceGeneration
}
+ return 0
}
-func (x *StartResumableWriteResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*StartResumableWriteResponse) ProtoMessage() {}
-
-func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *RewriteObjectRequest) GetRewriteToken() string {
+ if x != nil {
+ return x.RewriteToken
}
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead.
-func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{31}
+ return ""
}
-func (x *StartResumableWriteResponse) GetUploadId() string {
+func (x *RewriteObjectRequest) GetDestinationPredefinedAcl() string {
if x != nil {
- return x.UploadId
+ return x.DestinationPredefinedAcl
}
return ""
}
-// Request message for UpdateObject.
-type UpdateObjectRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The object to update.
- // The object's bucket and name fields are used to identify the object to
- // update. If present, the object's generation field selects a specific
- // revision of this object whose metadata should be updated. Otherwise,
- // assumes the live version of the object.
- Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"`
- // Makes the operation conditional on whether the object's current generation
- // matches the given value. Setting to 0 makes the operation succeed only if
- // there are no live versions of the object.
- IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
- // Makes the operation conditional on whether the object's live generation
- // does not match the given value. If no live object exists, the precondition
- // fails. Setting to 0 makes the operation succeed only if there is a live
- // version of the object.
- IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration matches the given value.
- IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
- // Makes the operation conditional on whether the object's current
- // metageneration does not match the given value.
- IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
- // Apply a predefined set of access controls to this object.
- // Valid values are "authenticatedRead", "bucketOwnerFullControl",
- // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
- PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
- // Required. List of fields to be updated.
- //
- // To specify ALL fields, equivalent to the JSON API's "update" function,
- // specify a single field with the value `*`. Note: not recommended. If a new
- // field is introduced at a later time, an older client updating with the `*`
- // may accidentally reset the new field's value.
- //
- // Not specifying any fields is an error.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
- // A set of parameters common to Storage API requests concerning an object.
- CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
-}
-
-func (x *UpdateObjectRequest) Reset() {
- *x = UpdateObjectRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[32]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *RewriteObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
+ return 0
}
-func (x *UpdateObjectRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *RewriteObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
}
-func (*UpdateObjectRequest) ProtoMessage() {}
-
-func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[32]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *RewriteObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
}
- return mi.MessageOf(x)
+ return 0
}
-// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead.
-func (*UpdateObjectRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{32}
+func (x *RewriteObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
}
-func (x *UpdateObjectRequest) GetObject() *Object {
- if x != nil {
- return x.Object
+func (x *RewriteObjectRequest) GetIfSourceGenerationMatch() int64 {
+ if x != nil && x.IfSourceGenerationMatch != nil {
+ return *x.IfSourceGenerationMatch
}
- return nil
+ return 0
}
-func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 {
- if x != nil && x.IfGenerationMatch != nil {
- return *x.IfGenerationMatch
+func (x *RewriteObjectRequest) GetIfSourceGenerationNotMatch() int64 {
+ if x != nil && x.IfSourceGenerationNotMatch != nil {
+ return *x.IfSourceGenerationNotMatch
}
return 0
}
-func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 {
- if x != nil && x.IfGenerationNotMatch != nil {
- return *x.IfGenerationNotMatch
+func (x *RewriteObjectRequest) GetIfSourceMetagenerationMatch() int64 {
+ if x != nil && x.IfSourceMetagenerationMatch != nil {
+ return *x.IfSourceMetagenerationMatch
}
return 0
}
-func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 {
- if x != nil && x.IfMetagenerationMatch != nil {
- return *x.IfMetagenerationMatch
+func (x *RewriteObjectRequest) GetIfSourceMetagenerationNotMatch() int64 {
+ if x != nil && x.IfSourceMetagenerationNotMatch != nil {
+ return *x.IfSourceMetagenerationNotMatch
}
return 0
}
-func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 {
- if x != nil && x.IfMetagenerationNotMatch != nil {
- return *x.IfMetagenerationNotMatch
+func (x *RewriteObjectRequest) GetMaxBytesRewrittenPerCall() int64 {
+ if x != nil {
+ return x.MaxBytesRewrittenPerCall
}
return 0
}
-func (x *UpdateObjectRequest) GetPredefinedAcl() string {
+func (x *RewriteObjectRequest) GetCopySourceEncryptionAlgorithm() string {
if x != nil {
- return x.PredefinedAcl
+ return x.CopySourceEncryptionAlgorithm
}
return ""
}
-func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+func (x *RewriteObjectRequest) GetCopySourceEncryptionKeyBytes() []byte {
if x != nil {
- return x.UpdateMask
+ return x.CopySourceEncryptionKeyBytes
}
return nil
}
-func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+func (x *RewriteObjectRequest) GetCopySourceEncryptionKeySha256Bytes() []byte {
if x != nil {
- return x.CommonObjectRequestParams
+ return x.CopySourceEncryptionKeySha256Bytes
}
return nil
}
-// Request message for GetServiceAccount.
-type GetServiceAccountRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. Project ID, in the format of "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
-}
-
-func (x *GetServiceAccountRequest) Reset() {
- *x = GetServiceAccountRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[33]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *GetServiceAccountRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*GetServiceAccountRequest) ProtoMessage() {}
-
-func (x *GetServiceAccountRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[33]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *RewriteObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
}
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use GetServiceAccountRequest.ProtoReflect.Descriptor instead.
-func (*GetServiceAccountRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{33}
+ return nil
}
-func (x *GetServiceAccountRequest) GetProject() string {
+func (x *RewriteObjectRequest) GetObjectChecksums() *ObjectChecksums {
if x != nil {
- return x.Project
+ return x.ObjectChecksums
}
- return ""
+ return nil
}
-// Request message for CreateHmacKey.
-type CreateHmacKeyRequest struct {
+// A rewrite response.
+type RewriteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The project that the HMAC-owning service account lives in, in the
- // format of "projects/{projectIdentifier}". {projectIdentifier} can be the
- // project ID or project number.
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
- // Required. The service account to create the HMAC for.
- ServiceAccountEmail string `protobuf:"bytes,2,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
+ // The total bytes written so far, which can be used to provide a waiting user
+ // with a progress indicator. This property is always present in the response.
+ TotalBytesRewritten int64 `protobuf:"varint,1,opt,name=total_bytes_rewritten,json=totalBytesRewritten,proto3" json:"total_bytes_rewritten,omitempty"`
+ // The total size of the object being copied in bytes. This property is always
+ // present in the response.
+ ObjectSize int64 `protobuf:"varint,2,opt,name=object_size,json=objectSize,proto3" json:"object_size,omitempty"`
+ // `true` if the copy is finished; otherwise, `false` if
+ // the copy is in progress. This property is always present in the response.
+ Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
+ // A token to use in subsequent requests to continue copying data. This token
+ // is present in the response only when there is more data to copy.
+ RewriteToken string `protobuf:"bytes,4,opt,name=rewrite_token,json=rewriteToken,proto3" json:"rewrite_token,omitempty"`
+ // A resource containing the metadata for the copied-to object. This property
+ // is present in the response only when copying completes.
+ Resource *Object `protobuf:"bytes,5,opt,name=resource,proto3" json:"resource,omitempty"`
}
-func (x *CreateHmacKeyRequest) Reset() {
- *x = CreateHmacKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[34]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *RewriteResponse) Reset() {
+ *x = RewriteResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[36]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *CreateHmacKeyRequest) String() string {
+func (x *RewriteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*CreateHmacKeyRequest) ProtoMessage() {}
+func (*RewriteResponse) ProtoMessage() {}
-func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[34]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *RewriteResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[36]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3635,116 +3855,124 @@ func (x *CreateHmacKeyRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use CreateHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*CreateHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{34}
+// Deprecated: Use RewriteResponse.ProtoReflect.Descriptor instead.
+func (*RewriteResponse) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36}
}
-func (x *CreateHmacKeyRequest) GetProject() string {
+func (x *RewriteResponse) GetTotalBytesRewritten() int64 {
if x != nil {
- return x.Project
+ return x.TotalBytesRewritten
}
- return ""
+ return 0
}
-func (x *CreateHmacKeyRequest) GetServiceAccountEmail() string {
+func (x *RewriteResponse) GetObjectSize() int64 {
if x != nil {
- return x.ServiceAccountEmail
- }
- return ""
-}
-
-// Create hmac response. The only time the secret for an HMAC will be returned.
-type CreateHmacKeyResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Key metadata.
- Metadata *HmacKeyMetadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- // HMAC key secret material.
- // In raw bytes format (not base64-encoded).
- SecretKeyBytes []byte `protobuf:"bytes,3,opt,name=secret_key_bytes,json=secretKeyBytes,proto3" json:"secret_key_bytes,omitempty"`
-}
-
-func (x *CreateHmacKeyResponse) Reset() {
- *x = CreateHmacKeyResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[35]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+ return x.ObjectSize
}
+ return 0
}
-func (x *CreateHmacKeyResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*CreateHmacKeyResponse) ProtoMessage() {}
-
-func (x *CreateHmacKeyResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[35]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *RewriteResponse) GetDone() bool {
+ if x != nil {
+ return x.Done
}
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use CreateHmacKeyResponse.ProtoReflect.Descriptor instead.
-func (*CreateHmacKeyResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{35}
+ return false
}
-func (x *CreateHmacKeyResponse) GetMetadata() *HmacKeyMetadata {
+func (x *RewriteResponse) GetRewriteToken() string {
if x != nil {
- return x.Metadata
+ return x.RewriteToken
}
- return nil
+ return ""
}
-func (x *CreateHmacKeyResponse) GetSecretKeyBytes() []byte {
+func (x *RewriteResponse) GetResource() *Object {
if x != nil {
- return x.SecretKeyBytes
+ return x.Resource
}
return nil
}
-// Request object to delete a given HMAC key.
-type DeleteHmacKeyRequest struct {
+// Request message for MoveObject.
+type MoveObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The identifying key for the HMAC to delete.
- AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
- // Required. The project that owns the HMAC key, in the format of
- // "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"`
-}
-
-func (x *DeleteHmacKeyRequest) Reset() {
- *x = DeleteHmacKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[36]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ // Required. Name of the bucket in which the object resides.
+ Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
+ // Required. Name of the source object.
+ SourceObject string `protobuf:"bytes,2,opt,name=source_object,json=sourceObject,proto3" json:"source_object,omitempty"`
+ // Required. Name of the destination object.
+ DestinationObject string `protobuf:"bytes,3,opt,name=destination_object,json=destinationObject,proto3" json:"destination_object,omitempty"`
+ // Optional. Makes the operation conditional on whether the source object's
+ // current generation matches the given value. `if_source_generation_match`
+ // and `if_source_generation_not_match` conditions are mutually exclusive:
+ // it's an error for both of them to be set in the request.
+ IfSourceGenerationMatch *int64 `protobuf:"varint,4,opt,name=if_source_generation_match,json=ifSourceGenerationMatch,proto3,oneof" json:"if_source_generation_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the source object's
+ // current generation does not match the given value.
+ // `if_source_generation_match` and `if_source_generation_not_match`
+ // conditions are mutually exclusive: it's an error for both of them to be set
+ // in the request.
+ IfSourceGenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_source_generation_not_match,json=ifSourceGenerationNotMatch,proto3,oneof" json:"if_source_generation_not_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the source object's
+ // current metageneration matches the given value.
+ // `if_source_metageneration_match` and `if_source_metageneration_not_match`
+ // conditions are mutually exclusive: it's an error for both of them to be set
+ // in the request.
+ IfSourceMetagenerationMatch *int64 `protobuf:"varint,6,opt,name=if_source_metageneration_match,json=ifSourceMetagenerationMatch,proto3,oneof" json:"if_source_metageneration_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the source object's
+ // current metageneration does not match the given value.
+ // `if_source_metageneration_match` and `if_source_metageneration_not_match`
+ // conditions are mutually exclusive: it's an error for both of them to be set
+ // in the request.
+ IfSourceMetagenerationNotMatch *int64 `protobuf:"varint,7,opt,name=if_source_metageneration_not_match,json=ifSourceMetagenerationNotMatch,proto3,oneof" json:"if_source_metageneration_not_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the destination
+ // object's current generation matches the given value. Setting to 0 makes the
+ // operation succeed only if there are no live versions of the object.
+ // `if_generation_match` and `if_generation_not_match` conditions are mutually
+ // exclusive: it's an error for both of them to be set in the request.
+ IfGenerationMatch *int64 `protobuf:"varint,8,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the destination
+ // object's current generation does not match the given value. If no live
+ // object exists, the precondition fails. Setting to 0 makes the operation
+ // succeed only if there is a live version of the object.
+ // `if_generation_match` and `if_generation_not_match` conditions are mutually
+ // exclusive: it's an error for both of them to be set in the request.
+ IfGenerationNotMatch *int64 `protobuf:"varint,9,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the destination
+ // object's current metageneration matches the given value.
+ // `if_metageneration_match` and `if_metageneration_not_match` conditions are
+ // mutually exclusive: it's an error for both of them to be set in the
+ // request.
+ IfMetagenerationMatch *int64 `protobuf:"varint,10,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Optional. Makes the operation conditional on whether the destination
+ // object's current metageneration does not match the given value.
+ // `if_metageneration_match` and `if_metageneration_not_match` conditions are
+ // mutually exclusive: it's an error for both of them to be set in the
+ // request.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,11,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+}
+
+func (x *MoveObjectRequest) Reset() {
+ *x = MoveObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *DeleteHmacKeyRequest) String() string {
+func (x *MoveObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*DeleteHmacKeyRequest) ProtoMessage() {}
+func (*MoveObjectRequest) ProtoMessage() {}
-func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[36]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *MoveObjectRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[37]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3754,123 +3982,122 @@ func (x *DeleteHmacKeyRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use DeleteHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*DeleteHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{36}
+// Deprecated: Use MoveObjectRequest.ProtoReflect.Descriptor instead.
+func (*MoveObjectRequest) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37}
}
-func (x *DeleteHmacKeyRequest) GetAccessId() string {
+func (x *MoveObjectRequest) GetBucket() string {
if x != nil {
- return x.AccessId
+ return x.Bucket
}
return ""
}
-func (x *DeleteHmacKeyRequest) GetProject() string {
+func (x *MoveObjectRequest) GetSourceObject() string {
if x != nil {
- return x.Project
+ return x.SourceObject
}
return ""
}
-// Request object to get metadata on a given HMAC key.
-type GetHmacKeyRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+func (x *MoveObjectRequest) GetDestinationObject() string {
+ if x != nil {
+ return x.DestinationObject
+ }
+ return ""
+}
- // Required. The identifying key for the HMAC to delete.
- AccessId string `protobuf:"bytes,1,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
- // Required. The project the HMAC key lies in, in the format of
- // "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"`
+func (x *MoveObjectRequest) GetIfSourceGenerationMatch() int64 {
+ if x != nil && x.IfSourceGenerationMatch != nil {
+ return *x.IfSourceGenerationMatch
+ }
+ return 0
}
-func (x *GetHmacKeyRequest) Reset() {
- *x = GetHmacKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[37]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
+func (x *MoveObjectRequest) GetIfSourceGenerationNotMatch() int64 {
+ if x != nil && x.IfSourceGenerationNotMatch != nil {
+ return *x.IfSourceGenerationNotMatch
}
+ return 0
}
-func (x *GetHmacKeyRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
+func (x *MoveObjectRequest) GetIfSourceMetagenerationMatch() int64 {
+ if x != nil && x.IfSourceMetagenerationMatch != nil {
+ return *x.IfSourceMetagenerationMatch
+ }
+ return 0
}
-func (*GetHmacKeyRequest) ProtoMessage() {}
+func (x *MoveObjectRequest) GetIfSourceMetagenerationNotMatch() int64 {
+ if x != nil && x.IfSourceMetagenerationNotMatch != nil {
+ return *x.IfSourceMetagenerationNotMatch
+ }
+ return 0
+}
-func (x *GetHmacKeyRequest) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[37]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
+func (x *MoveObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
}
- return mi.MessageOf(x)
+ return 0
}
-// Deprecated: Use GetHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*GetHmacKeyRequest) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{37}
+func (x *MoveObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
}
-func (x *GetHmacKeyRequest) GetAccessId() string {
- if x != nil {
- return x.AccessId
+func (x *MoveObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
}
- return ""
+ return 0
}
-func (x *GetHmacKeyRequest) GetProject() string {
- if x != nil {
- return x.Project
+func (x *MoveObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
}
- return ""
+ return 0
}
-// Request to fetch a list of HMAC keys under a given project.
-type ListHmacKeysRequest struct {
+// Request message StartResumableWrite.
+type StartResumableWriteRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The project to list HMAC keys for, in the format of
- // "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"`
- // The maximum number of keys to return.
- PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
- // A previously returned token from ListHmacKeysResponse to get the next page.
- PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
- // If set, filters to only return HMAC keys for specified service account.
- ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
- // If set, return deleted keys that have not yet been wiped out.
- ShowDeletedKeys bool `protobuf:"varint,5,opt,name=show_deleted_keys,json=showDeletedKeys,proto3" json:"show_deleted_keys,omitempty"`
+ // Required. Contains the information necessary to start a resumable write.
+ WriteObjectSpec *WriteObjectSpec `protobuf:"bytes,1,opt,name=write_object_spec,json=writeObjectSpec,proto3" json:"write_object_spec,omitempty"`
+ // A set of parameters common to Storage API requests related to an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,3,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+ // The checksums of the complete object. This is used to validate the
+ // uploaded object. For each upload, `object_checksums` can be provided when
+ // initiating a resumable upload with`StartResumableWriteRequest` or when
+ // completing a write with `WriteObjectRequest` with
+ // `finish_write` set to `true`.
+ ObjectChecksums *ObjectChecksums `protobuf:"bytes,5,opt,name=object_checksums,json=objectChecksums,proto3" json:"object_checksums,omitempty"`
}
-func (x *ListHmacKeysRequest) Reset() {
- *x = ListHmacKeysRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[38]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *StartResumableWriteRequest) Reset() {
+ *x = StartResumableWriteRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[38]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *ListHmacKeysRequest) String() string {
+func (x *StartResumableWriteRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListHmacKeysRequest) ProtoMessage() {}
+func (*StartResumableWriteRequest) ProtoMessage() {}
-func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message {
+func (x *StartResumableWriteRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[38]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3880,77 +4107,62 @@ func (x *ListHmacKeysRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListHmacKeysRequest.ProtoReflect.Descriptor instead.
-func (*ListHmacKeysRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use StartResumableWriteRequest.ProtoReflect.Descriptor instead.
+func (*StartResumableWriteRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{38}
}
-func (x *ListHmacKeysRequest) GetProject() string {
- if x != nil {
- return x.Project
- }
- return ""
-}
-
-func (x *ListHmacKeysRequest) GetPageSize() int32 {
- if x != nil {
- return x.PageSize
- }
- return 0
-}
-
-func (x *ListHmacKeysRequest) GetPageToken() string {
+func (x *StartResumableWriteRequest) GetWriteObjectSpec() *WriteObjectSpec {
if x != nil {
- return x.PageToken
+ return x.WriteObjectSpec
}
- return ""
+ return nil
}
-func (x *ListHmacKeysRequest) GetServiceAccountEmail() string {
+func (x *StartResumableWriteRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
if x != nil {
- return x.ServiceAccountEmail
+ return x.CommonObjectRequestParams
}
- return ""
+ return nil
}
-func (x *ListHmacKeysRequest) GetShowDeletedKeys() bool {
+func (x *StartResumableWriteRequest) GetObjectChecksums() *ObjectChecksums {
if x != nil {
- return x.ShowDeletedKeys
+ return x.ObjectChecksums
}
- return false
+ return nil
}
-// Hmac key list response with next page information.
-type ListHmacKeysResponse struct {
+// Response object for `StartResumableWrite`.
+type StartResumableWriteResponse struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The list of items.
- HmacKeys []*HmacKeyMetadata `protobuf:"bytes,1,rep,name=hmac_keys,json=hmacKeys,proto3" json:"hmac_keys,omitempty"`
- // The continuation token, used to page through large result sets. Provide
- // this value in a subsequent request to return the next page of results.
- NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
+ // A unique identifier for the initiated resumable write operation.
+ // As the ID grants write access, you should keep it confidential during
+ // the upload to prevent unauthorized access and data tampering during your
+ // upload. This ID should be included in subsequent `WriteObject` requests to
+ // upload the object data.
+ UploadId string `protobuf:"bytes,1,opt,name=upload_id,json=uploadId,proto3" json:"upload_id,omitempty"`
}
-func (x *ListHmacKeysResponse) Reset() {
- *x = ListHmacKeysResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[39]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+func (x *StartResumableWriteResponse) Reset() {
+ *x = StartResumableWriteResponse{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[39]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *ListHmacKeysResponse) String() string {
+func (x *StartResumableWriteResponse) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*ListHmacKeysResponse) ProtoMessage() {}
+func (*StartResumableWriteResponse) ProtoMessage() {}
-func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message {
+func (x *StartResumableWriteResponse) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[39]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3960,63 +4172,78 @@ func (x *ListHmacKeysResponse) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use ListHmacKeysResponse.ProtoReflect.Descriptor instead.
-func (*ListHmacKeysResponse) Descriptor() ([]byte, []int) {
+// Deprecated: Use StartResumableWriteResponse.ProtoReflect.Descriptor instead.
+func (*StartResumableWriteResponse) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{39}
}
-func (x *ListHmacKeysResponse) GetHmacKeys() []*HmacKeyMetadata {
- if x != nil {
- return x.HmacKeys
- }
- return nil
-}
-
-func (x *ListHmacKeysResponse) GetNextPageToken() string {
+func (x *StartResumableWriteResponse) GetUploadId() string {
if x != nil {
- return x.NextPageToken
+ return x.UploadId
}
return ""
}
-// Request object to update an HMAC key state.
-// HmacKeyMetadata.state is required and the only writable field in
-// UpdateHmacKey operation. Specifying fields other than state will result in an
-// error.
-type UpdateHmacKeyRequest struct {
+// Request message for UpdateObject.
+type UpdateObjectRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // Required. The HMAC key to update.
- // If present, the hmac_key's `id` field will be used to identify the key.
- // Otherwise, the hmac_key's access_id and project fields will be used to
- // identify the key.
- HmacKey *HmacKeyMetadata `protobuf:"bytes,1,opt,name=hmac_key,json=hmacKey,proto3" json:"hmac_key,omitempty"`
- // Update mask for hmac_key.
- // Not specifying any fields will mean only the `state` field is updated to
- // the value specified in `hmac_key`.
- UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
-}
-
-func (x *UpdateHmacKeyRequest) Reset() {
- *x = UpdateHmacKeyRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[40]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ // Required. The object to update.
+ // The object's bucket and name fields are used to identify the object to
+ // update. If present, the object's generation field selects a specific
+ // revision of this object whose metadata should be updated. Otherwise,
+ // assumes the live version of the object.
+ Object *Object `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"`
+ // Makes the operation conditional on whether the object's current generation
+ // matches the given value. Setting to 0 makes the operation succeed only if
+ // there are no live versions of the object.
+ IfGenerationMatch *int64 `protobuf:"varint,2,opt,name=if_generation_match,json=ifGenerationMatch,proto3,oneof" json:"if_generation_match,omitempty"`
+ // Makes the operation conditional on whether the object's live generation
+ // does not match the given value. If no live object exists, the precondition
+ // fails. Setting to 0 makes the operation succeed only if there is a live
+ // version of the object.
+ IfGenerationNotMatch *int64 `protobuf:"varint,3,opt,name=if_generation_not_match,json=ifGenerationNotMatch,proto3,oneof" json:"if_generation_not_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration matches the given value.
+ IfMetagenerationMatch *int64 `protobuf:"varint,4,opt,name=if_metageneration_match,json=ifMetagenerationMatch,proto3,oneof" json:"if_metageneration_match,omitempty"`
+ // Makes the operation conditional on whether the object's current
+ // metageneration does not match the given value.
+ IfMetagenerationNotMatch *int64 `protobuf:"varint,5,opt,name=if_metageneration_not_match,json=ifMetagenerationNotMatch,proto3,oneof" json:"if_metageneration_not_match,omitempty"`
+ // Apply a predefined set of access controls to this object.
+ // Valid values are "authenticatedRead", "bucketOwnerFullControl",
+ // "bucketOwnerRead", "private", "projectPrivate", or "publicRead".
+ PredefinedAcl string `protobuf:"bytes,10,opt,name=predefined_acl,json=predefinedAcl,proto3" json:"predefined_acl,omitempty"`
+ // Required. List of fields to be updated.
+ //
+ // To specify ALL fields, equivalent to the JSON API's "update" function,
+ // specify a single field with the value `*`. Note: not recommended. If a new
+ // field is introduced at a later time, an older client updating with the `*`
+ // may accidentally reset the new field's value.
+ //
+ // Not specifying any fields is an error.
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,7,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
+ // A set of parameters common to Storage API requests concerning an object.
+ CommonObjectRequestParams *CommonObjectRequestParams `protobuf:"bytes,8,opt,name=common_object_request_params,json=commonObjectRequestParams,proto3" json:"common_object_request_params,omitempty"`
+}
+
+func (x *UpdateObjectRequest) Reset() {
+ *x = UpdateObjectRequest{}
+ mi := &file_google_storage_v2_storage_proto_msgTypes[40]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *UpdateHmacKeyRequest) String() string {
+func (x *UpdateObjectRequest) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*UpdateHmacKeyRequest) ProtoMessage() {}
+func (*UpdateObjectRequest) ProtoMessage() {}
-func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message {
+func (x *UpdateObjectRequest) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[40]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4026,25 +4253,67 @@ func (x *UpdateHmacKeyRequest) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use UpdateHmacKeyRequest.ProtoReflect.Descriptor instead.
-func (*UpdateHmacKeyRequest) Descriptor() ([]byte, []int) {
+// Deprecated: Use UpdateObjectRequest.ProtoReflect.Descriptor instead.
+func (*UpdateObjectRequest) Descriptor() ([]byte, []int) {
return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{40}
}
-func (x *UpdateHmacKeyRequest) GetHmacKey() *HmacKeyMetadata {
+func (x *UpdateObjectRequest) GetObject() *Object {
if x != nil {
- return x.HmacKey
+ return x.Object
}
return nil
}
-func (x *UpdateHmacKeyRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
+func (x *UpdateObjectRequest) GetIfGenerationMatch() int64 {
+ if x != nil && x.IfGenerationMatch != nil {
+ return *x.IfGenerationMatch
+ }
+ return 0
+}
+
+func (x *UpdateObjectRequest) GetIfGenerationNotMatch() int64 {
+ if x != nil && x.IfGenerationNotMatch != nil {
+ return *x.IfGenerationNotMatch
+ }
+ return 0
+}
+
+func (x *UpdateObjectRequest) GetIfMetagenerationMatch() int64 {
+ if x != nil && x.IfMetagenerationMatch != nil {
+ return *x.IfMetagenerationMatch
+ }
+ return 0
+}
+
+func (x *UpdateObjectRequest) GetIfMetagenerationNotMatch() int64 {
+ if x != nil && x.IfMetagenerationNotMatch != nil {
+ return *x.IfMetagenerationNotMatch
+ }
+ return 0
+}
+
+func (x *UpdateObjectRequest) GetPredefinedAcl() string {
+ if x != nil {
+ return x.PredefinedAcl
+ }
+ return ""
+}
+
+func (x *UpdateObjectRequest) GetUpdateMask() *fieldmaskpb.FieldMask {
if x != nil {
return x.UpdateMask
}
return nil
}
+func (x *UpdateObjectRequest) GetCommonObjectRequestParams() *CommonObjectRequestParams {
+ if x != nil {
+ return x.CommonObjectRequestParams
+ }
+ return nil
+}
+
// Parameters that can be passed to any object request.
type CommonObjectRequestParams struct {
state protoimpl.MessageState
@@ -4064,11 +4333,9 @@ type CommonObjectRequestParams struct {
func (x *CommonObjectRequestParams) Reset() {
*x = CommonObjectRequestParams{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[41]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[41]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CommonObjectRequestParams) String() string {
@@ -4079,7 +4346,7 @@ func (*CommonObjectRequestParams) ProtoMessage() {}
func (x *CommonObjectRequestParams) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[41]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4124,11 +4391,9 @@ type ServiceConstants struct {
func (x *ServiceConstants) Reset() {
*x = ServiceConstants{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[42]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[42]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceConstants) String() string {
@@ -4139,7 +4404,7 @@ func (*ServiceConstants) ProtoMessage() {}
func (x *ServiceConstants) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[42]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4268,7 +4533,8 @@ type Bucket struct {
// Reserved for future use.
SatisfiesPzs bool `protobuf:"varint,25,opt,name=satisfies_pzs,json=satisfiesPzs,proto3" json:"satisfies_pzs,omitempty"`
// Configuration that, if present, specifies the data placement for a
- // [https://cloud.google.com/storage/docs/use-dual-regions][Dual Region].
+ // [https://cloud.google.com/storage/docs/locations#location-dr][configurable
+ // dual-region].
CustomPlacementConfig *Bucket_CustomPlacementConfig `protobuf:"bytes,26,opt,name=custom_placement_config,json=customPlacementConfig,proto3" json:"custom_placement_config,omitempty"`
// The bucket's Autoclass configuration. If there is no configuration, the
// Autoclass feature will be disabled and have no effect on the bucket.
@@ -4284,11 +4550,9 @@ type Bucket struct {
func (x *Bucket) Reset() {
*x = Bucket{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[43]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[43]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket) String() string {
@@ -4299,7 +4563,7 @@ func (*Bucket) ProtoMessage() {}
func (x *Bucket) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[43]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4573,11 +4837,9 @@ type BucketAccessControl struct {
func (x *BucketAccessControl) Reset() {
*x = BucketAccessControl{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[44]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[44]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BucketAccessControl) String() string {
@@ -4588,7 +4850,7 @@ func (*BucketAccessControl) ProtoMessage() {}
func (x *BucketAccessControl) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[44]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4681,11 +4943,9 @@ type ChecksummedData struct {
func (x *ChecksummedData) Reset() {
*x = ChecksummedData{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[45]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[45]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ChecksummedData) String() string {
@@ -4696,7 +4956,7 @@ func (*ChecksummedData) ProtoMessage() {}
func (x *ChecksummedData) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[45]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4741,224 +5001,27 @@ type ObjectChecksums struct {
// [https://cloud.google.com/storage/docs/hashes-etags#json-api][Hashes and
// ETags: Best Practices].
// Not all objects will provide an MD5 hash. For example, composite objects
- // provide only crc32c hashes.
- // This value is equivalent to running `cat object.txt | openssl md5 -binary`
+ // provide only crc32c hashes. This value is equivalent to running `cat
+ // object.txt | openssl md5 -binary`
Md5Hash []byte `protobuf:"bytes,2,opt,name=md5_hash,json=md5Hash,proto3" json:"md5_hash,omitempty"`
}
func (x *ObjectChecksums) Reset() {
*x = ObjectChecksums{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[46]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ObjectChecksums) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ObjectChecksums) ProtoMessage() {}
-
-func (x *ObjectChecksums) ProtoReflect() protoreflect.Message {
mi := &file_google_storage_v2_storage_proto_msgTypes[46]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead.
-func (*ObjectChecksums) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46}
-}
-
-func (x *ObjectChecksums) GetCrc32C() uint32 {
- if x != nil && x.Crc32C != nil {
- return *x.Crc32C
- }
- return 0
-}
-
-func (x *ObjectChecksums) GetMd5Hash() []byte {
- if x != nil {
- return x.Md5Hash
- }
- return nil
-}
-
-// Hmac Key Metadata, which includes all information other than the secret.
-type HmacKeyMetadata struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Immutable. Resource name ID of the key in the format
- // {projectIdentifier}/{accessId}.
- // {projectIdentifier} can be the project ID or project number.
- Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
- // Immutable. Globally unique id for keys.
- AccessId string `protobuf:"bytes,2,opt,name=access_id,json=accessId,proto3" json:"access_id,omitempty"`
- // Immutable. Identifies the project that owns the service account of the
- // specified HMAC key, in the format "projects/{projectIdentifier}".
- // {projectIdentifier} can be the project ID or project number.
- Project string `protobuf:"bytes,3,opt,name=project,proto3" json:"project,omitempty"`
- // Output only. Email of the service account the key authenticates as.
- ServiceAccountEmail string `protobuf:"bytes,4,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"`
- // State of the key. One of ACTIVE, INACTIVE, or DELETED.
- // Writable, can be updated by UpdateHmacKey operation.
- State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"`
- // Output only. The creation time of the HMAC key.
- CreateTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
- // Output only. The last modification time of the HMAC key metadata.
- UpdateTime *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
- // The etag of the HMAC key.
- Etag string `protobuf:"bytes,8,opt,name=etag,proto3" json:"etag,omitempty"`
-}
-
-func (x *HmacKeyMetadata) Reset() {
- *x = HmacKeyMetadata{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[47]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *HmacKeyMetadata) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*HmacKeyMetadata) ProtoMessage() {}
-
-func (x *HmacKeyMetadata) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[47]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use HmacKeyMetadata.ProtoReflect.Descriptor instead.
-func (*HmacKeyMetadata) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47}
-}
-
-func (x *HmacKeyMetadata) GetId() string {
- if x != nil {
- return x.Id
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetAccessId() string {
- if x != nil {
- return x.AccessId
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetProject() string {
- if x != nil {
- return x.Project
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetServiceAccountEmail() string {
- if x != nil {
- return x.ServiceAccountEmail
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetState() string {
- if x != nil {
- return x.State
- }
- return ""
-}
-
-func (x *HmacKeyMetadata) GetCreateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.CreateTime
- }
- return nil
-}
-
-func (x *HmacKeyMetadata) GetUpdateTime() *timestamppb.Timestamp {
- if x != nil {
- return x.UpdateTime
- }
- return nil
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-func (x *HmacKeyMetadata) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
-}
-
-// A directive to publish Pub/Sub notifications upon changes to a bucket.
-type NotificationConfig struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Required. The resource name of this NotificationConfig.
- // Format:
- // `projects/{project}/buckets/{bucket}/notificationConfigs/{notificationConfig}`
- // The `{project}` portion may be `_` for globally unique buckets.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Required. The Pub/Sub topic to which this subscription publishes. Formatted
- // as:
- // '//pubsub.googleapis.com/projects/{project-identifier}/topics/{my-topic}'
- Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"`
- // The etag of the NotificationConfig.
- // If included in the metadata of GetNotificationConfigRequest, the operation
- // will only be performed if the etag matches that of the NotificationConfig.
- Etag string `protobuf:"bytes,7,opt,name=etag,proto3" json:"etag,omitempty"`
- // If present, only send notifications about listed event types. If
- // empty, sent notifications for all event types.
- EventTypes []string `protobuf:"bytes,3,rep,name=event_types,json=eventTypes,proto3" json:"event_types,omitempty"`
- // A list of additional attributes to attach to each Pub/Sub
- // message published for this NotificationConfig.
- CustomAttributes map[string]string `protobuf:"bytes,4,rep,name=custom_attributes,json=customAttributes,proto3" json:"custom_attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- // If present, only apply this NotificationConfig to object names that
- // begin with this prefix.
- ObjectNamePrefix string `protobuf:"bytes,5,opt,name=object_name_prefix,json=objectNamePrefix,proto3" json:"object_name_prefix,omitempty"`
- // Required. The desired content of the Payload.
- PayloadFormat string `protobuf:"bytes,6,opt,name=payload_format,json=payloadFormat,proto3" json:"payload_format,omitempty"`
-}
-
-func (x *NotificationConfig) Reset() {
- *x = NotificationConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[48]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *NotificationConfig) String() string {
+func (x *ObjectChecksums) String() string {
return protoimpl.X.MessageStringOf(x)
}
-func (*NotificationConfig) ProtoMessage() {}
+func (*ObjectChecksums) ProtoMessage() {}
-func (x *NotificationConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[48]
- if protoimpl.UnsafeEnabled && x != nil {
+func (x *ObjectChecksums) ProtoReflect() protoreflect.Message {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[46]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4968,60 +5031,25 @@ func (x *NotificationConfig) ProtoReflect() protoreflect.Message {
return mi.MessageOf(x)
}
-// Deprecated: Use NotificationConfig.ProtoReflect.Descriptor instead.
-func (*NotificationConfig) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48}
-}
-
-func (x *NotificationConfig) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *NotificationConfig) GetTopic() string {
- if x != nil {
- return x.Topic
- }
- return ""
-}
-
-func (x *NotificationConfig) GetEtag() string {
- if x != nil {
- return x.Etag
- }
- return ""
+// Deprecated: Use ObjectChecksums.ProtoReflect.Descriptor instead.
+func (*ObjectChecksums) Descriptor() ([]byte, []int) {
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{46}
}
-func (x *NotificationConfig) GetEventTypes() []string {
- if x != nil {
- return x.EventTypes
+func (x *ObjectChecksums) GetCrc32C() uint32 {
+ if x != nil && x.Crc32C != nil {
+ return *x.Crc32C
}
- return nil
+ return 0
}
-func (x *NotificationConfig) GetCustomAttributes() map[string]string {
+func (x *ObjectChecksums) GetMd5Hash() []byte {
if x != nil {
- return x.CustomAttributes
+ return x.Md5Hash
}
return nil
}
-func (x *NotificationConfig) GetObjectNamePrefix() string {
- if x != nil {
- return x.ObjectNamePrefix
- }
- return ""
-}
-
-func (x *NotificationConfig) GetPayloadFormat() string {
- if x != nil {
- return x.PayloadFormat
- }
- return ""
-}
-
// Describes the Customer-Supplied Encryption Key mechanism used to store an
// Object's data at rest.
type CustomerEncryption struct {
@@ -5038,11 +5066,9 @@ type CustomerEncryption struct {
func (x *CustomerEncryption) Reset() {
*x = CustomerEncryption{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[49]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[47]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *CustomerEncryption) String() string {
@@ -5052,8 +5078,8 @@ func (x *CustomerEncryption) String() string {
func (*CustomerEncryption) ProtoMessage() {}
func (x *CustomerEncryption) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[49]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[47]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5065,7 +5091,7 @@ func (x *CustomerEncryption) ProtoReflect() protoreflect.Message {
// Deprecated: Use CustomerEncryption.ProtoReflect.Descriptor instead.
func (*CustomerEncryption) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{47}
}
func (x *CustomerEncryption) GetEncryptionAlgorithm() string {
@@ -5106,6 +5132,10 @@ type Object struct {
// Immutable. The content generation of this object. Used for object
// versioning.
Generation int64 `protobuf:"varint,3,opt,name=generation,proto3" json:"generation,omitempty"`
+ // Output only. Restore token used to differentiate deleted objects with the
+ // same name and generation. This field is output only, and only set for
+ // deleted objects in HNS buckets.
+ RestoreToken *string `protobuf:"bytes,35,opt,name=restore_token,json=restoreToken,proto3,oneof" json:"restore_token,omitempty"`
// Output only. The version of the metadata for this generation of this
// object. Used for preconditions and for detecting changes in metadata. A
// metageneration number is only meaningful in the context of a particular
@@ -5137,6 +5167,8 @@ type Object struct {
// Output only. If this object is noncurrent, this is the time when the object
// became noncurrent.
DeleteTime *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=delete_time,json=deleteTime,proto3" json:"delete_time,omitempty"`
+ // Output only. The time when the object was finalized.
+ FinalizeTime *timestamppb.Timestamp `protobuf:"bytes,36,opt,name=finalize_time,json=finalizeTime,proto3" json:"finalize_time,omitempty"`
// Content-Type of the object data, matching
// [https://tools.ietf.org/html/rfc7231#section-3.1.1.5][RFC 7231 §3.1.1.5].
// If an object is stored without a Content-Type, it is served as
@@ -5148,7 +5180,10 @@ type Object struct {
// Components are accumulated by compose operations.
ComponentCount int32 `protobuf:"varint,15,opt,name=component_count,json=componentCount,proto3" json:"component_count,omitempty"`
// Output only. Hashes for the data part of this object. This field is used
- // for output only and will be silently ignored if provided in requests.
+ // for output only and will be silently ignored if provided in requests. The
+ // checksums of the complete object regardless of data range. If the object is
+ // downloaded in full, the client should compute one of these checksums over
+ // the downloaded object and compare it against the value provided here.
Checksums *ObjectChecksums `protobuf:"bytes,16,opt,name=checksums,proto3" json:"checksums,omitempty"`
// Output only. The modification time of the object metadata.
// Set initially to object creation time and then updated whenever any
@@ -5213,11 +5248,9 @@ type Object struct {
func (x *Object) Reset() {
*x = Object{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[50]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[48]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Object) String() string {
@@ -5227,8 +5260,8 @@ func (x *Object) String() string {
func (*Object) ProtoMessage() {}
func (x *Object) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[50]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[48]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5240,7 +5273,7 @@ func (x *Object) ProtoReflect() protoreflect.Message {
// Deprecated: Use Object.ProtoReflect.Descriptor instead.
func (*Object) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{48}
}
func (x *Object) GetName() string {
@@ -5271,6 +5304,13 @@ func (x *Object) GetGeneration() int64 {
return 0
}
+func (x *Object) GetRestoreToken() string {
+ if x != nil && x.RestoreToken != nil {
+ return *x.RestoreToken
+ }
+ return ""
+}
+
func (x *Object) GetMetageneration() int64 {
if x != nil {
return x.Metageneration
@@ -5334,6 +5374,13 @@ func (x *Object) GetDeleteTime() *timestamppb.Timestamp {
return nil
}
+func (x *Object) GetFinalizeTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.FinalizeTime
+ }
+ return nil
+}
+
func (x *Object) GetContentType() string {
if x != nil {
return x.ContentType
@@ -5452,7 +5499,10 @@ type ObjectAccessControl struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
- // The access permission for the entity.
+ // The access permission for the entity. One of the following values:
+ // * `READER`
+ // * `WRITER`
+ // * `OWNER`
Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
// The ID of the access-control entry.
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
@@ -5495,11 +5545,9 @@ type ObjectAccessControl struct {
func (x *ObjectAccessControl) Reset() {
*x = ObjectAccessControl{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[51]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[49]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ObjectAccessControl) String() string {
@@ -5509,8 +5557,8 @@ func (x *ObjectAccessControl) String() string {
func (*ObjectAccessControl) ProtoMessage() {}
func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[51]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[49]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5522,7 +5570,7 @@ func (x *ObjectAccessControl) ProtoReflect() protoreflect.Message {
// Deprecated: Use ObjectAccessControl.ProtoReflect.Descriptor instead.
func (*ObjectAccessControl) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{49}
}
func (x *ObjectAccessControl) GetRole() string {
@@ -5606,11 +5654,9 @@ type ListObjectsResponse struct {
func (x *ListObjectsResponse) Reset() {
*x = ListObjectsResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[52]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[50]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListObjectsResponse) String() string {
@@ -5620,8 +5666,8 @@ func (x *ListObjectsResponse) String() string {
func (*ListObjectsResponse) ProtoMessage() {}
func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[52]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[50]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5633,7 +5679,7 @@ func (x *ListObjectsResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use ListObjectsResponse.ProtoReflect.Descriptor instead.
func (*ListObjectsResponse) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{50}
}
func (x *ListObjectsResponse) GetObjects() []*Object {
@@ -5671,11 +5717,9 @@ type ProjectTeam struct {
func (x *ProjectTeam) Reset() {
*x = ProjectTeam{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[53]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[51]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ProjectTeam) String() string {
@@ -5685,8 +5729,8 @@ func (x *ProjectTeam) String() string {
func (*ProjectTeam) ProtoMessage() {}
func (x *ProjectTeam) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[53]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[51]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5698,7 +5742,7 @@ func (x *ProjectTeam) ProtoReflect() protoreflect.Message {
// Deprecated: Use ProjectTeam.ProtoReflect.Descriptor instead.
func (*ProjectTeam) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{51}
}
func (x *ProjectTeam) GetProjectNumber() string {
@@ -5715,57 +5759,6 @@ func (x *ProjectTeam) GetTeam() string {
return ""
}
-// A service account, owned by Cloud Storage, which may be used when taking
-// action on behalf of a given project, for example to publish Pub/Sub
-// notifications or to retrieve security keys.
-type ServiceAccount struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The ID of the notification.
- EmailAddress string `protobuf:"bytes,1,opt,name=email_address,json=emailAddress,proto3" json:"email_address,omitempty"`
-}
-
-func (x *ServiceAccount) Reset() {
- *x = ServiceAccount{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[54]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ServiceAccount) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ServiceAccount) ProtoMessage() {}
-
-func (x *ServiceAccount) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[54]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ServiceAccount.ProtoReflect.Descriptor instead.
-func (*ServiceAccount) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{54}
-}
-
-func (x *ServiceAccount) GetEmailAddress() string {
- if x != nil {
- return x.EmailAddress
- }
- return ""
-}
-
// The owner of a specific resource.
type Owner struct {
state protoimpl.MessageState
@@ -5780,11 +5773,9 @@ type Owner struct {
func (x *Owner) Reset() {
*x = Owner{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[55]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[52]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Owner) String() string {
@@ -5794,8 +5785,8 @@ func (x *Owner) String() string {
func (*Owner) ProtoMessage() {}
func (x *Owner) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[55]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[52]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5807,7 +5798,7 @@ func (x *Owner) ProtoReflect() protoreflect.Message {
// Deprecated: Use Owner.ProtoReflect.Descriptor instead.
func (*Owner) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{55}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{52}
}
func (x *Owner) GetEntity() string {
@@ -5840,11 +5831,9 @@ type ContentRange struct {
func (x *ContentRange) Reset() {
*x = ContentRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[56]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[53]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ContentRange) String() string {
@@ -5854,8 +5843,8 @@ func (x *ContentRange) String() string {
func (*ContentRange) ProtoMessage() {}
func (x *ContentRange) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[56]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[53]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5867,7 +5856,7 @@ func (x *ContentRange) ProtoReflect() protoreflect.Message {
// Deprecated: Use ContentRange.ProtoReflect.Descriptor instead.
func (*ContentRange) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{56}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{53}
}
func (x *ContentRange) GetStart() int64 {
@@ -5908,11 +5897,9 @@ type ComposeObjectRequest_SourceObject struct {
func (x *ComposeObjectRequest_SourceObject) Reset() {
*x = ComposeObjectRequest_SourceObject{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[57]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[54]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ComposeObjectRequest_SourceObject) String() string {
@@ -5922,8 +5909,8 @@ func (x *ComposeObjectRequest_SourceObject) String() string {
func (*ComposeObjectRequest_SourceObject) ProtoMessage() {}
func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[57]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[54]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -5935,7 +5922,7 @@ func (x *ComposeObjectRequest_SourceObject) ProtoReflect() protoreflect.Message
// Deprecated: Use ComposeObjectRequest_SourceObject.ProtoReflect.Descriptor instead.
func (*ComposeObjectRequest_SourceObject) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0}
}
func (x *ComposeObjectRequest_SourceObject) GetName() string {
@@ -5973,11 +5960,9 @@ type ComposeObjectRequest_SourceObject_ObjectPreconditions struct {
func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) Reset() {
*x = ComposeObjectRequest_SourceObject_ObjectPreconditions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[58]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[55]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string {
@@ -5987,8 +5972,8 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) String() string
func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoMessage() {}
func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[58]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[55]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6000,7 +5985,7 @@ func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) ProtoReflect() p
// Deprecated: Use ComposeObjectRequest_SourceObject_ObjectPreconditions.ProtoReflect.Descriptor instead.
func (*ComposeObjectRequest_SourceObject_ObjectPreconditions) Descriptor() ([]byte, []int) {
- return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{12, 0, 0}
+ return file_google_storage_v2_storage_proto_rawDescGZIP(), []int{7, 0, 0}
}
func (x *ComposeObjectRequest_SourceObject_ObjectPreconditions) GetIfGenerationMatch() int64 {
@@ -6022,11 +6007,9 @@ type Bucket_Billing struct {
func (x *Bucket_Billing) Reset() {
*x = Bucket_Billing{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[59]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[56]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Billing) String() string {
@@ -6036,8 +6019,8 @@ func (x *Bucket_Billing) String() string {
func (*Bucket_Billing) ProtoMessage() {}
func (x *Bucket_Billing) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[59]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[56]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6088,11 +6071,9 @@ type Bucket_Cors struct {
func (x *Bucket_Cors) Reset() {
*x = Bucket_Cors{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[60]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[57]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Cors) String() string {
@@ -6102,8 +6083,8 @@ func (x *Bucket_Cors) String() string {
func (*Bucket_Cors) ProtoMessage() {}
func (x *Bucket_Cors) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[60]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[57]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6159,11 +6140,9 @@ type Bucket_Encryption struct {
func (x *Bucket_Encryption) Reset() {
*x = Bucket_Encryption{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[61]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[58]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Encryption) String() string {
@@ -6173,8 +6152,8 @@ func (x *Bucket_Encryption) String() string {
func (*Bucket_Encryption) ProtoMessage() {}
func (x *Bucket_Encryption) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[61]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[58]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6211,11 +6190,9 @@ type Bucket_IamConfig struct {
func (x *Bucket_IamConfig) Reset() {
*x = Bucket_IamConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[62]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[59]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_IamConfig) String() string {
@@ -6225,8 +6202,8 @@ func (x *Bucket_IamConfig) String() string {
func (*Bucket_IamConfig) ProtoMessage() {}
func (x *Bucket_IamConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[62]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[59]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6269,11 +6246,9 @@ type Bucket_Lifecycle struct {
func (x *Bucket_Lifecycle) Reset() {
*x = Bucket_Lifecycle{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[63]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[60]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Lifecycle) String() string {
@@ -6283,8 +6258,8 @@ func (x *Bucket_Lifecycle) String() string {
func (*Bucket_Lifecycle) ProtoMessage() {}
func (x *Bucket_Lifecycle) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[63]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[60]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6321,11 +6296,9 @@ type Bucket_Logging struct {
func (x *Bucket_Logging) Reset() {
*x = Bucket_Logging{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[64]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[61]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Logging) String() string {
@@ -6335,8 +6308,8 @@ func (x *Bucket_Logging) String() string {
func (*Bucket_Logging) ProtoMessage() {}
func (x *Bucket_Logging) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[64]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[61]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6386,11 +6359,9 @@ type Bucket_RetentionPolicy struct {
func (x *Bucket_RetentionPolicy) Reset() {
*x = Bucket_RetentionPolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[65]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_RetentionPolicy) String() string {
@@ -6400,8 +6371,8 @@ func (x *Bucket_RetentionPolicy) String() string {
func (*Bucket_RetentionPolicy) ProtoMessage() {}
func (x *Bucket_RetentionPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[65]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[62]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6453,11 +6424,9 @@ type Bucket_SoftDeletePolicy struct {
func (x *Bucket_SoftDeletePolicy) Reset() {
*x = Bucket_SoftDeletePolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[66]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[63]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_SoftDeletePolicy) String() string {
@@ -6467,8 +6436,8 @@ func (x *Bucket_SoftDeletePolicy) String() string {
func (*Bucket_SoftDeletePolicy) ProtoMessage() {}
func (x *Bucket_SoftDeletePolicy) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[66]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[63]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6511,11 +6480,9 @@ type Bucket_Versioning struct {
func (x *Bucket_Versioning) Reset() {
*x = Bucket_Versioning{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[67]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Versioning) String() string {
@@ -6525,8 +6492,8 @@ func (x *Bucket_Versioning) String() string {
func (*Bucket_Versioning) ProtoMessage() {}
func (x *Bucket_Versioning) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[67]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[64]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6571,11 +6538,9 @@ type Bucket_Website struct {
func (x *Bucket_Website) Reset() {
*x = Bucket_Website{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[68]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Website) String() string {
@@ -6585,8 +6550,8 @@ func (x *Bucket_Website) String() string {
func (*Bucket_Website) ProtoMessage() {}
func (x *Bucket_Website) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[68]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[65]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6629,11 +6594,9 @@ type Bucket_CustomPlacementConfig struct {
func (x *Bucket_CustomPlacementConfig) Reset() {
*x = Bucket_CustomPlacementConfig{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[69]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_CustomPlacementConfig) String() string {
@@ -6643,8 +6606,8 @@ func (x *Bucket_CustomPlacementConfig) String() string {
func (*Bucket_CustomPlacementConfig) ProtoMessage() {}
func (x *Bucket_CustomPlacementConfig) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[69]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[66]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6690,11 +6653,9 @@ type Bucket_Autoclass struct {
func (x *Bucket_Autoclass) Reset() {
*x = Bucket_Autoclass{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[70]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Autoclass) String() string {
@@ -6704,8 +6665,8 @@ func (x *Bucket_Autoclass) String() string {
func (*Bucket_Autoclass) ProtoMessage() {}
func (x *Bucket_Autoclass) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[70]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[67]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6760,11 +6721,9 @@ type Bucket_HierarchicalNamespace struct {
func (x *Bucket_HierarchicalNamespace) Reset() {
*x = Bucket_HierarchicalNamespace{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[71]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_HierarchicalNamespace) String() string {
@@ -6774,8 +6733,8 @@ func (x *Bucket_HierarchicalNamespace) String() string {
func (*Bucket_HierarchicalNamespace) ProtoMessage() {}
func (x *Bucket_HierarchicalNamespace) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[71]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[68]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6815,11 +6774,9 @@ type Bucket_IamConfig_UniformBucketLevelAccess struct {
func (x *Bucket_IamConfig_UniformBucketLevelAccess) Reset() {
*x = Bucket_IamConfig_UniformBucketLevelAccess{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[73]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
@@ -6829,8 +6786,8 @@ func (x *Bucket_IamConfig_UniformBucketLevelAccess) String() string {
func (*Bucket_IamConfig_UniformBucketLevelAccess) ProtoMessage() {}
func (x *Bucket_IamConfig_UniformBucketLevelAccess) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[73]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[70]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6874,11 +6831,9 @@ type Bucket_Lifecycle_Rule struct {
func (x *Bucket_Lifecycle_Rule) Reset() {
*x = Bucket_Lifecycle_Rule{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[74]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Lifecycle_Rule) String() string {
@@ -6888,8 +6843,8 @@ func (x *Bucket_Lifecycle_Rule) String() string {
func (*Bucket_Lifecycle_Rule) ProtoMessage() {}
func (x *Bucket_Lifecycle_Rule) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[74]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[71]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -6934,11 +6889,9 @@ type Bucket_Lifecycle_Rule_Action struct {
func (x *Bucket_Lifecycle_Rule_Action) Reset() {
*x = Bucket_Lifecycle_Rule_Action{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[75]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[72]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Lifecycle_Rule_Action) String() string {
@@ -6948,8 +6901,8 @@ func (x *Bucket_Lifecycle_Rule_Action) String() string {
func (*Bucket_Lifecycle_Rule_Action) ProtoMessage() {}
func (x *Bucket_Lifecycle_Rule_Action) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[75]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[72]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -7032,11 +6985,9 @@ type Bucket_Lifecycle_Rule_Condition struct {
func (x *Bucket_Lifecycle_Rule_Condition) Reset() {
*x = Bucket_Lifecycle_Rule_Condition{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_storage_v2_storage_proto_msgTypes[76]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Bucket_Lifecycle_Rule_Condition) String() string {
@@ -7046,8 +6997,8 @@ func (x *Bucket_Lifecycle_Rule_Condition) String() string {
func (*Bucket_Lifecycle_Rule_Condition) ProtoMessage() {}
func (x *Bucket_Lifecycle_Rule_Condition) ProtoReflect() protoreflect.Message {
- mi := &file_google_storage_v2_storage_proto_msgTypes[76]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_storage_v2_storage_proto_msgTypes[73]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -7164,13 +7115,103 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
- 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66,
+ 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x8d, 0x02, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a,
+ 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
+ 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
+ 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42,
+ 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f,
+ 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x93, 0x02, 0x0a, 0x13, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69,
+ 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70,
+ 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d,
+ 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22,
+ 0xf3, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
+ 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
+ 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61,
+ 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64,
+ 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74,
+ 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f,
+ 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
+ 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d,
+ 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a,
+ 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66,
0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69,
0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
@@ -7178,343 +7219,157 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18,
0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x02, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+ 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41,
+ 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
+ 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65,
+ 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f,
+ 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
+ 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d,
- 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x02, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d,
- 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x22, 0x93, 0x02, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
- 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e,
- 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
- 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65,
- 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64,
- 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x22, 0xf3, 0x01, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0,
- 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x12, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09,
- 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52,
- 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67,
- 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70,
- 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
- 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78,
- 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48,
- 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0c,
- 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x72, 0x0a, 0x13,
- 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74,
- 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x22, 0x9e, 0x01, 0x0a, 0x20, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x22, 0xb6, 0x03, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42,
- 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
- 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64,
- 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x1d, 0x70, 0x72, 0x65,
- 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x1a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x44, 0x65, 0x66, 0x61,
- 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b,
- 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x1a,
- 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
- 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x68, 0x0a, 0x1f, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x1c, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x45, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x0a, 0x29, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xc9, 0x01, 0x0a, 0x1f,
- 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x49, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x31, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x5b, 0x0a, 0x13, 0x6e, 0x6f,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x12, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xa7, 0x01, 0x0a, 0x1e, 0x4c, 0x69, 0x73, 0x74,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x49, 0x0a, 0x06, 0x70, 0x61,
- 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x31, 0xe0, 0x41, 0x02, 0xfa,
- 0x41, 0x2b, 0x12, 0x29, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x70,
- 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0xa3, 0x01, 0x0a, 0x1f, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x14, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20,
- 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x6e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12,
- 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61,
- 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70,
- 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x63, 0x68, 0x22, 0xc3, 0x07, 0x0a, 0x14, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x0b,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41,
+ 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x5b,
+ 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f,
+ 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0d, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x3c, 0x0a, 0x1a, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65,
+ 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64,
+ 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b,
+ 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x07, 0x6b,
+ 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41,
+ 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
+ 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x5b, 0x0a, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43,
- 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12,
- 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70,
- 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8, 0x02, 0x0a, 0x0c, 0x53,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70,
+ 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50,
+ 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f,
+ 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a,
+ 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a,
+ 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04, 0x0a, 0x13, 0x44, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a,
0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66,
0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43,
- 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
- 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x1a, 0xa8,
- 0x02, 0x0a, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x7b, 0x0a, 0x14, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f,
- 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e,
- 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x52, 0x13, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x62, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50,
- 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x33, 0x0a, 0x13,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66,
- 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xe2, 0x04,
- 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
- 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
- 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x22, 0xa9, 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
- 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
- 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02,
- 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13,
- 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a,
- 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02,
- 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66,
- 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b,
- 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63,
- 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
- 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69,
- 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
- 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
+ 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b,
+ 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69,
0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63,
- 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f,
- 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c,
- 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a,
- 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22,
- 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62,
- 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73,
- 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65, 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69,
- 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00,
- 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
- 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
- 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03,
+ 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72,
+ 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16,
+ 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e,
+ 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd3,
+ 0x05, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a,
+ 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06,
+ 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74,
+ 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72,
- 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61,
- 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
- 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
- 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01,
+ 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52,
+ 0x0d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x41, 0x63, 0x6c, 0x88, 0x01,
+ 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f,
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
@@ -7522,45 +7377,101 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xe4,
- 0x05, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x5f, 0x61, 0x63, 0x6c, 0x22, 0x3f, 0x0a, 0x1b, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65,
+ 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c,
+ 0x6f, 0x61, 0x64, 0x49, 0x64, 0x22, 0x1e, 0x0a, 0x1c, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52,
+ 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xec, 0x05, 0x0a, 0x11, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02,
+ 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x72, 0x65,
+ 0x61, 0x64, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64,
+ 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x72, 0x65,
+ 0x61, 0x64, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17,
0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
- 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52,
0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74,
0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d,
0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d,
+ 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d,
0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x18, 0x69, 0x66,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66,
0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f,
0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d,
- 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f,
+ 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x04, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d,
+ 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
+ 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f,
+ 0x6d, 0x61, 0x73, 0x6b, 0x22, 0x8e, 0x06, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0b, 0x73,
+ 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a,
+ 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x11, 0x69, 0x66,
+ 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b,
+ 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x03, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
+ 0x48, 0x04, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72,
+ 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x3c,
+ 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x48, 0x05, 0x52,
+ 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d,
+ 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0c, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72,
+ 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x73, 0x6f, 0x66, 0x74, 0x5f,
0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67,
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
@@ -7588,796 +7499,969 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d,
- 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x8c, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65,
- 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0d, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33,
- 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69,
- 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
- 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
- 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b,
- 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x24, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18,
- 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53,
- 0x69, 0x7a, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
- 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc6, 0x06, 0x0a, 0x12, 0x42, 0x69, 0x64, 0x69,
+ 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3d,
+ 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
+ 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x1b, 0x0a,
+ 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03,
+ 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69,
0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
- 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a,
- 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11,
- 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65,
- 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26,
- 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64,
- 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
- 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15,
+ 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f,
+ 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52,
+ 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x6d, 0x0a, 0x1c,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x02, 0x18, 0x01, 0x48, 0x04,
+ 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a,
+ 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x0d, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x48,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x48, 0x05, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x48, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e,
+ 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x48, 0x06, 0x52,
+ 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01,
+ 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x42, 0x0e,
+ 0x0a, 0x0c, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x10,
+ 0x0a, 0x0e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x22, 0xa7, 0x01, 0x0a, 0x15, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x72, 0x65,
+ 0x61, 0x64, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61,
+ 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, 0x72, 0x65, 0x61,
+ 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3d, 0x0a, 0x0b, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0a,
+ 0x72, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x16, 0x42,
+ 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x64, 0x61, 0x74, 0x61, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28,
0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68,
- 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66, 0x69,
- 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72, 0x73,
- 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74,
- 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72,
- 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69,
- 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48,
- 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77,
- 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xb5, 0x04, 0x0a, 0x16,
- 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0,
- 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12,
- 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64,
- 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68,
- 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52,
- 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61,
- 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12,
- 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b,
- 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69,
- 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b,
- 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
- 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69,
- 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64,
- 0x61, 0x74, 0x61, 0x22, 0x8b, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69,
- 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x44, 0x61, 0x74,
+ 0x61, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x42,
+ 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64,
+ 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x48, 0x61, 0x6e, 0x64,
+ 0x6c, 0x65, 0x22, 0x9f, 0x01, 0x0a, 0x1d, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x65, 0x64, 0x45,
+ 0x72, 0x72, 0x6f, 0x72, 0x12, 0x42, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x68, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69,
+ 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x52, 0x0a, 0x72, 0x65,
+ 0x61, 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74,
+ 0x69, 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48,
+ 0x00, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88,
+ 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xed, 0x01, 0x0a, 0x1e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x65, 0x64, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69,
+ 0x6e, 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+ 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01,
+ 0x01, 0x12, 0x4a, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69,
+ 0x57, 0x72, 0x69, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x48, 0x01, 0x52, 0x0b, 0x77,
+ 0x72, 0x69, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x02, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88,
+ 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68,
+ 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x64, 0x0a, 0x13, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x4d, 0x0a, 0x11, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x0f, 0x72, 0x65, 0x61, 0x64, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x55, 0x0a, 0x0e, 0x52, 0x65,
+ 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x17, 0x0a, 0x07,
+ 0x72, 0x65, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x72,
+ 0x65, 0x61, 0x64, 0x49, 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+ 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x22, 0x75, 0x0a, 0x09, 0x52, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x24,
+ 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x66,
+ 0x66, 0x73, 0x65, 0x74, 0x12, 0x24, 0x0a, 0x0b, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0a,
+ 0x72, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x07, 0x72, 0x65,
+ 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02,
+ 0x52, 0x06, 0x72, 0x65, 0x61, 0x64, 0x49, 0x64, 0x22, 0xba, 0x01, 0x0a, 0x0f, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x0a, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x09, 0x72,
+ 0x65, 0x61, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x61, 0x6e, 0x67,
+ 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x61, 0x6e,
+ 0x67, 0x65, 0x45, 0x6e, 0x64, 0x22, 0x2d, 0x0a, 0x0e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61,
+ 0x64, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x2e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x06, 0x68, 0x61, 0x6e, 0x64, 0x6c,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x68, 0x61,
+ 0x6e, 0x64, 0x6c, 0x65, 0x22, 0xc0, 0x04, 0x0a, 0x0f, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75,
- 0x73, 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65,
- 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f,
- 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f,
- 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65,
- 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
- 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f,
- 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65,
- 0x72, 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61,
- 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72,
- 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12,
- 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61,
- 0x73, 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01,
- 0x01, 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
- 0x69, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68,
- 0x69, 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63,
- 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67,
- 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66,
- 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42,
- 0x03, 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x64, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c,
- 0x64, 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73,
- 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63,
- 0x6c, 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67,
- 0x6c, 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09,
- 0x6d, 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65,
- 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72,
- 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e,
+ 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72,
+ 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17,
+ 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52,
+ 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f,
+ 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03,
+ 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x24, 0x0a,
+ 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x08, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x04, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x69, 0x7a, 0x65,
+ 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c,
+ 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x05, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x65, 0x6e,
+ 0x64, 0x61, 0x62, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f,
+ 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18,
+ 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f,
+ 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x61, 0x70, 0x70,
+ 0x65, 0x6e, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x22, 0xf8, 0x03, 0x0a, 0x12, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d,
+ 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64, 0x12, 0x50, 0x0a,
+ 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70,
+ 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x0f,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12,
+ 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b,
+ 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65,
+ 0x64, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75,
+ 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69, 0x6e, 0x69, 0x73,
+ 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x66,
+ 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d, 0x66, 0x69, 0x72,
+ 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61,
+ 0x74, 0x61, 0x22, 0x87, 0x01, 0x0a, 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65,
+ 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0xe9, 0x03, 0x0a,
+ 0x10, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65,
+ 0x63, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x1b, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x0a,
+ 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x0c, 0x72, 0x6f,
+ 0x75, 0x74, 0x69, 0x6e, 0x67, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x4a, 0x0a,
+ 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x18, 0x07, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x48, 0x03, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66,
+ 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e,
+ 0x67, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x77, 0x72, 0x69, 0x74,
+ 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x8a, 0x05, 0x0a, 0x16, 0x42, 0x69, 0x64,
+ 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x49, 0x64, 0x12, 0x50, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65,
+ 0x63, 0x48, 0x00, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x53, 0x70, 0x65, 0x63, 0x12, 0x53, 0x0a, 0x12, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x10, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x26, 0x0a, 0x0c, 0x77, 0x72, 0x69,
+ 0x74, 0x65, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x66, 0x66, 0x73, 0x65,
+ 0x74, 0x12, 0x4f, 0x0a, 0x10, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64,
+ 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
- 0x72, 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61, 0x74, 0x61, 0x48,
+ 0x01, 0x52, 0x0f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x6d, 0x65, 0x64, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65,
+ 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73,
+ 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d,
+ 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75,
+ 0x70, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x65, 0x4c, 0x6f,
+ 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x05, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x66, 0x69,
+ 0x6e, 0x69, 0x73, 0x68, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0b, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x6d, 0x0a,
+ 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x0a, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d,
+ 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x0f, 0x0a, 0x0d,
+ 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x06, 0x0a,
+ 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xe8, 0x01, 0x0a, 0x17, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73,
0x69, 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72,
0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10,
- 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f,
- 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12,
- 0x57, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02,
- 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74,
- 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18,
- 0x1b, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75,
- 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79,
- 0x12, 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a,
- 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
- 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f,
- 0x61, 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69,
- 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
- 0x41, 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03,
- 0x48, 0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x09, 0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
- 0x01, 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74,
- 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
- 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x05, 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
- 0x12, 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65,
- 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66,
- 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d,
- 0x61, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74,
- 0x65, 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69,
- 0x74, 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63,
- 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18,
- 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72,
- 0x69, 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b,
- 0x65, 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c,
- 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27,
- 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35,
- 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63,
- 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65,
- 0x73, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
- 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73,
- 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b,
- 0x73, 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42,
- 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61,
- 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
- 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42,
- 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21,
- 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e,
- 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
- 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f,
- 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x32, 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72,
- 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13,
- 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74,
- 0x74, 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69,
- 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x53, 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a,
- 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
- 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70,
- 0x65, 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63,
- 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65,
- 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
- 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
- 0x49, 0x64, 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48,
+ 0x72, 0x63, 0x65, 0x12, 0x4a, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x6e,
+ 0x64, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69,
+ 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x48, 0x01, 0x52,
+ 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x88, 0x01, 0x01, 0x42,
+ 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42,
+ 0x0f, 0x0a, 0x0d, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65,
+ 0x22, 0xe3, 0x04, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a,
+ 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
+ 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73,
+ 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65,
+ 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18,
+ 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72,
+ 0x12, 0x3c, 0x0a, 0x1a, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x74, 0x72, 0x61, 0x69,
+ 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x54, 0x72, 0x61,
+ 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x65, 0x72, 0x12, 0x16,
+ 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06,
+ 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1a, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73,
+ 0x6b, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x88, 0x01, 0x01,
+ 0x12, 0x34, 0x0a, 0x13, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69,
+ 0x63, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x52, 0x12, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x69,
+ 0x63, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x30, 0x0a, 0x11, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f,
+ 0x67, 0x72, 0x61, 0x70, 0x68, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x10, 0x6c, 0x65, 0x78, 0x69, 0x63, 0x6f, 0x67, 0x72,
+ 0x61, 0x70, 0x68, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x12, 0x26, 0x0a, 0x0c, 0x73, 0x6f, 0x66, 0x74,
+ 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03,
+ 0xe0, 0x41, 0x01, 0x52, 0x0b, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64,
+ 0x12, 0x42, 0x0a, 0x1b, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x66, 0x6f, 0x6c, 0x64,
+ 0x65, 0x72, 0x73, 0x5f, 0x61, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18,
+ 0x0d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x18, 0x69, 0x6e, 0x63, 0x6c,
+ 0x75, 0x64, 0x65, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x41, 0x73, 0x50, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x65, 0x73, 0x12, 0x22, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x67, 0x6c,
+ 0x6f, 0x62, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x09, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x47, 0x6c, 0x6f, 0x62, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x72, 0x65, 0x61,
+ 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x22, 0xaa, 0x01, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f,
+ 0x61, 0x64, 0x49, 0x64, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61,
+ 0x72, 0x61, 0x6d, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72,
+ 0x61, 0x6d, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69,
+ 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+ 0x12, 0x27, 0x0a, 0x0e, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x73, 0x69,
+ 0x7a, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x70, 0x65, 0x72, 0x73,
+ 0x69, 0x73, 0x74, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x48, 0x00, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x22, 0xb5, 0x0e, 0x0a, 0x14, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x10, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x18, 0x20, 0x01, 0x28, 0x09, 0x42, 0x06, 0xe0, 0x41, 0x02, 0xe0, 0x41, 0x05, 0x52, 0x0f, 0x64,
+ 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x57,
+ 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xe0,
+ 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x56, 0x0a, 0x13, 0x64, 0x65, 0x73, 0x74, 0x69,
+ 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x1b,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64,
+ 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
+ 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x11, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12,
+ 0x3b, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x0b, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x0d,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x3c, 0x0a, 0x1a, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x5f, 0x61,
+ 0x63, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41,
+ 0x63, 0x6c, 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x48,
0x00, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d,
0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
- 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65,
+ 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68,
0x88, 0x01, 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09,
0x20, 0x01, 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65,
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
0x12, 0x42, 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
- 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
- 0x68, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e,
- 0x65, 0x64, 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72,
- 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41,
- 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a,
- 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d,
- 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14,
- 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x48, 0x04, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x03, 0x48, 0x05,
+ 0x52, 0x1a, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12,
+ 0x48, 0x0a, 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x48, 0x06, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4f, 0x0a, 0x22, 0x69, 0x66, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x03, 0x48, 0x07, 0x52, 0x1e, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e,
+ 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x1c, 0x6d, 0x61,
+ 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65,
+ 0x6e, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x18, 0x6d, 0x61, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74,
+ 0x74, 0x65, 0x6e, 0x50, 0x65, 0x72, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f,
+ 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x10,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x1d, 0x63, 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69,
+ 0x74, 0x68, 0x6d, 0x12, 0x46, 0x0a, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65,
+ 0x79, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x1c, 0x63,
+ 0x6f, 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x27, 0x63,
+ 0x6f, 0x70, 0x79, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36,
+ 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x22, 0x63, 0x6f,
+ 0x70, 0x79, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73,
+ 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
+ 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
+ 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
+ 0x75, 0x6d, 0x73, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42, 0x16,
+ 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e,
+ 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1d,
+ 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a,
+ 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d,
+ 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xd6, 0x01, 0x0a, 0x0f, 0x52,
+ 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32,
+ 0x0a, 0x15, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x72, 0x65,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x74,
+ 0x6f, 0x74, 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x74,
+ 0x65, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x69, 0x7a,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53,
+ 0x69, 0x7a, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x77, 0x72, 0x69,
+ 0x74, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+ 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x35, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x22, 0xec, 0x07, 0x0a, 0x11, 0x4d, 0x6f, 0x76, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x02, 0xfa, 0x41,
+ 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x28, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x32, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
+ 0xe0, 0x41, 0x02, 0x52, 0x11, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x45, 0x0a, 0x1a, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48,
+ 0x00, 0x52, 0x17, 0x69, 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4c, 0x0a,
+ 0x1e, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x01, 0x52, 0x1a, 0x69, 0x66,
+ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x4d, 0x0a, 0x1e, 0x69,
+ 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x02, 0x52, 0x1b, 0x69, 0x66, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x54, 0x0a, 0x22, 0x69, 0x66,
+ 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
- 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c,
- 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0x69, 0x0a, 0x18,
- 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41,
- 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x9e, 0x01, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61,
- 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64,
- 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75,
- 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x81, 0x01, 0x0a, 0x15, 0x43, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
- 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x28, 0x0a, 0x10, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x73, 0x65,
- 0x63, 0x72, 0x65, 0x74, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0x87, 0x01, 0x0a,
- 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61,
- 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d,
- 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x84, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x09,
- 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d,
- 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65,
- 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x80, 0x02,
- 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e,
- 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a,
- 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
- 0x12, 0x32, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x13, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45,
- 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x68, 0x6f, 0x77, 0x5f, 0x64, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x0f, 0x73, 0x68, 0x6f, 0x77, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73,
- 0x22, 0x7f, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x68, 0x6d, 0x61, 0x63,
- 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52,
- 0x08, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78,
- 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65,
- 0x6e, 0x22, 0x97, 0x01, 0x0a, 0x14, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x68, 0x6d,
- 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x03, 0x52, 0x1e, 0x69,
+ 0x66, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01,
+ 0x12, 0x38, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0,
+ 0x41, 0x01, 0x48, 0x04, 0x52, 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x17, 0x69, 0x66,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x09, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01,
+ 0x48, 0x05, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x40, 0x0a, 0x17, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x48, 0x06, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x47, 0x0a,
+ 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x0b, 0x20, 0x01,
+ 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x48, 0x07, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74,
+ 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e,
+ 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x21, 0x0a, 0x1f, 0x5f, 0x69, 0x66, 0x5f,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x25, 0x0a, 0x23, 0x5f,
+ 0x69, 0x66, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65,
+ 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x22, 0xaf, 0x02, 0x0a, 0x1a, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75,
+ 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x53, 0x0a, 0x11, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x70, 0x65, 0x63,
+ 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x53, 0x70, 0x65, 0x63, 0x12, 0x6d, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x07, 0x68, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x3b,
- 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x52,
- 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xbf, 0x01, 0x0a, 0x19,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68,
- 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14,
- 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d,
- 0x0a, 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79,
- 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b,
- 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05,
- 0x0a, 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e,
- 0x74, 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a,
- 0x12, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
- 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41,
- 0x44, 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80,
- 0x80, 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f,
- 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01,
- 0x12, 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53,
- 0x49, 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d,
- 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41,
- 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59,
- 0x54, 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55,
- 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49,
- 0x45, 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10,
- 0x80, 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d,
- 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f,
- 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a,
- 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41,
- 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f,
- 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58,
- 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f,
- 0x4e, 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54,
- 0x10, 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59,
- 0x43, 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55,
- 0x43, 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f,
- 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f,
- 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31,
- 0x0a, 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42,
- 0x55, 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80,
- 0x02, 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43,
- 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54,
- 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e,
- 0x47, 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41,
- 0x42, 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55,
- 0x4e, 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45,
- 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e,
- 0x47, 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42,
- 0x45, 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59,
- 0x54, 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42,
- 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c,
- 0x45, 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55,
- 0x45, 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f,
- 0x54, 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f,
- 0x44, 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0xf5, 0x23, 0x0a, 0x06, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20,
- 0x0a, 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64,
- 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
- 0x65, 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63,
- 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61,
- 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79,
- 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73,
- 0x12, 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72,
- 0x70, 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12,
- 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61,
- 0x63, 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x19, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x4d, 0x0a, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73,
+ 0x75, 0x6d, 0x73, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b,
+ 0x73, 0x75, 0x6d, 0x73, 0x22, 0x3a, 0x0a, 0x1b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73,
+ 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x49, 0x64,
+ 0x22, 0x87, 0x05, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x52, 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41,
- 0x63, 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18,
- 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x65, 0x63, 0x74, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x12, 0x33, 0x0a, 0x13, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52,
+ 0x11, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x3a, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x14, 0x69, 0x66, 0x47, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01,
+ 0x01, 0x12, 0x3b, 0x0a, 0x17, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x03, 0x48, 0x02, 0x52, 0x15, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88, 0x01, 0x01, 0x12, 0x42,
+ 0x0a, 0x1b, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x03, 0x48, 0x03, 0x52, 0x18, 0x69, 0x66, 0x4d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x6f, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x88,
+ 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64,
+ 0x5f, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x65, 0x64,
+ 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x41, 0x63, 0x6c, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52,
+ 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x1c, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52,
+ 0x19, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x42, 0x16, 0x0a, 0x14, 0x5f, 0x69,
+ 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x42, 0x1a, 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1a,
+ 0x0a, 0x18, 0x5f, 0x69, 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x69,
+ 0x66, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xbf, 0x01, 0x0a, 0x19, 0x43,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x30, 0x0a, 0x14, 0x65,
+ 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x62, 0x79,
+ 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3d, 0x0a,
+ 0x1b, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x5f,
+ 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0c, 0x52, 0x18, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65,
+ 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xca, 0x05, 0x0a,
+ 0x10, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74,
+ 0x73, 0x22, 0xb5, 0x05, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x12,
+ 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
+ 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x14, 0x4d, 0x41, 0x58, 0x5f, 0x52, 0x45, 0x41, 0x44,
+ 0x5f, 0x43, 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80,
+ 0x01, 0x12, 0x1c, 0x0a, 0x15, 0x4d, 0x41, 0x58, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x43,
+ 0x48, 0x55, 0x4e, 0x4b, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x80, 0x80, 0x01, 0x12,
+ 0x19, 0x0a, 0x12, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x53, 0x49,
+ 0x5a, 0x45, 0x5f, 0x4d, 0x42, 0x10, 0x80, 0x80, 0xc0, 0x02, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41,
+ 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54,
+ 0x41, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x4e, 0x41, 0x4d, 0x45, 0x5f, 0x42, 0x59, 0x54,
+ 0x45, 0x53, 0x10, 0x80, 0x08, 0x12, 0x2a, 0x0a, 0x25, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53,
+ 0x54, 0x4f, 0x4d, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x46, 0x49, 0x45,
+ 0x4c, 0x44, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80,
+ 0x20, 0x12, 0x29, 0x0a, 0x24, 0x4d, 0x41, 0x58, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f,
+ 0x4d, 0x45, 0x54, 0x41, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53,
+ 0x49, 0x5a, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0x40, 0x12, 0x2a, 0x0a, 0x24,
+ 0x4d, 0x41, 0x58, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x5f, 0x4d, 0x45, 0x54, 0x41, 0x44,
+ 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x54, 0x41, 0x4c, 0x5f, 0x53, 0x49, 0x5a, 0x45, 0x5f, 0x42,
+ 0x59, 0x54, 0x45, 0x53, 0x10, 0x80, 0xa0, 0x01, 0x12, 0x27, 0x0a, 0x23, 0x4d, 0x41, 0x58, 0x5f,
+ 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4e,
+ 0x46, 0x49, 0x47, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43, 0x4b, 0x45, 0x54, 0x10,
+ 0x64, 0x12, 0x22, 0x0a, 0x1e, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x49, 0x46, 0x45, 0x43, 0x59, 0x43,
+ 0x4c, 0x45, 0x5f, 0x52, 0x55, 0x4c, 0x45, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x42, 0x55, 0x43,
+ 0x4b, 0x45, 0x54, 0x10, 0x64, 0x12, 0x26, 0x0a, 0x22, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54,
+ 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d,
+ 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55, 0x54, 0x45, 0x53, 0x10, 0x05, 0x12, 0x31, 0x0a,
+ 0x2c, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f,
+ 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52, 0x49, 0x42, 0x55,
+ 0x54, 0x45, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x10, 0x80, 0x02,
+ 0x12, 0x33, 0x0a, 0x2e, 0x4d, 0x41, 0x58, 0x5f, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x55, 0x53, 0x54, 0x4f, 0x4d, 0x5f, 0x41, 0x54, 0x54, 0x52,
+ 0x49, 0x42, 0x55, 0x54, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47,
+ 0x54, 0x48, 0x10, 0x80, 0x08, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42,
+ 0x45, 0x4c, 0x53, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x49, 0x45, 0x53, 0x5f, 0x43, 0x4f, 0x55, 0x4e,
+ 0x54, 0x10, 0x40, 0x12, 0x1f, 0x0a, 0x1b, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45, 0x4c,
+ 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x4c, 0x45, 0x4e, 0x47,
+ 0x54, 0x48, 0x10, 0x3f, 0x12, 0x1f, 0x0a, 0x1a, 0x4d, 0x41, 0x58, 0x5f, 0x4c, 0x41, 0x42, 0x45,
+ 0x4c, 0x53, 0x5f, 0x4b, 0x45, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x5f, 0x42, 0x59, 0x54,
+ 0x45, 0x53, 0x10, 0x80, 0x01, 0x12, 0x2e, 0x0a, 0x29, 0x4d, 0x41, 0x58, 0x5f, 0x4f, 0x42, 0x4a,
+ 0x45, 0x43, 0x54, 0x5f, 0x49, 0x44, 0x53, 0x5f, 0x50, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x4c, 0x45,
+ 0x54, 0x45, 0x5f, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x53, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x45,
+ 0x53, 0x54, 0x10, 0xe8, 0x07, 0x12, 0x1e, 0x0a, 0x1a, 0x53, 0x50, 0x4c, 0x49, 0x54, 0x5f, 0x54,
+ 0x4f, 0x4b, 0x45, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x44,
+ 0x41, 0x59, 0x53, 0x10, 0x0e, 0x1a, 0x02, 0x10, 0x01, 0x22, 0x86, 0x24, 0x0a, 0x06, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a,
+ 0x09, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x08, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x49, 0x64, 0x12,
+ 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65,
+ 0x74, 0x61, 0x67, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
+ 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1f, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x28, 0x0a, 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x79, 0x70,
+ 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12,
+ 0x10, 0x0a, 0x03, 0x72, 0x70, 0x6f, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x70,
+ 0x6f, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x54, 0x0a, 0x12, 0x64,
+ 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x63,
+ 0x6c, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52,
+ 0x10, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63,
+ 0x6c, 0x12, 0x41, 0x0a, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x18, 0x0a,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e,
+ 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65, 0x63,
+ 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18, 0x0c,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e,
+ 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03,
+ 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x18,
+ 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61,
+ 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15,
+ 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73, 0x65,
+ 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18,
+ 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x52, 0x09, 0x6c, 0x69, 0x66, 0x65,
- 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65,
- 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x18,
- 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61,
+ 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x18,
+ 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x2e, 0x43, 0x6f, 0x72, 0x73, 0x52, 0x04, 0x63, 0x6f, 0x72, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41,
- 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x37, 0x0a,
- 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62,
- 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x15, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73,
- 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73,
- 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c,
- 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3b, 0x0a, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65,
- 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69, 0x74,
+ 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x18,
+ 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67, 0x69,
+ 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f, 0x67,
+ 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0,
+ 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e, 0x63,
+ 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c, 0x6c,
+ 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a, 0x10,
+ 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79,
+ 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x52, 0x07, 0x77, 0x65, 0x62, 0x73, 0x69,
- 0x74, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67,
- 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63,
+ 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69,
+ 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0a, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x3b, 0x0a, 0x07, 0x6c, 0x6f, 0x67, 0x67,
- 0x69, 0x6e, 0x67, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6c, 0x6f,
- 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x13,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x65, 0x6e,
- 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x3b, 0x0a, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x15, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x42, 0x69, 0x6c,
- 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x62, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x54, 0x0a,
- 0x10, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61, 0x6d,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73, 0x66,
+ 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73,
+ 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x63,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c,
+ 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x63,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73,
+ 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0a, 0x69, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61, 0x75,
+ 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72, 0x61,
+ 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x09, 0x69, 0x61,
- 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x61, 0x74, 0x69, 0x73,
- 0x66, 0x69, 0x65, 0x73, 0x5f, 0x70, 0x7a, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c,
- 0x73, 0x61, 0x74, 0x69, 0x73, 0x66, 0x69, 0x65, 0x73, 0x50, 0x7a, 0x73, 0x12, 0x67, 0x0a, 0x17,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74,
- 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e,
+ 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e,
+ 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15, 0x68,
+ 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c,
+ 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66, 0x74,
+ 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0, 0x41,
+ 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c,
+ 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x25,
+ 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65,
+ 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12, 0x16,
+ 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06,
+ 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x27,
+ 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f, 0x61,
+ 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05,
+ 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x1a,
+ 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a,
+ 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
+ 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x0d,
+ 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1, 0x02,
+ 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b, 0x75,
+ 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c, 0x65,
+ 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x18,
+ 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76,
+ 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x18,
+ 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d,
+ 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x12,
+ 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50,
- 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43,
- 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x41, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61,
- 0x73, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x09, 0x61,
- 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x6b, 0x0a, 0x16, 0x68, 0x69, 0x65, 0x72,
- 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
- 0x63, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x2e, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c,
- 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x15,
- 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x5d, 0x0a, 0x12, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x1f, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x53, 0x6f, 0x66,
- 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x03, 0xe0,
- 0x41, 0x01, 0x52, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x1a, 0x30, 0x0a, 0x07, 0x42, 0x69, 0x6c, 0x6c, 0x69, 0x6e, 0x67, 0x12,
- 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x65, 0x72, 0x50, 0x61, 0x79, 0x73, 0x1a, 0x87, 0x01, 0x0a, 0x04, 0x43, 0x6f, 0x72, 0x73, 0x12,
- 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f,
- 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12,
- 0x27, 0x0a, 0x0f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x5f,
- 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73,
- 0x1a, 0x5c, 0x0a, 0x0a, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e,
- 0x0a, 0x0f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c,
- 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52,
- 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x4b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x1a, 0xb1,
- 0x02, 0x0a, 0x09, 0x49, 0x61, 0x6d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x7b, 0x0a, 0x1b,
- 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6c,
- 0x65, 0x76, 0x65, 0x6c, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x49, 0x61, 0x6d,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52,
- 0x18, 0x75, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65,
- 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x76, 0x65,
- 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x16, 0x70, 0x75, 0x62,
- 0x6c, 0x69, 0x63, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x76, 0x65, 0x6e, 0x74,
- 0x69, 0x6f, 0x6e, 0x1a, 0x6d, 0x0a, 0x18, 0x55, 0x6e, 0x69, 0x66, 0x6f, 0x72, 0x6d, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12,
- 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63,
- 0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x69,
- 0x6d, 0x65, 0x1a, 0xdb, 0x07, 0x0a, 0x09, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65,
- 0x12, 0x3c, 0x0a, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79,
- 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f,
- 0x07, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c,
- 0x65, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c,
- 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f,
- 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
- 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
- 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73,
- 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x43, 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73,
- 0x88, 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62,
- 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a,
- 0x07, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01,
- 0x52, 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e,
- 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65,
- 0x77, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32,
- 0x0a, 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d,
- 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61,
- 0x73, 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65,
- 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01,
- 0x28, 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43,
- 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12,
- 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f,
- 0x72, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73,
- 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a,
- 0x1a, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63,
- 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28,
- 0x05, 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f,
- 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12,
- 0x47, 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61,
- 0x74, 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69,
- 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63,
- 0x68, 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
- 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69,
- 0x78, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73,
- 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64,
- 0x61, 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42,
- 0x15, 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f,
- 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65,
- 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x1a, 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c,
- 0x6f, 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f,
- 0x67, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e,
- 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66,
- 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d,
- 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a,
- 0x09, 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65,
- 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c,
- 0x65, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74,
+ 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63,
+ 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x04, 0x72, 0x75, 0x6c, 0x65, 0x1a, 0x8f, 0x07,
+ 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, 0x47, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x2e, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65,
+ 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x50, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x4c, 0x69,
+ 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x2e, 0x43, 0x6f, 0x6e,
+ 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f,
+ 0x6e, 0x1a, 0x41, 0x0a, 0x06, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12,
+ 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x1a, 0xa8, 0x05, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x05, 0x48, 0x00, 0x52, 0x07, 0x61, 0x67, 0x65, 0x44, 0x61, 0x79, 0x73, 0x88,
+ 0x01, 0x01, 0x12, 0x38, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x62, 0x65,
+ 0x66, 0x6f, 0x72, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x63,
+ 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x1c, 0x0a, 0x07,
+ 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52,
+ 0x06, 0x69, 0x73, 0x4c, 0x69, 0x76, 0x65, 0x88, 0x01, 0x01, 0x12, 0x31, 0x0a, 0x12, 0x6e, 0x75,
+ 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x48, 0x02, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x4e, 0x65, 0x77,
+ 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x88, 0x01, 0x01, 0x12, 0x32, 0x0a,
+ 0x15, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x73, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73,
+ 0x73, 0x12, 0x38, 0x0a, 0x16, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f,
+ 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x05, 0x48, 0x03, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x43, 0x75,
+ 0x73, 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x12, 0x63,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72,
+ 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x65, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74,
+ 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x40, 0x0a, 0x1a,
+ 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x6f, 0x6e, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05,
+ 0x48, 0x04, 0x52, 0x17, 0x64, 0x61, 0x79, 0x73, 0x53, 0x69, 0x6e, 0x63, 0x65, 0x4e, 0x6f, 0x6e,
+ 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x47,
+ 0x0a, 0x16, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x44, 0x61, 0x74,
+ 0x65, 0x52, 0x14, 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d,
+ 0x65, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x52,
+ 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x25,
+ 0x0a, 0x0e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78,
+ 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x73, 0x53,
+ 0x75, 0x66, 0x66, 0x69, 0x78, 0x42, 0x0b, 0x0a, 0x09, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x61,
+ 0x79, 0x73, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x69, 0x73, 0x5f, 0x6c, 0x69, 0x76, 0x65, 0x42, 0x15,
+ 0x0a, 0x13, 0x5f, 0x6e, 0x75, 0x6d, 0x5f, 0x6e, 0x65, 0x77, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73,
+ 0x69, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x42, 0x1d, 0x0a, 0x1b, 0x5f, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x63, 0x65, 0x5f,
+ 0x6e, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a,
+ 0x54, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f,
+ 0x67, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x6c, 0x6f, 0x67, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x6f, 0x67,
+ 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x6f, 0x67, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x50,
+ 0x72, 0x65, 0x66, 0x69, 0x78, 0x1a, 0xbb, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74,
+ 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x41, 0x0a, 0x0e, 0x65, 0x66, 0x66,
+ 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x65,
+ 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09,
+ 0x69, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x08, 0x69, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x72, 0x65, 0x74,
0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x48, 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d,
- 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01,
- 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65,
- 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62,
- 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c,
- 0x65, 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a,
- 0x10, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69,
- 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67,
- 0x65, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66,
- 0x6f, 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a,
- 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c,
- 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d,
- 0x64, 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02,
- 0x0a, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65,
- 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e,
- 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f,
- 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
- 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67,
- 0x67, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69,
- 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73,
- 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69,
- 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88,
- 0x01, 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48,
- 0x01, 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
- 0x65, 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61,
- 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42,
- 0x25, 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72,
- 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12,
- 0x1d, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39,
- 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
- 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
- 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x47, 0xea, 0x41, 0x44, 0x0a, 0x1d,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70,
- 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70,
- 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x7d, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x7d, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63,
+ 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x1a, 0xd3, 0x01, 0x0a, 0x10, 0x53, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x4d, 0x0a, 0x12, 0x72, 0x65, 0x74, 0x65,
+ 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48,
+ 0x00, 0x52, 0x11, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x46, 0x0a, 0x0e, 0x65, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x01, 0x52, 0x0d, 0x65,
+ 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x42,
+ 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x11, 0x0a, 0x0f, 0x5f, 0x65, 0x66, 0x66, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x26, 0x0a, 0x0a, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c,
+ 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
+ 0x64, 0x1a, 0x59, 0x0a, 0x07, 0x57, 0x65, 0x62, 0x73, 0x69, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10,
+ 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6d, 0x61, 0x69, 0x6e, 0x50, 0x61, 0x67, 0x65,
+ 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x5f, 0x66, 0x6f,
+ 0x75, 0x6e, 0x64, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
+ 0x6e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x50, 0x61, 0x67, 0x65, 0x1a, 0x3e, 0x0a, 0x15,
+ 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x50, 0x6c, 0x61, 0x63, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64,
+ 0x61, 0x74, 0x61, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xd6, 0x02, 0x0a,
+ 0x09, 0x41, 0x75, 0x74, 0x6f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e,
+ 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61,
+ 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x0b, 0x74, 0x6f, 0x67, 0x67, 0x6c, 0x65, 0x5f, 0x74,
+ 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x74, 0x6f, 0x67, 0x67,
+ 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x16, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x14, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e,
+ 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x88, 0x01,
+ 0x01, 0x12, 0x70, 0x0a, 0x22, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x01,
+ 0x52, 0x1e, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x42, 0x19, 0x0a, 0x17, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c,
+ 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x42, 0x25,
+ 0x0a, 0x23, 0x5f, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x6c, 0x5f, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x1a, 0x36, 0x0a, 0x15, 0x48, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63,
+ 0x68, 0x69, 0x63, 0x61, 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d,
+ 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42,
+ 0x03, 0xe0, 0x41, 0x01, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x1a, 0x39, 0x0a,
+ 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x58, 0xea, 0x41, 0x55, 0x0a, 0x1d, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d,
+ 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x7d, 0x2a, 0x07, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x32, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x41, 0x63, 0x63,
0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f,
0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e,
0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16,
@@ -8405,553 +8489,431 @@ var file_google_storage_v2_storage_proto_rawDesc = []byte{
0x72, 0x63, 0x33, 0x32, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x07, 0x48, 0x00, 0x52, 0x06, 0x63,
0x72, 0x63, 0x33, 0x32, 0x63, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x08, 0x6d, 0x64, 0x35, 0x5f,
0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x64, 0x35, 0x48,
- 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0xfe,
- 0x02, 0x0a, 0x0f, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61,
- 0x74, 0x61, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52,
- 0x08, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x49, 0x64, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x05, 0xfa,
- 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
- 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52,
- 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69,
- 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x13, 0x73, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69,
- 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x63,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52,
- 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x65,
- 0x74, 0x61, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x22,
- 0x85, 0x04, 0x0a, 0x12, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
- 0x19, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03,
- 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74,
- 0x61, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x1f,
- 0x0a, 0x0b, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12,
- 0x68, 0x0a, 0x11, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
- 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
- 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41,
- 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x61, 0x6d,
- 0x65, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x2a, 0x0a, 0x0e, 0x70, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x42,
- 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0d, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x46, 0x6f, 0x72,
- 0x6d, 0x61, 0x74, 0x1a, 0x43, 0x0a, 0x15, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x41, 0x74, 0x74,
- 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
- 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x7d, 0xea, 0x41, 0x7a, 0x0a, 0x29, 0x73,
+ 0x61, 0x73, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x63, 0x72, 0x63, 0x33, 0x32, 0x63, 0x22, 0x71,
+ 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a, 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c,
+ 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73,
+ 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53, 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65,
+ 0x73, 0x22, 0xbd, 0x0e, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52,
+ 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25, 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73,
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69,
- 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x4d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x7d, 0x2f, 0x6e, 0x6f,
- 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
- 0x73, 0x2f, 0x7b, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
- 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x7d, 0x22, 0x71, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f,
- 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x0a,
- 0x14, 0x65, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x6c, 0x67, 0x6f,
- 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x65, 0x6e, 0x63,
- 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d,
- 0x12, 0x28, 0x0a, 0x10, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x68, 0x61, 0x32, 0x35, 0x36, 0x5f, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x6b, 0x65, 0x79, 0x53,
- 0x68, 0x61, 0x32, 0x35, 0x36, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, 0xb6, 0x0d, 0x0a, 0x06, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d,
- 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x25,
- 0xe0, 0x41, 0x05, 0xfa, 0x41, 0x1f, 0x0a, 0x1d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a,
- 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61,
- 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03,
- 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63,
- 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65,
- 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a,
- 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63,
- 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e,
- 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65,
- 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a,
- 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72,
- 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61, 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10,
- 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c,
- 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74,
- 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
- 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e,
- 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b,
- 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x52, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c,
- 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f,
- 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09,
- 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73,
- 0x75, 0x6d, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79,
- 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f,
- 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
- 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06,
- 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
- 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69,
- 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f,
- 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70,
- 0x6f, 0x72, 0x61, 0x72, 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74,
- 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69,
- 0x6d, 0x65, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45,
- 0x78, 0x70, 0x69, 0x72, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74,
- 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f,
+ 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x1b, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41,
+ 0x05, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a,
+ 0x0d, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x23,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x00, 0x52, 0x0c, 0x72, 0x65, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0e,
+ 0x6d, 0x65, 0x74, 0x61, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x67,
+ 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x17,
+ 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x42, 0x03, 0xe0, 0x41,
+ 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69,
+ 0x6e, 0x67, 0x12, 0x2f, 0x0a, 0x13, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x64, 0x69,
+ 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x12, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x44, 0x69, 0x73, 0x70, 0x6f, 0x73, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68,
+ 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x38, 0x0a, 0x03, 0x61, 0x63, 0x6c, 0x18,
+ 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x52, 0x03, 0x61,
+ 0x63, 0x6c, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x6c, 0x61,
+ 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x40, 0x0a,
+ 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03,
+ 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12,
+ 0x44, 0x0a, 0x0d, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0c, 0x66, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e,
+ 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a,
+ 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x63, 0x6f,
+ 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0f, 0x20,
+ 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e,
+ 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x63, 0x68, 0x65, 0x63,
+ 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45,
- 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d,
- 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f,
- 0x6c, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e,
- 0x74, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a,
- 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e,
- 0x65, 0x72, 0x12, 0x56, 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65,
- 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72,
- 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75,
- 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73,
- 0x74, 0x6f, 0x6d, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x48, 0x01, 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f,
- 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0,
- 0x41, 0x03, 0x48, 0x02, 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
- 0x54, 0x69, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64,
- 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x3a, 0x02, 0x38, 0x01, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62,
- 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f,
- 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13,
- 0x0a, 0x11, 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74,
- 0x69, 0x6d, 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72,
- 0x6f, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12,
- 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12,
- 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74,
- 0x79, 0x5f, 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03,
- 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65,
- 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08,
- 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05,
- 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61,
- 0x69, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72,
- 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d,
- 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01,
- 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72,
- 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72,
- 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70,
- 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48,
- 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a,
- 0x0e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75,
- 0x6d, 0x62, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x35, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x6d,
- 0x61, 0x69, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0c, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22,
- 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69,
- 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79,
- 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a,
- 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
- 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74,
- 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74,
- 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e,
- 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xaa,
- 0x27, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e,
- 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f,
- 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x42,
+ 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x75, 0x6d, 0x73, 0x12,
+ 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d,
+ 0x65, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x6d, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x12, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x26, 0xfa, 0x41, 0x23, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d,
+ 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x52, 0x06, 0x6b, 0x6d, 0x73, 0x4b,
+ 0x65, 0x79, 0x12, 0x5a, 0x0a, 0x19, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18,
+ 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
+ 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x16, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x25,
+ 0x0a, 0x0e, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x68, 0x6f, 0x6c, 0x64,
+ 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72,
+ 0x79, 0x48, 0x6f, 0x6c, 0x64, 0x12, 0x4e, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x15,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x13, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x70, 0x69, 0x72,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74,
+ 0x61, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2d, 0x0a, 0x10, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x17,
+ 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x73,
+ 0x65, 0x64, 0x48, 0x6f, 0x6c, 0x64, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x05, 0x6f, 0x77, 0x6e,
+ 0x65, 0x72, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x77, 0x6e,
+ 0x65, 0x72, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x56,
+ 0x0a, 0x13, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x5f, 0x65, 0x6e, 0x63, 0x72, 0x79,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12,
- 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a,
- 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12,
- 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a,
- 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01,
- 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x12, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x65, 0x72, 0x45, 0x6e, 0x63, 0x72,
+ 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x4e, 0x0a, 0x10, 0x73, 0x6f, 0x66, 0x74, 0x5f, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x02,
+ 0x52, 0x0e, 0x73, 0x6f, 0x66, 0x74, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x12, 0x4e, 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65,
+ 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x48, 0x03,
+ 0x52, 0x0e, 0x68, 0x61, 0x72, 0x64, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65,
+ 0x88, 0x01, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x74, 0x6f, 0x6b,
+ 0x65, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x62, 0x61, 0x73,
+ 0x65, 0x64, 0x5f, 0x68, 0x6f, 0x6c, 0x64, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x73, 0x6f, 0x66, 0x74,
+ 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x42, 0x13, 0x0a, 0x11,
+ 0x5f, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x22, 0x97, 0x02, 0x0a, 0x13, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6c,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6c, 0x65, 0x12, 0x0e, 0x0a,
+ 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x16, 0x0a,
+ 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x65,
+ 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x22, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f,
+ 0x61, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x03, 0x52, 0x09,
+ 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x41, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74,
+ 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e,
+ 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x74, 0x61, 0x67, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x65, 0x74, 0x61, 0x67, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d,
+ 0x61, 0x69, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c,
+ 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x6a,
+ 0x65, 0x63, 0x74, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x52, 0x0b,
+ 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x22, 0x8e, 0x01, 0x0a, 0x13,
+ 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x01,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x65, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67,
+ 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e,
+ 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x48, 0x0a, 0x0b,
+ 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x65, 0x61, 0x6d, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x4e, 0x75, 0x6d, 0x62,
+ 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x74, 0x65, 0x61, 0x6d, 0x22, 0x3c, 0x0a, 0x05, 0x4f, 0x77, 0x6e, 0x65, 0x72, 0x12,
+ 0x16, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74,
+ 0x79, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x74, 0x69,
+ 0x74, 0x79, 0x49, 0x64, 0x22, 0x5f, 0x0a, 0x0c, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x27, 0x0a, 0x0f,
+ 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x4c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x32, 0xc1, 0x1e, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x12, 0x72, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74,
+ 0x79, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x15,
+ 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x6f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x22, 0x22, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x15, 0x12, 0x13, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xab, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61,
+ 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x58, 0xda, 0x41, 0x17, 0x70,
+ 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x16, 0x0a, 0x06,
+ 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x1e, 0x0a, 0x0e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x70,
+ 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x85, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a,
- 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a, 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3,
- 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b,
- 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47,
- 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49,
- 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
- 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73,
- 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a,
- 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
+ 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x27, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3,
+ 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0c,
+ 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x93, 0x01, 0x0a,
+ 0x19, 0x4c, 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e,
+ 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x33, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c,
+ 0x6f, 0x63, 0x6b, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
+ 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x26, 0xda, 0x41, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a,
+ 0x2a, 0x7d, 0x12, 0x75, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69,
0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e,
- 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
+ 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda,
- 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d,
- 0x12, 0xd7, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d,
- 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50,
- 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76,
- 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0xda, 0x41,
- 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73,
- 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x4f, 0x12, 0x17, 0x0a, 0x08, 0x72,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2a, 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55,
- 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37,
- 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
- 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63,
- 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x9f, 0x01, 0x0a, 0x18, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
- 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
- 0x22, 0x37, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12,
- 0x28, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
- 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xa8, 0x01, 0x0a, 0x15, 0x47, 0x65,
- 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x37, 0xda, 0x41, 0x04,
- 0x6e, 0x61, 0x6d, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a,
- 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0xb1, 0x01, 0x0a, 0x18, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e,
- 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69,
- 0x67, 0x12, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
- 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4e, 0x6f, 0x74, 0x69,
- 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69,
- 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, 0xda, 0x41,
- 0x1a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
- 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x4c, 0x69, 0x73,
- 0x74, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e,
- 0x66, 0x69, 0x67, 0x73, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4e, 0x6f, 0x74,
- 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74,
- 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
- 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a, 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65,
- 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76,
- 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23,
- 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d,
- 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62,
- 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba,
- 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62,
- 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63,
- 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c,
- 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09,
- 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b,
+ 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x2a, 0xda,
+ 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19,
+ 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7c, 0x0a, 0x0c, 0x53, 0x65, 0x74,
+ 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d,
+ 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f,
+ 0x6c, 0x69, 0x63, 0x79, 0x22, 0x31, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17,
+ 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x96, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74,
+ 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54,
+ 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d,
+ 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0xaa, 0x01, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x8a, 0xd3, 0xe4,
+ 0x93, 0x02, 0x8c, 0x01, 0x12, 0x17, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x34, 0x0a,
+ 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x7b, 0x62, 0x75, 0x63, 0x6b,
0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75,
- 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09,
- 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65,
- 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19,
+ 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73,
+ 0x2f, 0x2a, 0x2a, 0x12, 0x3b, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+ 0x2f, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
+ 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x6d,
+ 0x61, 0x6e, 0x61, 0x67, 0x65, 0x64, 0x46, 0x6f, 0x6c, 0x64, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2a,
+ 0x12, 0x8a, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x22, 0x37, 0xda, 0x41, 0x12, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x1c, 0x12, 0x1a, 0x0a, 0x0b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x7e, 0x0a,
+ 0x0d, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27,
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75,
+ 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x22, 0x29, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01,
+ 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
+ 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x48,
+ 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x8d, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73,
+ 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52,
+ 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x38,
+ 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
+ 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xba, 0x01, 0x0a, 0x14, 0x43, 0x61, 0x6e,
+ 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74,
+ 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75,
+ 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x52, 0x65, 0x73, 0x75,
+ 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64,
+ 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64,
+ 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f,
+ 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f,
+ 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
+ 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01,
+ 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
+ 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75,
0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0xda, 0x41, 0x18, 0x62, 0x75,
0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65,
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0xa5, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x61,
- 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
- 0x48, 0xda, 0x41, 0x0d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0xda, 0x41, 0x18, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2c, 0x6f, 0x62, 0x6a, 0x65, 0x63,
- 0x74, 0x2c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x8a, 0xd3, 0xe4, 0x93,
- 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62,
- 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22,
- 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d,
- 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b,
- 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72,
+ 0x2a, 0x2a, 0x7d, 0x30, 0x01, 0x12, 0x99, 0x01, 0x0a, 0x0e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65,
+ 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64,
+ 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x52, 0x65, 0x61, 0x64, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2e, 0x8a,
+ 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x0a, 0x17, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x28, 0x01, 0x30,
+ 0x01, 0x12, 0x8c, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x39, 0xda, 0x41, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74,
+ 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x1e, 0x12, 0x1c, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x62, 0x75, 0x63,
+ 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d,
+ 0x12, 0x60, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
+ 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
+ 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+ 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72,
0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x6e, 0x0a, 0x0f,
- 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12,
- 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a,
- 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42,
- 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a,
- 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65,
- 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0xda, 0x41, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06,
- 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d,
- 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f,
- 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74,
- 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34, 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64,
- 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65,
- 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae,
- 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c,
- 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74,
- 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52,
- 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a,
- 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70,
- 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b,
- 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12,
- 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
- 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
- 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda,
- 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02,
- 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20,
- 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73,
- 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a,
- 0x12, 0x80, 0x01, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x41,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x95, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
- 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
- 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e,
- 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0xda, 0x41, 0x1d, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x2c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63,
- 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b,
- 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x77, 0x0a, 0x0d, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x25, 0xda,
- 0x41, 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f,
- 0x6a, 0x65, 0x63, 0x74, 0x12, 0x7d, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b,
- 0x65, 0x79, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65,
- 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x6d, 0x61,
- 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0xda, 0x41,
- 0x11, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x69, 0x64, 0x2c, 0x70, 0x72, 0x6f, 0x6a, 0x65,
- 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x7c, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b,
- 0x65, 0x79, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x67, 0x6f,
+ 0x1a, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x69, 0x64, 0x69, 0x57, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01,
+ 0x30, 0x01, 0x12, 0x84, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x12, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72,
+ 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63,
+ 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x22, 0x26, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x8a, 0xd3, 0xe4, 0x93,
+ 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x7b, 0x62,
+ 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0x98, 0x01, 0x0a, 0x0d, 0x52, 0x65,
+ 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
- 0x4c, 0x69, 0x73, 0x74, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74,
- 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x0b, 0x12, 0x09, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63,
- 0x74, 0x12, 0x9d, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d, 0x61, 0x63,
- 0x4b, 0x65, 0x79, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x48, 0x6d,
- 0x61, 0x63, 0x4b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x2e, 0x48, 0x6d, 0x61, 0x63, 0x4b, 0x65, 0x79, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
- 0x22, 0x3f, 0xda, 0x41, 0x14, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2c, 0x75, 0x70,
- 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x22, 0x12,
- 0x20, 0x0a, 0x10, 0x68, 0x6d, 0x61, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x6a,
- 0x65, 0x63, 0x74, 0x12, 0x0c, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x2a, 0x2a,
- 0x7d, 0x1a, 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a,
- 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c,
- 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f,
- 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72,
- 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f,
- 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72,
- 0x61, 0x67, 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
- 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68,
- 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64,
- 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77,
- 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d,
- 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65,
- 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41,
- 0x78, 0x0a, 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74,
- 0x6f, 0x4b, 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b,
- 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65,
- 0x79, 0x52, 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67,
- 0x7d, 0x2f, 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72,
- 0x79, 0x70, 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+ 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74,
+ 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3a, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x34,
+ 0x12, 0x0f, 0x0a, 0x0d, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x12, 0x21, 0x0a, 0x12, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74,
+ 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x13, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65,
+ 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32,
- 0x42, 0x0c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x3e, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63,
- 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f,
- 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62,
- 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x53, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6d, 0x61, 0x62, 0x6c, 0x65, 0x57, 0x72,
+ 0x69, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x8a, 0xd3, 0xe4,
+ 0x93, 0x02, 0x32, 0x12, 0x30, 0x0a, 0x21, 0x77, 0x72, 0x69, 0x74, 0x65, 0x5f, 0x6f, 0x62, 0x6a,
+ 0x65, 0x63, 0x74, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x12, 0xae, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x65, 0x72, 0x79, 0x57,
+ 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51,
+ 0x75, 0x65, 0x72, 0x79, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79,
+ 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x22, 0x41, 0xda, 0x41, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x69,
+ 0x64, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x70, 0x6c, 0x6f, 0x61,
+ 0x64, 0x5f, 0x69, 0x64, 0x12, 0x20, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x73,
+ 0x2f, 0x2a, 0x7d, 0x2f, 0x2a, 0x2a, 0x12, 0x96, 0x01, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x4f,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73,
+ 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x76, 0x65, 0x4f, 0x62,
+ 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x2e,
+ 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x47, 0xda, 0x41, 0x27, 0x62, 0x75, 0x63, 0x6b, 0x65,
+ 0x74, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2c,
+ 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x62, 0x6a, 0x65,
+ 0x63, 0x74, 0x8a, 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x0b, 0x7b, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x3d, 0x2a, 0x2a, 0x7d, 0x1a,
+ 0xa7, 0x02, 0xca, 0x41, 0x16, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x8a, 0x02, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63,
+ 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74,
+ 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c,
+ 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61,
+ 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77,
+ 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67,
+ 0x65, 0x2e, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2c, 0x68,
+ 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x64,
+ 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f,
+ 0x6e, 0x6c, 0x79, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61,
+ 0x75, 0x74, 0x68, 0x2f, 0x64, 0x65, 0x76, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x72,
+ 0x65, 0x61, 0x64, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x42, 0xe2, 0x01, 0xea, 0x41, 0x78, 0x0a,
+ 0x21, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x6b, 0x6d, 0x73, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x43, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b,
+ 0x65, 0x79, 0x12, 0x53, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72,
+ 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x7b, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x2f, 0x6b, 0x65, 0x79, 0x52,
+ 0x69, 0x6e, 0x67, 0x73, 0x2f, 0x7b, 0x6b, 0x65, 0x79, 0x5f, 0x72, 0x69, 0x6e, 0x67, 0x7d, 0x2f,
+ 0x63, 0x72, 0x79, 0x70, 0x74, 0x6f, 0x4b, 0x65, 0x79, 0x73, 0x2f, 0x7b, 0x63, 0x72, 0x79, 0x70,
+ 0x74, 0x6f, 0x5f, 0x6b, 0x65, 0x79, 0x7d, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x76, 0x32, 0x42, 0x0c,
+ 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e,
+ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x73, 0x74, 0x6f, 0x72, 0x61,
+ 0x67, 0x65, 0x70, 0x62, 0x3b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x70, 0x62, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -8967,8 +8929,8 @@ func file_google_storage_v2_storage_proto_rawDescGZIP() []byte {
}
var file_google_storage_v2_storage_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
-var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 79)
-var file_google_storage_v2_storage_proto_goTypes = []interface{}{
+var file_google_storage_v2_storage_proto_msgTypes = make([]protoimpl.MessageInfo, 75)
+var file_google_storage_v2_storage_proto_goTypes = []any{
(ServiceConstants_Values)(0), // 0: google.storage.v2.ServiceConstants.Values
(*DeleteBucketRequest)(nil), // 1: google.storage.v2.DeleteBucketRequest
(*GetBucketRequest)(nil), // 2: google.storage.v2.GetBucketRequest
@@ -8977,262 +8939,252 @@ var file_google_storage_v2_storage_proto_goTypes = []interface{}{
(*ListBucketsResponse)(nil), // 5: google.storage.v2.ListBucketsResponse
(*LockBucketRetentionPolicyRequest)(nil), // 6: google.storage.v2.LockBucketRetentionPolicyRequest
(*UpdateBucketRequest)(nil), // 7: google.storage.v2.UpdateBucketRequest
- (*DeleteNotificationConfigRequest)(nil), // 8: google.storage.v2.DeleteNotificationConfigRequest
- (*GetNotificationConfigRequest)(nil), // 9: google.storage.v2.GetNotificationConfigRequest
- (*CreateNotificationConfigRequest)(nil), // 10: google.storage.v2.CreateNotificationConfigRequest
- (*ListNotificationConfigsRequest)(nil), // 11: google.storage.v2.ListNotificationConfigsRequest
- (*ListNotificationConfigsResponse)(nil), // 12: google.storage.v2.ListNotificationConfigsResponse
- (*ComposeObjectRequest)(nil), // 13: google.storage.v2.ComposeObjectRequest
- (*DeleteObjectRequest)(nil), // 14: google.storage.v2.DeleteObjectRequest
- (*RestoreObjectRequest)(nil), // 15: google.storage.v2.RestoreObjectRequest
- (*CancelResumableWriteRequest)(nil), // 16: google.storage.v2.CancelResumableWriteRequest
- (*CancelResumableWriteResponse)(nil), // 17: google.storage.v2.CancelResumableWriteResponse
- (*ReadObjectRequest)(nil), // 18: google.storage.v2.ReadObjectRequest
- (*GetObjectRequest)(nil), // 19: google.storage.v2.GetObjectRequest
- (*ReadObjectResponse)(nil), // 20: google.storage.v2.ReadObjectResponse
- (*WriteObjectSpec)(nil), // 21: google.storage.v2.WriteObjectSpec
- (*WriteObjectRequest)(nil), // 22: google.storage.v2.WriteObjectRequest
- (*WriteObjectResponse)(nil), // 23: google.storage.v2.WriteObjectResponse
- (*BidiWriteObjectRequest)(nil), // 24: google.storage.v2.BidiWriteObjectRequest
- (*BidiWriteObjectResponse)(nil), // 25: google.storage.v2.BidiWriteObjectResponse
- (*ListObjectsRequest)(nil), // 26: google.storage.v2.ListObjectsRequest
- (*QueryWriteStatusRequest)(nil), // 27: google.storage.v2.QueryWriteStatusRequest
- (*QueryWriteStatusResponse)(nil), // 28: google.storage.v2.QueryWriteStatusResponse
- (*RewriteObjectRequest)(nil), // 29: google.storage.v2.RewriteObjectRequest
- (*RewriteResponse)(nil), // 30: google.storage.v2.RewriteResponse
- (*StartResumableWriteRequest)(nil), // 31: google.storage.v2.StartResumableWriteRequest
- (*StartResumableWriteResponse)(nil), // 32: google.storage.v2.StartResumableWriteResponse
- (*UpdateObjectRequest)(nil), // 33: google.storage.v2.UpdateObjectRequest
- (*GetServiceAccountRequest)(nil), // 34: google.storage.v2.GetServiceAccountRequest
- (*CreateHmacKeyRequest)(nil), // 35: google.storage.v2.CreateHmacKeyRequest
- (*CreateHmacKeyResponse)(nil), // 36: google.storage.v2.CreateHmacKeyResponse
- (*DeleteHmacKeyRequest)(nil), // 37: google.storage.v2.DeleteHmacKeyRequest
- (*GetHmacKeyRequest)(nil), // 38: google.storage.v2.GetHmacKeyRequest
- (*ListHmacKeysRequest)(nil), // 39: google.storage.v2.ListHmacKeysRequest
- (*ListHmacKeysResponse)(nil), // 40: google.storage.v2.ListHmacKeysResponse
- (*UpdateHmacKeyRequest)(nil), // 41: google.storage.v2.UpdateHmacKeyRequest
+ (*ComposeObjectRequest)(nil), // 8: google.storage.v2.ComposeObjectRequest
+ (*DeleteObjectRequest)(nil), // 9: google.storage.v2.DeleteObjectRequest
+ (*RestoreObjectRequest)(nil), // 10: google.storage.v2.RestoreObjectRequest
+ (*CancelResumableWriteRequest)(nil), // 11: google.storage.v2.CancelResumableWriteRequest
+ (*CancelResumableWriteResponse)(nil), // 12: google.storage.v2.CancelResumableWriteResponse
+ (*ReadObjectRequest)(nil), // 13: google.storage.v2.ReadObjectRequest
+ (*GetObjectRequest)(nil), // 14: google.storage.v2.GetObjectRequest
+ (*ReadObjectResponse)(nil), // 15: google.storage.v2.ReadObjectResponse
+ (*BidiReadObjectSpec)(nil), // 16: google.storage.v2.BidiReadObjectSpec
+ (*BidiReadObjectRequest)(nil), // 17: google.storage.v2.BidiReadObjectRequest
+ (*BidiReadObjectResponse)(nil), // 18: google.storage.v2.BidiReadObjectResponse
+ (*BidiReadObjectRedirectedError)(nil), // 19: google.storage.v2.BidiReadObjectRedirectedError
+ (*BidiWriteObjectRedirectedError)(nil), // 20: google.storage.v2.BidiWriteObjectRedirectedError
+ (*BidiReadObjectError)(nil), // 21: google.storage.v2.BidiReadObjectError
+ (*ReadRangeError)(nil), // 22: google.storage.v2.ReadRangeError
+ (*ReadRange)(nil), // 23: google.storage.v2.ReadRange
+ (*ObjectRangeData)(nil), // 24: google.storage.v2.ObjectRangeData
+ (*BidiReadHandle)(nil), // 25: google.storage.v2.BidiReadHandle
+ (*BidiWriteHandle)(nil), // 26: google.storage.v2.BidiWriteHandle
+ (*WriteObjectSpec)(nil), // 27: google.storage.v2.WriteObjectSpec
+ (*WriteObjectRequest)(nil), // 28: google.storage.v2.WriteObjectRequest
+ (*WriteObjectResponse)(nil), // 29: google.storage.v2.WriteObjectResponse
+ (*AppendObjectSpec)(nil), // 30: google.storage.v2.AppendObjectSpec
+ (*BidiWriteObjectRequest)(nil), // 31: google.storage.v2.BidiWriteObjectRequest
+ (*BidiWriteObjectResponse)(nil), // 32: google.storage.v2.BidiWriteObjectResponse
+ (*ListObjectsRequest)(nil), // 33: google.storage.v2.ListObjectsRequest
+ (*QueryWriteStatusRequest)(nil), // 34: google.storage.v2.QueryWriteStatusRequest
+ (*QueryWriteStatusResponse)(nil), // 35: google.storage.v2.QueryWriteStatusResponse
+ (*RewriteObjectRequest)(nil), // 36: google.storage.v2.RewriteObjectRequest
+ (*RewriteResponse)(nil), // 37: google.storage.v2.RewriteResponse
+ (*MoveObjectRequest)(nil), // 38: google.storage.v2.MoveObjectRequest
+ (*StartResumableWriteRequest)(nil), // 39: google.storage.v2.StartResumableWriteRequest
+ (*StartResumableWriteResponse)(nil), // 40: google.storage.v2.StartResumableWriteResponse
+ (*UpdateObjectRequest)(nil), // 41: google.storage.v2.UpdateObjectRequest
(*CommonObjectRequestParams)(nil), // 42: google.storage.v2.CommonObjectRequestParams
(*ServiceConstants)(nil), // 43: google.storage.v2.ServiceConstants
(*Bucket)(nil), // 44: google.storage.v2.Bucket
(*BucketAccessControl)(nil), // 45: google.storage.v2.BucketAccessControl
(*ChecksummedData)(nil), // 46: google.storage.v2.ChecksummedData
(*ObjectChecksums)(nil), // 47: google.storage.v2.ObjectChecksums
- (*HmacKeyMetadata)(nil), // 48: google.storage.v2.HmacKeyMetadata
- (*NotificationConfig)(nil), // 49: google.storage.v2.NotificationConfig
- (*CustomerEncryption)(nil), // 50: google.storage.v2.CustomerEncryption
- (*Object)(nil), // 51: google.storage.v2.Object
- (*ObjectAccessControl)(nil), // 52: google.storage.v2.ObjectAccessControl
- (*ListObjectsResponse)(nil), // 53: google.storage.v2.ListObjectsResponse
- (*ProjectTeam)(nil), // 54: google.storage.v2.ProjectTeam
- (*ServiceAccount)(nil), // 55: google.storage.v2.ServiceAccount
- (*Owner)(nil), // 56: google.storage.v2.Owner
- (*ContentRange)(nil), // 57: google.storage.v2.ContentRange
- (*ComposeObjectRequest_SourceObject)(nil), // 58: google.storage.v2.ComposeObjectRequest.SourceObject
- (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 59: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
- (*Bucket_Billing)(nil), // 60: google.storage.v2.Bucket.Billing
- (*Bucket_Cors)(nil), // 61: google.storage.v2.Bucket.Cors
- (*Bucket_Encryption)(nil), // 62: google.storage.v2.Bucket.Encryption
- (*Bucket_IamConfig)(nil), // 63: google.storage.v2.Bucket.IamConfig
- (*Bucket_Lifecycle)(nil), // 64: google.storage.v2.Bucket.Lifecycle
- (*Bucket_Logging)(nil), // 65: google.storage.v2.Bucket.Logging
- (*Bucket_RetentionPolicy)(nil), // 66: google.storage.v2.Bucket.RetentionPolicy
- (*Bucket_SoftDeletePolicy)(nil), // 67: google.storage.v2.Bucket.SoftDeletePolicy
- (*Bucket_Versioning)(nil), // 68: google.storage.v2.Bucket.Versioning
- (*Bucket_Website)(nil), // 69: google.storage.v2.Bucket.Website
- (*Bucket_CustomPlacementConfig)(nil), // 70: google.storage.v2.Bucket.CustomPlacementConfig
- (*Bucket_Autoclass)(nil), // 71: google.storage.v2.Bucket.Autoclass
- (*Bucket_HierarchicalNamespace)(nil), // 72: google.storage.v2.Bucket.HierarchicalNamespace
- nil, // 73: google.storage.v2.Bucket.LabelsEntry
- (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 74: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
- (*Bucket_Lifecycle_Rule)(nil), // 75: google.storage.v2.Bucket.Lifecycle.Rule
- (*Bucket_Lifecycle_Rule_Action)(nil), // 76: google.storage.v2.Bucket.Lifecycle.Rule.Action
- (*Bucket_Lifecycle_Rule_Condition)(nil), // 77: google.storage.v2.Bucket.Lifecycle.Rule.Condition
- nil, // 78: google.storage.v2.NotificationConfig.CustomAttributesEntry
- nil, // 79: google.storage.v2.Object.MetadataEntry
- (*fieldmaskpb.FieldMask)(nil), // 80: google.protobuf.FieldMask
- (*timestamppb.Timestamp)(nil), // 81: google.protobuf.Timestamp
- (*durationpb.Duration)(nil), // 82: google.protobuf.Duration
- (*date.Date)(nil), // 83: google.type.Date
- (*iampb.GetIamPolicyRequest)(nil), // 84: google.iam.v1.GetIamPolicyRequest
- (*iampb.SetIamPolicyRequest)(nil), // 85: google.iam.v1.SetIamPolicyRequest
- (*iampb.TestIamPermissionsRequest)(nil), // 86: google.iam.v1.TestIamPermissionsRequest
- (*emptypb.Empty)(nil), // 87: google.protobuf.Empty
- (*iampb.Policy)(nil), // 88: google.iam.v1.Policy
- (*iampb.TestIamPermissionsResponse)(nil), // 89: google.iam.v1.TestIamPermissionsResponse
+ (*CustomerEncryption)(nil), // 48: google.storage.v2.CustomerEncryption
+ (*Object)(nil), // 49: google.storage.v2.Object
+ (*ObjectAccessControl)(nil), // 50: google.storage.v2.ObjectAccessControl
+ (*ListObjectsResponse)(nil), // 51: google.storage.v2.ListObjectsResponse
+ (*ProjectTeam)(nil), // 52: google.storage.v2.ProjectTeam
+ (*Owner)(nil), // 53: google.storage.v2.Owner
+ (*ContentRange)(nil), // 54: google.storage.v2.ContentRange
+ (*ComposeObjectRequest_SourceObject)(nil), // 55: google.storage.v2.ComposeObjectRequest.SourceObject
+ (*ComposeObjectRequest_SourceObject_ObjectPreconditions)(nil), // 56: google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
+ (*Bucket_Billing)(nil), // 57: google.storage.v2.Bucket.Billing
+ (*Bucket_Cors)(nil), // 58: google.storage.v2.Bucket.Cors
+ (*Bucket_Encryption)(nil), // 59: google.storage.v2.Bucket.Encryption
+ (*Bucket_IamConfig)(nil), // 60: google.storage.v2.Bucket.IamConfig
+ (*Bucket_Lifecycle)(nil), // 61: google.storage.v2.Bucket.Lifecycle
+ (*Bucket_Logging)(nil), // 62: google.storage.v2.Bucket.Logging
+ (*Bucket_RetentionPolicy)(nil), // 63: google.storage.v2.Bucket.RetentionPolicy
+ (*Bucket_SoftDeletePolicy)(nil), // 64: google.storage.v2.Bucket.SoftDeletePolicy
+ (*Bucket_Versioning)(nil), // 65: google.storage.v2.Bucket.Versioning
+ (*Bucket_Website)(nil), // 66: google.storage.v2.Bucket.Website
+ (*Bucket_CustomPlacementConfig)(nil), // 67: google.storage.v2.Bucket.CustomPlacementConfig
+ (*Bucket_Autoclass)(nil), // 68: google.storage.v2.Bucket.Autoclass
+ (*Bucket_HierarchicalNamespace)(nil), // 69: google.storage.v2.Bucket.HierarchicalNamespace
+ nil, // 70: google.storage.v2.Bucket.LabelsEntry
+ (*Bucket_IamConfig_UniformBucketLevelAccess)(nil), // 71: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
+ (*Bucket_Lifecycle_Rule)(nil), // 72: google.storage.v2.Bucket.Lifecycle.Rule
+ (*Bucket_Lifecycle_Rule_Action)(nil), // 73: google.storage.v2.Bucket.Lifecycle.Rule.Action
+ (*Bucket_Lifecycle_Rule_Condition)(nil), // 74: google.storage.v2.Bucket.Lifecycle.Rule.Condition
+ nil, // 75: google.storage.v2.Object.MetadataEntry
+ (*fieldmaskpb.FieldMask)(nil), // 76: google.protobuf.FieldMask
+ (*status.Status)(nil), // 77: google.rpc.Status
+ (*timestamppb.Timestamp)(nil), // 78: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 79: google.protobuf.Duration
+ (*date.Date)(nil), // 80: google.type.Date
+ (*iampb.GetIamPolicyRequest)(nil), // 81: google.iam.v1.GetIamPolicyRequest
+ (*iampb.SetIamPolicyRequest)(nil), // 82: google.iam.v1.SetIamPolicyRequest
+ (*iampb.TestIamPermissionsRequest)(nil), // 83: google.iam.v1.TestIamPermissionsRequest
+ (*emptypb.Empty)(nil), // 84: google.protobuf.Empty
+ (*iampb.Policy)(nil), // 85: google.iam.v1.Policy
+ (*iampb.TestIamPermissionsResponse)(nil), // 86: google.iam.v1.TestIamPermissionsResponse
}
var file_google_storage_v2_storage_proto_depIdxs = []int32{
- 80, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 76, // 0: google.storage.v2.GetBucketRequest.read_mask:type_name -> google.protobuf.FieldMask
44, // 1: google.storage.v2.CreateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
- 80, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 76, // 2: google.storage.v2.ListBucketsRequest.read_mask:type_name -> google.protobuf.FieldMask
44, // 3: google.storage.v2.ListBucketsResponse.buckets:type_name -> google.storage.v2.Bucket
44, // 4: google.storage.v2.UpdateBucketRequest.bucket:type_name -> google.storage.v2.Bucket
- 80, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask
- 49, // 6: google.storage.v2.CreateNotificationConfigRequest.notification_config:type_name -> google.storage.v2.NotificationConfig
- 49, // 7: google.storage.v2.ListNotificationConfigsResponse.notification_configs:type_name -> google.storage.v2.NotificationConfig
- 51, // 8: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
- 58, // 9: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
- 42, // 10: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 11: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 12: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 42, // 13: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 42, // 14: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 80, // 15: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
- 42, // 16: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 80, // 17: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
- 46, // 18: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 19: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 57, // 20: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
- 51, // 21: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
- 51, // 22: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
- 21, // 23: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 46, // 24: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 25: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 26: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 27: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
- 21, // 28: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 46, // 29: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
- 47, // 30: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 42, // 31: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 32: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
- 80, // 33: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
- 42, // 34: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 51, // 35: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
- 51, // 36: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
- 42, // 37: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 38: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 51, // 39: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
- 21, // 40: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
- 42, // 41: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 47, // 42: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
- 51, // 43: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
- 80, // 44: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
- 42, // 45: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
- 48, // 46: google.storage.v2.CreateHmacKeyResponse.metadata:type_name -> google.storage.v2.HmacKeyMetadata
- 48, // 47: google.storage.v2.ListHmacKeysResponse.hmac_keys:type_name -> google.storage.v2.HmacKeyMetadata
- 48, // 48: google.storage.v2.UpdateHmacKeyRequest.hmac_key:type_name -> google.storage.v2.HmacKeyMetadata
- 80, // 49: google.storage.v2.UpdateHmacKeyRequest.update_mask:type_name -> google.protobuf.FieldMask
- 45, // 50: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
- 52, // 51: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
- 64, // 52: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
- 81, // 53: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
- 61, // 54: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
- 81, // 55: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp
- 73, // 56: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry
- 69, // 57: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
- 68, // 58: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
- 65, // 59: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
- 56, // 60: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
- 62, // 61: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
- 60, // 62: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
- 66, // 63: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
- 63, // 64: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig
- 70, // 65: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig
- 71, // 66: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
- 72, // 67: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace
- 67, // 68: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy
- 54, // 69: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
- 81, // 70: google.storage.v2.HmacKeyMetadata.create_time:type_name -> google.protobuf.Timestamp
- 81, // 71: google.storage.v2.HmacKeyMetadata.update_time:type_name -> google.protobuf.Timestamp
- 78, // 72: google.storage.v2.NotificationConfig.custom_attributes:type_name -> google.storage.v2.NotificationConfig.CustomAttributesEntry
- 52, // 73: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
- 81, // 74: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
- 81, // 75: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
- 47, // 76: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
- 81, // 77: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
- 81, // 78: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
- 81, // 79: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
- 79, // 80: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
- 56, // 81: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
- 50, // 82: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
- 81, // 83: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
- 81, // 84: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
- 81, // 85: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
- 54, // 86: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
- 51, // 87: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
- 59, // 88: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
- 74, // 89: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
- 75, // 90: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
- 81, // 91: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp
- 82, // 92: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration
- 82, // 93: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration
- 81, // 94: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp
- 81, // 95: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp
- 81, // 96: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp
- 81, // 97: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp
- 76, // 98: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action
- 77, // 99: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition
- 83, // 100: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date
- 83, // 101: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date
- 83, // 102: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date
- 1, // 103: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest
- 2, // 104: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest
- 3, // 105: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest
- 4, // 106: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest
- 6, // 107: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest
- 84, // 108: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
- 85, // 109: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
- 86, // 110: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
- 7, // 111: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest
- 8, // 112: google.storage.v2.Storage.DeleteNotificationConfig:input_type -> google.storage.v2.DeleteNotificationConfigRequest
- 9, // 113: google.storage.v2.Storage.GetNotificationConfig:input_type -> google.storage.v2.GetNotificationConfigRequest
- 10, // 114: google.storage.v2.Storage.CreateNotificationConfig:input_type -> google.storage.v2.CreateNotificationConfigRequest
- 11, // 115: google.storage.v2.Storage.ListNotificationConfigs:input_type -> google.storage.v2.ListNotificationConfigsRequest
- 13, // 116: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
- 14, // 117: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
- 15, // 118: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
- 16, // 119: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
- 19, // 120: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
- 18, // 121: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
- 33, // 122: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
- 22, // 123: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
- 24, // 124: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
- 26, // 125: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
- 29, // 126: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
- 31, // 127: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
- 27, // 128: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
- 34, // 129: google.storage.v2.Storage.GetServiceAccount:input_type -> google.storage.v2.GetServiceAccountRequest
- 35, // 130: google.storage.v2.Storage.CreateHmacKey:input_type -> google.storage.v2.CreateHmacKeyRequest
- 37, // 131: google.storage.v2.Storage.DeleteHmacKey:input_type -> google.storage.v2.DeleteHmacKeyRequest
- 38, // 132: google.storage.v2.Storage.GetHmacKey:input_type -> google.storage.v2.GetHmacKeyRequest
- 39, // 133: google.storage.v2.Storage.ListHmacKeys:input_type -> google.storage.v2.ListHmacKeysRequest
- 41, // 134: google.storage.v2.Storage.UpdateHmacKey:input_type -> google.storage.v2.UpdateHmacKeyRequest
- 87, // 135: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty
- 44, // 136: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
- 44, // 137: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
- 5, // 138: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse
- 44, // 139: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
- 88, // 140: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy
- 88, // 141: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy
- 89, // 142: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
- 44, // 143: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
- 87, // 144: google.storage.v2.Storage.DeleteNotificationConfig:output_type -> google.protobuf.Empty
- 49, // 145: google.storage.v2.Storage.GetNotificationConfig:output_type -> google.storage.v2.NotificationConfig
- 49, // 146: google.storage.v2.Storage.CreateNotificationConfig:output_type -> google.storage.v2.NotificationConfig
- 12, // 147: google.storage.v2.Storage.ListNotificationConfigs:output_type -> google.storage.v2.ListNotificationConfigsResponse
- 51, // 148: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
- 87, // 149: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
- 51, // 150: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
- 17, // 151: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
- 51, // 152: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
- 20, // 153: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
- 51, // 154: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
- 23, // 155: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
- 25, // 156: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
- 53, // 157: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
- 30, // 158: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
- 32, // 159: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
- 28, // 160: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
- 55, // 161: google.storage.v2.Storage.GetServiceAccount:output_type -> google.storage.v2.ServiceAccount
- 36, // 162: google.storage.v2.Storage.CreateHmacKey:output_type -> google.storage.v2.CreateHmacKeyResponse
- 87, // 163: google.storage.v2.Storage.DeleteHmacKey:output_type -> google.protobuf.Empty
- 48, // 164: google.storage.v2.Storage.GetHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
- 40, // 165: google.storage.v2.Storage.ListHmacKeys:output_type -> google.storage.v2.ListHmacKeysResponse
- 48, // 166: google.storage.v2.Storage.UpdateHmacKey:output_type -> google.storage.v2.HmacKeyMetadata
- 135, // [135:167] is the sub-list for method output_type
- 103, // [103:135] is the sub-list for method input_type
- 103, // [103:103] is the sub-list for extension type_name
- 103, // [103:103] is the sub-list for extension extendee
- 0, // [0:103] is the sub-list for field type_name
+ 76, // 5: google.storage.v2.UpdateBucketRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 49, // 6: google.storage.v2.ComposeObjectRequest.destination:type_name -> google.storage.v2.Object
+ 55, // 7: google.storage.v2.ComposeObjectRequest.source_objects:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject
+ 42, // 8: google.storage.v2.ComposeObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 47, // 9: google.storage.v2.ComposeObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 42, // 10: google.storage.v2.DeleteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 42, // 11: google.storage.v2.RestoreObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 42, // 12: google.storage.v2.ReadObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 76, // 13: google.storage.v2.ReadObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 42, // 14: google.storage.v2.GetObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 76, // 15: google.storage.v2.GetObjectRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 46, // 16: google.storage.v2.ReadObjectResponse.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 47, // 17: google.storage.v2.ReadObjectResponse.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 54, // 18: google.storage.v2.ReadObjectResponse.content_range:type_name -> google.storage.v2.ContentRange
+ 49, // 19: google.storage.v2.ReadObjectResponse.metadata:type_name -> google.storage.v2.Object
+ 42, // 20: google.storage.v2.BidiReadObjectSpec.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 76, // 21: google.storage.v2.BidiReadObjectSpec.read_mask:type_name -> google.protobuf.FieldMask
+ 25, // 22: google.storage.v2.BidiReadObjectSpec.read_handle:type_name -> google.storage.v2.BidiReadHandle
+ 16, // 23: google.storage.v2.BidiReadObjectRequest.read_object_spec:type_name -> google.storage.v2.BidiReadObjectSpec
+ 23, // 24: google.storage.v2.BidiReadObjectRequest.read_ranges:type_name -> google.storage.v2.ReadRange
+ 24, // 25: google.storage.v2.BidiReadObjectResponse.object_data_ranges:type_name -> google.storage.v2.ObjectRangeData
+ 49, // 26: google.storage.v2.BidiReadObjectResponse.metadata:type_name -> google.storage.v2.Object
+ 25, // 27: google.storage.v2.BidiReadObjectResponse.read_handle:type_name -> google.storage.v2.BidiReadHandle
+ 25, // 28: google.storage.v2.BidiReadObjectRedirectedError.read_handle:type_name -> google.storage.v2.BidiReadHandle
+ 26, // 29: google.storage.v2.BidiWriteObjectRedirectedError.write_handle:type_name -> google.storage.v2.BidiWriteHandle
+ 22, // 30: google.storage.v2.BidiReadObjectError.read_range_errors:type_name -> google.storage.v2.ReadRangeError
+ 77, // 31: google.storage.v2.ReadRangeError.status:type_name -> google.rpc.Status
+ 46, // 32: google.storage.v2.ObjectRangeData.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 23, // 33: google.storage.v2.ObjectRangeData.read_range:type_name -> google.storage.v2.ReadRange
+ 49, // 34: google.storage.v2.WriteObjectSpec.resource:type_name -> google.storage.v2.Object
+ 27, // 35: google.storage.v2.WriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 46, // 36: google.storage.v2.WriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 47, // 37: google.storage.v2.WriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 42, // 38: google.storage.v2.WriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 49, // 39: google.storage.v2.WriteObjectResponse.resource:type_name -> google.storage.v2.Object
+ 26, // 40: google.storage.v2.AppendObjectSpec.write_handle:type_name -> google.storage.v2.BidiWriteHandle
+ 27, // 41: google.storage.v2.BidiWriteObjectRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 30, // 42: google.storage.v2.BidiWriteObjectRequest.append_object_spec:type_name -> google.storage.v2.AppendObjectSpec
+ 46, // 43: google.storage.v2.BidiWriteObjectRequest.checksummed_data:type_name -> google.storage.v2.ChecksummedData
+ 47, // 44: google.storage.v2.BidiWriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 42, // 45: google.storage.v2.BidiWriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 49, // 46: google.storage.v2.BidiWriteObjectResponse.resource:type_name -> google.storage.v2.Object
+ 26, // 47: google.storage.v2.BidiWriteObjectResponse.write_handle:type_name -> google.storage.v2.BidiWriteHandle
+ 76, // 48: google.storage.v2.ListObjectsRequest.read_mask:type_name -> google.protobuf.FieldMask
+ 42, // 49: google.storage.v2.QueryWriteStatusRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 49, // 50: google.storage.v2.QueryWriteStatusResponse.resource:type_name -> google.storage.v2.Object
+ 49, // 51: google.storage.v2.RewriteObjectRequest.destination:type_name -> google.storage.v2.Object
+ 42, // 52: google.storage.v2.RewriteObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 47, // 53: google.storage.v2.RewriteObjectRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 49, // 54: google.storage.v2.RewriteResponse.resource:type_name -> google.storage.v2.Object
+ 27, // 55: google.storage.v2.StartResumableWriteRequest.write_object_spec:type_name -> google.storage.v2.WriteObjectSpec
+ 42, // 56: google.storage.v2.StartResumableWriteRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 47, // 57: google.storage.v2.StartResumableWriteRequest.object_checksums:type_name -> google.storage.v2.ObjectChecksums
+ 49, // 58: google.storage.v2.UpdateObjectRequest.object:type_name -> google.storage.v2.Object
+ 76, // 59: google.storage.v2.UpdateObjectRequest.update_mask:type_name -> google.protobuf.FieldMask
+ 42, // 60: google.storage.v2.UpdateObjectRequest.common_object_request_params:type_name -> google.storage.v2.CommonObjectRequestParams
+ 45, // 61: google.storage.v2.Bucket.acl:type_name -> google.storage.v2.BucketAccessControl
+ 50, // 62: google.storage.v2.Bucket.default_object_acl:type_name -> google.storage.v2.ObjectAccessControl
+ 61, // 63: google.storage.v2.Bucket.lifecycle:type_name -> google.storage.v2.Bucket.Lifecycle
+ 78, // 64: google.storage.v2.Bucket.create_time:type_name -> google.protobuf.Timestamp
+ 58, // 65: google.storage.v2.Bucket.cors:type_name -> google.storage.v2.Bucket.Cors
+ 78, // 66: google.storage.v2.Bucket.update_time:type_name -> google.protobuf.Timestamp
+ 70, // 67: google.storage.v2.Bucket.labels:type_name -> google.storage.v2.Bucket.LabelsEntry
+ 66, // 68: google.storage.v2.Bucket.website:type_name -> google.storage.v2.Bucket.Website
+ 65, // 69: google.storage.v2.Bucket.versioning:type_name -> google.storage.v2.Bucket.Versioning
+ 62, // 70: google.storage.v2.Bucket.logging:type_name -> google.storage.v2.Bucket.Logging
+ 53, // 71: google.storage.v2.Bucket.owner:type_name -> google.storage.v2.Owner
+ 59, // 72: google.storage.v2.Bucket.encryption:type_name -> google.storage.v2.Bucket.Encryption
+ 57, // 73: google.storage.v2.Bucket.billing:type_name -> google.storage.v2.Bucket.Billing
+ 63, // 74: google.storage.v2.Bucket.retention_policy:type_name -> google.storage.v2.Bucket.RetentionPolicy
+ 60, // 75: google.storage.v2.Bucket.iam_config:type_name -> google.storage.v2.Bucket.IamConfig
+ 67, // 76: google.storage.v2.Bucket.custom_placement_config:type_name -> google.storage.v2.Bucket.CustomPlacementConfig
+ 68, // 77: google.storage.v2.Bucket.autoclass:type_name -> google.storage.v2.Bucket.Autoclass
+ 69, // 78: google.storage.v2.Bucket.hierarchical_namespace:type_name -> google.storage.v2.Bucket.HierarchicalNamespace
+ 64, // 79: google.storage.v2.Bucket.soft_delete_policy:type_name -> google.storage.v2.Bucket.SoftDeletePolicy
+ 52, // 80: google.storage.v2.BucketAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
+ 50, // 81: google.storage.v2.Object.acl:type_name -> google.storage.v2.ObjectAccessControl
+ 78, // 82: google.storage.v2.Object.delete_time:type_name -> google.protobuf.Timestamp
+ 78, // 83: google.storage.v2.Object.finalize_time:type_name -> google.protobuf.Timestamp
+ 78, // 84: google.storage.v2.Object.create_time:type_name -> google.protobuf.Timestamp
+ 47, // 85: google.storage.v2.Object.checksums:type_name -> google.storage.v2.ObjectChecksums
+ 78, // 86: google.storage.v2.Object.update_time:type_name -> google.protobuf.Timestamp
+ 78, // 87: google.storage.v2.Object.update_storage_class_time:type_name -> google.protobuf.Timestamp
+ 78, // 88: google.storage.v2.Object.retention_expire_time:type_name -> google.protobuf.Timestamp
+ 75, // 89: google.storage.v2.Object.metadata:type_name -> google.storage.v2.Object.MetadataEntry
+ 53, // 90: google.storage.v2.Object.owner:type_name -> google.storage.v2.Owner
+ 48, // 91: google.storage.v2.Object.customer_encryption:type_name -> google.storage.v2.CustomerEncryption
+ 78, // 92: google.storage.v2.Object.custom_time:type_name -> google.protobuf.Timestamp
+ 78, // 93: google.storage.v2.Object.soft_delete_time:type_name -> google.protobuf.Timestamp
+ 78, // 94: google.storage.v2.Object.hard_delete_time:type_name -> google.protobuf.Timestamp
+ 52, // 95: google.storage.v2.ObjectAccessControl.project_team:type_name -> google.storage.v2.ProjectTeam
+ 49, // 96: google.storage.v2.ListObjectsResponse.objects:type_name -> google.storage.v2.Object
+ 56, // 97: google.storage.v2.ComposeObjectRequest.SourceObject.object_preconditions:type_name -> google.storage.v2.ComposeObjectRequest.SourceObject.ObjectPreconditions
+ 71, // 98: google.storage.v2.Bucket.IamConfig.uniform_bucket_level_access:type_name -> google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess
+ 72, // 99: google.storage.v2.Bucket.Lifecycle.rule:type_name -> google.storage.v2.Bucket.Lifecycle.Rule
+ 78, // 100: google.storage.v2.Bucket.RetentionPolicy.effective_time:type_name -> google.protobuf.Timestamp
+ 79, // 101: google.storage.v2.Bucket.RetentionPolicy.retention_duration:type_name -> google.protobuf.Duration
+ 79, // 102: google.storage.v2.Bucket.SoftDeletePolicy.retention_duration:type_name -> google.protobuf.Duration
+ 78, // 103: google.storage.v2.Bucket.SoftDeletePolicy.effective_time:type_name -> google.protobuf.Timestamp
+ 78, // 104: google.storage.v2.Bucket.Autoclass.toggle_time:type_name -> google.protobuf.Timestamp
+ 78, // 105: google.storage.v2.Bucket.Autoclass.terminal_storage_class_update_time:type_name -> google.protobuf.Timestamp
+ 78, // 106: google.storage.v2.Bucket.IamConfig.UniformBucketLevelAccess.lock_time:type_name -> google.protobuf.Timestamp
+ 73, // 107: google.storage.v2.Bucket.Lifecycle.Rule.action:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Action
+ 74, // 108: google.storage.v2.Bucket.Lifecycle.Rule.condition:type_name -> google.storage.v2.Bucket.Lifecycle.Rule.Condition
+ 80, // 109: google.storage.v2.Bucket.Lifecycle.Rule.Condition.created_before:type_name -> google.type.Date
+ 80, // 110: google.storage.v2.Bucket.Lifecycle.Rule.Condition.custom_time_before:type_name -> google.type.Date
+ 80, // 111: google.storage.v2.Bucket.Lifecycle.Rule.Condition.noncurrent_time_before:type_name -> google.type.Date
+ 1, // 112: google.storage.v2.Storage.DeleteBucket:input_type -> google.storage.v2.DeleteBucketRequest
+ 2, // 113: google.storage.v2.Storage.GetBucket:input_type -> google.storage.v2.GetBucketRequest
+ 3, // 114: google.storage.v2.Storage.CreateBucket:input_type -> google.storage.v2.CreateBucketRequest
+ 4, // 115: google.storage.v2.Storage.ListBuckets:input_type -> google.storage.v2.ListBucketsRequest
+ 6, // 116: google.storage.v2.Storage.LockBucketRetentionPolicy:input_type -> google.storage.v2.LockBucketRetentionPolicyRequest
+ 81, // 117: google.storage.v2.Storage.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest
+ 82, // 118: google.storage.v2.Storage.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest
+ 83, // 119: google.storage.v2.Storage.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest
+ 7, // 120: google.storage.v2.Storage.UpdateBucket:input_type -> google.storage.v2.UpdateBucketRequest
+ 8, // 121: google.storage.v2.Storage.ComposeObject:input_type -> google.storage.v2.ComposeObjectRequest
+ 9, // 122: google.storage.v2.Storage.DeleteObject:input_type -> google.storage.v2.DeleteObjectRequest
+ 10, // 123: google.storage.v2.Storage.RestoreObject:input_type -> google.storage.v2.RestoreObjectRequest
+ 11, // 124: google.storage.v2.Storage.CancelResumableWrite:input_type -> google.storage.v2.CancelResumableWriteRequest
+ 14, // 125: google.storage.v2.Storage.GetObject:input_type -> google.storage.v2.GetObjectRequest
+ 13, // 126: google.storage.v2.Storage.ReadObject:input_type -> google.storage.v2.ReadObjectRequest
+ 17, // 127: google.storage.v2.Storage.BidiReadObject:input_type -> google.storage.v2.BidiReadObjectRequest
+ 41, // 128: google.storage.v2.Storage.UpdateObject:input_type -> google.storage.v2.UpdateObjectRequest
+ 28, // 129: google.storage.v2.Storage.WriteObject:input_type -> google.storage.v2.WriteObjectRequest
+ 31, // 130: google.storage.v2.Storage.BidiWriteObject:input_type -> google.storage.v2.BidiWriteObjectRequest
+ 33, // 131: google.storage.v2.Storage.ListObjects:input_type -> google.storage.v2.ListObjectsRequest
+ 36, // 132: google.storage.v2.Storage.RewriteObject:input_type -> google.storage.v2.RewriteObjectRequest
+ 39, // 133: google.storage.v2.Storage.StartResumableWrite:input_type -> google.storage.v2.StartResumableWriteRequest
+ 34, // 134: google.storage.v2.Storage.QueryWriteStatus:input_type -> google.storage.v2.QueryWriteStatusRequest
+ 38, // 135: google.storage.v2.Storage.MoveObject:input_type -> google.storage.v2.MoveObjectRequest
+ 84, // 136: google.storage.v2.Storage.DeleteBucket:output_type -> google.protobuf.Empty
+ 44, // 137: google.storage.v2.Storage.GetBucket:output_type -> google.storage.v2.Bucket
+ 44, // 138: google.storage.v2.Storage.CreateBucket:output_type -> google.storage.v2.Bucket
+ 5, // 139: google.storage.v2.Storage.ListBuckets:output_type -> google.storage.v2.ListBucketsResponse
+ 44, // 140: google.storage.v2.Storage.LockBucketRetentionPolicy:output_type -> google.storage.v2.Bucket
+ 85, // 141: google.storage.v2.Storage.GetIamPolicy:output_type -> google.iam.v1.Policy
+ 85, // 142: google.storage.v2.Storage.SetIamPolicy:output_type -> google.iam.v1.Policy
+ 86, // 143: google.storage.v2.Storage.TestIamPermissions:output_type -> google.iam.v1.TestIamPermissionsResponse
+ 44, // 144: google.storage.v2.Storage.UpdateBucket:output_type -> google.storage.v2.Bucket
+ 49, // 145: google.storage.v2.Storage.ComposeObject:output_type -> google.storage.v2.Object
+ 84, // 146: google.storage.v2.Storage.DeleteObject:output_type -> google.protobuf.Empty
+ 49, // 147: google.storage.v2.Storage.RestoreObject:output_type -> google.storage.v2.Object
+ 12, // 148: google.storage.v2.Storage.CancelResumableWrite:output_type -> google.storage.v2.CancelResumableWriteResponse
+ 49, // 149: google.storage.v2.Storage.GetObject:output_type -> google.storage.v2.Object
+ 15, // 150: google.storage.v2.Storage.ReadObject:output_type -> google.storage.v2.ReadObjectResponse
+ 18, // 151: google.storage.v2.Storage.BidiReadObject:output_type -> google.storage.v2.BidiReadObjectResponse
+ 49, // 152: google.storage.v2.Storage.UpdateObject:output_type -> google.storage.v2.Object
+ 29, // 153: google.storage.v2.Storage.WriteObject:output_type -> google.storage.v2.WriteObjectResponse
+ 32, // 154: google.storage.v2.Storage.BidiWriteObject:output_type -> google.storage.v2.BidiWriteObjectResponse
+ 51, // 155: google.storage.v2.Storage.ListObjects:output_type -> google.storage.v2.ListObjectsResponse
+ 37, // 156: google.storage.v2.Storage.RewriteObject:output_type -> google.storage.v2.RewriteResponse
+ 40, // 157: google.storage.v2.Storage.StartResumableWrite:output_type -> google.storage.v2.StartResumableWriteResponse
+ 35, // 158: google.storage.v2.Storage.QueryWriteStatus:output_type -> google.storage.v2.QueryWriteStatusResponse
+ 49, // 159: google.storage.v2.Storage.MoveObject:output_type -> google.storage.v2.Object
+ 136, // [136:160] is the sub-list for method output_type
+ 112, // [112:136] is the sub-list for method input_type
+ 112, // [112:112] is the sub-list for extension type_name
+ 112, // [112:112] is the sub-list for extension extendee
+ 0, // [0:112] is the sub-list for field type_name
}
func init() { file_google_storage_v2_storage_proto_init() }
@@ -9240,969 +9192,61 @@ func file_google_storage_v2_storage_proto_init() {
if File_google_storage_v2_storage_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_storage_v2_storage_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteBucketRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetBucketRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateBucketRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListBucketsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListBucketsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*LockBucketRetentionPolicyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateBucketRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteNotificationConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetNotificationConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateNotificationConfigRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListNotificationConfigsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListNotificationConfigsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ComposeObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RestoreObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CancelResumableWriteRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CancelResumableWriteResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadObjectResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*WriteObjectSpec); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*WriteObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*WriteObjectResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BidiWriteObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BidiWriteObjectResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListObjectsRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueryWriteStatusRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueryWriteStatusResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RewriteObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*RewriteResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartResumableWriteRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StartResumableWriteResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateObjectRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetServiceAccountRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateHmacKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CreateHmacKeyResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DeleteHmacKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GetHmacKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListHmacKeysRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListHmacKeysResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UpdateHmacKeyRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CommonObjectRequestParams); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceConstants); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BucketAccessControl); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ChecksummedData); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ObjectChecksums); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*HmacKeyMetadata); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*NotificationConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*CustomerEncryption); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Object); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ObjectAccessControl); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListObjectsResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ProjectTeam); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceAccount); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Owner); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ContentRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ComposeObjectRequest_SourceObject); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ComposeObjectRequest_SourceObject_ObjectPreconditions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Billing); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Cors); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Encryption); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_IamConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Lifecycle); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Logging); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_RetentionPolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_SoftDeletePolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Versioning); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Website); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_CustomPlacementConfig); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Autoclass); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_HierarchicalNamespace); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_IamConfig_UniformBucketLevelAccess); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Lifecycle_Rule); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Lifecycle_Rule_Action); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Bucket_Lifecycle_Rule_Condition); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[14].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[17].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[20].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[21].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[0].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[1].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[3].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[6].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[7].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[8].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[9].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[12].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[13].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[15].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[18].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[19].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[26].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []any{
(*WriteObjectRequest_UploadId)(nil),
(*WriteObjectRequest_WriteObjectSpec)(nil),
(*WriteObjectRequest_ChecksummedData)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[22].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []any{
(*WriteObjectResponse_PersistedSize)(nil),
(*WriteObjectResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[23].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[29].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[30].OneofWrappers = []any{
(*BidiWriteObjectRequest_UploadId)(nil),
(*BidiWriteObjectRequest_WriteObjectSpec)(nil),
+ (*BidiWriteObjectRequest_AppendObjectSpec)(nil),
(*BidiWriteObjectRequest_ChecksummedData)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[24].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[31].OneofWrappers = []any{
(*BidiWriteObjectResponse_PersistedSize)(nil),
(*BidiWriteObjectResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[25].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[27].OneofWrappers = []interface{}{
+ file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[34].OneofWrappers = []any{
(*QueryWriteStatusResponse_PersistedSize)(nil),
(*QueryWriteStatusResponse_Resource)(nil),
}
- file_google_storage_v2_storage_proto_msgTypes[28].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[32].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[50].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[58].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[66].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[70].OneofWrappers = []interface{}{}
- file_google_storage_v2_storage_proto_msgTypes[76].OneofWrappers = []interface{}{}
+ file_google_storage_v2_storage_proto_msgTypes[35].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[37].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[40].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[45].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[46].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[48].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[55].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[63].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[67].OneofWrappers = []any{}
+ file_google_storage_v2_storage_proto_msgTypes[73].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_storage_v2_storage_proto_rawDesc,
NumEnums: 1,
- NumMessages: 79,
+ NumMessages: 75,
NumExtensions: 0,
NumServices: 1,
},
@@ -10247,34 +9291,39 @@ type StorageClient interface {
// The `resource` field in the request should be
// `projects/_/buckets/{bucket}`.
SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error)
- // Tests a set of permissions on the given bucket or object to see which, if
- // any, are held by the caller.
+ // Tests a set of permissions on the given bucket, object, or managed folder
+ // to see which, if any, are held by the caller.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}` for a bucket,
+ // `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+ // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+ // for a managed folder.
TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error)
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error)
- // Permanently deletes a NotificationConfig.
- DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // View a NotificationConfig.
- GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
- // Creates a NotificationConfig for a given bucket.
- // These NotificationConfigs, when triggered, publish messages to the
- // specified Pub/Sub topics. See
- // https://cloud.google.com/storage/docs/pubsub-notifications.
- CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error)
- // Retrieves a list of NotificationConfigs for a given bucket.
- ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error)
// Concatenates a list of existing objects into a new object in the same
// bucket.
ComposeObject(ctx context.Context, in *ComposeObjectRequest, opts ...grpc.CallOption) (*Object, error)
- // Deletes an object and its metadata.
+ // Deletes an object and its metadata. Deletions are permanent if versioning
+ // is not enabled for the bucket, or if the generation parameter is used, or
+ // if [soft delete](https://cloud.google.com/storage/docs/soft-delete) is not
+ // enabled for the bucket.
+ // When this API is used to delete an object from a bucket that has soft
+ // delete policy enabled, the object becomes soft deleted, and the
+ // `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+ // This API cannot be used to permanently delete soft-deleted objects.
+ // Soft-deleted objects are permanently deleted according to their
+ // `hardDeleteTime`.
//
- // Deletions are normally permanent when versioning is disabled or whenever
- // the generation parameter is used. However, if soft delete is enabled for
- // the bucket, deleted objects can be restored using RestoreObject until the
- // soft delete retention period has passed.
+ // You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+ // API to restore soft-deleted objects until the soft delete retention period
+ // has passed.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.delete`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
DeleteObject(ctx context.Context, in *DeleteObjectRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
// Restores a soft-deleted object.
RestoreObject(ctx context.Context, in *RestoreObjectRequest, opts ...grpc.CallOption) (*Object, error)
@@ -10287,10 +9336,43 @@ type StorageClient interface {
// they could either complete before the cancellation or fail if the
// cancellation completes first.
CancelResumableWrite(ctx context.Context, in *CancelResumableWriteRequest, opts ...grpc.CallOption) (*CancelResumableWriteResponse, error)
- // Retrieves an object's metadata.
+ // Retrieves object metadata.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket. To return object ACLs, the authenticated user must also have
+ // the `storage.objects.getIamPolicy` permission.
GetObject(ctx context.Context, in *GetObjectRequest, opts ...grpc.CallOption) (*Object, error)
- // Reads an object's data.
+ // Retrieves object data.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
ReadObject(ctx context.Context, in *ReadObjectRequest, opts ...grpc.CallOption) (Storage_ReadObjectClient, error)
+ // Reads an object's data.
+ //
+ // This is a bi-directional API with the added support for reading multiple
+ // ranges within one stream both within and across multiple messages.
+ // If the server encountered an error for any of the inputs, the stream will
+ // be closed with the relevant error code.
+ // Because the API allows for multiple outstanding requests, when the stream
+ // is closed the error response will contain a BidiReadObjectRangesError proto
+ // in the error extension describing the error for each outstanding read_id.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ //
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
+ //
+ // This API is currently in preview and is not yet available for general
+ // use.
+ BidiReadObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiReadObjectClient, error)
// Updates an object's metadata.
// Equivalent to JSON API's storage.objects.patch.
UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error)
@@ -10347,12 +9429,18 @@ type StorageClient interface {
// whether the service views the object as complete.
//
// Attempting to resume an already finalized object will result in an OK
- // status, with a WriteObjectResponse containing the finalized object's
+ // status, with a `WriteObjectResponse` containing the finalized object's
// metadata.
//
// Alternatively, the BidiWriteObject operation may be used to write an
// object with controls over flushing and the ability to fetch the ability to
// determine the current persisted size.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.create`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error)
// Stores a new object and metadata.
//
@@ -10371,40 +9459,51 @@ type StorageClient interface {
// always be sent to the client, regardless of the value of `state_lookup`.
BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error)
// Retrieves a list of objects matching the criteria.
+ //
+ // **IAM Permissions**:
+ //
+ // The authenticated user requires `storage.objects.list`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions)
+ // to use this method. To return object ACLs, the authenticated user must also
+ // have the `storage.objects.getIamPolicy` permission.
ListObjects(ctx context.Context, in *ListObjectsRequest, opts ...grpc.CallOption) (*ListObjectsResponse, error)
// Rewrites a source object to a destination object. Optionally overrides
// metadata.
RewriteObject(ctx context.Context, in *RewriteObjectRequest, opts ...grpc.CallOption) (*RewriteResponse, error)
- // Starts a resumable write. How long the write operation remains valid, and
- // what happens when the write operation becomes invalid, are
- // service-dependent.
+ // Starts a resumable write operation. This
+ // method is part of the [Resumable
+ // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature.
+ // This allows you to upload large objects in multiple chunks, which is more
+ // resilient to network interruptions than a single upload. The validity
+ // duration of the write operation, and the consequences of it becoming
+ // invalid, are service-dependent.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.create`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
StartResumableWrite(ctx context.Context, in *StartResumableWriteRequest, opts ...grpc.CallOption) (*StartResumableWriteResponse, error)
- // Determines the `persisted_size` for an object that is being written, which
- // can then be used as the `write_offset` for the next `Write()` call.
+ // Determines the `persisted_size` of an object that is being written. This
+ // method is part of the [resumable
+ // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature.
+ // The returned value is the size of the object that has been persisted so
+ // far. The value can be used as the `write_offset` for the next `Write()`
+ // call.
//
- // If the object does not exist (i.e., the object has been deleted, or the
- // first `Write()` has not yet reached the service), this method returns the
+ // If the object does not exist, meaning if it was deleted, or the
+ // first `Write()` has not yet reached the service, this method returns the
// error `NOT_FOUND`.
//
- // The client **may** call `QueryWriteStatus()` at any time to determine how
- // much data has been processed for this object. This is useful if the
- // client is buffering data and needs to know which data can be safely
- // evicted. For any sequence of `QueryWriteStatus()` calls for a given
- // object name, the sequence of returned `persisted_size` values will be
+ // This method is useful for clients that buffer data and need to know which
+ // data can be safely evicted. The client can call `QueryWriteStatus()` at any
+ // time to determine how much data has been logged for this object.
+ // For any sequence of `QueryWriteStatus()` calls for a given
+ // object name, the sequence of returned `persisted_size` values are
// non-decreasing.
QueryWriteStatus(ctx context.Context, in *QueryWriteStatusRequest, opts ...grpc.CallOption) (*QueryWriteStatusResponse, error)
- // Retrieves the name of a project's Google Cloud Storage service account.
- GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error)
- // Creates a new HMAC key for the given service account.
- CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error)
- // Deletes a given HMAC key. Key must be in an INACTIVE state.
- DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
- // Gets an existing HMAC key metadata for the given id.
- GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
- // Lists HMAC keys under a given project with the additional filters provided.
- ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error)
- // Updates a given HMAC key state between ACTIVE and INACTIVE.
- UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error)
+ // Moves the source object to the destination object in the same bucket.
+ MoveObject(ctx context.Context, in *MoveObjectRequest, opts ...grpc.CallOption) (*Object, error)
}
type storageClient struct {
@@ -10444,88 +9543,52 @@ func (c *storageClient) CreateBucket(ctx context.Context, in *CreateBucketReques
func (c *storageClient) ListBuckets(ctx context.Context, in *ListBucketsRequest, opts ...grpc.CallOption) (*ListBucketsResponse, error) {
out := new(ListBucketsResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) {
- out := new(Bucket)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
- out := new(iampb.Policy)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
- out := new(iampb.Policy)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) {
- out := new(iampb.TestIamPermissionsResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListBuckets", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) {
+func (c *storageClient) LockBucketRetentionPolicy(ctx context.Context, in *LockBucketRetentionPolicyRequest, opts ...grpc.CallOption) (*Bucket, error) {
out := new(Bucket)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/LockBucketRetentionPolicy", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) DeleteNotificationConfig(ctx context.Context, in *DeleteNotificationConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteNotificationConfig", in, out, opts...)
+func (c *storageClient) GetIamPolicy(ctx context.Context, in *iampb.GetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
+ out := new(iampb.Policy)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetIamPolicy", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) GetNotificationConfig(ctx context.Context, in *GetNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
- out := new(NotificationConfig)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetNotificationConfig", in, out, opts...)
+func (c *storageClient) SetIamPolicy(ctx context.Context, in *iampb.SetIamPolicyRequest, opts ...grpc.CallOption) (*iampb.Policy, error) {
+ out := new(iampb.Policy)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/SetIamPolicy", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) CreateNotificationConfig(ctx context.Context, in *CreateNotificationConfigRequest, opts ...grpc.CallOption) (*NotificationConfig, error) {
- out := new(NotificationConfig)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateNotificationConfig", in, out, opts...)
+func (c *storageClient) TestIamPermissions(ctx context.Context, in *iampb.TestIamPermissionsRequest, opts ...grpc.CallOption) (*iampb.TestIamPermissionsResponse, error) {
+ out := new(iampb.TestIamPermissionsResponse)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/TestIamPermissions", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
-func (c *storageClient) ListNotificationConfigs(ctx context.Context, in *ListNotificationConfigsRequest, opts ...grpc.CallOption) (*ListNotificationConfigsResponse, error) {
- out := new(ListNotificationConfigsResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListNotificationConfigs", in, out, opts...)
+func (c *storageClient) UpdateBucket(ctx context.Context, in *UpdateBucketRequest, opts ...grpc.CallOption) (*Bucket, error) {
+ out := new(Bucket)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateBucket", in, out, opts...)
if err != nil {
return nil, err
}
@@ -10609,6 +9672,37 @@ func (x *storageReadObjectClient) Recv() (*ReadObjectResponse, error) {
return m, nil
}
+func (c *storageClient) BidiReadObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiReadObjectClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/BidiReadObject", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &storageBidiReadObjectClient{stream}
+ return x, nil
+}
+
+type Storage_BidiReadObjectClient interface {
+ Send(*BidiReadObjectRequest) error
+ Recv() (*BidiReadObjectResponse, error)
+ grpc.ClientStream
+}
+
+type storageBidiReadObjectClient struct {
+ grpc.ClientStream
+}
+
+func (x *storageBidiReadObjectClient) Send(m *BidiReadObjectRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *storageBidiReadObjectClient) Recv() (*BidiReadObjectResponse, error) {
+ m := new(BidiReadObjectResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectRequest, opts ...grpc.CallOption) (*Object, error) {
out := new(Object)
err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateObject", in, out, opts...)
@@ -10619,7 +9713,7 @@ func (c *storageClient) UpdateObject(ctx context.Context, in *UpdateObjectReques
}
func (c *storageClient) WriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_WriteObjectClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[1], "/google.storage.v2.Storage/WriteObject", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/WriteObject", opts...)
if err != nil {
return nil, err
}
@@ -10653,7 +9747,7 @@ func (x *storageWriteObjectClient) CloseAndRecv() (*WriteObjectResponse, error)
}
func (c *storageClient) BidiWriteObject(ctx context.Context, opts ...grpc.CallOption) (Storage_BidiWriteObjectClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[2], "/google.storage.v2.Storage/BidiWriteObject", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Storage_serviceDesc.Streams[3], "/google.storage.v2.Storage/BidiWriteObject", opts...)
if err != nil {
return nil, err
}
@@ -10719,54 +9813,9 @@ func (c *storageClient) QueryWriteStatus(ctx context.Context, in *QueryWriteStat
return out, nil
}
-func (c *storageClient) GetServiceAccount(ctx context.Context, in *GetServiceAccountRequest, opts ...grpc.CallOption) (*ServiceAccount, error) {
- out := new(ServiceAccount)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetServiceAccount", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) CreateHmacKey(ctx context.Context, in *CreateHmacKeyRequest, opts ...grpc.CallOption) (*CreateHmacKeyResponse, error) {
- out := new(CreateHmacKeyResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/CreateHmacKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) DeleteHmacKey(ctx context.Context, in *DeleteHmacKeyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) {
- out := new(emptypb.Empty)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/DeleteHmacKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) GetHmacKey(ctx context.Context, in *GetHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
- out := new(HmacKeyMetadata)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/GetHmacKey", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) ListHmacKeys(ctx context.Context, in *ListHmacKeysRequest, opts ...grpc.CallOption) (*ListHmacKeysResponse, error) {
- out := new(ListHmacKeysResponse)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/ListHmacKeys", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *storageClient) UpdateHmacKey(ctx context.Context, in *UpdateHmacKeyRequest, opts ...grpc.CallOption) (*HmacKeyMetadata, error) {
- out := new(HmacKeyMetadata)
- err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/UpdateHmacKey", in, out, opts...)
+func (c *storageClient) MoveObject(ctx context.Context, in *MoveObjectRequest, opts ...grpc.CallOption) (*Object, error) {
+ out := new(Object)
+ err := c.cc.Invoke(ctx, "/google.storage.v2.Storage/MoveObject", in, out, opts...)
if err != nil {
return nil, err
}
@@ -10793,34 +9842,39 @@ type StorageServer interface {
// The `resource` field in the request should be
// `projects/_/buckets/{bucket}`.
SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error)
- // Tests a set of permissions on the given bucket or object to see which, if
- // any, are held by the caller.
+ // Tests a set of permissions on the given bucket, object, or managed folder
+ // to see which, if any, are held by the caller.
// The `resource` field in the request should be
- // `projects/_/buckets/{bucket}` for a bucket or
- // `projects/_/buckets/{bucket}/objects/{object}` for an object.
+ // `projects/_/buckets/{bucket}` for a bucket,
+ // `projects/_/buckets/{bucket}/objects/{object}` for an object, or
+ // `projects/_/buckets/{bucket}/managedFolders/{managedFolder}`
+ // for a managed folder.
TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error)
// Updates a bucket. Equivalent to JSON API's storage.buckets.patch method.
UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error)
- // Permanently deletes a NotificationConfig.
- DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error)
- // View a NotificationConfig.
- GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error)
- // Creates a NotificationConfig for a given bucket.
- // These NotificationConfigs, when triggered, publish messages to the
- // specified Pub/Sub topics. See
- // https://cloud.google.com/storage/docs/pubsub-notifications.
- CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error)
- // Retrieves a list of NotificationConfigs for a given bucket.
- ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error)
// Concatenates a list of existing objects into a new object in the same
// bucket.
ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error)
- // Deletes an object and its metadata.
+ // Deletes an object and its metadata. Deletions are permanent if versioning
+ // is not enabled for the bucket, or if the generation parameter is used, or
+ // if [soft delete](https://cloud.google.com/storage/docs/soft-delete) is not
+ // enabled for the bucket.
+ // When this API is used to delete an object from a bucket that has soft
+ // delete policy enabled, the object becomes soft deleted, and the
+ // `softDeleteTime` and `hardDeleteTime` properties are set on the object.
+ // This API cannot be used to permanently delete soft-deleted objects.
+ // Soft-deleted objects are permanently deleted according to their
+ // `hardDeleteTime`.
+ //
+ // You can use the [`RestoreObject`][google.storage.v2.Storage.RestoreObject]
+ // API to restore soft-deleted objects until the soft delete retention period
+ // has passed.
//
- // Deletions are normally permanent when versioning is disabled or whenever
- // the generation parameter is used. However, if soft delete is enabled for
- // the bucket, deleted objects can be restored using RestoreObject until the
- // soft delete retention period has passed.
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.delete`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error)
// Restores a soft-deleted object.
RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error)
@@ -10833,10 +9887,43 @@ type StorageServer interface {
// they could either complete before the cancellation or fail if the
// cancellation completes first.
CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error)
- // Retrieves an object's metadata.
+ // Retrieves object metadata.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket. To return object ACLs, the authenticated user must also have
+ // the `storage.objects.getIamPolicy` permission.
GetObject(context.Context, *GetObjectRequest) (*Object, error)
- // Reads an object's data.
+ // Retrieves object data.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error
+ // Reads an object's data.
+ //
+ // This is a bi-directional API with the added support for reading multiple
+ // ranges within one stream both within and across multiple messages.
+ // If the server encountered an error for any of the inputs, the stream will
+ // be closed with the relevant error code.
+ // Because the API allows for multiple outstanding requests, when the stream
+ // is closed the error response will contain a BidiReadObjectRangesError proto
+ // in the error extension describing the error for each outstanding read_id.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.get`
+ //
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
+ //
+ // This API is currently in preview and is not yet available for general
+ // use.
+ BidiReadObject(Storage_BidiReadObjectServer) error
// Updates an object's metadata.
// Equivalent to JSON API's storage.objects.patch.
UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error)
@@ -10893,12 +9980,18 @@ type StorageServer interface {
// whether the service views the object as complete.
//
// Attempting to resume an already finalized object will result in an OK
- // status, with a WriteObjectResponse containing the finalized object's
+ // status, with a `WriteObjectResponse` containing the finalized object's
// metadata.
//
// Alternatively, the BidiWriteObject operation may be used to write an
// object with controls over flushing and the ability to fetch the ability to
// determine the current persisted size.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.create`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
WriteObject(Storage_WriteObjectServer) error
// Stores a new object and metadata.
//
@@ -10917,40 +10010,51 @@ type StorageServer interface {
// always be sent to the client, regardless of the value of `state_lookup`.
BidiWriteObject(Storage_BidiWriteObjectServer) error
// Retrieves a list of objects matching the criteria.
+ //
+ // **IAM Permissions**:
+ //
+ // The authenticated user requires `storage.objects.list`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions)
+ // to use this method. To return object ACLs, the authenticated user must also
+ // have the `storage.objects.getIamPolicy` permission.
ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error)
// Rewrites a source object to a destination object. Optionally overrides
// metadata.
RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error)
- // Starts a resumable write. How long the write operation remains valid, and
- // what happens when the write operation becomes invalid, are
- // service-dependent.
+ // Starts a resumable write operation. This
+ // method is part of the [Resumable
+ // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature.
+ // This allows you to upload large objects in multiple chunks, which is more
+ // resilient to network interruptions than a single upload. The validity
+ // duration of the write operation, and the consequences of it becoming
+ // invalid, are service-dependent.
+ //
+ // **IAM Permissions**:
+ //
+ // Requires `storage.objects.create`
+ // [IAM permission](https://cloud.google.com/iam/docs/overview#permissions) on
+ // the bucket.
StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error)
- // Determines the `persisted_size` for an object that is being written, which
- // can then be used as the `write_offset` for the next `Write()` call.
+ // Determines the `persisted_size` of an object that is being written. This
+ // method is part of the [resumable
+ // upload](https://cloud.google.com/storage/docs/resumable-uploads) feature.
+ // The returned value is the size of the object that has been persisted so
+ // far. The value can be used as the `write_offset` for the next `Write()`
+ // call.
//
- // If the object does not exist (i.e., the object has been deleted, or the
- // first `Write()` has not yet reached the service), this method returns the
+ // If the object does not exist, meaning if it was deleted, or the
+ // first `Write()` has not yet reached the service, this method returns the
// error `NOT_FOUND`.
//
- // The client **may** call `QueryWriteStatus()` at any time to determine how
- // much data has been processed for this object. This is useful if the
- // client is buffering data and needs to know which data can be safely
- // evicted. For any sequence of `QueryWriteStatus()` calls for a given
- // object name, the sequence of returned `persisted_size` values will be
+ // This method is useful for clients that buffer data and need to know which
+ // data can be safely evicted. The client can call `QueryWriteStatus()` at any
+ // time to determine how much data has been logged for this object.
+ // For any sequence of `QueryWriteStatus()` calls for a given
+ // object name, the sequence of returned `persisted_size` values are
// non-decreasing.
QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error)
- // Retrieves the name of a project's Google Cloud Storage service account.
- GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error)
- // Creates a new HMAC key for the given service account.
- CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error)
- // Deletes a given HMAC key. Key must be in an INACTIVE state.
- DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error)
- // Gets an existing HMAC key metadata for the given id.
- GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error)
- // Lists HMAC keys under a given project with the additional filters provided.
- ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error)
- // Updates a given HMAC key state between ACTIVE and INACTIVE.
- UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error)
+ // Moves the source object to the destination object in the same bucket.
+ MoveObject(context.Context, *MoveObjectRequest) (*Object, error)
}
// UnimplementedStorageServer can be embedded to have forward compatible implementations.
@@ -10958,100 +10062,76 @@ type UnimplementedStorageServer struct {
}
func (*UnimplementedStorageServer) DeleteBucket(context.Context, *DeleteBucketRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteBucket not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method DeleteBucket not implemented")
}
func (*UnimplementedStorageServer) GetBucket(context.Context, *GetBucketRequest) (*Bucket, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetBucket not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method GetBucket not implemented")
}
func (*UnimplementedStorageServer) CreateBucket(context.Context, *CreateBucketRequest) (*Bucket, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateBucket not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method CreateBucket not implemented")
}
func (*UnimplementedStorageServer) ListBuckets(context.Context, *ListBucketsRequest) (*ListBucketsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListBuckets not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method ListBuckets not implemented")
}
func (*UnimplementedStorageServer) LockBucketRetentionPolicy(context.Context, *LockBucketRetentionPolicyRequest) (*Bucket, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method LockBucketRetentionPolicy not implemented")
}
func (*UnimplementedStorageServer) GetIamPolicy(context.Context, *iampb.GetIamPolicyRequest) (*iampb.Policy, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method GetIamPolicy not implemented")
}
func (*UnimplementedStorageServer) SetIamPolicy(context.Context, *iampb.SetIamPolicyRequest) (*iampb.Policy, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method SetIamPolicy not implemented")
}
func (*UnimplementedStorageServer) TestIamPermissions(context.Context, *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method TestIamPermissions not implemented")
}
func (*UnimplementedStorageServer) UpdateBucket(context.Context, *UpdateBucketRequest) (*Bucket, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateBucket not implemented")
-}
-func (*UnimplementedStorageServer) DeleteNotificationConfig(context.Context, *DeleteNotificationConfigRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) GetNotificationConfig(context.Context, *GetNotificationConfigRequest) (*NotificationConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) CreateNotificationConfig(context.Context, *CreateNotificationConfigRequest) (*NotificationConfig, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateNotificationConfig not implemented")
-}
-func (*UnimplementedStorageServer) ListNotificationConfigs(context.Context, *ListNotificationConfigsRequest) (*ListNotificationConfigsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListNotificationConfigs not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method UpdateBucket not implemented")
}
func (*UnimplementedStorageServer) ComposeObject(context.Context, *ComposeObjectRequest) (*Object, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ComposeObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method ComposeObject not implemented")
}
func (*UnimplementedStorageServer) DeleteObject(context.Context, *DeleteObjectRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method DeleteObject not implemented")
}
func (*UnimplementedStorageServer) RestoreObject(context.Context, *RestoreObjectRequest) (*Object, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RestoreObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method RestoreObject not implemented")
}
func (*UnimplementedStorageServer) CancelResumableWrite(context.Context, *CancelResumableWriteRequest) (*CancelResumableWriteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method CancelResumableWrite not implemented")
}
func (*UnimplementedStorageServer) GetObject(context.Context, *GetObjectRequest) (*Object, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method GetObject not implemented")
}
func (*UnimplementedStorageServer) ReadObject(*ReadObjectRequest, Storage_ReadObjectServer) error {
- return status.Errorf(codes.Unimplemented, "method ReadObject not implemented")
+ return status1.Errorf(codes.Unimplemented, "method ReadObject not implemented")
+}
+func (*UnimplementedStorageServer) BidiReadObject(Storage_BidiReadObjectServer) error {
+ return status1.Errorf(codes.Unimplemented, "method BidiReadObject not implemented")
}
func (*UnimplementedStorageServer) UpdateObject(context.Context, *UpdateObjectRequest) (*Object, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method UpdateObject not implemented")
}
func (*UnimplementedStorageServer) WriteObject(Storage_WriteObjectServer) error {
- return status.Errorf(codes.Unimplemented, "method WriteObject not implemented")
+ return status1.Errorf(codes.Unimplemented, "method WriteObject not implemented")
}
func (*UnimplementedStorageServer) BidiWriteObject(Storage_BidiWriteObjectServer) error {
- return status.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented")
+ return status1.Errorf(codes.Unimplemented, "method BidiWriteObject not implemented")
}
func (*UnimplementedStorageServer) ListObjects(context.Context, *ListObjectsRequest) (*ListObjectsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListObjects not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method ListObjects not implemented")
}
func (*UnimplementedStorageServer) RewriteObject(context.Context, *RewriteObjectRequest) (*RewriteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RewriteObject not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method RewriteObject not implemented")
}
func (*UnimplementedStorageServer) StartResumableWrite(context.Context, *StartResumableWriteRequest) (*StartResumableWriteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method StartResumableWrite not implemented")
}
func (*UnimplementedStorageServer) QueryWriteStatus(context.Context, *QueryWriteStatusRequest) (*QueryWriteStatusResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented")
-}
-func (*UnimplementedStorageServer) GetServiceAccount(context.Context, *GetServiceAccountRequest) (*ServiceAccount, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetServiceAccount not implemented")
+ return nil, status1.Errorf(codes.Unimplemented, "method QueryWriteStatus not implemented")
}
-func (*UnimplementedStorageServer) CreateHmacKey(context.Context, *CreateHmacKeyRequest) (*CreateHmacKeyResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateHmacKey not implemented")
-}
-func (*UnimplementedStorageServer) DeleteHmacKey(context.Context, *DeleteHmacKeyRequest) (*emptypb.Empty, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteHmacKey not implemented")
-}
-func (*UnimplementedStorageServer) GetHmacKey(context.Context, *GetHmacKeyRequest) (*HmacKeyMetadata, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetHmacKey not implemented")
-}
-func (*UnimplementedStorageServer) ListHmacKeys(context.Context, *ListHmacKeysRequest) (*ListHmacKeysResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListHmacKeys not implemented")
-}
-func (*UnimplementedStorageServer) UpdateHmacKey(context.Context, *UpdateHmacKeyRequest) (*HmacKeyMetadata, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateHmacKey not implemented")
+func (*UnimplementedStorageServer) MoveObject(context.Context, *MoveObjectRequest) (*Object, error) {
+ return nil, status1.Errorf(codes.Unimplemented, "method MoveObject not implemented")
}
func RegisterStorageServer(s *grpc.Server, srv StorageServer) {
@@ -11220,78 +10300,6 @@ func _Storage_UpdateBucket_Handler(srv interface{}, ctx context.Context, dec fun
return interceptor(ctx, in, info, handler)
}
-func _Storage_DeleteNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).DeleteNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/DeleteNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).DeleteNotificationConfig(ctx, req.(*DeleteNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_GetNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).GetNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/GetNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).GetNotificationConfig(ctx, req.(*GetNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_CreateNotificationConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateNotificationConfigRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).CreateNotificationConfig(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/CreateNotificationConfig",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).CreateNotificationConfig(ctx, req.(*CreateNotificationConfigRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_ListNotificationConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListNotificationConfigsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).ListNotificationConfigs(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/ListNotificationConfigs",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).ListNotificationConfigs(ctx, req.(*ListNotificationConfigsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
func _Storage_ComposeObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ComposeObjectRequest)
if err := dec(in); err != nil {
@@ -11403,6 +10411,32 @@ func (x *storageReadObjectServer) Send(m *ReadObjectResponse) error {
return x.ServerStream.SendMsg(m)
}
+func _Storage_BidiReadObject_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(StorageServer).BidiReadObject(&storageBidiReadObjectServer{stream})
+}
+
+type Storage_BidiReadObjectServer interface {
+ Send(*BidiReadObjectResponse) error
+ Recv() (*BidiReadObjectRequest, error)
+ grpc.ServerStream
+}
+
+type storageBidiReadObjectServer struct {
+ grpc.ServerStream
+}
+
+func (x *storageBidiReadObjectServer) Send(m *BidiReadObjectResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *storageBidiReadObjectServer) Recv() (*BidiReadObjectRequest, error) {
+ m := new(BidiReadObjectRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func _Storage_UpdateObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(UpdateObjectRequest)
if err := dec(in); err != nil {
@@ -11545,110 +10579,20 @@ func _Storage_QueryWriteStatus_Handler(srv interface{}, ctx context.Context, dec
return interceptor(ctx, in, info, handler)
}
-func _Storage_GetServiceAccount_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetServiceAccountRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).GetServiceAccount(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/GetServiceAccount",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).GetServiceAccount(ctx, req.(*GetServiceAccountRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_CreateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateHmacKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).CreateHmacKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/CreateHmacKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).CreateHmacKey(ctx, req.(*CreateHmacKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_DeleteHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteHmacKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).DeleteHmacKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/DeleteHmacKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).DeleteHmacKey(ctx, req.(*DeleteHmacKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_GetHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetHmacKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).GetHmacKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/GetHmacKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).GetHmacKey(ctx, req.(*GetHmacKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_ListHmacKeys_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListHmacKeysRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(StorageServer).ListHmacKeys(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/google.storage.v2.Storage/ListHmacKeys",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).ListHmacKeys(ctx, req.(*ListHmacKeysRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Storage_UpdateHmacKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateHmacKeyRequest)
+func _Storage_MoveObject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MoveObjectRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
- return srv.(StorageServer).UpdateHmacKey(ctx, in)
+ return srv.(StorageServer).MoveObject(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/google.storage.v2.Storage/UpdateHmacKey",
+ FullMethod: "/google.storage.v2.Storage/MoveObject",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(StorageServer).UpdateHmacKey(ctx, req.(*UpdateHmacKeyRequest))
+ return srv.(StorageServer).MoveObject(ctx, req.(*MoveObjectRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -11693,22 +10637,6 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
MethodName: "UpdateBucket",
Handler: _Storage_UpdateBucket_Handler,
},
- {
- MethodName: "DeleteNotificationConfig",
- Handler: _Storage_DeleteNotificationConfig_Handler,
- },
- {
- MethodName: "GetNotificationConfig",
- Handler: _Storage_GetNotificationConfig_Handler,
- },
- {
- MethodName: "CreateNotificationConfig",
- Handler: _Storage_CreateNotificationConfig_Handler,
- },
- {
- MethodName: "ListNotificationConfigs",
- Handler: _Storage_ListNotificationConfigs_Handler,
- },
{
MethodName: "ComposeObject",
Handler: _Storage_ComposeObject_Handler,
@@ -11750,28 +10678,8 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
Handler: _Storage_QueryWriteStatus_Handler,
},
{
- MethodName: "GetServiceAccount",
- Handler: _Storage_GetServiceAccount_Handler,
- },
- {
- MethodName: "CreateHmacKey",
- Handler: _Storage_CreateHmacKey_Handler,
- },
- {
- MethodName: "DeleteHmacKey",
- Handler: _Storage_DeleteHmacKey_Handler,
- },
- {
- MethodName: "GetHmacKey",
- Handler: _Storage_GetHmacKey_Handler,
- },
- {
- MethodName: "ListHmacKeys",
- Handler: _Storage_ListHmacKeys_Handler,
- },
- {
- MethodName: "UpdateHmacKey",
- Handler: _Storage_UpdateHmacKey_Handler,
+ MethodName: "MoveObject",
+ Handler: _Storage_MoveObject_Handler,
},
},
Streams: []grpc.StreamDesc{
@@ -11780,6 +10688,12 @@ var _Storage_serviceDesc = grpc.ServiceDesc{
Handler: _Storage_ReadObject_Handler,
ServerStreams: true,
},
+ {
+ StreamName: "BidiReadObject",
+ Handler: _Storage_BidiReadObject_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
{
StreamName: "WriteObject",
Handler: _Storage_WriteObject_Handler,
diff --git a/vendor/cloud.google.com/go/storage/internal/experimental.go b/vendor/cloud.google.com/go/storage/internal/experimental.go
new file mode 100644
index 000000000..2fd5111fb
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/internal/experimental.go
@@ -0,0 +1,36 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// All options in this package are experimental.
+
+package internal
+
+var (
+ // WithMetricInterval is a function which is implemented by storage package.
+ // It sets how often to emit metrics when using NewPeriodicReader and must be
+ // greater than 1 minute.
+ WithMetricInterval any // func (*time.Duration) option.ClientOption
+
+ // WithMetricExporter is a function which is implemented by storage package.
+ // Set an alternate client-side metric Exporter to emit metrics through.
+ WithMetricExporter any // func (*metric.Exporter) option.ClientOption
+
+ // WithReadStallTimeout is a function which is implemented by storage package.
+ // It takes ReadStallTimeoutConfig as inputs and returns a option.ClientOption.
+ WithReadStallTimeout any // func (*ReadStallTimeoutConfig) option.ClientOption
+
+ // WithGRPCBidiReads is a function which is implemented by the storage package.
+ // It sets the gRPC client to use the BidiReadObject API for downloads.
+ WithGRPCBidiReads any // func() option.ClientOption
+)
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index c3cf41cb7..ba56cacd8 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@
package internal
// Version is the current tagged release of the library.
-const Version = "1.41.0"
+const Version = "1.50.0"
diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go
index ffc49a808..99783f3df 100644
--- a/vendor/cloud.google.com/go/storage/invoke.go
+++ b/vendor/cloud.google.com/go/storage/invoke.go
@@ -74,7 +74,15 @@ func run(ctx context.Context, call func(ctx context.Context) error, retry *retry
return true, fmt.Errorf("storage: retry failed after %v attempts; last error: %w", *retry.maxAttempts, err)
}
attempts++
- return !errorFunc(err), err
+ retryable := errorFunc(err)
+ // Explicitly check context cancellation so that we can distinguish between a
+ // DEADLINE_EXCEEDED error from the server and a user-set context deadline.
+ // Unfortunately gRPC will return codes.DeadlineExceeded (which may be retryable
+ // if it's sent by the server) in both cases.
+ if ctxErr := ctx.Err(); errors.Is(ctxErr, context.Canceled) || errors.Is(ctxErr, context.DeadlineExceeded) {
+ retryable = false
+ }
+ return !retryable, err
})
}
@@ -118,20 +126,24 @@ func ShouldRetry(err error) bool {
// Retry socket-level errors ECONNREFUSED and ECONNRESET (from syscall).
// Unfortunately the error type is unexported, so we resort to string
// matching.
- retriable := []string{"connection refused", "connection reset"}
+ retriable := []string{"connection refused", "connection reset", "broken pipe"}
for _, s := range retriable {
if strings.Contains(e.Error(), s) {
return true
}
}
+ case *net.DNSError:
+ if e.IsTemporary {
+ return true
+ }
case interface{ Temporary() bool }:
if e.Temporary() {
return true
}
}
- // UNAVAILABLE, RESOURCE_EXHAUSTED, and INTERNAL codes are all retryable for gRPC.
+ // UNAVAILABLE, RESOURCE_EXHAUSTED, INTERNAL, and DEADLINE_EXCEEDED codes are all retryable for gRPC.
if st, ok := status.FromError(err); ok {
- if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal {
+ if code := st.Code(); code == codes.Unavailable || code == codes.ResourceExhausted || code == codes.Internal || code == codes.DeadlineExceeded {
return true
}
}
diff --git a/vendor/cloud.google.com/go/storage/notifications.go b/vendor/cloud.google.com/go/storage/notifications.go
index 1d6cfdf59..bc15900f0 100644
--- a/vendor/cloud.google.com/go/storage/notifications.go
+++ b/vendor/cloud.google.com/go/storage/notifications.go
@@ -21,7 +21,6 @@ import (
"regexp"
"cloud.google.com/go/internal/trace"
- "cloud.google.com/go/storage/internal/apiv2/storagepb"
raw "google.golang.org/api/storage/v1"
)
@@ -92,30 +91,6 @@ func toNotification(rn *raw.Notification) *Notification {
return n
}
-func toNotificationFromProto(pbn *storagepb.NotificationConfig) *Notification {
- n := &Notification{
- ID: pbn.GetName(),
- EventTypes: pbn.GetEventTypes(),
- ObjectNamePrefix: pbn.GetObjectNamePrefix(),
- CustomAttributes: pbn.GetCustomAttributes(),
- PayloadFormat: pbn.GetPayloadFormat(),
- }
- n.TopicProjectID, n.TopicID = parseNotificationTopic(pbn.Topic)
- return n
-}
-
-func toProtoNotification(n *Notification) *storagepb.NotificationConfig {
- return &storagepb.NotificationConfig{
- Name: n.ID,
- Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
- n.TopicProjectID, n.TopicID),
- EventTypes: n.EventTypes,
- ObjectNamePrefix: n.ObjectNamePrefix,
- CustomAttributes: n.CustomAttributes,
- PayloadFormat: n.PayloadFormat,
- }
-}
-
var topicRE = regexp.MustCompile(`^//pubsub\.googleapis\.com/projects/([^/]+)/topics/([^/]+)`)
// parseNotificationTopic extracts the project and topic IDs from from the full
@@ -144,6 +119,7 @@ func toRawNotification(n *Notification) *raw.Notification {
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
+// Note: gRPC is not supported.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
defer func() { trace.EndSpan(ctx, err) }()
@@ -165,6 +141,7 @@ func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (re
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
+// Note: gRPC is not supported.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
@@ -182,15 +159,8 @@ func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
return m
}
-func notificationsToMapFromProto(ns []*storagepb.NotificationConfig) map[string]*Notification {
- m := map[string]*Notification{}
- for _, n := range ns {
- m[n.Name] = toNotificationFromProto(n)
- }
- return m
-}
-
// DeleteNotification deletes the notification with the given ID.
+// Note: gRPC is not supported.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
diff --git a/vendor/cloud.google.com/go/storage/option.go b/vendor/cloud.google.com/go/storage/option.go
index e72ceb78f..16d57644a 100644
--- a/vendor/cloud.google.com/go/storage/option.go
+++ b/vendor/cloud.google.com/go/storage/option.go
@@ -15,15 +15,74 @@
package storage
import (
+ "os"
+ "strconv"
+ "time"
+
+ "cloud.google.com/go/storage/experimental"
+ storageinternal "cloud.google.com/go/storage/internal"
+ "go.opentelemetry.io/otel/sdk/metric"
"google.golang.org/api/option"
"google.golang.org/api/option/internaloption"
)
-// storageConfig contains the Storage client option configuration that can be
+const (
+ dynamicReadReqIncreaseRateEnv = "DYNAMIC_READ_REQ_INCREASE_RATE"
+ dynamicReadReqInitialTimeoutEnv = "DYNAMIC_READ_REQ_INITIAL_TIMEOUT"
+ defaultDynamicReadReqIncreaseRate = 15.0
+ defaultDynamicReqdReqMaxTimeout = 1 * time.Hour
+ defaultDynamicReadReqMinTimeout = 500 * time.Millisecond
+ defaultTargetPercentile = 0.99
+)
+
+func init() {
+ // initialize experimental options
+ storageinternal.WithMetricExporter = withMetricExporter
+ storageinternal.WithMetricInterval = withMetricInterval
+ storageinternal.WithReadStallTimeout = withReadStallTimeout
+ storageinternal.WithGRPCBidiReads = withGRPCBidiReads
+}
+
+// getDynamicReadReqIncreaseRateFromEnv returns the value set in the env variable.
+// It returns defaultDynamicReadReqIncreaseRate if env is not set or the set value is invalid.
+func getDynamicReadReqIncreaseRateFromEnv() float64 {
+ increaseRate := os.Getenv(dynamicReadReqIncreaseRateEnv)
+ if increaseRate == "" {
+ return defaultDynamicReadReqIncreaseRate
+ }
+
+ val, err := strconv.ParseFloat(increaseRate, 64)
+ if err != nil {
+ return defaultDynamicReadReqIncreaseRate
+ }
+ return val
+}
+
+// getDynamicReadReqInitialTimeoutSecFromEnv returns the value set in the env variable.
+// It returns the passed defaultVal if env is not set or the set value is invalid.
+func getDynamicReadReqInitialTimeoutSecFromEnv(defaultVal time.Duration) time.Duration {
+ initialTimeout := os.Getenv(dynamicReadReqInitialTimeoutEnv)
+ if initialTimeout == "" {
+ return defaultVal
+ }
+
+ val, err := time.ParseDuration(initialTimeout)
+ if err != nil {
+ return defaultVal
+ }
+ return val
+}
+
// set through storageClientOptions.
type storageConfig struct {
- useJSONforReads bool
- readAPIWasSet bool
+ useJSONforReads bool
+ readAPIWasSet bool
+ disableClientMetrics bool
+ metricExporter *metric.Exporter
+ metricInterval time.Duration
+ manualReader *metric.ManualReader
+ readStallTimeoutConfig *experimental.ReadStallTimeoutConfig
+ grpcBidiReads bool
}
// newStorageConfig generates a new storageConfig with all the given
@@ -44,10 +103,14 @@ type storageClientOption interface {
ApplyStorageOpt(*storageConfig)
}
-// WithJSONReads is an option that may be passed to a Storage Client on creation.
-// It sets the client to use the JSON API for object reads. Currently, the
-// default API used for reads is XML.
-// Setting this option is required to use the GenerationNotMatch condition.
+// WithJSONReads is an option that may be passed to [NewClient].
+// It sets the client to use the Cloud Storage JSON API for object
+// reads. Currently, the default API used for reads is XML, but JSON will
+// become the default in a future release.
+//
+// Setting this option is required to use the GenerationNotMatch condition. We
+// also recommend using JSON reads to ensure consistency with other client
+// operations (all of which use JSON by default).
//
// Note that when this option is set, reads will return a zero date for
// [ReaderObjectAttrs].LastModified and may return a different value for
@@ -56,10 +119,11 @@ func WithJSONReads() option.ClientOption {
return &withReadAPI{useJSON: true}
}
-// WithXMLReads is an option that may be passed to a Storage Client on creation.
-// It sets the client to use the XML API for object reads.
+// WithXMLReads is an option that may be passed to [NewClient].
+// It sets the client to use the Cloud Storage XML API for object reads.
//
-// This is the current default.
+// This is the current default, but the default will switch to JSON in a future
+// release.
func WithXMLReads() option.ClientOption {
return &withReadAPI{useJSON: false}
}
@@ -73,3 +137,120 @@ func (w *withReadAPI) ApplyStorageOpt(c *storageConfig) {
c.useJSONforReads = w.useJSON
c.readAPIWasSet = true
}
+
+type withDisabledClientMetrics struct {
+ internaloption.EmbeddableAdapter
+ disabledClientMetrics bool
+}
+
+// WithDisabledClientMetrics is an option that may be passed to [NewClient].
+// gRPC metrics are enabled by default in the GCS client and will export the
+// gRPC telemetry discussed in [gRFC/66] and [gRFC/78] to
+// [Google Cloud Monitoring]. The option is used to disable metrics.
+// Google Cloud Support can use this information to more quickly diagnose
+// problems related to GCS and gRPC.
+// Sending this data does not incur any billing charges, and requires minimal
+// CPU (a single RPC every few minutes) or memory (a few KiB to batch the
+// telemetry).
+//
+// The default is to enable client metrics. To opt-out of metrics collected use
+// this option.
+//
+// [gRFC/66]: https://github.com/grpc/proposal/blob/master/A66-otel-stats.md
+// [gRFC/78]: https://github.com/grpc/proposal/blob/master/A78-grpc-metrics-wrr-pf-xds.md
+// [Google Cloud Monitoring]: https://cloud.google.com/monitoring/docs
+func WithDisabledClientMetrics() option.ClientOption {
+ return &withDisabledClientMetrics{disabledClientMetrics: true}
+}
+
+func (w *withDisabledClientMetrics) ApplyStorageOpt(c *storageConfig) {
+ c.disableClientMetrics = w.disabledClientMetrics
+}
+
+type withMeterOptions struct {
+ internaloption.EmbeddableAdapter
+ // set sampling interval
+ interval time.Duration
+}
+
+func withMetricInterval(interval time.Duration) option.ClientOption {
+ return &withMeterOptions{interval: interval}
+}
+
+func (w *withMeterOptions) ApplyStorageOpt(c *storageConfig) {
+ c.metricInterval = w.interval
+}
+
+type withMetricExporterConfig struct {
+ internaloption.EmbeddableAdapter
+ // exporter override
+ metricExporter *metric.Exporter
+}
+
+func withMetricExporter(ex *metric.Exporter) option.ClientOption {
+ return &withMetricExporterConfig{metricExporter: ex}
+}
+
+func (w *withMetricExporterConfig) ApplyStorageOpt(c *storageConfig) {
+ c.metricExporter = w.metricExporter
+}
+
+type withTestMetricReaderConfig struct {
+ internaloption.EmbeddableAdapter
+ // reader override
+ metricReader *metric.ManualReader
+}
+
+func withTestMetricReader(ex *metric.ManualReader) option.ClientOption {
+ return &withTestMetricReaderConfig{metricReader: ex}
+}
+
+func (w *withTestMetricReaderConfig) ApplyStorageOpt(c *storageConfig) {
+ c.manualReader = w.metricReader
+}
+
+// WithReadStallTimeout is an option that may be passed to [NewClient].
+// It enables the client to retry the stalled read request, happens as part of
+// storage.Reader creation. As the name suggests, timeout is adjusted dynamically
+// based on past observed read-req latencies.
+//
+// This is only supported for the read operation, and only for the HTTP (XML) client.
+// The gRPC read operation will be supported soon.
+func withReadStallTimeout(rstc *experimental.ReadStallTimeoutConfig) option.ClientOption {
+ // TODO (raj-prince): To keep separate dynamicDelay instance for different BucketHandle.
+ // Currently, dynamicTimeout is kept at the client and hence shared across all the
+ // BucketHandle, which is not the ideal state. As latency depends on location of VM
+ // and Bucket, and read latency of different buckets may lie in different range.
+ // Hence having a separate dynamicTimeout instance at BucketHandle level will
+ // be better
+ if rstc.Min == time.Duration(0) {
+ rstc.Min = defaultDynamicReadReqMinTimeout
+ }
+ if rstc.TargetPercentile == 0 {
+ rstc.TargetPercentile = defaultTargetPercentile
+ }
+ return &withReadStallTimeoutConfig{
+ readStallTimeoutConfig: rstc,
+ }
+}
+
+type withReadStallTimeoutConfig struct {
+ internaloption.EmbeddableAdapter
+ readStallTimeoutConfig *experimental.ReadStallTimeoutConfig
+}
+
+func (wrstc *withReadStallTimeoutConfig) ApplyStorageOpt(config *storageConfig) {
+ config.readStallTimeoutConfig = wrstc.readStallTimeoutConfig
+}
+
+func withGRPCBidiReads() option.ClientOption {
+ return &withGRPCBidiReadsConfig{}
+}
+
+type withGRPCBidiReadsConfig struct {
+ internaloption.EmbeddableAdapter
+}
+
+func (w *withGRPCBidiReadsConfig) ApplyStorageOpt(config *storageConfig) {
+ config.grpcBidiReads = true
+}
diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go
index 0b228a6a7..6b14fd1dc 100644
--- a/vendor/cloud.google.com/go/storage/reader.go
+++ b/vendor/cloud.google.com/go/storage/reader.go
@@ -22,6 +22,7 @@ import (
"io/ioutil"
"net/http"
"strings"
+ "sync"
"time"
"cloud.google.com/go/internal/trace"
@@ -65,6 +66,19 @@ type ReaderObjectAttrs struct {
// meaningful in the context of a particular generation of a
// particular object.
Metageneration int64
+
+ // CRC32C is the CRC32 checksum of the entire object's content using the
+ // Castagnoli93 polynomial, if available.
+ CRC32C uint32
+
+ // Decompressed is true if the object is stored as a gzip file and was
+ // decompressed when read.
+ // Objects are automatically decompressed if the object's metadata property
+ // "Content-Encoding" is set to "gzip" or satisfies decompressive
+ // transcoding as per https://cloud.google.com/storage/docs/transcoding.
+ //
+ // To prevent decompression on reads, use [ObjectHandle.ReadCompressed].
+ Decompressed bool
}
// NewReader creates a new Reader to read the contents of the
@@ -72,6 +86,12 @@ type ReaderObjectAttrs struct {
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
+//
+// By default, reads are made using the Cloud Storage XML API. We recommend
+// using the JSON API instead, which can be done by setting [WithJSONReads]
+// when calling [NewClient]. This ensures consistency with other client
+// operations, which all use JSON. JSON will become the default in a future
+// release.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
@@ -85,7 +105,14 @@ func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
// If the object's metadata property "Content-Encoding" is set to "gzip" or satisfies
// decompressive transcoding per https://cloud.google.com/storage/docs/transcoding
// that file will be served back whole, regardless of the requested range as
-// Google Cloud Storage dictates.
+// Google Cloud Storage dictates. If decompressive transcoding occurs,
+// [Reader.Attrs.Decompressed] will be true.
+//
+// By default, reads are made using the Cloud Storage XML API. We recommend
+// using the JSON API instead, which can be done by setting [WithJSONReads]
+// when calling [NewClient]. This ensures consistency with other client
+// operations, which all use JSON. JSON will become the default in a future
+// release.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
// This span covers the life of the reader. It is closed via the context
// in Reader.Close.
@@ -114,6 +141,7 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
encryptionKey: o.encryptionKey,
conds: o.conds,
readCompressed: o.readCompressed,
+ handle: &o.readHandle,
}
r, err = o.c.tc.NewRangeReader(ctx, params, opts...)
@@ -129,6 +157,49 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
return r, err
}
+// NewMultiRangeDownloader creates a multi-range reader for an object.
+// Must be called on a gRPC client created using [NewGRPCClient].
+//
+// This uses the gRPC-specific bi-directional read API, which is in private
+// preview; please contact your account manager if interested.
+func (o *ObjectHandle) NewMultiRangeDownloader(ctx context.Context) (mrd *MultiRangeDownloader, err error) {
+ // This span covers the life of the reader. It is closed via the context
+ // in Reader.Close.
+ ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.MultiRangeDownloader")
+
+ if err := o.validate(); err != nil {
+ return nil, err
+ }
+ if o.conds != nil {
+ if err := o.conds.validate("NewMultiRangeDownloader"); err != nil {
+ return nil, err
+ }
+ }
+
+ opts := makeStorageOpts(true, o.retry, o.userProject)
+
+ params := &newMultiRangeDownloaderParams{
+ bucket: o.bucket,
+ conds: o.conds,
+ encryptionKey: o.encryptionKey,
+ gen: o.gen,
+ object: o.object,
+ handle: &o.readHandle,
+ }
+
+ r, err := o.c.tc.NewMultiRangeDownloader(ctx, params, opts...)
+
+ // Pass the context so that the span can be closed in MultiRangeDownloader.Close(), or close the
+ // span now if there is an error.
+ if err == nil {
+ r.ctx = ctx
+ } else {
+ trace.EndSpan(ctx, err)
+ }
+
+ return r, err
+}
+
// decompressiveTranscoding returns true if the request was served decompressed
// and different than its original storage form. This happens when the "Content-Encoding"
// header is "gzip".
@@ -196,12 +267,16 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
- Attrs ReaderObjectAttrs
+ Attrs ReaderObjectAttrs
+ objectMetadata *map[string]string
+
seen, remain, size int64
checkCRC bool // Did we check the CRC? This is now only used by tests.
reader io.ReadCloser
ctx context.Context
+ mu sync.Mutex
+ handle *ReadHandle
}
// Close closes the Reader. It must be called when done reading.
@@ -272,3 +347,95 @@ func (r *Reader) CacheControl() string {
func (r *Reader) LastModified() (time.Time, error) {
return r.Attrs.LastModified, nil
}
+
+// Metadata returns user-provided metadata, in key/value pairs.
+//
+// It can be nil if no metadata is present, or if the client uses the JSON
+// API for downloads. Only the XML and gRPC APIs support getting
+// custom metadata via the Reader; for JSON make a separate call to
+// ObjectHandle.Attrs.
+func (r *Reader) Metadata() map[string]string {
+ if r.objectMetadata != nil {
+ return *r.objectMetadata
+ }
+ return nil
+}
+
+// ReadHandle returns the read handle associated with an object.
+// ReadHandle will be periodically refreshed.
+//
+// ReadHandle requires the gRPC-specific bi-directional read API, which is in
+// private preview; please contact your account manager if interested.
+// Note that this is only valid for gRPC and only with zonal buckets.
+func (r *Reader) ReadHandle() ReadHandle {
+ if r.handle == nil {
+ r.handle = &ReadHandle{}
+ }
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return (*r.handle)
+}
+
+// MultiRangeDownloader reads a Cloud Storage object.
+//
+// Typically, a MultiRangeDownloader opens a stream to which we can add
+// different ranges to read from the object.
+//
+// This API is currently in preview and is not yet available for general use.
+type MultiRangeDownloader struct {
+ Attrs ReaderObjectAttrs
+ reader multiRangeDownloader
+ ctx context.Context
+}
+
+type multiRangeDownloader interface {
+ add(output io.Writer, offset, limit int64, callback func(int64, int64, error))
+ wait()
+ close() error
+ getHandle() []byte
+}
+
+// Add adds a new range to MultiRangeDownloader.
+//
+// The offset for the first byte to return in the read, relative to the start
+// of the object.
+//
+// A negative offset value will be interpreted as the number of bytes from the
+// end of the object to be returned. Requesting a negative offset with magnitude
+// larger than the size of the object will return the entire object. An offset
+// larger than the size of the object will result in an OutOfRange error.
+//
+// A limit of zero indicates that there is no limit, and a negative limit will
+// cause an error.
+//
+// This will initiate the read range but is non-blocking; call callback to
+// process the result. Add is thread-safe and can be called simultaneously
+// from different goroutines.
+func (mrd *MultiRangeDownloader) Add(output io.Writer, offset, length int64, callback func(int64, int64, error)) {
+ mrd.reader.add(output, offset, length, callback)
+}
+
+// Close the MultiRangeDownloader. It must be called when done reading.
+// Adding new ranges after this has been called will cause an error.
+//
+// This will immediately close the stream and can result in a
+// "stream closed early" error if a response for a range is still not processed.
+// Call [MultiRangeDownloader.Wait] to avoid this error.
+func (mrd *MultiRangeDownloader) Close() error {
+ err := mrd.reader.close()
+ trace.EndSpan(mrd.ctx, err)
+ return err
+}
+
+// Wait for all the responses to process on the stream.
+// Adding new ranges after this has been called will cause an error.
+// Wait will wait for all callbacks to finish.
+func (mrd *MultiRangeDownloader) Wait() {
+ mrd.reader.wait()
+}
+
+// GetHandle returns the read handle. This can be used to further speed up the
+// follow up read if the same object is read through a different stream.
+func (mrd *MultiRangeDownloader) GetHandle() []byte {
+ return mrd.reader.getHandle()
+}
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index 0c335f38a..9c40ca1b4 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -43,6 +43,9 @@ import (
"cloud.google.com/go/storage/internal"
"cloud.google.com/go/storage/internal/apiv2/storagepb"
"github.com/googleapis/gax-go/v2"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
@@ -50,6 +53,8 @@ import (
raw "google.golang.org/api/storage/v1"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
+ "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats/opentelemetry"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/known/fieldmaskpb"
@@ -67,8 +72,8 @@ var (
// errMethodNotSupported indicates that the method called is not currently supported by the client.
// TODO: Export this error when launching the transport-agnostic client.
errMethodNotSupported = errors.New("storage: method is not currently supported")
- // errMethodNotValid indicates that given HTTP method is not valid.
- errMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
+ // errSignedURLMethodNotValid indicates that given HTTP method is not valid.
+ errSignedURLMethodNotValid = fmt.Errorf("storage: HTTP method should be one of %v", reflect.ValueOf(signedURLMethods).MapKeys())
)
var userAgent = fmt.Sprintf("gcloud-golang-storage/%s", internal.Version)
@@ -117,10 +122,6 @@ type Client struct {
// tc is the transport-agnostic client implemented with either gRPC or HTTP.
tc storageClient
- // useGRPC flags whether the client uses gRPC. This is needed while the
- // integration piece is only partially complete.
- // TODO: remove before merging to main.
- useGRPC bool
}
// NewClient creates a new Google Cloud Storage client using the HTTP transport.
@@ -218,14 +219,11 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
// NewGRPCClient creates a new Storage client using the gRPC transport and API.
// Client methods which have not been implemented in gRPC will return an error.
-// In particular, methods for Cloud Pub/Sub notifications are not supported.
+// In particular, methods for Cloud Pub/Sub notifications, Service Account HMAC
+// keys, and ServiceAccount are not supported.
// Using a non-default universe domain is also not supported with the Storage
// gRPC client.
//
-// The storage gRPC API is still in preview and not yet publicly available.
-// If you would like to use the API, please first contact your GCP account rep to
-// request access. The API may be subject to breaking changes.
-//
// Clients should be reused instead of created as needed. The methods of Client
// are safe for concurrent use by multiple goroutines.
//
@@ -237,7 +235,63 @@ func NewGRPCClient(ctx context.Context, opts ...option.ClientOption) (*Client, e
return nil, err
}
- return &Client{tc: tc, useGRPC: true}, nil
+ return &Client{tc: tc}, nil
+}
+
+// CheckDirectConnectivitySupported checks if gRPC direct connectivity
+// is available for a specific bucket from the environment where the client
+// is running. A `nil` error indicates that Direct Connectivity was detected.
+// Direct connectivity is expected to be available when running from inside
+// GCP and connecting to a bucket in the same region.
+//
+// Experimental helper that's subject to change.
+//
+// You can pass in [option.ClientOption] you plan on passing to [NewGRPCClient]
+func CheckDirectConnectivitySupported(ctx context.Context, bucket string, opts ...option.ClientOption) error {
+ view := metric.NewView(
+ metric.Instrument{
+ Name: "grpc.client.attempt.duration",
+ Kind: metric.InstrumentKindHistogram,
+ },
+ metric.Stream{AttributeFilter: attribute.NewAllowKeysFilter("grpc.lb.locality")},
+ )
+ mr := metric.NewManualReader()
+ provider := metric.NewMeterProvider(metric.WithReader(mr), metric.WithView(view))
+ // Provider handles shutting down ManualReader
+ defer provider.Shutdown(ctx)
+ mo := opentelemetry.MetricsOptions{
+ MeterProvider: provider,
+ Metrics: stats.NewMetrics("grpc.client.attempt.duration"),
+ OptionalLabels: []string{"grpc.lb.locality"},
+ }
+ combinedOpts := append(opts, WithDisabledClientMetrics(), option.WithGRPCDialOption(opentelemetry.DialOption(opentelemetry.Options{MetricsOptions: mo})))
+ client, err := NewGRPCClient(ctx, combinedOpts...)
+ if err != nil {
+ return fmt.Errorf("storage.NewGRPCClient: %w", err)
+ }
+ defer client.Close()
+ if _, err = client.Bucket(bucket).Attrs(ctx); err != nil {
+ return fmt.Errorf("Bucket.Attrs: %w", err)
+ }
+ // Call manual reader to collect metric
+ rm := metricdata.ResourceMetrics{}
+ if err = mr.Collect(context.Background(), &rm); err != nil {
+ return fmt.Errorf("ManualReader.Collect: %w", err)
+ }
+ for _, sm := range rm.ScopeMetrics {
+ for _, m := range sm.Metrics {
+ if m.Name == "grpc.client.attempt.duration" {
+ hist := m.Data.(metricdata.Histogram[float64])
+ for _, d := range hist.DataPoints {
+ v, present := d.Attributes.Value("grpc.lb.locality")
+ if present && v.AsString() != "" && v.AsString() != "{}" {
+ return nil
+ }
+ }
+ }
+ }
+ }
+ return errors.New("storage: direct connectivity not detected")
}
// Close closes the Client.
@@ -635,7 +689,7 @@ func validateOptions(opts *SignedURLOptions, now time.Time) error {
}
opts.Method = strings.ToUpper(opts.Method)
if _, ok := signedURLMethods[opts.Method]; !ok {
- return errMethodNotValid
+ return errSignedURLMethodNotValid
}
if opts.Expires.IsZero() {
return errors.New("storage: missing required expires option")
@@ -883,6 +937,9 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
return u.String(), nil
}
+// ReadHandle associated with the object. This is periodically refreshed.
+type ReadHandle []byte
+
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
// Use BucketHandle.Object to get a handle.
type ObjectHandle struct {
@@ -898,6 +955,23 @@ type ObjectHandle struct {
retry *retryConfig
overrideRetention *bool
softDeleted bool
+ readHandle ReadHandle
+}
+
+// ReadHandle returns a new ObjectHandle that uses the ReadHandle to open the objects.
+//
+// Objects that have already been opened can be opened an additional time,
+// using a read handle returned in the response, at lower latency.
+// This produces the exact same object and generation and does not check if
+// the generation is still the newest one.
+// Note that this will be a noop unless it's set on a gRPC client on buckets with
+// bi-directional read API access.
+// Also note that you can get a ReadHandle only via calling reader.ReadHandle() on a
+// previous read of the same object.
+func (o *ObjectHandle) ReadHandle(r ReadHandle) *ObjectHandle {
+ o2 := *o
+ o2.readHandle = r
+ return &o2
}
// ACL provides access to the object's access control list.
@@ -975,7 +1049,8 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
gen: o.gen,
encryptionKey: o.encryptionKey,
conds: o.conds,
- overrideRetention: o.overrideRetention}, opts...)
+ overrideRetention: o.overrideRetention,
+ }, opts...)
}
// BucketName returns the name of the bucket.
@@ -1101,6 +1176,38 @@ func (o *ObjectHandle) Restore(ctx context.Context, opts *RestoreOptions) (*Obje
}, sOpts...)
}
+// Move changes the name of the object to the destination name.
+// It can only be used to rename an object within the same bucket. The
+// bucket must have [HierarchicalNamespace] enabled to use this method.
+//
+// Any preconditions set on the ObjectHandle will be applied for the source
+// object. Set preconditions on the destination object using
+// [MoveObjectDestination.Conditions].
+//
+// This API is in preview and is not yet publicly available.
+func (o *ObjectHandle) Move(ctx context.Context, destination MoveObjectDestination) (*ObjectAttrs, error) {
+ if err := o.validate(); err != nil {
+ return nil, err
+ }
+
+ sOpts := makeStorageOpts(true, o.retry, o.userProject)
+ return o.c.tc.MoveObject(ctx, &moveObjectParams{
+ bucket: o.bucket,
+ srcObject: o.object,
+ dstObject: destination.Object,
+ srcConds: o.conds,
+ dstConds: destination.Conditions,
+ encryptionKey: o.encryptionKey,
+ }, sOpts...)
+}
+
+// MoveObjectDestination provides the destination object name and (optional) preconditions
+// for [ObjectHandle.Move].
+type MoveObjectDestination struct {
+ Object string
+ Conditions *Conditions
+}
+
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
@@ -1698,7 +1805,6 @@ type Query struct {
// IncludeFoldersAsPrefixes includes Folders and Managed Folders in the set of
// prefixes returned by the query. Only applicable if Delimiter is set to /.
- // IncludeFoldersAsPrefixes is not yet implemented in the gRPC API.
IncludeFoldersAsPrefixes bool
// SoftDeleted indicates whether to list soft-deleted objects.
@@ -2001,56 +2107,91 @@ func applyConds(method string, gen int64, conds *Conditions, call interface{}) e
return nil
}
-func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
+// applySourceConds modifies the provided call using the conditions in conds.
+// call is something that quacks like a *raw.WhateverCall.
+// This is specifically for calls like Rewrite and Move which have a source and destination
+// object.
+func applySourceConds(method string, gen int64, conds *Conditions, call interface{}) error {
+ cval := reflect.ValueOf(call)
if gen >= 0 {
- call.SourceGeneration(gen)
+ if !setSourceGeneration(cval, gen) {
+ return fmt.Errorf("storage: %s: source generation not supported", method)
+ }
}
if conds == nil {
return nil
}
- if err := conds.validate("CopyTo source"); err != nil {
+ if err := conds.validate(method); err != nil {
return err
}
switch {
case conds.GenerationMatch != 0:
- call.IfSourceGenerationMatch(conds.GenerationMatch)
+ if !setIfSourceGenerationMatch(cval, conds.GenerationMatch) {
+ return fmt.Errorf("storage: %s: ifSourceGenerationMatch not supported", method)
+ }
case conds.GenerationNotMatch != 0:
- call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
+ if !setIfSourceGenerationNotMatch(cval, conds.GenerationNotMatch) {
+ return fmt.Errorf("storage: %s: ifSourceGenerationNotMatch not supported", method)
+ }
case conds.DoesNotExist:
- call.IfSourceGenerationMatch(0)
+ if !setIfSourceGenerationMatch(cval, int64(0)) {
+ return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
+ }
}
switch {
case conds.MetagenerationMatch != 0:
- call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
+ if !setIfSourceMetagenerationMatch(cval, conds.MetagenerationMatch) {
+ return fmt.Errorf("storage: %s: ifSourceMetagenerationMatch not supported", method)
+ }
case conds.MetagenerationNotMatch != 0:
- call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
+ if !setIfSourceMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
+ return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method)
+ }
}
return nil
}
-func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.RewriteObjectRequest) error {
+// applySourceCondsProto validates and attempts to set the conditions on a protobuf
+// message using protobuf reflection. This is specifically for RPCs which have separate
+// preconditions for source and destination objects (e.g. Rewrite and Move).
+func applySourceCondsProto(method string, gen int64, conds *Conditions, msg proto.Message) error {
+ rmsg := msg.ProtoReflect()
+
if gen >= 0 {
- call.SourceGeneration = gen
+ if !setConditionProtoField(rmsg, "source_generation", gen) {
+ return fmt.Errorf("storage: %s: generation not supported", method)
+ }
}
if conds == nil {
return nil
}
- if err := conds.validate("CopyTo source"); err != nil {
+ if err := conds.validate(method); err != nil {
return err
}
+
switch {
case conds.GenerationMatch != 0:
- call.IfSourceGenerationMatch = proto.Int64(conds.GenerationMatch)
+ if !setConditionProtoField(rmsg, "if_source_generation_match", conds.GenerationMatch) {
+ return fmt.Errorf("storage: %s: ifSourceGenerationMatch not supported", method)
+ }
case conds.GenerationNotMatch != 0:
- call.IfSourceGenerationNotMatch = proto.Int64(conds.GenerationNotMatch)
+ if !setConditionProtoField(rmsg, "if_source_generation_not_match", conds.GenerationNotMatch) {
+ return fmt.Errorf("storage: %s: ifSourceGenerationNotMatch not supported", method)
+ }
case conds.DoesNotExist:
- call.IfSourceGenerationMatch = proto.Int64(0)
+ if !setConditionProtoField(rmsg, "if_source_generation_match", int64(0)) {
+ return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
+ }
}
switch {
case conds.MetagenerationMatch != 0:
- call.IfSourceMetagenerationMatch = proto.Int64(conds.MetagenerationMatch)
+ if !setConditionProtoField(rmsg, "if_source_metageneration_match", conds.MetagenerationMatch) {
+ return fmt.Errorf("storage: %s: ifSourceMetagenerationMatch not supported", method)
+ }
case conds.MetagenerationNotMatch != 0:
- call.IfSourceMetagenerationNotMatch = proto.Int64(conds.MetagenerationNotMatch)
+ if !setConditionProtoField(rmsg, "if_source_metageneration_not_match", conds.MetagenerationNotMatch) {
+ return fmt.Errorf("storage: %s: ifSourceMetagenerationNotMatch not supported", method)
+ }
}
return nil
}
@@ -2089,6 +2230,27 @@ func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value)
}
+// More methods to set source object precondition fields (used by Rewrite and Move APIs).
+func setSourceGeneration(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("SourceGeneration"), value)
+}
+
+func setIfSourceGenerationMatch(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("IfSourceGenerationMatch"), value)
+}
+
+func setIfSourceGenerationNotMatch(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("IfSourceGenerationNotMatch"), value)
+}
+
+func setIfSourceMetagenerationMatch(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("IfSourceMetagenerationMatch"), value)
+}
+
+func setIfSourceMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
+ return setCondition(cval.MethodByName("IfSourceMetagenerationNotMatch"), value)
+}
+
func setCondition(setter reflect.Value, value interface{}) bool {
if setter.IsValid() {
setter.Call([]reflect.Value{reflect.ValueOf(value)})
@@ -2353,10 +2515,10 @@ func toProtoChecksums(sendCRC32C bool, attrs *ObjectAttrs) *storagepb.ObjectChec
}
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
+// Note: gRPC is not supported.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
o := makeStorageOpts(true, c.retry, "")
return c.tc.GetServiceAccount(ctx, projectID, o...)
-
}
// bucketResourceName formats the given project ID and bucketResourceName ID
diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go
index 43a0f0d10..ae8f6a639 100644
--- a/vendor/cloud.google.com/go/storage/writer.go
+++ b/vendor/cloud.google.com/go/storage/writer.go
@@ -88,11 +88,29 @@ type Writer struct {
// cancellation.
ChunkRetryDeadline time.Duration
+ // ChunkTransferTimeout sets a per-chunk request timeout for resumable uploads.
+ //
+ // For resumable uploads, the Writer will terminate the request and attempt a retry
+ // if the request to upload a particular chunk stalls for longer than this duration. Retries
+ // may continue until the ChunkRetryDeadline is reached.
+ //
+ // The default value is no timeout.
+ ChunkTransferTimeout time.Duration
+
// ForceEmptyContentType is an optional parameter that is used to disable
// auto-detection of Content-Type. By default, if a blank Content-Type
// is provided, then gax.DetermineContentType is called to sniff the type.
ForceEmptyContentType bool
+ // Append is a parameter to indicate whether the writer should use appendable
+ // object semantics for the new object generation. Appendable objects are
+ // visible on the first Write() call, and can be appended to until they are
+ // finalized. The object is finalized on a call to Close().
+ //
+ // Append is only supported for gRPC. This feature is in preview and is not
+ // yet available for general use.
+ Append bool
+
// ProgressFunc can be used to monitor the progress of a large write
// operation. If ProgressFunc is not nil and writing requires multiple
// calls to the underlying service (see
@@ -188,11 +206,13 @@ func (w *Writer) openWriter() (err error) {
ctx: w.ctx,
chunkSize: w.ChunkSize,
chunkRetryDeadline: w.ChunkRetryDeadline,
+ chunkTransferTimeout: w.ChunkTransferTimeout,
bucket: w.o.bucket,
attrs: &w.ObjectAttrs,
conds: w.o.conds,
encryptionKey: w.o.encryptionKey,
sendCRC32C: w.SendCRC32C,
+ append: w.Append,
donec: w.donec,
setError: w.error,
progress: w.progress,
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
index 1a9cedbaf..d99d53093 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/CHANGELOG.md
@@ -1,5 +1,67 @@
# Release History
+## 1.18.1 (2025-07-10)
+
+### Bugs Fixed
+
+* Fixed incorrect request/response logging try info when logging a request that's being retried.
+* Fixed a data race in `ResourceID.String()`
+
+## 1.18.0 (2025-04-03)
+
+### Features Added
+
+* Added `AccessToken.RefreshOn` and updated `BearerTokenPolicy` to consider nonzero values of it when deciding whether to request a new token
+
+## 1.17.1 (2025-03-20)
+
+### Other Changes
+
+* Upgraded to Go 1.23
+* Upgraded dependencies
+
+## 1.17.0 (2025-01-07)
+
+### Features Added
+
+* Added field `OperationLocationResultPath` to `runtime.NewPollerOptions[T]` for LROs that use the `Operation-Location` pattern.
+* Support `encoding.TextMarshaler` and `encoding.TextUnmarshaler` interfaces in `arm.ResourceID`.
+
+## 1.16.0 (2024-10-17)
+
+### Features Added
+
+* Added field `Kind` to `runtime.StartSpanOptions` to allow a kind to be set when starting a span.
+
+### Bugs Fixed
+
+* `BearerTokenPolicy` now rewinds request bodies before retrying
+
+## 1.15.0 (2024-10-14)
+
+### Features Added
+
+* `BearerTokenPolicy` handles CAE claims challenges
+
+### Bugs Fixed
+
+* Omit the `ResponseError.RawResponse` field from JSON marshaling so instances can be marshaled.
+* Fixed an integer overflow in the retry policy.
+
+### Other Changes
+
+* Update dependencies.
+
+## 1.14.0 (2024-08-07)
+
+### Features Added
+
+* Added field `Attributes` to `runtime.StartSpanOptions` to simplify creating spans with attributes.
+
+### Other Changes
+
+* Include the HTTP verb and URL in `log.EventRetryPolicy` log entries so it's clear which operation is being retried.
+
## 1.13.0 (2024-07-16)
### Features Added
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
index 00f2d5a0a..a08d3d0ff 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/internal/resource/resource_identifier.go
@@ -27,7 +27,8 @@ var RootResourceID = &ResourceID{
}
// ResourceID represents a resource ID such as `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myRg`.
-// Don't create this type directly, use ParseResourceID instead.
+// Don't create this type directly, use [ParseResourceID] instead. Fields are considered immutable and shouldn't be
+// modified after creation.
type ResourceID struct {
// Parent is the parent ResourceID of this instance.
// Can be nil if there is no parent.
@@ -85,29 +86,22 @@ func ParseResourceID(id string) (*ResourceID, error) {
// String returns the string of the ResourceID
func (id *ResourceID) String() string {
- if len(id.stringValue) > 0 {
- return id.stringValue
- }
-
- if id.Parent == nil {
- return ""
- }
+ return id.stringValue
+}
- builder := strings.Builder{}
- builder.WriteString(id.Parent.String())
+// MarshalText returns a textual representation of the ResourceID
+func (id *ResourceID) MarshalText() ([]byte, error) {
+ return []byte(id.String()), nil
+}
- if id.isChild {
- builder.WriteString(fmt.Sprintf("/%s", id.ResourceType.lastType()))
- if len(id.Name) > 0 {
- builder.WriteString(fmt.Sprintf("/%s", id.Name))
- }
- } else {
- builder.WriteString(fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name))
+// UnmarshalText decodes the textual representation of a ResourceID
+func (id *ResourceID) UnmarshalText(text []byte) error {
+ newId, err := ParseResourceID(string(text))
+ if err != nil {
+ return err
}
-
- id.stringValue = builder.String()
-
- return id.stringValue
+ *id = *newId
+ return nil
}
func newResourceID(parent *ResourceID, resourceTypeName string, resourceName string) *ResourceID {
@@ -170,6 +164,15 @@ func (id *ResourceID) init(parent *ResourceID, resourceType ResourceType, name s
id.isChild = isChild
id.ResourceType = resourceType
id.Name = name
+ id.stringValue = id.Parent.String()
+ if id.isChild {
+ id.stringValue += "/" + id.ResourceType.lastType()
+ if id.Name != "" {
+ id.stringValue += "/" + id.Name
+ }
+ } else {
+ id.stringValue += fmt.Sprintf("/providers/%s/%s/%s", id.ResourceType.Namespace, id.ResourceType.Type, id.Name)
+ }
}
func appendNext(parent *ResourceID, parts []string, id string) (*ResourceID, error) {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
index 765fbc684..8ad3d5400 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime/policy_bearer_token.go
@@ -5,7 +5,6 @@ package runtime
import (
"context"
- "encoding/base64"
"fmt"
"net/http"
"strings"
@@ -66,31 +65,16 @@ func NewBearerTokenPolicy(cred azcore.TokenCredential, opts *armpolicy.BearerTok
p.btp = azruntime.NewBearerTokenPolicy(cred, opts.Scopes, &azpolicy.BearerTokenOptions{
InsecureAllowCredentialWithHTTP: opts.InsecureAllowCredentialWithHTTP,
AuthorizationHandler: azpolicy.AuthorizationHandler{
- OnChallenge: p.onChallenge,
- OnRequest: p.onRequest,
+ OnRequest: p.onRequest,
},
})
return p
}
-func (b *BearerTokenPolicy) onChallenge(req *azpolicy.Request, res *http.Response, authNZ func(azpolicy.TokenRequestOptions) error) error {
- challenge := res.Header.Get(shared.HeaderWWWAuthenticate)
- claims, err := parseChallenge(challenge)
- if err != nil {
- // the challenge contains claims we can't parse
- return err
- } else if claims != "" {
- // request a new token having the specified claims, send the request again
- return authNZ(azpolicy.TokenRequestOptions{Claims: claims, EnableCAE: true, Scopes: b.scopes})
- }
- // auth challenge didn't include claims, so this is a simple authorization failure
- return azruntime.NewResponseError(res)
-}
-
// onRequest authorizes requests with one or more bearer tokens
func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolicy.TokenRequestOptions) error) error {
// authorize the request with a token for the primary tenant
- err := authNZ(azpolicy.TokenRequestOptions{EnableCAE: true, Scopes: b.scopes})
+ err := authNZ(azpolicy.TokenRequestOptions{Scopes: b.scopes})
if err != nil || len(b.auxResources) == 0 {
return err
}
@@ -116,31 +100,3 @@ func (b *BearerTokenPolicy) onRequest(req *azpolicy.Request, authNZ func(azpolic
func (b *BearerTokenPolicy) Do(req *azpolicy.Request) (*http.Response, error) {
return b.btp.Do(req)
}
-
-// parseChallenge parses claims from an authentication challenge issued by ARM so a client can request a token
-// that will satisfy conditional access policies. It returns a non-nil error when the given value contains
-// claims it can't parse. If the value contains no claims, it returns an empty string and a nil error.
-func parseChallenge(wwwAuthenticate string) (string, error) {
- claims := ""
- var err error
- for _, param := range strings.Split(wwwAuthenticate, ",") {
- if _, after, found := strings.Cut(param, "claims="); found {
- if claims != "" {
- // The header contains multiple challenges, at least two of which specify claims. The specs allow this
- // but it's unclear what a client should do in this case and there's as yet no concrete example of it.
- err = fmt.Errorf("found multiple claims challenges in %q", wwwAuthenticate)
- break
- }
- // trim stuff that would get an error from RawURLEncoding; claims may or may not be padded
- claims = strings.Trim(after, `\"=`)
- // we don't return this error because it's something unhelpful like "illegal base64 data at input byte 42"
- if b, decErr := base64.RawURLEncoding.DecodeString(claims); decErr == nil {
- claims = string(b)
- } else {
- err = fmt.Errorf("failed to parse claims from %q", wwwAuthenticate)
- break
- }
- }
- }
- return claims, err
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
index 99348527b..b81b62103 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/ci.yml
@@ -27,3 +27,5 @@ extends:
template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
ServiceDirectory: azcore
+ TriggeringPaths:
+ - /eng/
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
index 17bd50c67..03cb227d0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/errors.go
@@ -11,4 +11,7 @@ import "github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
// ResponseError is returned when a request is made to a service and
// the service returns a non-success HTTP status code.
// Use errors.As() to access this type in the error chain.
+//
+// When marshaling instances, the RawResponse field will be omitted.
+// However, the contents returned by Error() will be preserved.
type ResponseError = exported.ResponseError
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
index f2b296b6d..460170034 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/exported.go
@@ -47,8 +47,13 @@ func HasStatusCode(resp *http.Response, statusCodes ...int) bool {
// AccessToken represents an Azure service bearer access token with expiry information.
// Exported as azcore.AccessToken.
type AccessToken struct {
- Token string
+ // Token is the access token
+ Token string
+ // ExpiresOn indicates when the token expires
ExpiresOn time.Time
+ // RefreshOn is a suggested time to refresh the token.
+ // Clients should ignore this value when it's zero.
+ RefreshOn time.Time
}
// TokenRequestOptions contain specific parameter that may be used by credentials types when attempting to get a token.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
index e3e2d4e58..9b3f5badb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/request.go
@@ -71,7 +71,8 @@ func (ov opValues) get(value any) bool {
// NewRequestFromRequest creates a new policy.Request with an existing *http.Request
// Exported as runtime.NewRequestFromRequest().
func NewRequestFromRequest(req *http.Request) (*Request, error) {
- policyReq := &Request{req: req}
+ // populate values so that the same instance is propagated across policies
+ policyReq := &Request{req: req, values: opValues{}}
if req.Body != nil {
// we can avoid a body copy here if the underlying stream is already a
@@ -117,7 +118,8 @@ func NewRequest(ctx context.Context, httpMethod string, endpoint string) (*Reque
if !(req.URL.Scheme == "http" || req.URL.Scheme == "https") {
return nil, fmt.Errorf("unsupported protocol scheme %s", req.URL.Scheme)
}
- return &Request{req: req}, nil
+ // populate values so that the same instance is propagated across policies
+ return &Request{req: req, values: opValues{}}, nil
}
// Body returns the original body specified when the Request was created.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
index 08a954587..8aec256bd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported/response_error.go
@@ -117,12 +117,18 @@ type ResponseError struct {
StatusCode int
// RawResponse is the underlying HTTP response.
- RawResponse *http.Response
+ RawResponse *http.Response `json:"-"`
+
+ errMsg string
}
// Error implements the error interface for type ResponseError.
// Note that the message contents are not contractual and can change over time.
func (e *ResponseError) Error() string {
+ if e.errMsg != "" {
+ return e.errMsg
+ }
+
const separator = "--------------------------------------------------------------------------------"
// write the request method and URL with response status code
msg := &bytes.Buffer{}
@@ -163,5 +169,33 @@ func (e *ResponseError) Error() string {
}
fmt.Fprintln(msg, separator)
- return msg.String()
+ e.errMsg = msg.String()
+ return e.errMsg
+}
+
+// internal type used for marshaling/unmarshaling
+type responseError struct {
+ ErrorCode string `json:"errorCode"`
+ StatusCode int `json:"statusCode"`
+ ErrorMessage string `json:"errorMessage"`
+}
+
+func (e ResponseError) MarshalJSON() ([]byte, error) {
+ return json.Marshal(responseError{
+ ErrorCode: e.ErrorCode,
+ StatusCode: e.StatusCode,
+ ErrorMessage: e.Error(),
+ })
+}
+
+func (e *ResponseError) UnmarshalJSON(data []byte) error {
+ re := responseError{}
+ if err := json.Unmarshal(data, &re); err != nil {
+ return err
+ }
+
+ e.ErrorCode = re.ErrorCode
+ e.StatusCode = re.StatusCode
+ e.errMsg = re.ErrorMessage
+ return nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
index 03699fd76..f49633189 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/pollers/op/op.go
@@ -40,12 +40,13 @@ type Poller[T any] struct {
OrigURL string `json:"origURL"`
Method string `json:"method"`
FinalState pollers.FinalStateVia `json:"finalState"`
+ ResultPath string `json:"resultPath"`
CurState string `json:"state"`
}
// New creates a new Poller from the provided initial response.
// Pass nil for response to create an empty Poller for rehydration.
-func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia) (*Poller[T], error) {
+func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.FinalStateVia, resultPath string) (*Poller[T], error) {
if resp == nil {
log.Write(log.EventLRO, "Resuming Operation-Location poller.")
return &Poller[T]{pl: pl}, nil
@@ -82,6 +83,7 @@ func New[T any](pl exported.Pipeline, resp *http.Response, finalState pollers.Fi
OrigURL: resp.Request.URL.String(),
Method: resp.Request.Method,
FinalState: finalState,
+ ResultPath: resultPath,
CurState: curState,
}, nil
}
@@ -116,10 +118,6 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
var req *exported.Request
var err error
- // when the payload is included with the status monitor on
- // terminal success it's in the "result" JSON property
- payloadPath := "result"
-
if p.FinalState == pollers.FinalStateViaLocation && p.LocURL != "" {
req, err = exported.NewRequest(ctx, http.MethodGet, p.LocURL)
} else if rl, rlErr := poller.GetResourceLocation(p.resp); rlErr != nil && !errors.Is(rlErr, poller.ErrNoBody) {
@@ -138,7 +136,7 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
// if a final GET request has been created, execute it
if req != nil {
// no JSON path when making a final GET request
- payloadPath = ""
+ p.ResultPath = ""
resp, err := p.pl.Do(req)
if err != nil {
return err
@@ -146,5 +144,5 @@ func (p *Poller[T]) Result(ctx context.Context, out *T) error {
p.resp = resp
}
- return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), payloadPath, out)
+ return pollers.ResultHelper(p.resp, poller.Failed(p.CurState), p.ResultPath, out)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
index e5b28a9b1..23788b14d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/shared/constants.go
@@ -40,5 +40,5 @@ const (
Module = "azcore"
// Version is the semantic version (see http://semver.org) of this module.
- Version = "v1.13.0"
+ Version = "v1.18.1"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
index 8d9845358..368a2199e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/policy/policy.go
@@ -103,7 +103,7 @@ type RetryOptions struct {
// RetryDelay specifies the initial amount of delay to use before retrying an operation.
// The value is used only if the HTTP response does not contain a Retry-After header.
// The delay increases exponentially with each retry up to the maximum specified by MaxRetryDelay.
- // The default value is four seconds. A value less than zero means no delay between retries.
+ // The default value is 800 milliseconds. A value less than zero means no delay between retries.
RetryDelay time.Duration
// MaxRetryDelay specifies the maximum delay allowed before retrying an operation.
@@ -161,19 +161,20 @@ type BearerTokenOptions struct {
// AuthorizationHandler allows SDK developers to insert custom logic that runs when BearerTokenPolicy must authorize a request.
type AuthorizationHandler struct {
- // OnRequest is called each time the policy receives a request. Its func parameter authorizes the request with a token
- // from the policy's given credential. Implementations that need to perform I/O should use the Request's context,
- // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't
- // send the request. When OnRequest is nil, the policy follows its default behavior, authorizing the request with a
- // token from its credential according to its configuration.
+ // OnRequest provides TokenRequestOptions the policy can use to acquire a token for a request. The policy calls OnRequest
+ // whenever it needs a token and may call it multiple times for the same request. Its func parameter authorizes the request
+ // with a token from the policy's credential. Implementations that need to perform I/O should use the Request's context,
+ // available from Request.Raw().Context(). When OnRequest returns an error, the policy propagates that error and doesn't send
+ // the request. When OnRequest is nil, the policy follows its default behavior, which is to authorize the request with a token
+ // from its credential according to its configuration.
OnRequest func(*Request, func(TokenRequestOptions) error) error
- // OnChallenge is called when the policy receives a 401 response, allowing the AuthorizationHandler to re-authorize the
- // request according to an authentication challenge (the Response's WWW-Authenticate header). OnChallenge is responsible
- // for parsing parameters from the challenge. Its func parameter will authorize the request with a token from the policy's
- // given credential. Implementations that need to perform I/O should use the Request's context, available from
- // Request.Raw().Context(). When OnChallenge returns nil, the policy will send the request again. When OnChallenge is nil,
- // the policy will return any 401 response to the client.
+ // OnChallenge allows clients to implement custom HTTP authentication challenge handling. BearerTokenPolicy calls it upon
+ // receiving a 401 response containing multiple Bearer challenges or a challenge BearerTokenPolicy itself can't handle.
+ // OnChallenge is responsible for parsing challenge(s) (the Response's WWW-Authenticate header) and reauthorizing the
+ // Request accordingly. Its func argument authorizes the Request with a token from the policy's credential using the given
+ // TokenRequestOptions. OnChallenge should honor the Request's context, available from Request.Raw().Context(). When
+ // OnChallenge returns nil, the policy will send the Request again.
OnChallenge func(*Request, *http.Response, func(TokenRequestOptions) error) error
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
index b960cff0b..c66fc0a90 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/pager.go
@@ -32,6 +32,7 @@ type PagingHandler[T any] struct {
}
// Pager provides operations for iterating over paged responses.
+// Methods on this type are not safe for concurrent use.
type Pager[T any] struct {
current *T
handler PagingHandler[T]
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
index cb2a69528..1950a2e5b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_bearer_token.go
@@ -4,9 +4,12 @@
package runtime
import (
+ "encoding/base64"
"errors"
"net/http"
+ "regexp"
"strings"
+ "sync"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/internal/exported"
@@ -17,6 +20,11 @@ import (
)
// BearerTokenPolicy authorizes requests with bearer tokens acquired from a TokenCredential.
+// It handles [Continuous Access Evaluation] (CAE) challenges. Clients needing to handle
+// additional authentication challenges, or needing more control over authorization, should
+// provide a [policy.AuthorizationHandler] in [policy.BearerTokenOptions].
+//
+// [Continuous Access Evaluation]: https://learn.microsoft.com/entra/identity/conditional-access/concept-continuous-access-evaluation
type BearerTokenPolicy struct {
// mainResource is the resource to be retreived using the tenant specified in the credential
mainResource *temporal.Resource[exported.AccessToken, acquiringResourceState]
@@ -43,6 +51,15 @@ func acquire(state acquiringResourceState) (newResource exported.AccessToken, ne
return tk, tk.ExpiresOn, nil
}
+// shouldRefresh determines whether the token should be refreshed. It's a variable so tests can replace it.
+var shouldRefresh = func(tk exported.AccessToken, _ acquiringResourceState) bool {
+ if tk.RefreshOn.IsZero() {
+ return tk.ExpiresOn.Add(-5 * time.Minute).Before(time.Now())
+ }
+ // no offset in this case because the authority suggested a refresh window--between RefreshOn and ExpiresOn
+ return tk.RefreshOn.Before(time.Now())
+}
+
// NewBearerTokenPolicy creates a policy object that authorizes requests with bearer tokens.
// cred: an azcore.TokenCredential implementation such as a credential object from azidentity
// scopes: the list of permission scopes required for the token.
@@ -51,11 +68,24 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
if opts == nil {
opts = &policy.BearerTokenOptions{}
}
+ ah := opts.AuthorizationHandler
+ if ah.OnRequest == nil {
+ // Set a default OnRequest that simply requests a token with the given scopes. OnChallenge
+ // doesn't get a default so the policy can use a nil check to determine whether the caller
+ // provided an implementation.
+ ah.OnRequest = func(_ *policy.Request, authNZ func(policy.TokenRequestOptions) error) error {
+ // authNZ sets EnableCAE: true in all cases, no need to duplicate that here
+ return authNZ(policy.TokenRequestOptions{Scopes: scopes})
+ }
+ }
+ mr := temporal.NewResourceWithOptions(acquire, temporal.ResourceOptions[exported.AccessToken, acquiringResourceState]{
+ ShouldRefresh: shouldRefresh,
+ })
return &BearerTokenPolicy{
- authzHandler: opts.AuthorizationHandler,
+ authzHandler: ah,
cred: cred,
scopes: scopes,
- mainResource: temporal.NewResource(acquire),
+ mainResource: mr,
allowHTTP: opts.InsecureAllowCredentialWithHTTP,
}
}
@@ -63,6 +93,7 @@ func NewBearerTokenPolicy(cred exported.TokenCredential, scopes []string, opts *
// authenticateAndAuthorize returns a function which authorizes req with a token from the policy's credential
func (b *BearerTokenPolicy) authenticateAndAuthorize(req *policy.Request) func(policy.TokenRequestOptions) error {
return func(tro policy.TokenRequestOptions) error {
+ tro.EnableCAE = true
as := acquiringResourceState{p: b, req: req, tro: tro}
tk, err := b.mainResource.Get(as)
if err != nil {
@@ -86,12 +117,7 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
return nil, err
}
- var err error
- if b.authzHandler.OnRequest != nil {
- err = b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
- } else {
- err = b.authenticateAndAuthorize(req)(policy.TokenRequestOptions{Scopes: b.scopes})
- }
+ err := b.authzHandler.OnRequest(req, b.authenticateAndAuthorize(req))
if err != nil {
return nil, errorinfo.NonRetriableError(err)
}
@@ -101,17 +127,54 @@ func (b *BearerTokenPolicy) Do(req *policy.Request) (*http.Response, error) {
return nil, err
}
+ res, err = b.handleChallenge(req, res, false)
+ return res, err
+}
+
+// handleChallenge handles authentication challenges either directly (for CAE challenges) or by calling
+// the AuthorizationHandler. It's a no-op when the response doesn't include an authentication challenge.
+// It will recurse at most once, to handle a CAE challenge following a non-CAE challenge handled by the
+// AuthorizationHandler.
+func (b *BearerTokenPolicy) handleChallenge(req *policy.Request, res *http.Response, recursed bool) (*http.Response, error) {
+ var err error
if res.StatusCode == http.StatusUnauthorized {
b.mainResource.Expire()
- if res.Header.Get("WWW-Authenticate") != "" && b.authzHandler.OnChallenge != nil {
- if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
- res, err = req.Next()
+ if res.Header.Get(shared.HeaderWWWAuthenticate) != "" {
+ caeChallenge, parseErr := parseCAEChallenge(res)
+ if parseErr != nil {
+ return res, parseErr
+ }
+ switch {
+ case caeChallenge != nil:
+ authNZ := func(tro policy.TokenRequestOptions) error {
+ // Take the TokenRequestOptions provided by OnRequest and add the challenge claims. The value
+ // will be empty at time of writing because CAE is the only feature involving claims. If in
+ // the future some client needs to specify unrelated claims, this function may need to merge
+ // them with the challenge claims.
+ tro.Claims = caeChallenge.params["claims"]
+ return b.authenticateAndAuthorize(req)(tro)
+ }
+ if err = b.authzHandler.OnRequest(req, authNZ); err == nil {
+ if err = req.RewindBody(); err == nil {
+ res, err = req.Next()
+ }
+ }
+ case b.authzHandler.OnChallenge != nil && !recursed:
+ if err = b.authzHandler.OnChallenge(req, res, b.authenticateAndAuthorize(req)); err == nil {
+ if err = req.RewindBody(); err == nil {
+ if res, err = req.Next(); err == nil {
+ res, err = b.handleChallenge(req, res, true)
+ }
+ }
+ } else {
+ // don't retry challenge handling errors
+ err = errorinfo.NonRetriableError(err)
+ }
+ default:
+ // return the response to the pipeline
}
}
}
- if err != nil {
- err = errorinfo.NonRetriableError(err)
- }
return res, err
}
@@ -121,3 +184,65 @@ func checkHTTPSForAuth(req *policy.Request, allowHTTP bool) error {
}
return nil
}
+
+// parseCAEChallenge returns a *authChallenge representing Response's CAE challenge (nil when Response has none).
+// If Response includes a CAE challenge having invalid claims, it returns a NonRetriableError.
+func parseCAEChallenge(res *http.Response) (*authChallenge, error) {
+ var (
+ caeChallenge *authChallenge
+ err error
+ )
+ for _, c := range parseChallenges(res) {
+ if c.scheme == "Bearer" {
+ if claims := c.params["claims"]; claims != "" && c.params["error"] == "insufficient_claims" {
+ if b, de := base64.StdEncoding.DecodeString(claims); de == nil {
+ c.params["claims"] = string(b)
+ caeChallenge = &c
+ } else {
+ // don't include the decoding error because it's something
+ // unhelpful like "illegal base64 data at input byte 42"
+ err = errorinfo.NonRetriableError(errors.New("authentication challenge contains invalid claims: " + claims))
+ }
+ break
+ }
+ }
+ }
+ return caeChallenge, err
+}
+
+var (
+ challenge, challengeParams *regexp.Regexp
+ once = &sync.Once{}
+)
+
+type authChallenge struct {
+ scheme string
+ params map[string]string
+}
+
+// parseChallenges assumes authentication challenges have quoted parameter values
+func parseChallenges(res *http.Response) []authChallenge {
+ once.Do(func() {
+ // matches challenges having quoted parameters, capturing scheme and parameters
+ challenge = regexp.MustCompile(`(?:(\w+) ((?:\w+="[^"]*",?\s*)+))`)
+ // captures parameter names and values in a match of the above expression
+ challengeParams = regexp.MustCompile(`(\w+)="([^"]*)"`)
+ })
+ parsed := []authChallenge{}
+ // WWW-Authenticate can have multiple values, each containing multiple challenges
+ for _, h := range res.Header.Values(shared.HeaderWWWAuthenticate) {
+ for _, sm := range challenge.FindAllStringSubmatch(h, -1) {
+ // sm is [challenge, scheme, params] (see regexp documentation on submatches)
+ c := authChallenge{
+ params: make(map[string]string),
+ scheme: sm[1],
+ }
+ for _, sm := range challengeParams.FindAllStringSubmatch(sm[2], -1) {
+ // sm is [key="value", key, value] (see regexp documentation on submatches)
+ c.params[sm[1]] = sm[2]
+ }
+ parsed = append(parsed, c)
+ }
+ }
+ return parsed
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
index 3df1c1218..f375195c4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_http_trace.go
@@ -96,7 +96,10 @@ func (h *httpTracePolicy) Do(req *policy.Request) (resp *http.Response, err erro
// StartSpanOptions contains the optional values for StartSpan.
type StartSpanOptions struct {
- // for future expansion
+ // Kind indicates the kind of Span.
+ Kind tracing.SpanKind
+ // Attributes contains key-value pairs of attributes for the span.
+ Attributes []tracing.Attribute
}
// StartSpan starts a new tracing span.
@@ -114,7 +117,6 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options
// we MUST propagate the active tracer before returning so that the trace policy can access it
ctx = context.WithValue(ctx, shared.CtxWithTracingTracer{}, tracer)
- const newSpanKind = tracing.SpanKindInternal
if activeSpan := ctx.Value(ctxActiveSpan{}); activeSpan != nil {
// per the design guidelines, if a SDK method Foo() calls SDK method Bar(),
// then the span for Bar() must be suppressed. however, if Bar() makes a REST
@@ -126,10 +128,19 @@ func StartSpan(ctx context.Context, name string, tracer tracing.Tracer, options
return ctx, func(err error) {}
}
}
+
+ if options == nil {
+ options = &StartSpanOptions{}
+ }
+ if options.Kind == 0 {
+ options.Kind = tracing.SpanKindInternal
+ }
+
ctx, span := tracer.Start(ctx, name, &tracing.SpanOptions{
- Kind: newSpanKind,
+ Kind: options.Kind,
+ Attributes: options.Attributes,
})
- ctx = context.WithValue(ctx, ctxActiveSpan{}, newSpanKind)
+ ctx = context.WithValue(ctx, ctxActiveSpan{}, options.Kind)
return ctx, func(err error) {
if err != nil {
errType := strings.Replace(fmt.Sprintf("%T", err), "*exported.", "*azcore.", 1)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
index 04d7bb4ec..4c3a31fea 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/policy_retry.go
@@ -59,13 +59,33 @@ func setDefaults(o *policy.RetryOptions) {
}
func calcDelay(o policy.RetryOptions, try int32) time.Duration { // try is >=1; never 0
- delay := time.Duration((1< o.MaxRetryDelay {
+ delayFloat := float64(delay) * jitterMultiplier
+ if delayFloat > float64(math.MaxInt64) {
+ // the jitter pushed us over MaxInt64, so just use MaxInt64
+ delay = time.Duration(math.MaxInt64)
+ } else {
+ delay = time.Duration(delayFloat)
+ }
+
+ if delay > o.MaxRetryDelay { // MaxRetryDelay is backfilled with non-negative value
delay = o.MaxRetryDelay
}
+
return delay
}
@@ -102,7 +122,8 @@ func (p *retryPolicy) Do(req *policy.Request) (resp *http.Response, err error) {
try := int32(1)
for {
resp = nil // reset
- log.Writef(log.EventRetryPolicy, "=====> Try=%d", try)
+ // unfortunately we don't have access to the custom allow-list of query params, so we'll redact everything but the default allowed QPs
+ log.Writef(log.EventRetryPolicy, "=====> Try=%d for %s %s", try, req.Raw().Method, getSanitizedURL(*req.Raw().URL, getAllowedQueryParams(nil)))
// For each try, seek to the beginning of the Body stream. We do this even for the 1st try because
// the stream may not be at offset 0 when we first get it and we want the same behavior for the
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
index 03f76c9aa..4f90e4474 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime/poller.go
@@ -50,8 +50,14 @@ const (
// NewPollerOptions contains the optional parameters for NewPoller.
type NewPollerOptions[T any] struct {
// FinalStateVia contains the final-state-via value for the LRO.
+ // NOTE: used only for Azure-AsyncOperation and Operation-Location LROs.
FinalStateVia FinalStateVia
+ // OperationLocationResultPath contains the JSON path to the result's
+ // payload when it's included with the terminal success response.
+ // NOTE: only used for Operation-Location LROs.
+ OperationLocationResultPath string
+
// Response contains a preconstructed response type.
// The final payload will be unmarshaled into it and returned.
Response *T
@@ -98,7 +104,7 @@ func NewPoller[T any](resp *http.Response, pl exported.Pipeline, options *NewPol
opr, err = async.New[T](pl, resp, options.FinalStateVia)
} else if op.Applicable(resp) {
// op poller must be checked before loc as it can also have a location header
- opr, err = op.New[T](pl, resp, options.FinalStateVia)
+ opr, err = op.New[T](pl, resp, options.FinalStateVia, options.OperationLocationResultPath)
} else if loc.Applicable(resp) {
opr, err = loc.New[T](pl, resp)
} else if body.Applicable(resp) {
@@ -172,7 +178,7 @@ func NewPollerFromResumeToken[T any](token string, pl exported.Pipeline, options
} else if loc.CanResume(asJSON) {
opr, _ = loc.New[T](pl, nil)
} else if op.CanResume(asJSON) {
- opr, _ = op.New[T](pl, nil, "")
+ opr, _ = op.New[T](pl, nil, "", "")
} else {
return nil, fmt.Errorf("unhandled poller token %s", string(raw))
}
@@ -200,6 +206,7 @@ type PollingHandler[T any] interface {
}
// Poller encapsulates a long-running operation, providing polling facilities until the operation reaches a terminal state.
+// Methods on this type are not safe for concurrent use.
type Poller[T any] struct {
op PollingHandler[T]
resp *http.Response
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md
new file mode 100644
index 000000000..567e6975b
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/BREAKING_CHANGES.md
@@ -0,0 +1,20 @@
+# Breaking Changes
+
+## v1.8.0
+
+### New errors from `NewManagedIdentityCredential` in some environments
+
+`NewManagedIdentityCredential` now returns an error when `ManagedIdentityCredentialOptions.ID` is set in a hosting environment whose managed identity API doesn't support user-assigned identities. `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases. Returning an error instead prevents the credential authenticating an unexpected identity. The affected hosting environments are:
+ * Azure Arc
+ * Azure ML (when a resource or object ID is specified; client IDs are supported)
+ * Cloud Shell
+ * Service Fabric
+
+## v1.6.0
+
+### Behavioral change to `DefaultAzureCredential` in IMDS managed identity scenarios
+
+As of `azidentity` v1.6.0, `DefaultAzureCredential` makes a minor behavioral change when it uses IMDS managed
+identity. It sends its first request to IMDS without the "Metadata" header, to expedite validating whether the endpoint
+is available. This precedes the credential's first token request and is guaranteed to fail with a 400 error. This error
+response can appear in logs but doesn't indicate authentication failed.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
index a8c2feb6d..84e7941e4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/CHANGELOG.md
@@ -1,5 +1,95 @@
# Release History
+## 1.10.1 (2025-06-10)
+
+### Bugs Fixed
+- `AzureCLICredential` and `AzureDeveloperCLICredential` could wait indefinitely for subprocess output
+
+## 1.10.0 (2025-05-14)
+
+### Features Added
+- `DefaultAzureCredential` reads environment variable `AZURE_TOKEN_CREDENTIALS` to enable a subset of its credentials:
+ - `dev` selects `AzureCLICredential` and `AzureDeveloperCLICredential`
+ - `prod` selects `EnvironmentCredential`, `WorkloadIdentityCredential` and `ManagedIdentityCredential`
+
+## 1.9.0 (2025-04-08)
+
+### Features Added
+* `GetToken()` sets `AccessToken.RefreshOn` when the token provider specifies a value
+
+### Other Changes
+* `NewManagedIdentityCredential` logs the configured user-assigned identity, if any
+* Deprecated `UsernamePasswordCredential` because it can't support multifactor
+ authentication (MFA), which Microsoft Entra ID requires for most tenants. See
+ https://aka.ms/azsdk/identity/mfa for migration guidance.
+* Updated dependencies
+
+## 1.8.2 (2025-02-12)
+
+### Other Changes
+* Upgraded dependencies
+
+## 1.8.1 (2025-01-15)
+
+### Bugs Fixed
+* User credential types inconsistently log access token scopes
+* `DefaultAzureCredential` skips managed identity in Azure Container Instances
+* Credentials having optional tenant IDs such as `AzureCLICredential` and
+ `InteractiveBrowserCredential` require setting `AdditionallyAllowedTenants`
+ when used with some clients
+
+### Other Changes
+* `ChainedTokenCredential` and `DefaultAzureCredential` continue to their next
+ credential after `ManagedIdentityCredential` receives an unexpected response
+ from IMDS, indicating the response is from something else such as a proxy
+
+## 1.8.0 (2024-10-08)
+
+### Other Changes
+* `AzurePipelinesCredential` sets an additional OIDC request header so that it
+ receives a 401 instead of a 302 after presenting an invalid system access token
+* Allow logging of debugging headers for `AzurePipelinesCredential` and include
+ them in error messages
+
+## 1.8.0-beta.3 (2024-09-17)
+
+### Features Added
+* Added `ObjectID` type for `ManagedIdentityCredentialOptions.ID`
+
+### Other Changes
+* Removed redundant content from error messages
+
+## 1.8.0-beta.2 (2024-08-06)
+
+### Breaking Changes
+* `NewManagedIdentityCredential` now returns an error when a user-assigned identity
+ is specified on a platform whose managed identity API doesn't support that.
+ `ManagedIdentityCredential.GetToken()` formerly logged a warning in these cases.
+ Returning an error instead prevents the credential authenticating an unexpected
+ identity, causing a client to act with unexpected privileges. The affected
+ platforms are:
+ * Azure Arc
+ * Azure ML (when a resource ID is specified; client IDs are supported)
+ * Cloud Shell
+ * Service Fabric
+
+### Other Changes
+* If `DefaultAzureCredential` receives a non-JSON response when probing IMDS before
+ attempting to authenticate a managed identity, it continues to the next credential
+ in the chain instead of immediately returning an error.
+
+## 1.8.0-beta.1 (2024-07-17)
+
+### Features Added
+* Restored persistent token caching feature
+
+### Breaking Changes
+> These changes affect only code written against a beta version such as v1.7.0-beta.1
+* Redesigned the persistent caching API. Encryption is now required in all cases
+ and persistent cache construction is separate from credential construction.
+ The `PersistentUserAuthentication` example in the package docs has been updated
+ to demonstrate the new API.
+
## 1.7.0 (2024-06-20)
### Features Added
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
index 4404be824..29b60baec 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/MIGRATION.md
@@ -304,4 +304,4 @@ client := subscriptions.NewClient()
client.Authorizer = azidext.NewTokenCredentialAdapter(cred, []string{"https://management.azure.com//.default"})
```
-
+
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
index 7e201ea2f..069bc688d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/README.md
@@ -21,7 +21,7 @@ go get -u github.com/Azure/azure-sdk-for-go/sdk/azidentity
## Prerequisites
- an [Azure subscription](https://azure.microsoft.com/free/)
-- Go 1.18
+- [Supported](https://aka.ms/azsdk/go/supported-versions) version of Go
### Authenticating during local development
@@ -54,17 +54,7 @@ The `azidentity` module focuses on OAuth authentication with Microsoft Entra ID.
### DefaultAzureCredential
-`DefaultAzureCredential` is appropriate for most apps that will be deployed to Azure. It combines common production credentials with development credentials. It attempts to authenticate via the following mechanisms in this order, stopping when one succeeds:
-
-
-
-1. **Environment** - `DefaultAzureCredential` will read account information specified via [environment variables](#environment-variables) and use it to authenticate.
-1. **Workload Identity** - If the app is deployed on Kubernetes with environment variables set by the workload identity webhook, `DefaultAzureCredential` will authenticate the configured identity.
-1. **Managed Identity** - If the app is deployed to an Azure host with managed identity enabled, `DefaultAzureCredential` will authenticate with it.
-1. **Azure CLI** - If a user or service principal has authenticated via the Azure CLI `az login` command, `DefaultAzureCredential` will authenticate that identity.
-1. **Azure Developer CLI** - If the developer has authenticated via the Azure Developer CLI `azd auth login` command, the `DefaultAzureCredential` will authenticate with that account.
-
-> Note: `DefaultAzureCredential` is intended to simplify getting started with the SDK by handling common scenarios with reasonable default behaviors. Developers who want more control or whose scenario isn't served by the default settings should use other credential types.
+`DefaultAzureCredential` simplifies authentication while developing apps that deploy to Azure by combining credentials used in Azure hosting environments with credentials used in local development. For more information, see [DefaultAzureCredential overview][dac_overview].
## Managed Identity
@@ -126,12 +116,17 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
## Credential Types
-### Authenticating Azure Hosted Applications
+### Credential chains
+
+|Credential|Usage|Reference
+|-|-|-
+|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps|[DefaultAzureCredential overview][dac_overview]|
+|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials|[ChainedTokenCredential overview][ctc_overview]|
+
+### Authenticating Azure-Hosted Applications
|Credential|Usage
|-|-
-|[DefaultAzureCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DefaultAzureCredential)|Simplified authentication experience for getting started developing Azure apps
-|[ChainedTokenCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ChainedTokenCredential)|Define custom authentication flows, composing multiple credentials
|[EnvironmentCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#EnvironmentCredential)|Authenticate a service principal or user configured by environment variables
|[ManagedIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#ManagedIdentityCredential)|Authenticate the managed identity of an Azure resource
|[WorkloadIdentityCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#WorkloadIdentityCredential)|Authenticate a workload identity on Kubernetes
@@ -151,20 +146,19 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|-|-
|[InteractiveBrowserCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#InteractiveBrowserCredential)|Interactively authenticate a user with the default web browser
|[DeviceCodeCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#DeviceCodeCredential)|Interactively authenticate a user on a device with limited UI
-|[UsernamePasswordCredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#UsernamePasswordCredential)|Authenticate a user with a username and password
### Authenticating via Development Tools
|Credential|Usage
|-|-
|[AzureCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureCLICredential)|Authenticate as the user signed in to the Azure CLI
-|[`AzureDeveloperCLICredential`](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI
+|[AzureDeveloperCLICredential](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#AzureDeveloperCLICredential)|Authenticates as the user signed in to the Azure Developer CLI
## Environment Variables
`DefaultAzureCredential` and `EnvironmentCredential` can be configured with environment variables. Each type of authentication requires values for specific variables:
-#### Service principal with secret
+### Service principal with secret
|variable name|value
|-|-
@@ -172,7 +166,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|`AZURE_TENANT_ID`|ID of the application's Microsoft Entra tenant
|`AZURE_CLIENT_SECRET`|one of the application's client secrets
-#### Service principal with certificate
+### Service principal with certificate
|variable name|value
|-|-
@@ -181,16 +175,7 @@ client := armresources.NewResourceGroupsClient("subscription ID", chain, nil)
|`AZURE_CLIENT_CERTIFICATE_PATH`|path to a certificate file including private key
|`AZURE_CLIENT_CERTIFICATE_PASSWORD`|password of the certificate file, if any
-#### Username and password
-
-|variable name|value
-|-|-
-|`AZURE_CLIENT_ID`|ID of a Microsoft Entra application
-|`AZURE_USERNAME`|a username (usually an email address)
-|`AZURE_PASSWORD`|that user's password
-
-Configuration is attempted in the above order. For example, if values for a
-client secret and certificate are both present, the client secret will be used.
+Configuration is attempted in the above order. For example, if values for a client secret and certificate are both present, the client secret will be used.
## Token caching
@@ -255,4 +240,8 @@ For more information, see the
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any
additional questions or comments.
-
+
+[ctc_overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview
+[dac_overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview
+
+
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
index fbaa29220..da2094e36 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD
@@ -1,60 +1,43 @@
## Token caching in the Azure Identity client module
-*Token caching* is a feature provided by the Azure Identity library that allows apps to:
+Token caching helps apps:
- Improve their resilience and performance.
-- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens.
-- Reduce the number of times the user is prompted to authenticate.
+- Reduce the number of requests sent to Microsoft Entra ID to obtain access tokens.
+- Reduce the number of times users are prompted to authenticate.
-When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token.
+When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID by sending an HTTP request and sometimes prompting a user to authenticate interactively. Credentials with caches (see [the below table](#credentials-supporting-token-caching) for a list) store access tokens either [in memory](#in-memory-token-caching) or, optionally, [on disk](#persistent-token-caching). These credentials return cached tokens whenever possible, to avoid unnecessary token requests or user interaction. Both cache implementations are safe for concurrent use.
-Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested.
+#### Caching can't be disabled
-### In-memory token caching
-
-*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe.
-
-**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library.
+Whether a credential caches tokens isn't configurable. If a credential has a cache of either kind, it requests a new token only when it can't provide one from its cache. Azure SDK service clients have an additional, independent layer of in-memory token caching, to prevent redundant token requests. This cache works with any credential type, even a custom implementation defined outside the Azure SDK, and can't be disabled. Disabling token caching is therefore impossible when using Azure SDK clients or most `azidentity` credential types. However, in-memory caches can be cleared by constructing new credential and client instances.
-#### Caching cannot be disabled
+### In-memory token caching
-As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance.
+Credential types that support caching store tokens in memory by default and require no configuration to do so. Each instance of these types has its own cache, and two credential instances never share an in-memory cache.
### Persistent token caching
-> Only azidentity v1.5.0-beta versions support persistent token caching
-
-*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems.
+Some credential types support opt-in persistent token caching (see [the below table](#credentials-supporting-token-caching) for a list). This feature enables credentials to store and retrieve tokens across process executions, so an application doesn't need to authenticate every time it runs.
-| Operating system | Storage mechanism |
-|------------------|---------------------------------------|
-| Linux | kernel key retention service (keyctl) |
-| macOS | Keychain |
-| Windows | DPAPI |
+Persistent caches are encrypted at rest using a mechanism that depends on the operating system:
-By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform.
-However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access.
+| Operating system | Encryption facility | Limitations |
+| ---------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
+| Linux | kernel key retention service (keyctl) | Cache data is lost on system shutdown because kernel keys are stored in memory. Depending on kernel compile options, data may also be lost on logout, or storage may be impossible because the key retention service isn't available. |
+| macOS | Keychain | Building requires cgo and native build tools. Keychain access requires a graphical session, so persistent caching isn't possible in a headless environment such as an SSH session (macOS as host). |
+| Windows | Data Protection API (DPAPI) | No specific limitations. |
-With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which:
-
-- Makes the app more resilient to failures.
-- Ensures the app can continue to function during an Entra ID outage or disruption.
-- Avoids having to prompt users to authenticate each time the process is restarted.
-
->IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains.
-
-#### Example code
-
-See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data.
+Persistent caching requires encryption. When the required encryption facility is unuseable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example].
### Credentials supporting token caching
The following table indicates the state of in-memory and persistent caching in each credential type.
-**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache).
+**Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example].
| Credential | In-memory token caching | Persistent token caching |
-|--------------------------------|---------------------------------------------------------------------|--------------------------|
+| ------------------------------ | ------------------------------------------------------------------- | ------------------------ |
| `AzureCLICredential` | Not Supported | Not Supported |
| `AzureDeveloperCLICredential` | Not Supported | Not Supported |
| `AzurePipelinesCredential` | Supported | Supported |
@@ -66,6 +49,8 @@ The following table indicates the state of in-memory and persistent caching in e
| `EnvironmentCredential` | Supported | Not Supported |
| `InteractiveBrowserCredential` | Supported | Supported |
| `ManagedIdentityCredential` | Supported | Not Supported |
-| `OnBehalfOfCredential` | Supported | Supported |
-| `UsernamePasswordCredential` | Supported | Supported |
+| `OnBehalfOfCredential` | Supported | Not Supported |
| `WorkloadIdentityCredential` | Supported | Supported |
+
+[sp_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentServicePrincipalAuthentication
+[user_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index 54016a070..91f4f05cc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -8,6 +8,7 @@ This troubleshooting guide covers failure investigation techniques, common error
- [Permission issues](#permission-issues)
- [Find relevant information in errors](#find-relevant-information-in-errors)
- [Enable and configure logging](#enable-and-configure-logging)
+- [Troubleshoot persistent token caching issues](#troubleshoot-persistent-token-caching-issues)
- [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues)
- [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues)
- [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues)
@@ -19,7 +20,6 @@ This troubleshooting guide covers failure investigation techniques, common error
- [Azure App Service and Azure Functions managed identity](#azure-app-service-and-azure-functions-managed-identity)
- [Azure Kubernetes Service managed identity](#azure-kubernetes-service-managed-identity)
- [Azure Virtual Machine managed identity](#azure-virtual-machine-managed-identity)
-- [Troubleshoot UsernamePasswordCredential authentication issues](#troubleshoot-usernamepasswordcredential-authentication-issues)
- [Troubleshoot WorkloadIdentityCredential authentication issues](#troubleshoot-workloadidentitycredential-authentication-issues)
- [Get additional help](#get-additional-help)
@@ -110,13 +110,6 @@ azlog.SetEvents(azidentity.EventAuthentication)
|AADSTS700027|Client assertion contains an invalid signature.|Ensure the specified certificate has been uploaded to the application registration as described in [Microsoft Entra ID documentation](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal#option-1-upload-a-certificate).|
|AADSTS700016|The specified application wasn't found in the specified tenant.|Ensure the client and tenant IDs provided to the credential constructor are correct for your application registration. For multi-tenant apps, ensure the application has been added to the desired tenant by a tenant admin. To add a new application in the desired tenant, follow the [Microsoft Entra ID instructions](https://learn.microsoft.com/entra/identity-platform/howto-create-service-principal-portal).|
-
-## Troubleshoot UsernamePasswordCredential authentication issues
-
-| Error Code | Issue | Mitigation |
-|---|---|---|
-|AADSTS50126|The provided username or password is invalid.|Ensure the username and password provided to the credential constructor are valid.|
-
## Troubleshoot ManagedIdentityCredential authentication issues
@@ -180,6 +173,7 @@ curl "$IDENTITY_ENDPOINT?resource=https://management.core.windows.net&api-versio
|---|---|---|
|Azure CLI not found on path|The Azure CLI isn’t installed or isn't on the application's path.|- Ensure the Azure CLI is installed as described in [Azure CLI documentation](https://learn.microsoft.com/cli/azure/install-azure-cli).
- Validate the installation location is in the application's `PATH` environment variable.
|
|Please run 'az login' to set up account|No account is currently logged into the Azure CLI, or the login has expired.|- Run `az login` to log into the Azure CLI. More information about Azure CLI authentication is available in the [Azure CLI documentation](https://learn.microsoft.com/cli/azure/authenticate-azure-cli).
- Verify that the Azure CLI can obtain tokens. See [below](#verify-the-azure-cli-can-obtain-tokens) for instructions.
|
+|Subscription "[your subscription]" contains invalid characters. If this is the name of a subscription, use its ID instead|The subscription name contains a character that may not be safe in a command line.|Use the subscription's ID instead of its name. You can get this from the Azure CLI: `az account show --name "[your subscription]" --query "id"`
#### Verify the Azure CLI can obtain tokens
@@ -225,7 +219,7 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul
| Error Message |Description| Mitigation |
|---|---|---|
-|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.- If your application runs on Azure Kubernetes Servide (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
- If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions`
+|no client ID/tenant ID/token file specified|Incomplete configuration|In most cases these values are provided via environment variables set by Azure Workload Identity.
- If your application runs on Azure Kubernetes Service (AKS) or a cluster that has deployed the Azure Workload Identity admission webhook, check pod labels and service account configuration. See the [AKS documentation](https://learn.microsoft.com/azure/aks/workload-identity-deploy-cluster#disable-workload-identity) and [Azure Workload Identity troubleshooting guide](https://azure.github.io/azure-workload-identity/docs/troubleshooting.html) for more details.
- If your application isn't running on AKS or your cluster hasn't deployed the Workload Identity admission webhook, set these values in `WorkloadIdentityCredentialOptions`
## Troubleshoot AzurePipelinesCredential authentication issues
@@ -234,7 +228,30 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul
|---|---|---|
| AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.|
| No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.|
-|302 (Found) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).|
+|401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).|
+
+## Troubleshoot persistent token caching issues
+
+### macOS
+
+[azidentity/cache](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache) encrypts persistent caches with the system Keychain on macOS. You may see build and runtime errors there because calling the Keychain API requires cgo and macOS prohibits Keychain access in some scenarios.
+
+#### Build errors
+
+Build errors about undefined `accessor` symbols indicate that cgo wasn't enabled. For example:
+```
+$ GOOS=darwin go build
+# github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache
+../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:19: undefined: accessor.New
+../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:38: undefined: accessor.WithAccount
+```
+
+Try `go build` again with `CGO_ENABLED=1`. You may need to install native build tools.
+
+#### Runtime errors
+
+macOS prohibits Keychain access from environments without a GUI such as SSH sessions. If your application calls the persistent cache constructor ([cache.New](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache#New)) from an SSH session on a macOS host, you'll see an error like
+`persistent storage isn't available due to error "User interaction is not allowed. (-25308)"`. This doesn't mean authentication is impossible, only that credentials can't persist data and the application must reauthenticate the next time it runs.
## Get additional help
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
index bff0c44da..4118f99ef 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/azidentity",
- "Tag": "go/azidentity_087379b475"
+ "Tag": "go/azidentity_191110b0dd"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
index ada4d6501..840a71469 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go
@@ -18,10 +18,10 @@ import (
var supportedAuthRecordVersions = []string{"1.0"}
-// authenticationRecord is non-secret account information about an authenticated user that user credentials such as
+// AuthenticationRecord is non-secret account information about an authenticated user that user credentials such as
// [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication
-// data. Call these credentials' Authenticate method to get an authenticationRecord for a user.
-type authenticationRecord struct {
+// data. Call these credentials' Authenticate method to get an AuthenticationRecord for a user.
+type AuthenticationRecord struct {
// Authority is the URL of the authority that issued the token.
Authority string `json:"authority"`
@@ -42,11 +42,11 @@ type authenticationRecord struct {
}
// UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord
-func (a *authenticationRecord) UnmarshalJSON(b []byte) error {
+func (a *AuthenticationRecord) UnmarshalJSON(b []byte) error {
// Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we
// want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally
// different type enables this by assigning all the fields without recursing into this method.
- type r authenticationRecord
+ type r AuthenticationRecord
err := json.Unmarshal(b, (*r)(a))
if err != nil {
return err
@@ -63,7 +63,7 @@ func (a *authenticationRecord) UnmarshalJSON(b []byte) error {
}
// account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued.
-func (a *authenticationRecord) account() public.Account {
+func (a *AuthenticationRecord) account() public.Account {
return public.Account{
Environment: a.Authority,
HomeAccountID: a.HomeAccountID,
@@ -71,10 +71,10 @@ func (a *authenticationRecord) account() public.Account {
}
}
-func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) {
+func newAuthenticationRecord(ar public.AuthResult) (AuthenticationRecord, error) {
u, err := url.Parse(ar.IDToken.Issuer)
if err != nil {
- return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer)
+ return AuthenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer)
}
tenant := ar.IDToken.TenantID
if tenant == "" {
@@ -84,7 +84,7 @@ func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error)
if username == "" {
username = ar.IDToken.UPN
}
- return authenticationRecord{
+ return AuthenticationRecord{
Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host),
ClientID: ar.IDToken.Audience,
HomeAccountID: ar.Account.HomeAccountID,
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
index b0965036b..bd196ddd3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/public"
)
@@ -42,6 +43,8 @@ const (
developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46"
defaultSuffix = "/.default"
+ scopeLogFmt = "%s.GetToken() acquired a token for scope %q"
+
traceNamespace = "Microsoft.Entra"
traceOpGetToken = "GetToken"
traceOpAuthenticate = "Authenticate"
@@ -53,8 +56,14 @@ var (
errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names")
)
-// tokenCachePersistenceOptions contains options for persistent token caching
-type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions
+// Cache represents a persistent cache that makes authentication data available across processes.
+// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's
+// [persistent user authentication example] shows how to use a persistent cache to reuse user
+// logins across application runs. For service principal credential types such as
+// [ClientCertificateCredential], simply set the Cache field on the credential options.
+//
+// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
+type Cache = internal.Cache
// setAuthorityHost initializes the authority host for credentials. Precedence is:
// 1. cloud.Configuration.ActiveDirectoryAuthorityHost value set by user
@@ -97,7 +106,16 @@ func resolveAdditionalTenants(tenants []string) []string {
return cp
}
-// resolveTenant returns the correct tenant for a token request
+// resolveTenant returns the correct tenant for a token request, or "" when the calling credential doesn't
+// have an explicitly configured tenant and the caller didn't specify a tenant for the token request.
+//
+// - defaultTenant: tenant set when constructing the credential, if any. "" is valid for credentials
+// having an optional or implicit tenant such as dev tool and interactive user credentials. Those
+// default to the tool's configured tenant or the user's home tenant, respectively.
+// - specified: tenant specified for this token request i.e., TokenRequestOptions.TenantID. May be "".
+// - credName: name of the calling credential type; for error messages
+// - additionalTenants: optional allow list of tenants the credential may acquire tokens from in
+// addition to defaultTenant i.e., the credential's AdditionallyAllowedTenants option
func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) {
if specified == "" || specified == defaultTenant {
return defaultTenant, nil
@@ -113,6 +131,17 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants
return specified, nil
}
}
+ if len(additionalTenants) == 0 {
+ switch defaultTenant {
+ case "", organizationsTenantID:
+ // The application didn't specify a tenant or allow list when constructing the credential. Allow the
+ // tenant specified for this token request because we have nothing to compare it to (i.e., it vacuously
+ // satisfies the credential's configuration); don't know whether the application is multitenant; and
+ // don't want to return an error in the common case that the specified tenant matches the credential's
+ // default tenant determined elsewhere e.g., in some dev tool's configuration.
+ return specified, nil
+ }
+ }
return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified)
}
@@ -180,6 +209,10 @@ type msalConfidentialClient interface {
AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error)
}
+type msalManagedIdentityClient interface {
+ AcquireToken(context.Context, string, ...managedidentity.AcquireTokenOption) (managedidentity.AuthResult, error)
+}
+
// enables fakes for test scenarios
type msalPublicClient interface {
AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
index b9976f5fe..0fd03f456 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go
@@ -30,9 +30,9 @@ type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscrip
// AzureCLICredentialOptions contains optional parameters for AzureCLICredential.
type AzureCLICredentialOptions struct {
- // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
- // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
- // logged in account can access.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string
// Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other
@@ -70,7 +70,11 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent
}
for _, r := range cp.Subscription {
if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') {
- return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.Subscription)
+ return nil, fmt.Errorf(
+ "%s: Subscription %q contains invalid characters. If this is the name of a subscription, use its ID instead",
+ credNameAzureCLI,
+ cp.Subscription,
+ )
}
}
if cp.TenantID != "" && !validTenantID(cp.TenantID) {
@@ -144,8 +148,14 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []
cliCmd.Env = os.Environ()
var stderr bytes.Buffer
cliCmd.Stderr = &stderr
+ cliCmd.WaitDelay = 100 * time.Millisecond
- output, err := cliCmd.Output()
+ stdout, err := cliCmd.Output()
+ if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 {
+ // The child process wrote to stdout and exited without closing it.
+ // Swallow this error and return stdout because it may contain a token.
+ return stdout, nil
+ }
if err != nil {
msg := stderr.String()
var exErr *exec.ExitError
@@ -158,7 +168,7 @@ var defaultAzTokenProvider azTokenProvider = func(ctx context.Context, scopes []
return nil, newCredentialUnavailableError(credNameAzureCLI, msg)
}
- return output, nil
+ return stdout, nil
}
func (c *AzureCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
index cbe7c4c2d..1bd3720b6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go
@@ -30,9 +30,9 @@ type azdTokenProvider func(ctx context.Context, scopes []string, tenant string)
// AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential.
type AzureDeveloperCLICredentialOptions struct {
- // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition
- // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the
- // logged in account can access.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string
// TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment,
@@ -130,7 +130,14 @@ var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes
cliCmd.Env = os.Environ()
var stderr bytes.Buffer
cliCmd.Stderr = &stderr
- output, err := cliCmd.Output()
+ cliCmd.WaitDelay = 100 * time.Millisecond
+
+ stdout, err := cliCmd.Output()
+ if errors.Is(err, exec.ErrWaitDelay) && len(stdout) > 0 {
+ // The child process wrote to stdout and exited without closing it.
+ // Swallow this error and return stdout because it may contain a token.
+ return stdout, nil
+ }
if err != nil {
msg := stderr.String()
var exErr *exec.ExitError
@@ -144,7 +151,7 @@ var defaultAzdTokenProvider azdTokenProvider = func(ctx context.Context, scopes
}
return nil, newCredentialUnavailableError(credNameAzureDeveloperCLI, msg)
}
- return output, nil
+ return stdout, nil
}
func (c *AzureDeveloperCLICredential) createAccessToken(tk []byte) (azcore.AccessToken, error) {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
index 80c1806bb..a4b8ab6f4 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go
@@ -20,6 +20,8 @@ const (
credNameAzurePipelines = "AzurePipelinesCredential"
oidcAPIVersion = "7.1"
systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI"
+ xMsEdgeRef = "x-msedge-ref"
+ xVssE2eId = "x-vss-e2eid"
)
// AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See
@@ -40,6 +42,11 @@ type AzurePipelinesCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
@@ -81,8 +88,11 @@ func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, system
if options == nil {
options = &AzurePipelinesCredentialOptions{}
}
+ // these headers are useful to the DevOps team when debugging OIDC error responses
+ options.ClientOptions.Logging.AllowedHeaders = append(options.ClientOptions.Logging.AllowedHeaders, xMsEdgeRef, xVssE2eId)
caco := ClientAssertionCredentialOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
@@ -108,33 +118,40 @@ func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, er
url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID
url, err := runtime.EncodeQueryParams(url)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil)
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil)
}
req.Header.Set("Authorization", "Bearer "+a.systemAccessToken)
+ // instruct endpoint to return 401 instead of 302, if the system access token is invalid
+ req.Header.Set("X-TFS-FedAuthRedirect", "Suppress")
res, err := doForClient(a.cred.client.azClient, req)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil)
}
if res.StatusCode != http.StatusOK {
- msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration"
+ msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration."
+ for _, h := range []string{xMsEdgeRef, xVssE2eId} {
+ if v := res.Header.Get(h); v != "" {
+ msg += fmt.Sprintf("\n%s: %s", h, v)
+ }
+ }
// include the response because its body, if any, probably contains an error message.
// OK responses aren't included with errors because they probably contain secrets
- return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res)
}
b, err := runtime.Payload(res)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil)
}
var r struct {
OIDCToken string `json:"oidcToken"`
}
err = json.Unmarshal(b, &r)
if err != nil {
- return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil)
+ return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil)
}
return r.OIDCToken, nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
index 6c35a941b..82342a025 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go
@@ -27,7 +27,10 @@ type ChainedTokenCredentialOptions struct {
}
// ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default,
-// it tries all the credentials until one authenticates, after which it always uses that credential.
+// it tries all the credentials until one authenticates, after which it always uses that credential. For more information,
+// see [ChainedTokenCredential overview].
+//
+// [ChainedTokenCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview
type ChainedTokenCredential struct {
cond *sync.Cond
iterating bool
@@ -46,6 +49,9 @@ func NewChainedTokenCredential(sources []azcore.TokenCredential, options *Chaine
if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil
return nil, errors.New("sources cannot contain nil")
}
+ if mc, ok := source.(*ManagedIdentityCredential); ok {
+ mc.mic.chained = true
+ }
}
cp := make([]azcore.TokenCredential, len(sources))
copy(cp, sources)
@@ -113,11 +119,19 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token
if err != nil {
// return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise
msg := createChainedErrorMessage(errs)
- if errors.As(err, &unavailableErr) {
+ var authFailedErr *AuthenticationFailedError
+ switch {
+ case errors.As(err, &authFailedErr):
+ err = newAuthenticationFailedError(c.name, msg, authFailedErr.RawResponse)
+ if af, ok := err.(*AuthenticationFailedError); ok {
+ // stop Error() printing the response again; it's already in msg
+ af.omitResponse = true
+ }
+ case errors.As(err, &unavailableErr):
err = newCredentialUnavailableError(c.name, msg)
- } else {
+ default:
res := getResponseFromError(err)
- err = newAuthenticationFailedError(c.name, msg, res, err)
+ err = newAuthenticationFailedError(c.name, msg, res)
}
}
return token, err
@@ -126,7 +140,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token
func createChainedErrorMessage(errs []error) string {
msg := "failed to acquire a token.\nAttempted credentials:"
for _, err := range errs {
- msg += fmt.Sprintf("\n\t%s", err.Error())
+ msg += fmt.Sprintf("\n\t%s", strings.ReplaceAll(err.Error(), "\n", "\n\t\t"))
}
return msg
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
index 4cd8c5144..c3af0cdc2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml
@@ -27,15 +27,15 @@ extends:
CloudConfig:
Public:
SubscriptionConfigurations:
- - $(sub-config-azure-cloud-test-resources)
- $(sub-config-identity-test-resources)
- EnvVars:
- SYSTEM_ACCESSTOKEN: $(System.AccessToken)
+ EnableRaceDetector: true
+ Location: westus2
RunLiveTests: true
ServiceDirectory: azidentity
UsePipelineProxy: false
${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}:
+ PersistOidcToken: true
MatrixConfigs:
- Name: managed_identity_matrix
GenerateVMJobs: true
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
index b588750ef..2307da86f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go
@@ -37,14 +37,16 @@ type ClientAssertionCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults.
@@ -61,10 +63,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c
},
)
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts)
if err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
index 80cd96b56..9e6bca1c9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go
@@ -31,6 +31,11 @@ type ClientCertificateCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
@@ -41,9 +46,6 @@ type ClientCertificateCredentialOptions struct {
// header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication.
// Defaults to False.
SendCertificateChain bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// ClientCertificateCredential authenticates a service principal with a certificate.
@@ -65,11 +67,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x
return nil, err
}
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- SendX5C: options.SendCertificateChain,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ SendX5C: options.SendCertificateChain,
}
c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts)
if err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
index 9e6772e9b..f0890fe1e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go
@@ -32,8 +32,10 @@ type ClientSecretCredentialOptions struct {
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
}
// ClientSecretCredential authenticates an application with a client secret.
@@ -51,10 +53,10 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st
return nil, err
}
msalOpts := confidentialClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- tokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts)
if err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
index 3bd08c685..58c4b585c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go
@@ -29,8 +29,8 @@ type confidentialClientOptions struct {
AdditionallyAllowedTenants []string
// Assertion for on-behalf-of authentication
Assertion string
+ Cache Cache
DisableInstanceDiscovery, SendX5C bool
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// confidentialClient wraps the MSAL confidential client
@@ -107,18 +107,18 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque
}
}
if err != nil {
- // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code.
- // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError.
- var unavailableErr credentialUnavailable
- if !errors.As(err, &unavailableErr) {
- res := getResponseFromError(err)
- err = newAuthenticationFailedError(c.name, err.Error(), res, err)
+ var (
+ authFailedErr *AuthenticationFailedError
+ unavailableErr credentialUnavailable
+ )
+ if !(errors.As(err, &unavailableErr) || errors.As(err, &authFailedErr)) {
+ err = newAuthenticationFailedErrorFromMSAL(c.name, err)
}
} else {
- msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", "))
+ msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", "))
log.Write(EventAuthentication, msg)
}
- return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) {
@@ -145,7 +145,7 @@ func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfide
}
func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) {
- cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE)
+ cache, err := internal.ExportReplace(c.opts.Cache, enableCAE)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
index 551d31994..f2a31ee6a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go
@@ -8,6 +8,7 @@ package azidentity
import (
"context"
+ "fmt"
"os"
"strings"
@@ -16,6 +17,8 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
)
+const azureTokenCredentials = "AZURE_TOKEN_CREDENTIALS"
+
// DefaultAzureCredentialOptions contains optional parameters for DefaultAzureCredential.
// These options may not apply to all credentials in the chain.
type DefaultAzureCredentialOptions struct {
@@ -23,23 +26,30 @@ type DefaultAzureCredentialOptions struct {
// to credential types that authenticate via external tools such as the Azure CLI.
azcore.ClientOptions
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add
- // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be
- // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
+ // This value can also be set as a semicolon delimited list of tenants in the environment variable
+ // AZURE_ADDITIONALLY_ALLOWED_TENANTS.
AdditionallyAllowedTenants []string
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
- // TenantID sets the default tenant for authentication via the Azure CLI and workload identity.
+
+ // TenantID sets the default tenant for authentication via the Azure CLI, Azure Developer CLI, and workload identity.
TenantID string
}
-// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure.
-// It combines credentials suitable for deployment with credentials suitable for local development.
-// It attempts to authenticate with each of these credential types, in the following order, stopping
-// when one provides a token:
+// DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by
+// combining credentials used in Azure hosting environments and credentials used in local development. In
+// production, it's better to use a specific credential type so authentication is more predictable and easier
+// to debug. For more information, see [DefaultAzureCredential overview].
+//
+// DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order,
+// stopping when one provides a token:
//
// - [EnvironmentCredential]
// - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload
@@ -52,14 +62,30 @@ type DefaultAzureCredentialOptions struct {
// Consult the documentation for these credential types for more information on how they authenticate.
// Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for
// every subsequent authentication.
+//
+// [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview
type DefaultAzureCredential struct {
chain *ChainedTokenCredential
}
// NewDefaultAzureCredential creates a DefaultAzureCredential. Pass nil for options to accept defaults.
func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*DefaultAzureCredential, error) {
- var creds []azcore.TokenCredential
- var errorMessages []string
+ var (
+ creds []azcore.TokenCredential
+ errorMessages []string
+ includeDev, includeProd = true, true
+ )
+
+ if c, ok := os.LookupEnv(azureTokenCredentials); ok {
+ switch c {
+ case "dev":
+ includeProd = false
+ case "prod":
+ includeDev = false
+ default:
+ return nil, fmt.Errorf(`invalid %s value %q. Valid values are "dev" and "prod"`, azureTokenCredentials, c)
+ }
+ }
if options == nil {
options = &DefaultAzureCredentialOptions{}
@@ -71,60 +97,63 @@ func NewDefaultAzureCredential(options *DefaultAzureCredentialOptions) (*Default
}
}
- envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- additionallyAllowedTenants: additionalTenants,
- })
- if err == nil {
- creds = append(creds, envCred)
- } else {
- errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err})
- }
-
- wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- TenantID: options.TenantID,
- })
- if err == nil {
- creds = append(creds, wic)
- } else {
- errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err})
- }
+ if includeProd {
+ envCred, err := NewEnvironmentCredential(&EnvironmentCredentialOptions{
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ additionallyAllowedTenants: additionalTenants,
+ })
+ if err == nil {
+ creds = append(creds, envCred)
+ } else {
+ errorMessages = append(errorMessages, "EnvironmentCredential: "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: "EnvironmentCredential", err: err})
+ }
- o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true}
- if ID, ok := os.LookupEnv(azureClientID); ok {
- o.ID = ClientID(ID)
- }
- miCred, err := NewManagedIdentityCredential(o)
- if err == nil {
- creds = append(creds, miCred)
- } else {
- errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
- }
+ wic, err := NewWorkloadIdentityCredential(&WorkloadIdentityCredentialOptions{
+ AdditionallyAllowedTenants: additionalTenants,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ TenantID: options.TenantID,
+ })
+ if err == nil {
+ creds = append(creds, wic)
+ } else {
+ errorMessages = append(errorMessages, credNameWorkloadIdentity+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameWorkloadIdentity, err: err})
+ }
- cliCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID})
- if err == nil {
- creds = append(creds, cliCred)
- } else {
- errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err})
+ o := &ManagedIdentityCredentialOptions{ClientOptions: options.ClientOptions, dac: true}
+ if ID, ok := os.LookupEnv(azureClientID); ok {
+ o.ID = ClientID(ID)
+ }
+ miCred, err := NewManagedIdentityCredential(o)
+ if err == nil {
+ creds = append(creds, miCred)
+ } else {
+ errorMessages = append(errorMessages, credNameManagedIdentity+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameManagedIdentity, err: err})
+ }
}
+ if includeDev {
+ azCred, err := NewAzureCLICredential(&AzureCLICredentialOptions{AdditionallyAllowedTenants: additionalTenants, TenantID: options.TenantID})
+ if err == nil {
+ creds = append(creds, azCred)
+ } else {
+ errorMessages = append(errorMessages, credNameAzureCLI+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureCLI, err: err})
+ }
- azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{
- AdditionallyAllowedTenants: additionalTenants,
- TenantID: options.TenantID,
- })
- if err == nil {
- creds = append(creds, azdCred)
- } else {
- errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error())
- creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err})
+ azdCred, err := NewAzureDeveloperCLICredential(&AzureDeveloperCLICredentialOptions{
+ AdditionallyAllowedTenants: additionalTenants,
+ TenantID: options.TenantID,
+ })
+ if err == nil {
+ creds = append(creds, azdCred)
+ } else {
+ errorMessages = append(errorMessages, credNameAzureDeveloperCLI+": "+err.Error())
+ creds = append(creds, &defaultCredentialErrorReporter{credType: credNameAzureDeveloperCLI, err: err})
+ }
}
if len(errorMessages) > 0 {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
index cd30bedd5..53ae9767f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go
@@ -21,22 +21,31 @@ const credNameDeviceCode = "DeviceCodeCredential"
type DeviceCodeCredentialOptions struct {
azcore.ClientOptions
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
- // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
-
- // ClientID is the ID of the application users will authenticate to.
- // Defaults to the ID of an Azure development application.
+ AuthenticationRecord AuthenticationRecord
+
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
+ // ClientID is the ID of the application to which users will authenticate. When not set, users
+ // will authenticate to an Azure development application, which isn't recommended for production
+ // scenarios. In production, developers should instead register their applications and assign
+ // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more
+ // information.
ClientID string
- // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
- // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
+ // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
+ // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary
// to acquire a token.
- disableAutomaticAuthentication bool
+ DisableAutomaticAuthentication bool
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
@@ -49,9 +58,6 @@ type DeviceCodeCredentialOptions struct {
// applications.
TenantID string
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
-
// UserPrompt controls how the credential presents authentication instructions. The credential calls
// this function with authentication details when it receives a device code. By default, the credential
// prints these details to stdout.
@@ -101,12 +107,12 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC
cp.init()
msalOpts := publicClientOptions{
AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
+ Cache: cp.Cache,
ClientOptions: cp.ClientOptions,
DeviceCodePrompt: cp.UserPrompt,
- DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
+ DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication,
DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
- Record: cp.authenticationRecord,
- TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
+ Record: cp.AuthenticationRecord,
}
c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts)
if err != nil {
@@ -116,8 +122,9 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC
return &DeviceCodeCredential{client: c}, nil
}
-// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+// Authenticate prompts a user to log in via the device code flow. Subsequent
+// GetToken calls will automatically use the returned AuthenticationRecord.
+func (c *DeviceCodeCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
index b30f5474f..9b5e17dcd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go
@@ -60,21 +60,13 @@ type EnvironmentCredentialOptions struct {
// Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this
// function isn't able to parse your certificate, use [ClientCertificateCredential] instead.
//
-// # User with username and password
-//
-// AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations".
-//
-// AZURE_CLIENT_ID: client ID of the application the user will authenticate to
-//
-// AZURE_USERNAME: a username (usually an email address)
-//
-// AZURE_PASSWORD: the user's password
-//
// # Configuration for multitenant applications
//
// To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants
// the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set
// AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type EnvironmentCredential struct {
cred azcore.TokenCredential
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
index 35fa01d13..a6d7c6cbc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go
@@ -38,18 +38,30 @@ type AuthenticationFailedError struct {
// RawResponse is the HTTP response motivating the error, if available.
RawResponse *http.Response
- credType string
- message string
- err error
+ credType, message string
+ omitResponse bool
}
-func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error {
- return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err}
+func newAuthenticationFailedError(credType, message string, resp *http.Response) error {
+ return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp}
+}
+
+// newAuthenticationFailedErrorFromMSAL creates an AuthenticationFailedError from an MSAL error.
+// If the error is an MSAL CallErr, the new error includes an HTTP response and not the MSAL error
+// message, because that message is redundant given the response. If the original error isn't a
+// CallErr, the returned error incorporates its message.
+func newAuthenticationFailedErrorFromMSAL(credType string, err error) error {
+ msg := ""
+ res := getResponseFromError(err)
+ if res == nil {
+ msg = err.Error()
+ }
+ return newAuthenticationFailedError(credType, msg, res)
}
// Error implements the error interface. Note that the message contents are not contractual and can change over time.
func (e *AuthenticationFailedError) Error() string {
- if e.RawResponse == nil {
+ if e.RawResponse == nil || e.omitResponse {
return e.credType + ": " + e.message
}
msg := &bytes.Buffer{}
@@ -62,7 +74,7 @@ func (e *AuthenticationFailedError) Error() string {
fmt.Fprintln(msg, "Request information not available")
}
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
- fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status)
+ fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status)
fmt.Fprintln(msg, "--------------------------------------------------------------------------------")
body, err := runtime.Payload(e.RawResponse)
switch {
@@ -91,8 +103,6 @@ func (e *AuthenticationFailedError) Error() string {
anchor = "client-secret"
case credNameManagedIdentity:
anchor = "managed-id"
- case credNameUserPassword:
- anchor = "username-password"
case credNameWorkloadIdentity:
anchor = "workload"
}
@@ -109,17 +119,17 @@ func (*AuthenticationFailedError) NonRetriable() {
var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil)
-// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
+// AuthenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token
// because the credential requires user interaction and is configured not to request it automatically.
-type authenticationRequiredError struct {
+type AuthenticationRequiredError struct {
credentialUnavailableError
// TokenRequestOptions for the required token. Pass this to the credential's Authenticate method.
TokenRequestOptions policy.TokenRequestOptions
}
-func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
- return &authenticationRequiredError{
+func newAuthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error {
+ return &AuthenticationRequiredError{
credentialUnavailableError: credentialUnavailableError{
credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively",
},
@@ -128,8 +138,8 @@ func newauthenticationRequiredError(credType string, tro policy.TokenRequestOpti
}
var (
- _ credentialUnavailable = (*authenticationRequiredError)(nil)
- _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil)
+ _ credentialUnavailable = (*AuthenticationRequiredError)(nil)
+ _ errorinfo.NonRetriable = (*AuthenticationRequiredError)(nil)
)
type credentialUnavailable interface {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
index 04ea962b4..6dd5b3d64 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work
@@ -1,4 +1,4 @@
-go 1.18
+go 1.23.0
use (
.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
deleted file mode 100644
index c592f283b..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum
+++ /dev/null
@@ -1,60 +0,0 @@
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w=
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM=
-github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8=
-github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
-github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
-github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
-golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
-golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc=
-golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
-golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
-golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
-golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
-golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
-golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
-golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
index 056785a8a..ec89de9b5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go
@@ -20,22 +20,31 @@ const credNameBrowser = "InteractiveBrowserCredential"
type InteractiveBrowserCredentialOptions struct {
azcore.ClientOptions
- // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire
- // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant.
+ // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to
+ // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to
+ // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant.
AdditionallyAllowedTenants []string
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
-
- // ClientID is the ID of the application users will authenticate to.
- // Defaults to the ID of an Azure development application.
+ AuthenticationRecord AuthenticationRecord
+
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
+ // ClientID is the ID of the application to which users will authenticate. When not set, users
+ // will authenticate to an Azure development application, which isn't recommended for production
+ // scenarios. In production, developers should instead register their applications and assign
+ // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more
+ // information.
ClientID string
- // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
- // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary
+ // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate.
+ // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary
// to acquire a token.
- disableAutomaticAuthentication bool
+ DisableAutomaticAuthentication bool
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
@@ -54,9 +63,6 @@ type InteractiveBrowserCredentialOptions struct {
// TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the
// "organizations" tenant, which can authenticate work and school accounts.
TenantID string
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
func (o *InteractiveBrowserCredentialOptions) init() {
@@ -82,13 +88,13 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption
cp.init()
msalOpts := publicClientOptions{
AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants,
+ Cache: cp.Cache,
ClientOptions: cp.ClientOptions,
- DisableAutomaticAuthentication: cp.disableAutomaticAuthentication,
+ DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication,
DisableInstanceDiscovery: cp.DisableInstanceDiscovery,
LoginHint: cp.LoginHint,
- Record: cp.authenticationRecord,
+ Record: cp.AuthenticationRecord,
RedirectURL: cp.RedirectURL,
- TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions,
}
c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts)
if err != nil {
@@ -97,8 +103,9 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption
return &InteractiveBrowserCredential{client: c}, nil
}
-// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+// Authenticate opens the default browser so a user can log in. Subsequent
+// GetToken calls will automatically use the returned AuthenticationRecord.
+func (c *InteractiveBrowserCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go
new file mode 100644
index 000000000..c0cfe7606
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go
@@ -0,0 +1,86 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+package internal
+
+import (
+ "sync"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
+)
+
+// Cache represents a persistent cache that makes authentication data available across processes.
+// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's
+// [persistent user authentication example] shows how to use a persistent cache to reuse user
+// logins across application runs. For service principal credential types such as
+// [ClientCertificateCredential], simply set the Cache field on the credential options.
+//
+// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
+type Cache struct {
+ // impl is a pointer so a Cache can carry persistent state across copies
+ impl *impl
+}
+
+// impl is a Cache's private implementation
+type impl struct {
+ // factory constructs storage implementations
+ factory func(bool) (cache.ExportReplace, error)
+ // cae and noCAE are previously constructed storage implementations. CAE
+ // and non-CAE tokens must be stored separately because MSAL's cache doesn't
+ // observe token claims. If a single storage implementation held both kinds
+ // of tokens, it could create a reauthentication or error loop by returning
+ // a non-CAE token lacking a required claim.
+ cae, noCAE cache.ExportReplace
+ // mu synchronizes around cae and noCAE
+ mu *sync.RWMutex
+}
+
+func (i *impl) exportReplace(cae bool) (cache.ExportReplace, error) {
+ if i == nil {
+ // zero-value Cache: return a nil ExportReplace and MSAL will cache in memory
+ return nil, nil
+ }
+ var (
+ err error
+ xr cache.ExportReplace
+ )
+ i.mu.RLock()
+ xr = i.cae
+ if !cae {
+ xr = i.noCAE
+ }
+ i.mu.RUnlock()
+ if xr != nil {
+ return xr, nil
+ }
+ i.mu.Lock()
+ defer i.mu.Unlock()
+ if cae {
+ if i.cae == nil {
+ if xr, err = i.factory(cae); err == nil {
+ i.cae = xr
+ }
+ }
+ return i.cae, err
+ }
+ if i.noCAE == nil {
+ if xr, err = i.factory(cae); err == nil {
+ i.noCAE = xr
+ }
+ }
+ return i.noCAE, err
+}
+
+// NewCache is the constructor for Cache. It takes a factory instead of an instance
+// because it doesn't know whether the Cache will store both CAE and non-CAE tokens.
+func NewCache(factory func(cae bool) (cache.ExportReplace, error)) Cache {
+ return Cache{&impl{factory: factory, mu: &sync.RWMutex{}}}
+}
+
+// ExportReplace returns an implementation satisfying MSAL's ExportReplace interface.
+// It's a function instead of a method on Cache so packages in azidentity and
+// azidentity/cache can call it while applications can't. "cae" declares whether the
+// caller intends this implementation to store CAE tokens.
+func ExportReplace(c Cache, cae bool) (cache.ExportReplace, error) {
+ return c.impl.exportReplace(cae)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
deleted file mode 100644
index b1b4d5c8b..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package internal
-
-// TokenCachePersistenceOptions contains options for persistent token caching
-type TokenCachePersistenceOptions struct {
- // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text
- // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts
- // encryption before falling back to plaintext storage.
- AllowUnencryptedStorage bool
-
- // Name identifies the cache. Set this to isolate data from other applications.
- Name string
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
deleted file mode 100644
index c1498b464..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go
+++ /dev/null
@@ -1,31 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License.
-
-package internal
-
-import (
- "errors"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
-)
-
-var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching")
-
-// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to
-// use a persistent cache must first import the cache module, which will replace this function
-// with a platform-specific implementation.
-var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) {
- if o == nil {
- return nil, nil
- }
- return nil, errMissingImport
-}
-
-// CacheFilePath returns the path to the cache file for the given name.
-// Defining it in this package makes it available to azidentity tests.
-var CacheFilePath = func(name string) (string, error) {
- return "", errMissingImport
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
index 1c3791777..edd56f9d5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json
@@ -9,7 +9,7 @@
}
},
"GoVersion": [
- "1.22.1"
+ "env:GO_VERSION_PREVIOUS"
],
"IDENTITY_IMDS_AVAILABLE": "1"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
index 6122cc700..b3a0f8588 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go
@@ -8,24 +8,18 @@ package azidentity
import (
"context"
- "encoding/json"
"errors"
"fmt"
"net/http"
- "net/url"
- "os"
- "path/filepath"
- "runtime"
- "strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
+ msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
)
const (
@@ -41,56 +35,20 @@ const (
msiResID = "msi_res_id"
msiSecret = "MSI_SECRET"
imdsAPIVersion = "2018-02-01"
- azureArcAPIVersion = "2019-08-15"
+ azureArcAPIVersion = "2020-06-01"
qpClientID = "client_id"
serviceFabricAPIVersion = "2019-07-01-preview"
)
var imdsProbeTimeout = time.Second
-type msiType int
-
-const (
- msiTypeAppService msiType = iota
- msiTypeAzureArc
- msiTypeAzureML
- msiTypeCloudShell
- msiTypeIMDS
- msiTypeServiceFabric
-)
-
type managedIdentityClient struct {
- azClient *azcore.Client
- endpoint string
- id ManagedIDKind
- msiType msiType
- probeIMDS bool
-}
-
-// arcKeyDirectory returns the directory expected to contain Azure Arc keys
-var arcKeyDirectory = func() (string, error) {
- switch runtime.GOOS {
- case "linux":
- return "/var/opt/azcmagent/tokens", nil
- case "windows":
- pd := os.Getenv("ProgramData")
- if pd == "" {
- return "", errors.New("environment variable ProgramData has no value")
- }
- return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil
- default:
- return "", fmt.Errorf("unsupported OS %q", runtime.GOOS)
- }
-}
-
-type wrappedNumber json.Number
-
-func (n *wrappedNumber) UnmarshalJSON(b []byte) error {
- c := string(b)
- if c == "\"\"" {
- return nil
- }
- return json.Unmarshal(b, (*json.Number)(n))
+ azClient *azcore.Client
+ imds, probeIMDS, userAssigned bool
+ // chained indicates whether the client is part of a credential chain. If true, the client will return
+ // a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response.
+ chained bool
+ msalClient msalManagedIdentityClient
}
// setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS
@@ -138,39 +96,20 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
options = &ManagedIdentityCredentialOptions{}
}
cp := options.ClientOptions
- c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS}
- env := "IMDS"
- if endpoint, ok := os.LookupEnv(identityEndpoint); ok {
- if _, ok := os.LookupEnv(identityHeader); ok {
- if _, ok := os.LookupEnv(identityServerThumbprint); ok {
- env = "Service Fabric"
- c.endpoint = endpoint
- c.msiType = msiTypeServiceFabric
- } else {
- env = "App Service"
- c.endpoint = endpoint
- c.msiType = msiTypeAppService
- }
- } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok {
- env = "Azure Arc"
- c.endpoint = endpoint
- c.msiType = msiTypeAzureArc
- }
- } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok {
- c.endpoint = endpoint
- if _, ok := os.LookupEnv(msiSecret); ok {
- env = "Azure ML"
- c.msiType = msiTypeAzureML
- } else {
- env = "Cloud Shell"
- c.msiType = msiTypeCloudShell
- }
- } else {
+ c := managedIdentityClient{}
+ source, err := managedidentity.GetSource()
+ if err != nil {
+ return nil, err
+ }
+ env := string(source)
+ if source == managedidentity.DefaultToIMDS {
+ env = "IMDS"
+ c.imds = true
c.probeIMDS = options.dac
setIMDSRetryOptionDefaults(&cp.Retry)
}
- client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{
+ c.azClient, err = azcore.NewClient(module, version, azruntime.PipelineOptions{
Tracing: azruntime.TracingOptions{
Namespace: traceNamespace,
},
@@ -178,39 +117,65 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag
if err != nil {
return nil, err
}
- c.azClient = client
+
+ id := managedidentity.SystemAssigned()
+ if options.ID != nil {
+ c.userAssigned = true
+ switch s := options.ID.String(); options.ID.idKind() {
+ case miClientID:
+ id = managedidentity.UserAssignedClientID(s)
+ case miObjectID:
+ id = managedidentity.UserAssignedObjectID(s)
+ case miResourceID:
+ id = managedidentity.UserAssignedResourceID(s)
+ }
+ }
+ msalClient, err := managedidentity.New(id, managedidentity.WithHTTPClient(&c), managedidentity.WithRetryPolicyDisabled())
+ if err != nil {
+ return nil, err
+ }
+ c.msalClient = &msalClient
if log.Should(EventAuthentication) {
- log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env)
+ msg := fmt.Sprintf("%s will use %s managed identity", credNameManagedIdentity, env)
+ if options.ID != nil {
+ kind := "client"
+ switch options.ID.(type) {
+ case ObjectID:
+ kind = "object"
+ case ResourceID:
+ kind = "resource"
+ }
+ msg += fmt.Sprintf(" with %s ID %q", kind, options.ID.String())
+ }
+ log.Write(EventAuthentication, msg)
}
return &c, nil
}
-// provideToken acquires a token for MSAL's confidential.Client, which caches the token
-func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) {
- result := confidential.TokenProviderResult{}
- tk, err := c.authenticate(ctx, c.id, params.Scopes)
- if err == nil {
- result.AccessToken = tk.Token
- result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds())
- }
- return result, err
+func (*managedIdentityClient) CloseIdleConnections() {
+ // do nothing
+}
+
+func (c *managedIdentityClient) Do(r *http.Request) (*http.Response, error) {
+ return doForClient(c.azClient, r)
}
// authenticate acquires an access token
-func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) {
+func (c *managedIdentityClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) {
// no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client,
// and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block
if c.probeIMDS {
+ // send a malformed request (no Metadata header) to IMDS to determine whether the endpoint is available
cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout)
defer cancel()
cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1})
- req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint)
- if err == nil {
- _, err = c.azClient.Pipeline().Do(req)
- }
+ req, err := azruntime.NewRequest(cx, http.MethodGet, imdsEndpoint)
if err != nil {
+ return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err)
+ }
+ if _, err = c.azClient.Pipeline().Do(req); err != nil {
msg := err.Error()
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
msg = "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information"
@@ -221,25 +186,27 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
c.probeIMDS = false
}
- msg, err := c.createAuthRequest(ctx, id, scopes)
- if err != nil {
- return azcore.AccessToken{}, err
- }
-
- resp, err := c.azClient.Pipeline().Do(msg)
- if err != nil {
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err)
- }
-
- if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) {
- return c.createAccessToken(resp)
+ ar, err := c.msalClient.AcquireToken(ctx, tro.Scopes[0], managedidentity.WithClaims(tro.Claims))
+ if err == nil {
+ msg := fmt.Sprintf(scopeLogFmt, credNameManagedIdentity, strings.Join(ar.GrantedScopes, ", "))
+ log.Write(EventAuthentication, msg)
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
-
- if c.msiType == msiTypeIMDS {
+ if c.imds {
+ var ije msalerrors.InvalidJsonErr
+ if c.chained && errors.As(err, &ije) {
+ // an unmarshaling error implies the response is from something other than IMDS such as a proxy listening at
+ // the same address. Return a credentialUnavailableError so credential chains continue to their next credential
+ return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error())
+ }
+ resp := getResponseFromError(err)
+ if resp == nil {
+ return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err)
+ }
switch resp.StatusCode {
case http.StatusBadRequest:
- if id != nil {
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil)
+ if c.userAssigned {
+ return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp)
}
msg := "failed to authenticate a system assigned identity"
if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 {
@@ -255,247 +222,6 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi
}
}
}
-
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil)
-}
-
-func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) {
- value := struct {
- // these are the only fields that we use
- Token string `json:"access_token,omitempty"`
- RefreshToken string `json:"refresh_token,omitempty"`
- ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid
- ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string
- }{}
- if err := azruntime.UnmarshalAsJSON(res, &value); err != nil {
- return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err)
- }
- if value.ExpiresIn != "" {
- expiresIn, err := json.Number(value.ExpiresIn).Int64()
- if err != nil {
- return azcore.AccessToken{}, err
- }
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil
- }
- switch v := value.ExpiresOn.(type) {
- case float64:
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil
- case string:
- if expiresOn, err := strconv.Atoi(v); err == nil {
- return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil
- }
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil)
- default:
- msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v)
- return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil)
- }
-}
-
-func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- switch c.msiType {
- case msiTypeIMDS:
- return c.createIMDSAuthRequest(ctx, id, scopes)
- case msiTypeAppService:
- return c.createAppServiceAuthRequest(ctx, id, scopes)
- case msiTypeAzureArc:
- // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service
- key, err := c.getAzureArcSecretKey(ctx, scopes)
- if err != nil {
- msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err)
- return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err)
- }
- return c.createAzureArcAuthRequest(ctx, id, scopes, key)
- case msiTypeAzureML:
- return c.createAzureMLAuthRequest(ctx, id, scopes)
- case msiTypeServiceFabric:
- return c.createServiceFabricAuthRequest(ctx, id, scopes)
- case msiTypeCloudShell:
- return c.createCloudShellAuthRequest(ctx, id, scopes)
- default:
- return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment")
- }
-}
-
-func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- q := request.Raw().URL.Query()
- q.Add("api-version", imdsAPIVersion)
- q.Add("resource", strings.Join(scopes, " "))
- if id != nil {
- if id.idKind() == miResourceID {
- q.Add(msiResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader))
- q := request.Raw().URL.Query()
- q.Add("api-version", "2019-08-01")
- q.Add("resource", scopes[0])
- if id != nil {
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set("secret", os.Getenv(msiSecret))
- q := request.Raw().URL.Query()
- q.Add("api-version", "2017-09-01")
- q.Add("resource", strings.Join(scopes, " "))
- q.Add("clientid", os.Getenv(defaultIdentityClientID))
- if id != nil {
- if id.idKind() == miResourceID {
- log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID")
- q.Set("clientid", "")
- q.Set(miResID, id.String())
- } else {
- q.Set("clientid", id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- q := request.Raw().URL.Query()
- request.Raw().Header.Set("Accept", "application/json")
- request.Raw().Header.Set("Secret", os.Getenv(identityHeader))
- q.Add("api-version", serviceFabricAPIVersion)
- q.Add("resource", strings.Join(scopes, " "))
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime")
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) {
- // create the request to retreive the secret key challenge provided by the HIMDS service
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return "", err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- q := request.Raw().URL.Query()
- q.Add("api-version", azureArcAPIVersion)
- q.Add("resource", strings.Join(resources, " "))
- request.Raw().URL.RawQuery = q.Encode()
- // send the initial request to get the short-lived secret key
- response, err := c.azClient.Pipeline().Do(request)
- if err != nil {
- return "", err
- }
- // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location
- // of the secret key file. Any other status code indicates an error in the request.
- if response.StatusCode != 401 {
- msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode)
- return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil)
- }
- header := response.Header.Get("WWW-Authenticate")
- if len(header) == 0 {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil)
- }
- // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key
- _, p, found := strings.Cut(header, "=")
- if !found {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil)
- }
- expected, err := arcKeyDirectory()
- if err != nil {
- return "", err
- }
- if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") {
- return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil)
- }
- f, err := os.Stat(p)
- if err != nil {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil)
- }
- if s := f.Size(); s > 4096 {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil)
- }
- key, err := os.ReadFile(p)
- if err != nil {
- return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil)
- }
- return string(key), nil
-}
-
-func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
- q := request.Raw().URL.Query()
- q.Add("api-version", azureArcAPIVersion)
- q.Add("resource", strings.Join(resources, " "))
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities")
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- request.Raw().URL.RawQuery = q.Encode()
- return request, nil
-}
-
-func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) {
- request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint)
- if err != nil {
- return nil, err
- }
- request.Raw().Header.Set(headerMetadata, "true")
- data := url.Values{}
- data.Set("resource", strings.Join(scopes, " "))
- dataEncoded := data.Encode()
- body := streaming.NopCloser(strings.NewReader(dataEncoded))
- if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil {
- return nil, err
- }
- if id != nil {
- log.Write(EventAuthentication, "WARNING: Cloud Shell doesn't support user-assigned managed identities")
- q := request.Raw().URL.Query()
- if id.idKind() == miResourceID {
- q.Add(miResID, id.String())
- } else {
- q.Add(qpClientID, id.String())
- }
- }
- return request, nil
+ err = newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err)
+ return azcore.AccessToken{}, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
index 13c043d8e..11b686ccd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go
@@ -14,7 +14,6 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential"
)
const credNameManagedIdentity = "ManagedIdentityCredential"
@@ -22,8 +21,9 @@ const credNameManagedIdentity = "ManagedIdentityCredential"
type managedIdentityIDKind int
const (
- miClientID managedIdentityIDKind = 0
- miResourceID managedIdentityIDKind = 1
+ miClientID managedIdentityIDKind = iota
+ miObjectID
+ miResourceID
)
// ManagedIDKind identifies the ID of a managed identity as either a client or resource ID
@@ -32,7 +32,12 @@ type ManagedIDKind interface {
idKind() managedIdentityIDKind
}
-// ClientID is the client ID of a user-assigned managed identity.
+// ClientID is the client ID of a user-assigned managed identity. [NewManagedIdentityCredential]
+// returns an error when a ClientID is specified on the following platforms:
+//
+// - Azure Arc
+// - Cloud Shell
+// - Service Fabric
type ClientID string
func (ClientID) idKind() managedIdentityIDKind {
@@ -44,7 +49,31 @@ func (c ClientID) String() string {
return string(c)
}
-// ResourceID is the resource ID of a user-assigned managed identity.
+// ObjectID is the object ID of a user-assigned managed identity. [NewManagedIdentityCredential]
+// returns an error when an ObjectID is specified on the following platforms:
+//
+// - Azure Arc
+// - Azure ML
+// - Cloud Shell
+// - Service Fabric
+type ObjectID string
+
+func (ObjectID) idKind() managedIdentityIDKind {
+ return miObjectID
+}
+
+// String returns the string value of the ID.
+func (o ObjectID) String() string {
+ return string(o)
+}
+
+// ResourceID is the resource ID of a user-assigned managed identity. [NewManagedIdentityCredential]
+// returns an error when a ResourceID is specified on the following platforms:
+//
+// - Azure Arc
+// - Azure ML
+// - Cloud Shell
+// - Service Fabric
type ResourceID string
func (ResourceID) idKind() managedIdentityIDKind {
@@ -60,9 +89,10 @@ func (r ResourceID) String() string {
type ManagedIdentityCredentialOptions struct {
azcore.ClientOptions
- // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity
- // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that
- // some platforms don't accept resource IDs.
+ // ID of a managed identity the credential should authenticate. Set this field to use a specific identity instead of
+ // the hosting environment's default. The value may be the identity's client, object, or resource ID.
+ // NewManagedIdentityCredential returns an error when the hosting environment doesn't support user-assigned managed
+ // identities, or the specified kind of ID.
ID ManagedIDKind
// dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have
@@ -73,13 +103,13 @@ type ManagedIdentityCredentialOptions struct {
dac bool
}
-// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities.
+// ManagedIdentityCredential authenticates an [Azure managed identity] in any hosting environment supporting managed identities.
// This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a
-// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities:
-// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
+// user-assigned identity.
+//
+// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview
type ManagedIdentityCredential struct {
- client *confidentialClient
- mic *managedIdentityClient
+ mic *managedIdentityClient
}
// NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options.
@@ -91,38 +121,22 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M
if err != nil {
return nil, err
}
- cred := confidential.NewCredFromTokenProvider(mic.provideToken)
-
- // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key.
- // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL.
- clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY"
- if options.ID != nil {
- clientID = options.ID.String()
- }
- // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value
- c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{
- ClientOptions: options.ClientOptions,
- })
- if err != nil {
- return nil, err
- }
- return &ManagedIdentityCredential{client: c, mic: mic}, nil
+ return &ManagedIdentityCredential{mic: mic}, nil
}
// GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients.
func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) {
var err error
- ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil)
+ ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.mic.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
if len(opts.Scopes) != 1 {
err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity)
return azcore.AccessToken{}, err
}
- // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
+ // managed identity endpoints require a v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here
opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)}
- tk, err := c.client.GetToken(ctx, opts)
- return tk, err
+ return c.mic.GetToken(ctx, opts)
}
var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
index b3d22dbf3..053d1785f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go
@@ -30,12 +30,12 @@ type publicClientOptions struct {
azcore.ClientOptions
AdditionallyAllowedTenants []string
+ Cache Cache
DeviceCodePrompt func(context.Context, DeviceCodeMessage) error
DisableAutomaticAuthentication bool
DisableInstanceDiscovery bool
LoginHint, RedirectURL string
- Record authenticationRecord
- TokenCachePersistenceOptions *tokenCachePersistenceOptions
+ Record AuthenticationRecord
Username, Password string
}
@@ -48,7 +48,7 @@ type publicClient struct {
host string
name string
opts publicClientOptions
- record authenticationRecord
+ record AuthenticationRecord
azClient *azcore.Client
}
@@ -107,19 +107,19 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p
}, nil
}
-func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) {
+func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (AuthenticationRecord, error) {
if tro == nil {
tro = &policy.TokenRequestOptions{}
}
if len(tro.Scopes) == 0 {
if p.defaultScope == nil {
- return authenticationRecord{}, errScopeRequired
+ return AuthenticationRecord{}, errScopeRequired
}
tro.Scopes = p.defaultScope
}
client, mu, err := p.client(*tro)
if err != nil {
- return authenticationRecord{}, err
+ return AuthenticationRecord{}, err
}
mu.Lock()
defer mu.Unlock()
@@ -152,14 +152,9 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti
return p.token(ar, err)
}
if p.opts.DisableAutomaticAuthentication {
- return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro)
+ return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro)
}
- at, err := p.reqToken(ctx, client, tro)
- if err == nil {
- msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", "))
- log.Write(EventAuthentication, msg)
- }
- return at, err
+ return p.reqToken(ctx, client, tro)
}
// reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache.
@@ -222,13 +217,13 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient,
}
func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
- cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE)
+ c, err := internal.ExportReplace(p.opts.Cache, enableCAE)
if err != nil {
return nil, err
}
o := []public.Option{
public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)),
- public.WithCache(cache),
+ public.WithCache(c),
public.WithHTTPClient(p),
}
if enableCAE {
@@ -242,12 +237,13 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) {
func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) {
if err == nil {
+ msg := fmt.Sprintf(scopeLogFmt, p.name, strings.Join(ar.GrantedScopes, ", "))
+ log.Write(EventAuthentication, msg)
p.record, err = newAuthenticationRecord(ar)
} else {
- res := getResponseFromError(err)
- err = newAuthenticationFailedError(p.name, err.Error(), res, err)
+ err = newAuthenticationFailedErrorFromMSAL(p.name, err)
}
- return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err
+ return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err
}
// resolveTenant returns the correct WithTenantID() argument for a token request given the client's
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
index a69bbce34..67f97fbb2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1
@@ -5,7 +5,27 @@
param (
[hashtable] $AdditionalParameters = @{},
- [hashtable] $DeploymentOutputs
+ [hashtable] $DeploymentOutputs,
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $SubscriptionId,
+
+ [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $TenantId,
+
+ [Parameter()]
+ [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')]
+ [string] $TestApplicationId,
+
+ [Parameter(Mandatory = $true)]
+ [ValidateNotNullOrEmpty()]
+ [string] $Environment,
+
+ # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors).
+ [Parameter(ValueFromRemainingArguments = $true)]
+ $RemainingArguments
)
$ErrorActionPreference = 'Stop'
@@ -16,14 +36,15 @@ if ($CI) {
Write-Host "Skipping post-provisioning script because resources weren't deployed"
return
}
- az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID']
- az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID']
+ az cloud set -n $Environment
+ az login --federated-token $env:ARM_OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId
+ az account set --subscription $SubscriptionId
}
Write-Host "Building container"
$image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test"
Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @"
-FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder
+FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder
ENV GOARCH=amd64 GOWORK=off
COPY . /azidentity
WORKDIR /azidentity/testdata/managed-id-test
@@ -50,13 +71,20 @@ $aciName = "azidentity-test"
az container create -g $rg -n $aciName --image $image `
--acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
--assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
+ --cpu 1 `
+ --ip-address Public `
+ --memory 1.0 `
+ --os-type Linux `
--role "Storage Blob Data Reader" `
--scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) `
-e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) `
- AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) `
- AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
- FUNCTIONS_CUSTOMHANDLER_PORT=80
-Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName"
+ AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) `
+ AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) `
+ AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) `
+ AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) `
+ FUNCTIONS_CUSTOMHANDLER_PORT=80
+$aciIP = az container show -g $rg -n $aciName --query ipAddress.ip --output tsv
+Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_IP;]$aciIP"
# Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip
Write-Host "Deploying to Azure Functions"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
index 2a2165293..135feb017 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep
@@ -135,6 +135,14 @@ resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) {
name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY'
value: deployResources ? usermgdid.id : null
}
+ {
+ name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID'
+ value: deployResources ? usermgdid.properties.clientId : null
+ }
+ {
+ name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID'
+ value: deployResources ? usermgdid.properties.principalId : null
+ }
{
name: 'AzureWebJobsStorage'
value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}'
@@ -217,3 +225,4 @@ output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAs
output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : ''
output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : ''
+output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? usermgdid.properties.principalId : ''
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
index 294ed81e9..5791e7d22 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go
@@ -17,6 +17,11 @@ import (
const credNameUserPassword = "UsernamePasswordCredential"
// UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential.
+//
+// Deprecated: UsernamePasswordCredential is deprecated because it can't support multifactor
+// authentication. See [Entra ID documentation] for migration guidance.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type UsernamePasswordCredentialOptions struct {
azcore.ClientOptions
@@ -25,24 +30,31 @@ type UsernamePasswordCredentialOptions struct {
// application is registered.
AdditionallyAllowedTenants []string
- // authenticationRecord returned by a call to a credential's Authenticate method. Set this option
+ // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option
// to enable the credential to use data from a previous authentication.
- authenticationRecord authenticationRecord
+ AuthenticationRecord AuthenticationRecord
+
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
-
- // tokenCachePersistenceOptions enables persistent token caching when not nil.
- tokenCachePersistenceOptions *tokenCachePersistenceOptions
}
// UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication,
// because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible
-// with any form of multi-factor authentication, and the application must already have user or admin consent.
+// with any form of multifactor authentication, and the application must already have user or admin consent.
// This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts.
+//
+// Deprecated: this credential is deprecated because it can't support multifactor authentication. See [Entra ID documentation]
+// for migration guidance.
+//
+// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa
type UsernamePasswordCredential struct {
client *publicClient
}
@@ -54,13 +66,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st
options = &UsernamePasswordCredentialOptions{}
}
opts := publicClientOptions{
- AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
- ClientOptions: options.ClientOptions,
- DisableInstanceDiscovery: options.DisableInstanceDiscovery,
- Password: password,
- Record: options.authenticationRecord,
- TokenCachePersistenceOptions: options.tokenCachePersistenceOptions,
- Username: username,
+ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
+ ClientOptions: options.ClientOptions,
+ DisableInstanceDiscovery: options.DisableInstanceDiscovery,
+ Password: password,
+ Record: options.AuthenticationRecord,
+ Username: username,
}
c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts)
if err != nil {
@@ -70,7 +82,7 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st
}
// Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord.
-func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) {
+func (c *UsernamePasswordCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) {
var err error
ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil)
defer func() { endSpan(err) }()
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
index 4305b5d3d..2b767762f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go
@@ -14,5 +14,5 @@ const (
module = "github.com/Azure/azure-sdk-for-go/sdk/" + component
// Version is the semantic version (see http://semver.org) of this module.
- version = "v1.7.0"
+ version = "v1.10.1"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
index 3e43e788e..6fecada2f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go
@@ -39,15 +39,24 @@ type WorkloadIdentityCredentialOptions struct {
// Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the
// application is registered.
AdditionallyAllowedTenants []string
+
+ // Cache is a persistent cache the credential will use to store the tokens it acquires, making
+ // them available to other processes and credential instances. The default, zero value means the
+ // credential will store tokens in memory and not share them with any other credential instance.
+ Cache Cache
+
// ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID.
ClientID string
+
// DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or
// private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata
// from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making
// the application responsible for ensuring the configured authority is valid and trustworthy.
DisableInstanceDiscovery bool
+
// TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID.
TenantID string
+
// TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the
// environment variable AZURE_FEDERATED_TOKEN_FILE.
TokenFilePath string
@@ -81,6 +90,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) (
w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}}
caco := ClientAssertionCredentialOptions{
AdditionallyAllowedTenants: options.AdditionallyAllowedTenants,
+ Cache: options.Cache,
ClientOptions: options.ClientOptions,
DisableInstanceDiscovery: options.DisableInstanceDiscovery,
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
index 4f1dcf1b7..76dadf7d3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go
@@ -44,7 +44,7 @@ func Should(cls Event) bool {
if log.lst == nil {
return false
}
- if log.cls == nil || len(log.cls) == 0 {
+ if len(log.cls) == 0 {
return true
}
for _, c := range log.cls {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
index 238ef42ed..02aa1fb3b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go
@@ -11,9 +11,17 @@ import (
"time"
)
+// backoff sets a minimum wait time between eager update attempts. It's a variable so tests can manipulate it.
+var backoff = func(now, lastAttempt time.Time) bool {
+ return lastAttempt.Add(30 * time.Second).After(now)
+}
+
// AcquireResource abstracts a method for refreshing a temporal resource.
type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error)
+// ShouldRefresh abstracts a method for indicating whether a resource should be refreshed before expiration.
+type ShouldRefresh[TResource, TState any] func(TResource, TState) bool
+
// Resource is a temporal resource (usually a credential) that requires periodic refreshing.
type Resource[TResource, TState any] struct {
// cond is used to synchronize access to the shared resource embodied by the remaining fields
@@ -31,24 +39,43 @@ type Resource[TResource, TState any] struct {
// lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource
lastAttempt time.Time
+ // shouldRefresh indicates whether the resource should be refreshed before expiration
+ shouldRefresh ShouldRefresh[TResource, TState]
+
// acquireResource is the callback function that actually acquires the resource
acquireResource AcquireResource[TResource, TState]
}
// NewResource creates a new Resource that uses the specified AcquireResource for refreshing.
func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] {
- return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar}
+ r := &Resource[TResource, TState]{acquireResource: ar, cond: sync.NewCond(&sync.Mutex{})}
+ r.shouldRefresh = r.expiringSoon
+ return r
+}
+
+// ResourceOptions contains optional configuration for Resource
+type ResourceOptions[TResource, TState any] struct {
+ // ShouldRefresh indicates whether [Resource.Get] should acquire an updated resource despite
+ // the currently held resource not having expired. [Resource.Get] ignores all errors from
+ // refresh attempts triggered by ShouldRefresh returning true, and doesn't call ShouldRefresh
+ // when the resource has expired (it unconditionally updates expired resources). When
+ // ShouldRefresh is nil, [Resource.Get] refreshes the resource if it will expire within 5
+ // minutes.
+ ShouldRefresh ShouldRefresh[TResource, TState]
+}
+
+// NewResourceWithOptions creates a new Resource that uses the specified AcquireResource for refreshing.
+func NewResourceWithOptions[TResource, TState any](ar AcquireResource[TResource, TState], opts ResourceOptions[TResource, TState]) *Resource[TResource, TState] {
+ r := NewResource(ar)
+ if opts.ShouldRefresh != nil {
+ r.shouldRefresh = opts.ShouldRefresh
+ }
+ return r
}
// Get returns the underlying resource.
// If the resource is fresh, no refresh is performed.
func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
- // If the resource is expiring within this time window, update it eagerly.
- // This allows other threads/goroutines to keep running by using the not-yet-expired
- // resource value while one thread/goroutine updates the resource.
- const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration
- const backoff = 30 * time.Second // Minimum wait time between eager update attempts
-
now, acquire, expired := time.Now(), false, false
// acquire exclusive lock
@@ -65,9 +92,8 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) {
break
}
// Getting here means that this thread/goroutine will wait for the updated resource
- } else if er.expiration.Add(-window).Before(now) {
- // The resource is valid but is expiring within the time window
- if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) {
+ } else if er.shouldRefresh(resource, state) {
+ if !(er.acquiring || backoff(now, er.lastAttempt)) {
// If another thread/goroutine is not acquiring/renewing the resource, and none has attempted
// to do so within the last 30 seconds, this thread/goroutine will do it
er.acquiring, acquire = true, true
@@ -121,3 +147,8 @@ func (er *Resource[TResource, TState]) Expire() {
// Reset the expiration as if we never got this resource to begin with
er.expiration = time.Time{}
}
+
+func (er *Resource[TResource, TState]) expiringSoon(TResource, TState) bool {
+ // call time.Now() instead of using Get's value so ShouldRefresh doesn't need a time.Time parameter
+ return er.expiration.Add(-5 * time.Minute).Before(time.Now())
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
index 3b66f7be9..0056d1124 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/CHANGELOG.md
@@ -1,5 +1,89 @@
# Release History
+## 1.6.1 (2025-04-16)
+
+### Bugs Fixed
+* Fixed return value of DownloadBuffer when the HTTPRange count given is greater than the data length. Fixes [#23884](https://github.com/Azure/azure-sdk-for-go/issues/23884)
+
+### Other Changes
+* Updated `azidentity` version to `1.9.0`
+* Updated `azcore` version to `1.18.0`
+
+## 1.6.1-beta.1 (2025-02-12)
+
+### Features Added
+* Upgraded service version to `2025-05-05`.
+
+## 1.6.0 (2025-01-23)
+
+### Features Added
+* Upgraded service version to `2025-01-05`.
+
+## 1.6.0-beta.1 (2025-01-13)
+
+### Features Added
+* Added permissions & resourcetype parameters in listblob response.
+* Added BlobProperties field in BlobPrefix definition in listblob response.
+
+### Bugs Fixed
+* Fix FilterBlob API if Query contains a space character. Fixes [#23546](https://github.com/Azure/azure-sdk-for-go/issues/23546)
+
+## 1.5.0 (2024-11-13)
+
+### Features Added
+* Fix compareHeaders custom sorting algorithm for String To Sign.
+
+## 1.5.0-beta.1 (2024-10-22)
+
+### Other Changes
+* Updated `azcore` version to `1.16.0`
+* Updated `azidentity` version to `1.8.0`
+
+## 1.4.1 (2024-09-18)
+
+### Features Added
+* Added crc64 response header to Put Blob.
+* Upgraded service version to `2024-08-04`.
+
+## 1.4.1-beta.1 (2024-08-27)
+
+### Features Added
+* Upgraded service version to `2024-08-04`.
+
+### Other Changes
+* Updated `azcore` version to `1.14.0`
+
+## 1.4.0 (2024-07-18)
+
+### Other Changes
+* GetProperties() was called twice in DownloadFile method. Enhanced to call it only once, reducing latency.
+* Updated `azcore` version to `1.13.0`
+
+## 1.4.0-beta.1 (2024-06-14)
+
+### Features Added
+* Updated service version to `2024-05-04`.
+
+### Other Changes
+* Updated `azidentity` version to `1.6.0`
+* Updated `azcore` version to `1.12.0`
+
+## 1.3.2 (2024-04-09)
+
+### Bugs Fixed
+* Fixed an issue where GetSASURL() was providing HTTPS SAS, instead of the default http+https SAS. Fixes [#22448](https://github.com/Azure/azure-sdk-for-go/issues/22448)
+
+### Other Changes
+* Integrate `InsecureAllowCredentialWithHTTP` client options.
+* Update dependencies.
+
+## 1.3.1 (2024-02-28)
+
+### Bugs Fixed
+
+* Re-enabled `SharedKeyCredential` authentication mode for non TLS protected endpoints.
+* Use random write in `DownloadFile` method. Fixes [#22426](https://github.com/Azure/azure-sdk-for-go/issues/22426).
+
## 1.3.0 (2024-02-12)
### Bugs Fixed
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md
index 1f51959fa..9fbc90d60 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/README.md
@@ -1,4 +1,7 @@
# Azure Blob Storage module for Go
+[](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob)
+[](https://dev.azure.com/azure-sdk/public/_build/latest?definitionId=2854&branchName=main)
+[](https://img.shields.io/azure-devops/coverage/azure-sdk/public/2854/main)
> Service Version: 2023-11-03
@@ -19,7 +22,7 @@ Key links:
### Prerequisites
-- Go, version 1.18 or higher - [Install Go](https://go.dev/doc/install)
+- [Supported](https://aka.ms/azsdk/go/supported-versions) version of Go - [Install Go](https://go.dev/doc/install)
- Azure subscription - [Create a free account](https://azure.microsoft.com/free/)
- Azure storage account - To create a storage account, use tools including the [Azure portal][storage_account_create_portal],
[Azure PowerShell][storage_account_create_ps], or the [Azure CLI][storage_account_create_cli].
@@ -246,7 +249,7 @@ For more information see the [Code of Conduct FAQ][coc_faq]
or contact [opencode@microsoft.com][coc_contact] with any
additional questions or comments.
-
+
[source]: https://github.com/Azure/azure-sdk-for-go/tree/main/sdk/storage/azblob
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
index a62abfdc0..3bf058976 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob/client.go
@@ -9,19 +9,19 @@ package appendblob
import (
"context"
"errors"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"io"
"os"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ClientOptions contains the optional parameters when creating a Client.
@@ -36,8 +36,8 @@ type Client base.CompositeClient[generated.BlobClient, generated.AppendBlobClien
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
audience := base.GetAudience((*base.ClientOptions)(options))
- authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
+ authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
@@ -292,8 +292,8 @@ func (ab *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
-func (ab *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
- return ab.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o)
+func (ab *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
+ return ab.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o)
}
// SetMetadata changes a blob's metadata.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json
index df7d66f02..11b07dbbc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/assets.json
@@ -2,5 +2,5 @@
"AssetsRepo": "Azure/azure-sdk-assets",
"AssetsRepoPrefixPath": "go",
"TagPrefix": "go/storage/azblob",
- "Tag": "go/storage/azblob_9f40a5a13d"
+ "Tag": "go/storage/azblob_db9a368fe4"
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
index 4175f3312..98a624f50 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/client.go
@@ -9,9 +9,9 @@ package blob
import (
"context"
"io"
- "math"
"os"
"sync"
+ "sync/atomic"
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
@@ -38,8 +38,8 @@ type Client base.Client[generated.BlobClient]
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
audience := base.GetAudience((*base.ClientOptions)(options))
- authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
+ authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
@@ -186,9 +186,9 @@ func (b *Client) GetProperties(ctx context.Context, options *GetPropertiesOption
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
-func (b *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) {
+func (b *Client) SetHTTPHeaders(ctx context.Context, httpHeaders HTTPHeaders, o *SetHTTPHeadersOptions) (SetHTTPHeadersResponse, error) {
opts, leaseAccessConditions, modifiedAccessConditions := o.format()
- resp, err := b.generated().SetHTTPHeaders(ctx, opts, &HTTPHeaders, leaseAccessConditions, modifiedAccessConditions)
+ resp, err := b.generated().SetHTTPHeaders(ctx, opts, &httpHeaders, leaseAccessConditions, modifiedAccessConditions)
return resp, err
}
@@ -335,7 +335,8 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl
if o.BlockSize == 0 {
o.BlockSize = DefaultDownloadBlockSize
}
-
+ dataDownloaded := int64(0)
+ computeReadLength := true
count := o.Range.Count
if count == CountToEnd { // If size not specified, calculate it
// If we don't have the length at all, get it
@@ -344,6 +345,8 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl
return 0, err
}
count = *gr.ContentLength - o.Range.Offset
+ dataDownloaded = count
+ computeReadLength = false
}
if count <= 0 {
@@ -359,7 +362,7 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl
OperationName: "downloadBlobToWriterAt",
TransferSize: count,
ChunkSize: o.BlockSize,
- NumChunks: uint16(((count - 1) / o.BlockSize) + 1),
+ NumChunks: uint64(((count - 1) / o.BlockSize) + 1),
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, chunkStart int64, count int64) error {
downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
@@ -388,6 +391,9 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl
if err != nil {
return err
}
+ if computeReadLength {
+ atomic.AddInt64(&dataDownloaded, *dr.ContentLength)
+ }
err = body.Close()
return err
},
@@ -395,166 +401,7 @@ func (b *Client) downloadBuffer(ctx context.Context, writer io.WriterAt, o downl
if err != nil {
return 0, err
}
- return count, nil
-}
-
-// downloadFile downloads an Azure blob to a Writer. The blocks are downloaded parallely,
-// but written to file serially
-func (b *Client) downloadFile(ctx context.Context, writer io.Writer, o downloadOptions) (int64, error) {
- ctx, cancel := context.WithCancel(ctx)
- defer cancel()
- if o.BlockSize == 0 {
- o.BlockSize = DefaultDownloadBlockSize
- }
-
- if o.Concurrency == 0 {
- o.Concurrency = DefaultConcurrency
- }
-
- count := o.Range.Count
- if count == CountToEnd { //Calculate size if not specified
- gr, err := b.GetProperties(ctx, o.getBlobPropertiesOptions())
- if err != nil {
- return 0, err
- }
- count = *gr.ContentLength - o.Range.Offset
- }
-
- if count <= 0 {
- // The file is empty, there is nothing to download.
- return 0, nil
- }
-
- progress := int64(0)
- progressLock := &sync.Mutex{}
-
- // helper routine to get body
- getBodyForRange := func(ctx context.Context, chunkStart, size int64) (io.ReadCloser, error) {
- downloadBlobOptions := o.getDownloadBlobOptions(HTTPRange{
- Offset: chunkStart + o.Range.Offset,
- Count: size,
- }, nil)
- dr, err := b.DownloadStream(ctx, downloadBlobOptions)
- if err != nil {
- return nil, err
- }
-
- var body io.ReadCloser = dr.NewRetryReader(ctx, &o.RetryReaderOptionsPerBlock)
- if o.Progress != nil {
- rangeProgress := int64(0)
- body = streaming.NewResponseProgress(
- body,
- func(bytesTransferred int64) {
- diff := bytesTransferred - rangeProgress
- rangeProgress = bytesTransferred
- progressLock.Lock()
- progress += diff
- o.Progress(progress)
- progressLock.Unlock()
- })
- }
-
- return body, nil
- }
-
- // if file fits in a single buffer, we'll download here.
- if count <= o.BlockSize {
- body, err := getBodyForRange(ctx, int64(0), count)
- if err != nil {
- return 0, err
- }
- defer body.Close()
-
- return io.Copy(writer, body)
- }
-
- buffers := shared.NewMMBPool(int(o.Concurrency), o.BlockSize)
- defer buffers.Free()
-
- numChunks := uint16((count-1)/o.BlockSize + 1)
- for bufferCounter := float64(0); bufferCounter < math.Min(float64(numChunks), float64(o.Concurrency)); bufferCounter++ {
- if _, err := buffers.Grow(); err != nil {
- return 0, err
- }
- }
-
- acquireBuffer := func() ([]byte, error) {
- return <-buffers.Acquire(), nil
- }
-
- blocks := make([]chan []byte, numChunks)
- for b := range blocks {
- blocks[b] = make(chan []byte)
- }
-
- /*
- * We have created as many channels as the number of chunks we have.
- * Each downloaded block will be sent to the channel matching its
- * sequence number, i.e. 0th block is sent to 0th channel, 1st block
- * to 1st channel and likewise. The blocks are then read and written
- * to the file serially by below goroutine. Do note that the blocks
- * are still downloaded parallelly from n/w, only serialized
- * and written to file here.
- */
- writerError := make(chan error)
- writeSize := int64(0)
- go func(ch chan error) {
- for _, block := range blocks {
- select {
- case <-ctx.Done():
- return
- case block := <-block:
- n, err := writer.Write(block)
- writeSize += int64(n)
- buffers.Release(block[:cap(block)])
- if err != nil {
- ch <- err
- return
- }
- }
- }
- ch <- nil
- }(writerError)
-
- // Prepare and do parallel download.
- err := shared.DoBatchTransfer(ctx, &shared.BatchTransferOptions{
- OperationName: "downloadBlobToWriterAt",
- TransferSize: count,
- ChunkSize: o.BlockSize,
- NumChunks: numChunks,
- Concurrency: o.Concurrency,
- Operation: func(ctx context.Context, chunkStart int64, count int64) error {
- buff, err := acquireBuffer()
- if err != nil {
- return err
- }
-
- body, err := getBodyForRange(ctx, chunkStart, count)
- if err != nil {
- buffers.Release(buff)
- return nil
- }
-
- _, err = io.ReadFull(body, buff[:count])
- body.Close()
- if err != nil {
- return err
- }
-
- blockIndex := chunkStart / o.BlockSize
- blocks[blockIndex] <- buff[:count]
- return nil
- },
- })
-
- if err != nil {
- return 0, err
- }
- // error from writer thread.
- if err = <-writerError; err != nil {
- return 0, err
- }
- return writeSize, nil
+ return dataDownloaded, nil
}
// DownloadStream reads a range of bytes from a blob. The response also includes the blob's properties and metadata.
@@ -596,11 +443,6 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil
}
do := (*downloadOptions)(o)
- filePointer, err := file.Seek(0, io.SeekCurrent)
- if err != nil {
- return 0, err
- }
-
// 1. Calculate the size of the destination file
var size int64
@@ -613,6 +455,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil
return 0, err
}
size = *props.ContentLength - do.Range.Offset
+ do.Range.Count = size
} else {
size = count
}
@@ -629,15 +472,7 @@ func (b *Client) DownloadFile(ctx context.Context, file *os.File, o *DownloadFil
}
if size > 0 {
- writeSize, err := b.downloadFile(ctx, file, *do)
- if err != nil {
- return 0, err
- }
- _, err = file.Seek(filePointer, io.SeekStart)
- if err != nil {
- return 0, err
- }
- return writeSize, nil
+ return b.downloadBuffer(ctx, file, *do)
} else { // if the blob's size is 0, there is no need in downloading it
return 0, nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go
index 1deedb590..a625c9953 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob/retry_reader.go
@@ -101,7 +101,6 @@ func (s *RetryReader) setResponse(r io.ReadCloser) {
// Read from retry reader
func (s *RetryReader) Read(p []byte) (n int, err error) {
for try := int32(0); ; try++ {
- //fmt.Println(try) // Comment out for debugging.
if s.countWasBounded && s.info.Range.Count == CountToEnd {
// User specified an original count and the remaining bytes are 0, return 0, EOF
return 0, io.EOF
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
index 1e8da9e32..7a3ab3fe8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob/client.go
@@ -11,9 +11,6 @@ import (
"context"
"encoding/base64"
"errors"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"io"
"math"
"os"
@@ -22,16 +19,19 @@ import (
"time"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/to"
"github.com/Azure/azure-sdk-for-go/sdk/internal/log"
"github.com/Azure/azure-sdk-for-go/sdk/internal/uuid"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ClientOptions contains the optional parameters when creating a Client.
@@ -46,8 +46,8 @@ type Client base.CompositeClient[generated.BlobClient, generated.BlockBlobClient
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
audience := base.GetAudience((*base.ClientOptions)(options))
- authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
+ authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
@@ -364,8 +364,8 @@ func (bb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
-func (bb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
- return bb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o)
+func (bb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
+ return bb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o)
}
// SetMetadata changes a blob's metadata.
@@ -474,7 +474,7 @@ func (bb *Client) uploadFromReader(ctx context.Context, reader io.ReaderAt, actu
OperationName: "uploadFromReader",
TransferSize: actualSize,
ChunkSize: o.BlockSize,
- NumChunks: uint16(((actualSize - 1) / o.BlockSize) + 1),
+ NumChunks: uint64(((actualSize - 1) / o.BlockSize) + 1),
Concurrency: o.Concurrency,
Operation: func(ctx context.Context, offset int64, chunkSize int64) error {
// This function is called once per block.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
index 030350338..2259336b2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/ci.yml
@@ -21,8 +21,8 @@ pr:
- sdk/storage/azblob
-stages:
- - template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
+extends:
+ template: /eng/pipelines/templates/jobs/archetype-sdk-client.yml
parameters:
ServiceDirectory: 'storage/azblob'
RunLiveTests: true
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
index c511d8a79..f36a16247 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/client.go
@@ -95,7 +95,7 @@ func (c *Client) ServiceClient() *service.Client {
}
// CreateContainer is a lifecycle method to creates a new container under the specified account.
-// If the container with the same name already exists, a ResourceExistsError will be raised.
+// If a container with the same name already exists, a ContainerAlreadyExists error will be returned.
// This method returns a client with which to interact with the newly created container.
func (c *Client) CreateContainer(ctx context.Context, containerName string, o *CreateContainerOptions) (CreateContainerResponse, error) {
return c.svc.CreateContainer(ctx, containerName, o)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go
index b23798cb1..0e43ed015 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/client.go
@@ -11,8 +11,6 @@ import (
"context"
"errors"
"fmt"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"net/http"
"net/url"
"time"
@@ -20,8 +18,10 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/appendblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blockblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
@@ -43,8 +43,8 @@ type Client base.Client[generated.ContainerClient]
// - options - client options; pass nil to accept the default values
func NewClient(containerURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
audience := base.GetAudience((*base.ClientOptions)(options))
- authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
+ authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
@@ -348,7 +348,6 @@ func (c *Client) GetSASURL(permissions sas.ContainerPermissions, expiry time.Tim
// Containers do not have snapshots, nor versions.
qps, err := sas.BlobSignatureValues{
Version: sas.Version,
- Protocol: sas.ProtocolHTTPS,
ContainerName: urlParts.ContainerName,
Permissions: permissions.String(),
StartTime: st,
@@ -371,7 +370,8 @@ func (c *Client) NewBatchBuilder() (*BatchBuilder, error) {
switch cred := c.credential().(type) {
case *azcore.TokenCredential:
- authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(c.getClientOptions()))
+ conOptions := c.getClientOptions()
+ authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP)
case *SharedKeyCredential:
authPolicy = exported.NewSharedKeyCredPolicy(cred)
case nil:
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go
index 61d936ab7..ccee90dbc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container/models.go
@@ -7,10 +7,11 @@
package container
import (
- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
"reflect"
"time"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
)
@@ -126,7 +127,7 @@ func (o *GetPropertiesOptions) format() (*generated.ContainerClientGetProperties
// ListBlobsInclude indicates what additional information the service should return with each blob.
type ListBlobsInclude struct {
- Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions bool
+ Copy, Metadata, Snapshots, UncommittedBlobs, Deleted, Tags, Versions, LegalHold, ImmutabilityPolicy, DeletedWithVersions, Permissions bool
}
func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem {
@@ -166,7 +167,9 @@ func (l ListBlobsInclude) format() []generated.ListBlobsIncludeItem {
if l.Versions {
include = append(include, generated.ListBlobsIncludeItemVersions)
}
-
+ if l.Permissions {
+ include = append(include, generated.ListBlobsIncludeItemPermissions)
+ }
return include
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go
index e4b076601..b0be323b7 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/shared_key_credential.go
@@ -11,9 +11,7 @@ import (
"crypto/hmac"
"crypto/sha256"
"encoding/base64"
- "errors"
"fmt"
- "github.com/Azure/azure-sdk-for-go/sdk/internal/errorinfo"
"net/http"
"net/url"
"sort"
@@ -111,6 +109,91 @@ func getHeader(key string, headers map[string][]string) string {
return ""
}
+func getWeightTables() [][]int {
+ tableLv0 := [...]int{
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x71c, 0x0, 0x71f, 0x721, 0x723, 0x725,
+ 0x0, 0x0, 0x0, 0x72d, 0x803, 0x0, 0x0, 0x733, 0x0, 0xd03, 0xd1a, 0xd1c, 0xd1e,
+ 0xd20, 0xd22, 0xd24, 0xd26, 0xd28, 0xd2a, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25, 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51,
+ 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99, 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9,
+ 0x0, 0x0, 0x0, 0x743, 0x744, 0x748, 0xe02, 0xe09, 0xe0a, 0xe1a, 0xe21, 0xe23, 0xe25,
+ 0xe2c, 0xe32, 0xe35, 0xe36, 0xe48, 0xe51, 0xe70, 0xe7c, 0xe7e, 0xe89, 0xe8a, 0xe91, 0xe99,
+ 0xe9f, 0xea2, 0xea4, 0xea6, 0xea7, 0xea9, 0x0, 0x74c, 0x0, 0x750, 0x0,
+ }
+ tableLv2 := [...]int{
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8012, 0x0, 0x0, 0x0, 0x0, 0x0, 0x8212, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ }
+ tables := [][]int{tableLv0[:], tableLv2[:]}
+ return tables
+}
+
+// compareHeaders performs a multi-level, weight-based comparison of two strings
+func compareHeaders(lhs, rhs string, tables [][]int) int {
+ currLevel, i, j := 0, 0, 0
+ n := len(tables)
+ lhsLen := len(lhs)
+ rhsLen := len(rhs)
+
+ for currLevel < n {
+ if currLevel == (n-1) && i != j {
+ if i > j {
+ return -1
+ }
+ if i < j {
+ return 1
+ }
+ return 0
+ }
+
+ var w1, w2 int
+
+ // Check bounds before accessing lhs[i]
+ if i < lhsLen {
+ w1 = tables[currLevel][lhs[i]]
+ } else {
+ w1 = 0x1
+ }
+
+ // Check bounds before accessing rhs[j]
+ if j < rhsLen {
+ w2 = tables[currLevel][rhs[j]]
+ } else {
+ w2 = 0x1
+ }
+
+ if w1 == 0x1 && w2 == 0x1 {
+ i = 0
+ j = 0
+ currLevel++
+ } else if w1 == w2 {
+ i++
+ j++
+ } else if w1 == 0 {
+ i++
+ } else if w2 == 0 {
+ j++
+ } else {
+ if w1 < w2 {
+ return -1
+ }
+ if w1 > w2 {
+ return 1
+ }
+ return 0
+ }
+ }
+ return 0
+}
+
func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) string {
cm := map[string][]string{}
for k, v := range headers {
@@ -127,7 +210,11 @@ func (c *SharedKeyCredential) buildCanonicalizedHeader(headers http.Header) stri
for key := range cm {
keys = append(keys, key)
}
- sort.Strings(keys)
+ tables := getWeightTables()
+ // Sort the keys using the custom comparator
+ sort.Slice(keys, func(i, j int) bool {
+ return compareHeaders(keys[i], keys[j], tables) < 0
+ })
ch := bytes.NewBufferString("")
for i, key := range keys {
if i > 0 {
@@ -204,10 +291,6 @@ func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
return req.Next()
}
- if err := checkHTTPSForAuth(req); err != nil {
- return nil, err
- }
-
if d := getHeader(shared.HeaderXmsDate, req.Raw().Header); d == "" {
req.Raw().Header.Set(shared.HeaderXmsDate, time.Now().UTC().Format(http.TimeFormat))
}
@@ -229,10 +312,3 @@ func (s *SharedKeyCredPolicy) Do(req *policy.Request) (*http.Response, error) {
}
return response, err
}
-
-func checkHTTPSForAuth(req *policy.Request) error {
- if strings.ToLower(req.Raw().URL.Scheme) != "https" {
- return errorinfo.NonRetriableError(errors.New("authenticated requests are not permitted for non TLS protected (https) endpoints"))
- }
- return nil
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
index 6ee5452f8..a18d0c670 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported/version.go
@@ -8,5 +8,5 @@ package exported
const (
ModuleName = "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
- ModuleVersion = "v1.3.0"
+ ModuleVersion = "v1.6.1"
)
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
index 92dc7e2d3..96d47c4ad 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/autorest.md
@@ -7,7 +7,7 @@ go: true
clear-output-folder: false
version: "^3.0.0"
license-header: MICROSOFT_MIT_NO_VERSION
-input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/a32d0b2423d19835246bb2ef92941503bfd5e734/specification/storage/data-plane/Microsoft.BlobStorage/preview/2021-12-02/blob.json"
+input-file: "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/ae95eb6a4701d844bada7d1c4f5ecf4a7444e5b8/specification/storage/data-plane/Microsoft.BlobStorage/stable/2025-01-05/blob.json"
credential-scope: "https://storage.azure.com/.default"
output-folder: ../generated
file-prefix: "zz_"
@@ -19,10 +19,55 @@ modelerfour:
seal-single-value-enum-by-default: true
lenient-model-deduplication: true
export-clients: true
-use: "@autorest/go@4.0.0-preview.61"
+use: "@autorest/go@4.0.0-preview.65"
```
-### Updating service version to 2023-11-03
+### Add a Properties field to the BlobPrefix definition
+```yaml
+directive:
+- from: swagger-document
+ where: $.definitions
+ transform: >
+ $.BlobPrefix.properties["Properties"] = {
+ "type": "object",
+ "$ref": "#/definitions/BlobPropertiesInternal"
+ };
+```
+
+### Add Owner,Group,Permissions,Acl,ResourceType in ListBlob Response
+``` yaml
+directive:
+- from: swagger-document
+ where: $.definitions
+ transform: >
+ $.BlobPropertiesInternal.properties["Owner"] = {
+ "type" : "string",
+ };
+ $.BlobPropertiesInternal.properties["Group"] = {
+ "type" : "string",
+ };
+ $.BlobPropertiesInternal.properties["Permissions"] = {
+ "type" : "string",
+ };
+ $.BlobPropertiesInternal.properties["Acl"] = {
+ "type" : "string",
+ };
+ $.BlobPropertiesInternal.properties["ResourceType"] = {
+ "type" : "string",
+ };
+
+```
+
+### Add permissions in ListBlobsInclude
+``` yaml
+directive:
+- from: swagger-document
+ where: $.parameters.ListBlobsInclude
+ transform: >
+ $.items.enum.push("permissions");
+```
+
+### Updating service version to 2025-05-05
```yaml
directive:
- from:
@@ -35,8 +80,21 @@ directive:
where: $
transform: >-
return $.
- replaceAll(`[]string{"2021-12-02"}`, `[]string{ServiceVersion}`).
- replaceAll(`2021-12-02`, `2023-11-03`);
+ replaceAll(`[]string{"2025-01-05"}`, `[]string{ServiceVersion}`);
+```
+
+### Fix CRC Response Header in PutBlob response
+``` yaml
+directive:
+- from: swagger-document
+ where: $["x-ms-paths"]["/{containerName}/{blob}?BlockBlob"].put.responses["201"].headers
+ transform: >
+ $["x-ms-content-crc64"] = {
+ "x-ms-client-name": "ContentCRC64",
+ "type": "string",
+ "format": "byte",
+ "description": "Returned for a block blob so that the client can check the integrity of message content."
+ };
```
### Undo breaking change with BlobName
@@ -293,7 +351,7 @@ directive:
replace(/SourceIfMatch\s+\*string/g, `SourceIfMatch *azcore.ETag`).
replace(/SourceIfNoneMatch\s+\*string/g, `SourceIfNoneMatch *azcore.ETag`);
-- from: zz_response_types.go
+- from: zz_responses.go
where: $
transform: >-
return $.
@@ -364,11 +422,13 @@ directive:
``` yaml
directive:
- - from: zz_service_client.go
- where: $
- transform: >-
- return $.
- replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`)
+- from:
+ - zz_service_client.go
+ - zz_container_client.go
+ where: $
+ transform: >-
+ return $.
+ replace(/req.Raw\(\).URL.RawQuery \= reqQP.Encode\(\)/g, `req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)`);
```
### Change `where` parameter in blob filtering to be required
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go
index 8f2bbbb7c..553cd227e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/constants.go
@@ -6,4 +6,4 @@
package generated
-const ServiceVersion = "2023-11-03"
+const ServiceVersion = "2025-05-05"
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go
index 797318611..9f9e145b5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_appendblob_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -32,7 +29,7 @@ type AppendBlobClient struct {
// AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - body - Initial data
// - options - AppendBlobClientAppendBlockOptions contains the optional parameters for the AppendBlobClient.AppendBlock method.
@@ -72,54 +69,60 @@ func (client *AppendBlobClient) appendBlockCreateRequest(ctx context.Context, co
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
if options != nil && options.TransactionalContentMD5 != nil {
req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
}
- if options != nil && options.TransactionalContentCRC64 != nil {
- req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
- if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil {
- req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil {
req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)}
}
+ if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil {
+ req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ if options != nil && options.TransactionalContentCRC64 != nil {
+ req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ }
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
}
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.StructuredBodyType != nil {
+ req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ if options != nil && options.StructuredContentLength != nil {
+ req.Raw().Header["x-ms-structured-content-length"] = []string{strconv.FormatInt(*options.StructuredContentLength, 10)}
}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
@@ -190,6 +193,9 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
+ if val := resp.Header.Get("x-ms-structured-body"); val != "" {
+ result.StructuredBodyType = &val
+ }
if val := resp.Header.Get("x-ms-version"); val != "" {
result.Version = &val
}
@@ -201,7 +207,7 @@ func (client *AppendBlobClient) appendBlockHandleResponse(resp *http.Response) (
// created with x-ms-blob-type set to AppendBlob. Append Block is supported only on version 2015-02-21 version or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - sourceURL - Specify a URL to the copy source.
// - contentLength - The length of the request.
// - options - AppendBlobClientAppendBlockFromURLOptions contains the optional parameters for the AppendBlobClient.AppendBlockFromURL
@@ -244,76 +250,76 @@ func (client *AppendBlobClient) appendBlockFromURLCreateRequest(ctx context.Cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
- if options != nil && options.SourceRange != nil {
- req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
- }
- if options != nil && options.SourceContentMD5 != nil {
- req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
- }
- if options != nil && options.SourceContentcrc64 != nil {
- req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
- }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
if options != nil && options.TransactionalContentMD5 != nil {
req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
}
- if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
- req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
- if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
- req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
}
- if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
- req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil {
+ req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)}
}
if appendPositionAccessConditions != nil && appendPositionAccessConditions.MaxSize != nil {
req.Raw().Header["x-ms-blob-condition-maxsize"] = []string{strconv.FormatInt(*appendPositionAccessConditions.MaxSize, 10)}
}
- if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil {
- req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
+ if options != nil && options.CopySourceAuthorization != nil {
+ req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+ req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+ req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
+ }
+ if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+ req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
- req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
- req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ if options != nil && options.SourceContentcrc64 != nil {
+ req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
+ }
+ if options != nil && options.SourceContentMD5 != nil {
+ req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if options != nil && options.CopySourceAuthorization != nil {
- req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
+ if options != nil && options.SourceRange != nil {
+ req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -387,7 +393,7 @@ func (client *AppendBlobClient) appendBlockFromURLHandleResponse(resp *http.Resp
// Create - The Create Append Blob operation creates a new append blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - options - AppendBlobClientCreateOptions contains the optional parameters for the AppendBlobClient.Create method.
// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
@@ -424,10 +430,25 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
- req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
+ req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+ req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
}
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
@@ -438,21 +459,15 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
- req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
- }
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+ req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ req.Raw().Header["x-ms-blob-type"] = []string{"AppendBlob"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
- req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -460,44 +475,35 @@ func (client *AppendBlobClient) createCreateRequest(ctx context.Context, content
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- if options != nil && options.BlobTagsString != nil {
- req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ if options != nil && options.ImmutabilityPolicyMode != nil {
+ req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
- if options != nil && options.ImmutabilityPolicyMode != nil {
- req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if options != nil && options.LegalHold != nil {
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
+ }
+ if options != nil && options.BlobTagsString != nil {
+ req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -560,7 +566,7 @@ func (client *AppendBlobClient) createHandleResponse(resp *http.Response) (Appen
// or later.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - AppendBlobClientSealOptions contains the optional parameters for the AppendBlobClient.Seal method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -596,29 +602,29 @@ func (client *AppendBlobClient) sealCreateRequest(ctx context.Context, options *
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if appendPositionAccessConditions != nil && appendPositionAccessConditions.AppendPosition != nil {
req.Raw().Header["x-ms-blob-condition-appendpos"] = []string{strconv.FormatInt(*appendPositionAccessConditions.AppendPosition, 10)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go
index fe568a96c..54b299989 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blob_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -32,7 +29,7 @@ type BlobClient struct {
// blob with zero length and full metadata.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - copyID - The copy identifier provided in the x-ms-copy-id header of the original Copy Blob operation.
// - options - BlobClientAbortCopyFromURLOptions contains the optional parameters for the BlobClient.AbortCopyFromURL method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -67,15 +64,15 @@ func (client *BlobClient) abortCopyFromURLCreateRequest(ctx context.Context, cop
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
req.Raw().Header["x-ms-copy-action"] = []string{"abort"}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -104,7 +101,7 @@ func (client *BlobClient) abortCopyFromURLHandleResponse(resp *http.Response) (B
// AcquireLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
@@ -140,31 +137,31 @@ func (client *BlobClient) acquireLeaseCreateRequest(ctx context.Context, duratio
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
- req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
- if options != nil && options.ProposedLeaseID != nil {
- req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
+ req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
+ if options != nil && options.ProposedLeaseID != nil {
+ req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -206,7 +203,7 @@ func (client *BlobClient) acquireLeaseHandleResponse(resp *http.Response) (BlobC
// BreakLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientBreakLeaseOptions contains the optional parameters for the BlobClient.BreakLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *BlobClient) BreakLease(ctx context.Context, options *BlobClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (BlobClientBreakLeaseResponse, error) {
@@ -239,30 +236,30 @@ func (client *BlobClient) breakLeaseCreateRequest(ctx context.Context, options *
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"break"}
- if options != nil && options.BreakPeriod != nil {
- req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ req.Raw().Header["x-ms-lease-action"] = []string{"break"}
+ if options != nil && options.BreakPeriod != nil {
+ req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -309,7 +306,7 @@ func (client *BlobClient) breakLeaseHandleResponse(resp *http.Response) (BlobCli
// ChangeLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - leaseID - Specifies the current lease ID on the resource.
// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
@@ -346,29 +343,29 @@ func (client *BlobClient) changeLeaseCreateRequest(ctx context.Context, leaseID
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"change"}
- req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
- req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
+ req.Raw().Header["x-ms-lease-action"] = []string{"change"}
+ req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+ req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -411,7 +408,7 @@ func (client *BlobClient) changeLeaseHandleResponse(resp *http.Response) (BlobCl
// until the copy is complete.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@@ -450,77 +447,77 @@ func (client *BlobClient) copyFromURLCreateRequest(ctx context.Context, copySour
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-requires-sync"] = []string{"true"}
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
- }
- if options != nil && options.Tier != nil {
- req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
- req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
- req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
- req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
- req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-copy-source"] = []string{copySource}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if options != nil && options.Tier != nil {
+ req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if options != nil && options.SourceContentMD5 != nil {
- req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
+ req.Raw().Header["x-ms-copy-source"] = []string{copySource}
+ if options != nil && options.CopySourceAuthorization != nil {
+ req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
}
- if options != nil && options.BlobTagsString != nil {
- req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ if options != nil && options.CopySourceTags != nil {
+ req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
}
- if options != nil && options.ImmutabilityPolicyExpiry != nil {
- req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
+ if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
+ req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
}
+ if options != nil && options.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
+ }
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
if options != nil && options.LegalHold != nil {
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
}
- if options != nil && options.CopySourceAuthorization != nil {
- req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
}
- if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
- req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
+ req.Raw().Header["x-ms-requires-sync"] = []string{"true"}
+ if options != nil && options.SourceContentMD5 != nil {
+ req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
}
- if options != nil && options.CopySourceTags != nil {
- req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
+ req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
+ req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
+ }
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.BlobTagsString != nil {
+ req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -585,7 +582,7 @@ func (client *BlobClient) copyFromURLHandleResponse(resp *http.Response) (BlobCl
// CreateSnapshot - The Create Snapshot operation creates a read-only snapshot of a blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientCreateSnapshotOptions contains the optional parameters for the BlobClient.CreateSnapshot method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
// - CPKScopeInfo - CPKScopeInfo contains a group of parameters for the BlobClient.SetMetadata method.
@@ -621,12 +618,24 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -634,35 +643,23 @@ func (client *BlobClient) createSnapshotCreateRequest(ctx context.Context, optio
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -724,7 +721,7 @@ func (client *BlobClient) createSnapshotHandleResponse(resp *http.Response) (Blo
// return an HTTP status code of 404 (ResourceNotFound).
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -753,45 +750,45 @@ func (client *BlobClient) deleteCreateRequest(ctx context.Context, options *Blob
return nil, err
}
reqQP := req.Raw().URL.Query()
+ if options != nil && options.DeleteType != nil {
+ reqQP.Set("deletetype", string(*options.DeleteType))
+ }
if options != nil && options.Snapshot != nil {
reqQP.Set("snapshot", *options.Snapshot)
}
- if options != nil && options.VersionID != nil {
- reqQP.Set("versionid", *options.VersionID)
- }
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- if options != nil && options.DeleteType != nil {
- reqQP.Set("deletetype", string(*options.DeleteType))
+ if options != nil && options.VersionID != nil {
+ reqQP.Set("versionid", *options.VersionID)
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
- }
- if options != nil && options.DeleteSnapshots != nil {
- req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.DeleteSnapshots != nil {
+ req.Raw().Header["x-ms-delete-snapshots"] = []string{string(*options.DeleteSnapshots)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -820,7 +817,7 @@ func (client *BlobClient) deleteHandleResponse(resp *http.Response) (BlobClientD
// DeleteImmutabilityPolicy - The Delete Immutability Policy operation deletes the immutability policy on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientDeleteImmutabilityPolicyOptions contains the optional parameters for the BlobClient.DeleteImmutabilityPolicy
// method.
func (client *BlobClient) DeleteImmutabilityPolicy(ctx context.Context, options *BlobClientDeleteImmutabilityPolicyOptions) (BlobClientDeleteImmutabilityPolicyResponse, error) {
@@ -849,15 +846,21 @@ func (client *BlobClient) deleteImmutabilityPolicyCreateRequest(ctx context.Cont
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "immutabilityPolicies")
+ if options != nil && options.Snapshot != nil {
+ reqQP.Set("snapshot", *options.Snapshot)
+ }
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
+ if options != nil && options.VersionID != nil {
+ reqQP.Set("versionid", *options.VersionID)
+ }
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -887,7 +890,7 @@ func (client *BlobClient) deleteImmutabilityPolicyHandleResponse(resp *http.Resp
// can also call Download to read a snapshot.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientDownloadOptions contains the optional parameters for the BlobClient.Download method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@@ -920,25 +923,32 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl
if options != nil && options.Snapshot != nil {
reqQP.Set("snapshot", *options.Snapshot)
}
- if options != nil && options.VersionID != nil {
- reqQP.Set("versionid", *options.VersionID)
- }
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
+ if options != nil && options.VersionID != nil {
+ reqQP.Set("versionid", *options.VersionID)
+ }
req.Raw().URL.RawQuery = reqQP.Encode()
runtime.SkipBodyDownload(req)
- if options != nil && options.Range != nil {
- req.Raw().Header["x-ms-range"] = []string{*options.Range}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
- if options != nil && options.RangeGetContentMD5 != nil {
- req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
}
- if options != nil && options.RangeGetContentCRC64 != nil {
- req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -946,29 +956,25 @@ func (client *BlobClient) downloadCreateRequest(ctx context.Context, options *Bl
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ if options != nil && options.Range != nil {
+ req.Raw().Header["x-ms-range"] = []string{*options.Range}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ if options != nil && options.RangeGetContentCRC64 != nil {
+ req.Raw().Header["x-ms-range-get-content-crc64"] = []string{strconv.FormatBool(*options.RangeGetContentCRC64)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RangeGetContentMD5 != nil {
+ req.Raw().Header["x-ms-range-get-content-md5"] = []string{strconv.FormatBool(*options.RangeGetContentMD5)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ if options != nil && options.StructuredBodyType != nil {
+ req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType}
}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -1167,15 +1173,25 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
}
for hh := range resp.Header {
if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") {
- if result.Metadata == nil {
- result.Metadata = map[string]*string{}
+ if result.ObjectReplicationRules == nil {
+ result.ObjectReplicationRules = map[string]*string{}
}
- result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh))
+ result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh))
}
}
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
+ if val := resp.Header.Get("x-ms-structured-body"); val != "" {
+ result.StructuredBodyType = &val
+ }
+ if val := resp.Header.Get("x-ms-structured-content-length"); val != "" {
+ structuredContentLength, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return BlobClientDownloadResponse{}, err
+ }
+ result.StructuredContentLength = &structuredContentLength
+ }
if val := resp.Header.Get("x-ms-tag-count"); val != "" {
tagCount, err := strconv.ParseInt(val, 10, 64)
if err != nil {
@@ -1195,7 +1211,7 @@ func (client *BlobClient) downloadHandleResponse(resp *http.Response) (BlobClien
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
func (client *BlobClient) GetAccountInfo(ctx context.Context, options *BlobClientGetAccountInfoOptions) (BlobClientGetAccountInfoResponse, error) {
var err error
@@ -1222,11 +1238,17 @@ func (client *BlobClient) getAccountInfoCreateRequest(ctx context.Context, optio
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
+ reqQP.Set("restype", "account")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -1246,6 +1268,13 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo
}
result.Date = &date
}
+ if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" {
+ isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val)
+ if err != nil {
+ return BlobClientGetAccountInfoResponse{}, err
+ }
+ result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled
+ }
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
@@ -1262,7 +1291,7 @@ func (client *BlobClient) getAccountInfoHandleResponse(resp *http.Response) (Blo
// for the blob. It does not return the content of the blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@@ -1295,45 +1324,45 @@ func (client *BlobClient) getPropertiesCreateRequest(ctx context.Context, option
if options != nil && options.Snapshot != nil {
reqQP.Set("snapshot", *options.Snapshot)
}
- if options != nil && options.VersionID != nil {
- reqQP.Set("versionid", *options.VersionID)
- }
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
- }
- if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
- req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
- }
- if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
- req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
+ if options != nil && options.VersionID != nil {
+ reqQP.Set("versionid", *options.VersionID)
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+ req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+ req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -1549,10 +1578,10 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
}
for hh := range resp.Header {
if len(hh) > len("x-ms-or-") && strings.EqualFold(hh[:len("x-ms-or-")], "x-ms-or-") {
- if result.Metadata == nil {
- result.Metadata = map[string]*string{}
+ if result.ObjectReplicationRules == nil {
+ result.ObjectReplicationRules = map[string]*string{}
}
- result.Metadata[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh))
+ result.ObjectReplicationRules[hh[len("x-ms-or-"):]] = to.Ptr(resp.Header.Get(hh))
}
}
if val := resp.Header.Get("x-ms-rehydrate-priority"); val != "" {
@@ -1580,7 +1609,7 @@ func (client *BlobClient) getPropertiesHandleResponse(resp *http.Response) (Blob
// GetTags - The Get Tags operation enables users to get the tags associated with a blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientGetTagsOptions contains the optional parameters for the BlobClient.GetTags method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -1610,17 +1639,17 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "tags")
- if options != nil && options.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
- }
if options != nil && options.Snapshot != nil {
reqQP.Set("snapshot", *options.Snapshot)
}
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
if options != nil && options.VersionID != nil {
reqQP.Set("versionid", *options.VersionID)
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@@ -1630,7 +1659,7 @@ func (client *BlobClient) getTagsCreateRequest(ctx context.Context, options *Blo
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -1662,7 +1691,7 @@ func (client *BlobClient) getTagsHandleResponse(resp *http.Response) (BlobClient
// Query - The Query operation enables users to select/project on blob data by providing simple query expressions.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientQueryOptions contains the optional parameters for the BlobClient.Query method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@@ -1701,38 +1730,38 @@ func (client *BlobClient) queryCreateRequest(ctx context.Context, options *BlobC
}
req.Raw().URL.RawQuery = reqQP.Encode()
runtime.SkipBodyDownload(req)
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
- }
- if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
- req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
- }
- if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
- req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
- }
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
+ req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
+ req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.QueryRequest != nil {
if err := runtime.MarshalAsXML(req, *options.QueryRequest); err != nil {
return nil, err
@@ -1896,7 +1925,7 @@ func (client *BlobClient) queryHandleResponse(resp *http.Response) (BlobClientQu
// ReleaseLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - leaseID - Specifies the current lease ID on the resource.
// - options - BlobClientReleaseLeaseOptions contains the optional parameters for the BlobClient.ReleaseLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -1930,28 +1959,28 @@ func (client *BlobClient) releaseLeaseCreateRequest(ctx context.Context, leaseID
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"release"}
- req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
+ req.Raw().Header["x-ms-lease-action"] = []string{"release"}
+ req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -1990,7 +2019,7 @@ func (client *BlobClient) releaseLeaseHandleResponse(resp *http.Response) (BlobC
// RenewLease - [Update] The Lease Blob operation establishes and manages a lock on a blob for write and delete operations
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - leaseID - Specifies the current lease ID on the resource.
// - options - BlobClientRenewLeaseOptions contains the optional parameters for the BlobClient.RenewLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -2024,28 +2053,28 @@ func (client *BlobClient) renewLeaseCreateRequest(ctx context.Context, leaseID s
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
- req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ }
+ req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
+ req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -2087,7 +2116,7 @@ func (client *BlobClient) renewLeaseHandleResponse(resp *http.Response) (BlobCli
// SetExpiry - Sets the time a blob will expire and be deleted.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - expiryOptions - Required. Indicates mode of the expiry time
// - options - BlobClientSetExpiryOptions contains the optional parameters for the BlobClient.SetExpiry method.
func (client *BlobClient) SetExpiry(ctx context.Context, expiryOptions ExpiryOptions, options *BlobClientSetExpiryOptions) (BlobClientSetExpiryResponse, error) {
@@ -2120,7 +2149,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@@ -2128,7 +2157,7 @@ func (client *BlobClient) setExpiryCreateRequest(ctx context.Context, expiryOpti
if options != nil && options.ExpiresOn != nil {
req.Raw().Header["x-ms-expiry-time"] = []string{*options.ExpiresOn}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -2167,7 +2196,7 @@ func (client *BlobClient) setExpiryHandleResponse(resp *http.Response) (BlobClie
// SetHTTPHeaders - The Set HTTP Headers operation sets system properties on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientSetHTTPHeadersOptions contains the optional parameters for the BlobClient.SetHTTPHeaders method.
// - BlobHTTPHeaders - BlobHTTPHeaders contains a group of parameters for the BlobClient.SetHTTPHeaders method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -2202,14 +2231,24 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
- req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
- }
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
- req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+ req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
}
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
@@ -2217,32 +2256,22 @@ func (client *BlobClient) setHTTPHeadersCreateRequest(ctx context.Context, optio
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
+ req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+ req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
- req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -2288,7 +2317,7 @@ func (client *BlobClient) setHTTPHeadersHandleResponse(resp *http.Response) (Blo
// SetImmutabilityPolicy - The Set Immutability Policy operation sets the immutability policy on the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientSetImmutabilityPolicyOptions contains the optional parameters for the BlobClient.SetImmutabilityPolicy
// method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -2318,24 +2347,30 @@ func (client *BlobClient) setImmutabilityPolicyCreateRequest(ctx context.Context
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "immutabilityPolicies")
+ if options != nil && options.Snapshot != nil {
+ reqQP.Set("snapshot", *options.Snapshot)
+ }
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if options != nil && options.VersionID != nil {
+ reqQP.Set("versionid", *options.VersionID)
}
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if options != nil && options.ImmutabilityPolicyExpiry != nil {
- req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if options != nil && options.ImmutabilityPolicyMode != nil {
req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -2374,7 +2409,7 @@ func (client *BlobClient) setImmutabilityPolicyHandleResponse(resp *http.Respons
// SetLegalHold - The Set Legal Hold operation sets a legal hold on the blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - legalHold - Specified if a legal hold should be set on the blob.
// - options - BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
func (client *BlobClient) SetLegalHold(ctx context.Context, legalHold bool, options *BlobClientSetLegalHoldOptions) (BlobClientSetLegalHoldResponse, error) {
@@ -2403,16 +2438,22 @@ func (client *BlobClient) setLegalHoldCreateRequest(ctx context.Context, legalHo
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "legalhold")
+ if options != nil && options.Snapshot != nil {
+ reqQP.Set("snapshot", *options.Snapshot)
+ }
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
+ if options != nil && options.VersionID != nil {
+ reqQP.Set("versionid", *options.VersionID)
+ }
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(legalHold)}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -2449,7 +2490,7 @@ func (client *BlobClient) setLegalHoldHandleResponse(resp *http.Response) (BlobC
// pairs
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - CPKInfo - CPKInfo contains a group of parameters for the BlobClient.Download method.
@@ -2485,15 +2526,24 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -2501,32 +2551,23 @@ func (client *BlobClient) setMetadataCreateRequest(ctx context.Context, options
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -2581,7 +2622,7 @@ func (client *BlobClient) setMetadataHandleResponse(resp *http.Response) (BlobCl
// SetTags - The Set Tags operation enables users to set tags on a blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - tags - Blob tags
// - options - BlobClientSetTagsOptions contains the optional parameters for the BlobClient.SetTags method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -2619,23 +2660,23 @@ func (client *BlobClient) setTagsCreateRequest(ctx context.Context, tags BlobTag
reqQP.Set("versionid", *options.VersionID)
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.TransactionalContentMD5 != nil {
req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
}
- if options != nil && options.TransactionalContentCRC64 != nil {
- req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
- }
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
+ if options != nil && options.TransactionalContentCRC64 != nil {
+ req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := runtime.MarshalAsXML(req, tags); err != nil {
return nil, err
}
@@ -2670,7 +2711,7 @@ func (client *BlobClient) setTagsHandleResponse(resp *http.Response) (BlobClient
// storage type. This operation does not update the blob's ETag.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - tier - Indicates the tier to be set on the blob.
// - options - BlobClientSetTierOptions contains the optional parameters for the BlobClient.SetTier method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -2704,28 +2745,28 @@ func (client *BlobClient) setTierCreateRequest(ctx context.Context, tier AccessT
if options != nil && options.Snapshot != nil {
reqQP.Set("snapshot", *options.Snapshot)
}
- if options != nil && options.VersionID != nil {
- reqQP.Set("versionid", *options.VersionID)
- }
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
+ if options != nil && options.VersionID != nil {
+ reqQP.Set("versionid", *options.VersionID)
+ }
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["x-ms-access-tier"] = []string{string(tier)}
- if options != nil && options.RehydratePriority != nil {
- req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)}
- }
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ }
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ if options != nil && options.RehydratePriority != nil {
+ req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -2747,7 +2788,7 @@ func (client *BlobClient) setTierHandleResponse(resp *http.Response) (BlobClient
// StartCopyFromURL - The Start Copy From URL operation copies a blob or an internet resource to a new blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@@ -2785,6 +2826,41 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.Tier != nil {
+ req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["x-ms-copy-source"] = []string{copySource}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ }
+ if options != nil && options.ImmutabilityPolicyMode != nil {
+ req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
+ }
+ if options != nil && options.ImmutabilityPolicyExpiry != nil {
+ req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
+ }
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
+ if options != nil && options.LegalHold != nil {
+ req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
+ }
if options != nil && options.Metadata != nil {
for k, v := range options.Metadata {
if v != nil {
@@ -2792,66 +2868,31 @@ func (client *BlobClient) startCopyFromURLCreateRequest(ctx context.Context, cop
}
}
}
- if options != nil && options.Tier != nil {
- req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
- }
if options != nil && options.RehydratePriority != nil {
req.Raw().Header["x-ms-rehydrate-priority"] = []string{string(*options.RehydratePriority)}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
- req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
- req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ if options != nil && options.SealBlob != nil {
+ req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
- }
- req.Raw().Header["x-ms-copy-source"] = []string{copySource}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
- }
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
- if options != nil && options.SealBlob != nil {
- req.Raw().Header["x-ms-seal-blob"] = []string{strconv.FormatBool(*options.SealBlob)}
- }
- if options != nil && options.ImmutabilityPolicyExpiry != nil {
- req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
- }
- if options != nil && options.ImmutabilityPolicyMode != nil {
- req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
- }
- if options != nil && options.LegalHold != nil {
- req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -2899,7 +2940,7 @@ func (client *BlobClient) startCopyFromURLHandleResponse(resp *http.Response) (B
// Undelete - Undelete a blob that was previously soft deleted
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - BlobClientUndeleteOptions contains the optional parameters for the BlobClient.Undelete method.
func (client *BlobClient) Undelete(ctx context.Context, options *BlobClientUndeleteOptions) (BlobClientUndeleteResponse, error) {
var err error
@@ -2931,11 +2972,11 @@ func (client *BlobClient) undeleteCreateRequest(ctx context.Context, options *Bl
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go
index b6115b50a..324db7651 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_blockblob_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -36,7 +33,7 @@ type BlockBlobClient struct {
// belong to.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - blocks - Blob Blocks.
// - options - BlockBlobClientCommitBlockListOptions contains the optional parameters for the BlockBlobClient.CommitBlockList
// method.
@@ -75,11 +72,30 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.TransactionalContentMD5 != nil {
+ req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.Tier != nil {
+ req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
+ }
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
- req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+ req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
}
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
@@ -90,24 +106,17 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
}
- if options != nil && options.TransactionalContentMD5 != nil {
- req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+ req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if options != nil && options.TransactionalContentCRC64 != nil {
req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
}
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
- }
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
- }
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
- req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -115,47 +124,35 @@ func (client *BlockBlobClient) commitBlockListCreateRequest(ctx context.Context,
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if options != nil && options.Tier != nil {
- req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- if options != nil && options.BlobTagsString != nil {
- req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ if options != nil && options.ImmutabilityPolicyMode != nil {
+ req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
- if options != nil && options.ImmutabilityPolicyMode != nil {
- req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if options != nil && options.LegalHold != nil {
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
+ }
+ if options != nil && options.BlobTagsString != nil {
+ req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := runtime.MarshalAsXML(req, blocks); err != nil {
return nil, err
}
@@ -227,7 +224,7 @@ func (client *BlockBlobClient) commitBlockListHandleResponse(resp *http.Response
// GetBlockList - The Get Block List operation retrieves the list of blocks that have been uploaded as part of a block blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - listType - Specifies whether to return the list of committed blocks, the list of uncommitted blocks, or both lists together.
// - options - BlockBlobClientGetBlockListOptions contains the optional parameters for the BlockBlobClient.GetBlockList method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -257,26 +254,26 @@ func (client *BlockBlobClient) getBlockListCreateRequest(ctx context.Context, li
return nil, err
}
reqQP := req.Raw().URL.Query()
+ reqQP.Set("blocklisttype", string(listType))
reqQP.Set("comp", "blocklist")
if options != nil && options.Snapshot != nil {
reqQP.Set("snapshot", *options.Snapshot)
}
- reqQP.Set("blocklisttype", string(listType))
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -332,7 +329,7 @@ func (client *BlockBlobClient) getBlockListHandleResponse(resp *http.Response) (
// Block from URL API in conjunction with Put Block List.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
@@ -375,13 +372,31 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
if options != nil && options.TransactionalContentMD5 != nil {
req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
}
- req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
- req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.Tier != nil {
+ req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
+ req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+ req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
}
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
@@ -392,21 +407,25 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
- req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+ req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
}
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
+ req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ req.Raw().Header["x-ms-copy-source"] = []string{copySource}
+ if options != nil && options.CopySourceAuthorization != nil {
+ req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
- req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+ if options != nil && options.CopySourceBlobProperties != nil {
+ req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)}
+ }
+ if options != nil && options.CopySourceTags != nil {
+ req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -414,66 +433,44 @@ func (client *BlockBlobClient) putBlobFromURLCreateRequest(ctx context.Context,
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if options != nil && options.Tier != nil {
- req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
- req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
- req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
+ }
+ if options != nil && options.SourceContentMD5 != nil {
+ req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfTags != nil {
req.Raw().Header["x-ms-source-if-tags"] = []string{*sourceModifiedAccessConditions.SourceIfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- if options != nil && options.SourceContentMD5 != nil {
- req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
if options != nil && options.BlobTagsString != nil {
req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
}
- req.Raw().Header["x-ms-copy-source"] = []string{copySource}
- if options != nil && options.CopySourceBlobProperties != nil {
- req.Raw().Header["x-ms-copy-source-blob-properties"] = []string{strconv.FormatBool(*options.CopySourceBlobProperties)}
- }
- if options != nil && options.CopySourceAuthorization != nil {
- req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
- }
- if options != nil && options.CopySourceTags != nil {
- req.Raw().Header["x-ms-copy-source-tag-option"] = []string{string(*options.CopySourceTags)}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -535,7 +532,7 @@ func (client *BlockBlobClient) putBlobFromURLHandleResponse(resp *http.Response)
// StageBlock - The Stage Block operation creates a new block to be committed as part of a blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
// parameter must be the same size for each block.
@@ -570,21 +567,25 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("comp", "block")
reqQP.Set("blockid", blockID)
+ reqQP.Set("comp", "block")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
if options != nil && options.TransactionalContentMD5 != nil {
req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
if options != nil && options.TransactionalContentCRC64 != nil {
req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -592,17 +593,19 @@ func (client *BlockBlobClient) stageBlockCreateRequest(ctx context.Context, bloc
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.StructuredBodyType != nil {
+ req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType}
+ }
+ if options != nil && options.StructuredContentLength != nil {
+ req.Raw().Header["x-ms-structured-content-length"] = []string{strconv.FormatInt(*options.StructuredContentLength, 10)}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
@@ -652,6 +655,9 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
+ if val := resp.Header.Get("x-ms-structured-body"); val != "" {
+ result.StructuredBodyType = &val
+ }
if val := resp.Header.Get("x-ms-version"); val != "" {
result.Version = &val
}
@@ -662,7 +668,7 @@ func (client *BlockBlobClient) stageBlockHandleResponse(resp *http.Response) (Bl
// are read from a URL.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - blockID - A valid Base64 string value that identifies the block. Prior to encoding, the string must be less than or equal
// to 64 bytes in size. For a given blob, the length of the value specified for the blockid
// parameter must be the same size for each block.
@@ -700,22 +706,23 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("comp", "block")
reqQP.Set("blockid", blockID)
+ reqQP.Set("comp", "block")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
- req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
- if options != nil && options.SourceRange != nil {
- req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if options != nil && options.SourceContentMD5 != nil {
- req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
+ req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
+ if options != nil && options.CopySourceAuthorization != nil {
+ req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
}
- if options != nil && options.SourceContentcrc64 != nil {
- req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -723,35 +730,34 @@ func (client *BlockBlobClient) stageBlockFromURLCreateRequest(ctx context.Contex
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
- req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ if options != nil && options.SourceContentcrc64 != nil {
+ req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
- req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ if options != nil && options.SourceContentMD5 != nil {
+ req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if options != nil && options.CopySourceAuthorization != nil {
- req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
+ if options != nil && options.SourceRange != nil {
+ req.Raw().Header["x-ms-source-range"] = []string{*options.SourceRange}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -810,7 +816,7 @@ func (client *BlockBlobClient) stageBlockFromURLHandleResponse(resp *http.Respon
// the content of a block blob, use the Put Block List operation.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - body - Initial data
// - options - BlockBlobClientUploadOptions contains the optional parameters for the BlockBlobClient.Upload method.
@@ -848,13 +854,31 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
if options != nil && options.TransactionalContentMD5 != nil {
req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
}
- req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
- req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.Tier != nil {
+ req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
+ req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+ req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
}
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
@@ -865,21 +889,18 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
- req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+ req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
}
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
+ req.Raw().Header["x-ms-blob-type"] = []string{"BlockBlob"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if options != nil && options.TransactionalContentCRC64 != nil {
+ req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
- req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -887,50 +908,41 @@ func (client *BlockBlobClient) uploadCreateRequest(ctx context.Context, contentL
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if options != nil && options.Tier != nil {
- req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- if options != nil && options.BlobTagsString != nil {
- req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ if options != nil && options.ImmutabilityPolicyMode != nil {
+ req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
- if options != nil && options.ImmutabilityPolicyMode != nil {
- req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if options != nil && options.LegalHold != nil {
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
}
- if options != nil && options.TransactionalContentCRC64 != nil {
- req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.StructuredBodyType != nil {
+ req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType}
+ }
+ if options != nil && options.StructuredContentLength != nil {
+ req.Raw().Header["x-ms-structured-content-length"] = []string{strconv.FormatInt(*options.StructuredContentLength, 10)}
+ }
+ if options != nil && options.BlobTagsString != nil {
+ req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
@@ -943,6 +955,13 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB
if val := resp.Header.Get("x-ms-client-request-id"); val != "" {
result.ClientRequestID = &val
}
+ if val := resp.Header.Get("x-ms-content-crc64"); val != "" {
+ contentCRC64, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ return BlockBlobClientUploadResponse{}, err
+ }
+ result.ContentCRC64 = contentCRC64
+ }
if val := resp.Header.Get("Content-MD5"); val != "" {
contentMD5, err := base64.StdEncoding.DecodeString(val)
if err != nil {
@@ -983,6 +1002,9 @@ func (client *BlockBlobClient) uploadHandleResponse(resp *http.Response) (BlockB
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
+ if val := resp.Header.Get("x-ms-structured-body"); val != "" {
+ result.StructuredBodyType = &val
+ }
if val := resp.Header.Get("x-ms-version"); val != "" {
result.Version = &val
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go
index 95af9e154..48724a4ce 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_constants.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -346,6 +343,7 @@ const (
ListBlobsIncludeItemImmutabilitypolicy ListBlobsIncludeItem = "immutabilitypolicy"
ListBlobsIncludeItemLegalhold ListBlobsIncludeItem = "legalhold"
ListBlobsIncludeItemMetadata ListBlobsIncludeItem = "metadata"
+ ListBlobsIncludeItemPermissions ListBlobsIncludeItem = "permissions"
ListBlobsIncludeItemSnapshots ListBlobsIncludeItem = "snapshots"
ListBlobsIncludeItemTags ListBlobsIncludeItem = "tags"
ListBlobsIncludeItemUncommittedblobs ListBlobsIncludeItem = "uncommittedblobs"
@@ -361,6 +359,7 @@ func PossibleListBlobsIncludeItemValues() []ListBlobsIncludeItem {
ListBlobsIncludeItemImmutabilitypolicy,
ListBlobsIncludeItemLegalhold,
ListBlobsIncludeItemMetadata,
+ ListBlobsIncludeItemPermissions,
ListBlobsIncludeItemSnapshots,
ListBlobsIncludeItemTags,
ListBlobsIncludeItemUncommittedblobs,
@@ -523,6 +522,7 @@ const (
StorageErrorCodeAuthorizationResourceTypeMismatch StorageErrorCode = "AuthorizationResourceTypeMismatch"
StorageErrorCodeAuthorizationServiceMismatch StorageErrorCode = "AuthorizationServiceMismatch"
StorageErrorCodeAuthorizationSourceIPMismatch StorageErrorCode = "AuthorizationSourceIPMismatch"
+ StorageErrorCodeBlobAccessTierNotSupportedForAccountType StorageErrorCode = "BlobAccessTierNotSupportedForAccountType"
StorageErrorCodeBlobAlreadyExists StorageErrorCode = "BlobAlreadyExists"
StorageErrorCodeBlobArchived StorageErrorCode = "BlobArchived"
StorageErrorCodeBlobBeingRehydrated StorageErrorCode = "BlobBeingRehydrated"
@@ -641,6 +641,7 @@ func PossibleStorageErrorCodeValues() []StorageErrorCode {
StorageErrorCodeAuthorizationResourceTypeMismatch,
StorageErrorCodeAuthorizationServiceMismatch,
StorageErrorCodeAuthorizationSourceIPMismatch,
+ StorageErrorCodeBlobAccessTierNotSupportedForAccountType,
StorageErrorCodeBlobAlreadyExists,
StorageErrorCodeBlobArchived,
StorageErrorCodeBlobBeingRehydrated,
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
index dbc2a293e..61ddc6695 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_container_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -34,7 +31,7 @@ type ContainerClient struct {
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - duration - Specifies the duration of the lease, in seconds, or negative one (-1) for a lease that never expires. A non-infinite
// lease can be between 15 and 60 seconds. A lease duration cannot be changed using
// renew or change.
@@ -70,23 +67,23 @@ func (client *ContainerClient) acquireLeaseCreateRequest(ctx context.Context, du
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
- req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
- if options != nil && options.ProposedLeaseID != nil {
- req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
- }
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-lease-action"] = []string{"acquire"}
+ req.Raw().Header["x-ms-lease-duration"] = []string{strconv.FormatInt(int64(duration), 10)}
+ if options != nil && options.ProposedLeaseID != nil {
+ req.Raw().Header["x-ms-proposed-lease-id"] = []string{*options.ProposedLeaseID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -129,7 +126,7 @@ func (client *ContainerClient) acquireLeaseHandleResponse(resp *http.Response) (
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientBreakLeaseOptions contains the optional parameters for the ContainerClient.BreakLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
func (client *ContainerClient) BreakLease(ctx context.Context, options *ContainerClientBreakLeaseOptions, modifiedAccessConditions *ModifiedAccessConditions) (ContainerClientBreakLeaseResponse, error) {
@@ -162,22 +159,22 @@ func (client *ContainerClient) breakLeaseCreateRequest(ctx context.Context, opti
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"break"}
- if options != nil && options.BreakPeriod != nil {
- req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
- }
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-lease-action"] = []string{"break"}
+ if options != nil && options.BreakPeriod != nil {
+ req.Raw().Header["x-ms-lease-break-period"] = []string{strconv.FormatInt(int64(*options.BreakPeriod), 10)}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -225,7 +222,7 @@ func (client *ContainerClient) breakLeaseHandleResponse(resp *http.Response) (Co
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - leaseID - Specifies the current lease ID on the resource.
// - proposedLeaseID - Proposed lease ID, in a GUID string format. The Blob service returns 400 (Invalid request) if the proposed
// lease ID is not in the correct format. See Guid Constructor (String) for a list of valid GUID
@@ -262,21 +259,21 @@ func (client *ContainerClient) changeLeaseCreateRequest(ctx context.Context, lea
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"change"}
- req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
- req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-lease-action"] = []string{"change"}
+ req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+ req.Raw().Header["x-ms-proposed-lease-id"] = []string{proposedLeaseID}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -319,7 +316,7 @@ func (client *ContainerClient) changeLeaseHandleResponse(resp *http.Response) (C
// fails
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientCreateOptions contains the optional parameters for the ContainerClient.Create method.
// - ContainerCPKScopeInfo - ContainerCPKScopeInfo contains a group of parameters for the ContainerClient.Create method.
func (client *ContainerClient) Create(ctx context.Context, options *ContainerClientCreateOptions, containerCPKScopeInfo *ContainerCPKScopeInfo) (ContainerClientCreateResponse, error) {
@@ -351,18 +348,11 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
- }
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.Access != nil {
req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@@ -372,7 +362,14 @@ func (client *ContainerClient) createCreateRequest(ctx context.Context, options
if containerCPKScopeInfo != nil && containerCPKScopeInfo.PreventEncryptionScopeOverride != nil {
req.Raw().Header["x-ms-deny-encryption-scope-override"] = []string{strconv.FormatBool(*containerCPKScopeInfo.PreventEncryptionScopeOverride)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -412,7 +409,7 @@ func (client *ContainerClient) createHandleResponse(resp *http.Response) (Contai
// deleted during garbage collection
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientDeleteOptions contains the optional parameters for the ContainerClient.Delete method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -445,21 +442,21 @@ func (client *ContainerClient) deleteCreateRequest(ctx context.Context, options
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
- }
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -489,7 +486,7 @@ func (client *ContainerClient) deleteHandleResponse(resp *http.Response) (Contai
// Filter blobs searches within the given container.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - where - Filters the results to return only to return only blobs whose tags match the specified expression.
// - options - ContainerClientFilterBlobsOptions contains the optional parameters for the ContainerClient.FilterBlobs method.
func (client *ContainerClient) FilterBlobs(ctx context.Context, where string, options *ContainerClientFilterBlobsOptions) (ContainerClientFilterBlobsResponse, error) {
@@ -517,27 +514,27 @@ func (client *ContainerClient) filterBlobsCreateRequest(ctx context.Context, whe
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "blobs")
- if options != nil && options.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ if options != nil && options.Include != nil {
+ reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
}
- reqQP.Set("where", where)
if options != nil && options.Marker != nil {
reqQP.Set("marker", *options.Marker)
}
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
- if options != nil && options.Include != nil {
- reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+ reqQP.Set("restype", "container")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ reqQP.Set("where", where)
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -570,7 +567,7 @@ func (client *ContainerClient) filterBlobsHandleResponse(resp *http.Response) (C
// be accessed publicly.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientGetAccessPolicyOptions contains the optional parameters for the ContainerClient.GetAccessPolicy
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -599,20 +596,20 @@ func (client *ContainerClient) getAccessPolicyCreateRequest(ctx context.Context,
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "acl")
+ reqQP.Set("restype", "container")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -657,7 +654,7 @@ func (client *ContainerClient) getAccessPolicyHandleResponse(resp *http.Response
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo
// method.
func (client *ContainerClient) GetAccountInfo(ctx context.Context, options *ContainerClientGetAccountInfoOptions) (ContainerClientGetAccountInfoResponse, error) {
@@ -685,11 +682,17 @@ func (client *ContainerClient) getAccountInfoCreateRequest(ctx context.Context,
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ reqQP.Set("restype", "account")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -709,6 +712,13 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response)
}
result.Date = &date
}
+ if val := resp.Header.Get("x-ms-is-hns-enabled"); val != "" {
+ isHierarchicalNamespaceEnabled, err := strconv.ParseBool(val)
+ if err != nil {
+ return ContainerClientGetAccountInfoResponse{}, err
+ }
+ result.IsHierarchicalNamespaceEnabled = &isHierarchicalNamespaceEnabled
+ }
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
@@ -725,7 +735,7 @@ func (client *ContainerClient) getAccountInfoHandleResponse(resp *http.Response)
// does not include the container's list of blobs
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
func (client *ContainerClient) GetProperties(ctx context.Context, options *ContainerClientGetPropertiesOptions, leaseAccessConditions *LeaseAccessConditions) (ContainerClientGetPropertiesResponse, error) {
@@ -757,15 +767,15 @@ func (client *ContainerClient) getPropertiesCreateRequest(ctx context.Context, o
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -854,7 +864,7 @@ func (client *ContainerClient) getPropertiesHandleResponse(resp *http.Response)
// NewListBlobFlatSegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientListBlobFlatSegmentOptions contains the optional parameters for the ContainerClient.NewListBlobFlatSegmentPager
// method.
//
@@ -865,10 +875,9 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "list")
- if options != nil && options.Prefix != nil {
- reqQP.Set("prefix", *options.Prefix)
+ if options != nil && options.Include != nil {
+ reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
}
if options != nil && options.Marker != nil {
reqQP.Set("marker", *options.Marker)
@@ -876,18 +885,19 @@ func (client *ContainerClient) ListBlobFlatSegmentCreateRequest(ctx context.Cont
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
- if options != nil && options.Include != nil {
- reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+ if options != nil && options.Prefix != nil {
+ reqQP.Set("prefix", *options.Prefix)
}
+ reqQP.Set("restype", "container")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -921,7 +931,7 @@ func (client *ContainerClient) ListBlobFlatSegmentHandleResponse(resp *http.Resp
// NewListBlobHierarchySegmentPager - [Update] The List Blobs operation returns a list of the blobs under the specified container
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - delimiter - When the request includes this parameter, the operation returns a BlobPrefix element in the response body that
// acts as a placeholder for all blobs whose names begin with the same substring up to the
// appearance of the delimiter character. The delimiter may be a single character or a string.
@@ -955,30 +965,30 @@ func (client *ContainerClient) ListBlobHierarchySegmentCreateRequest(ctx context
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "list")
- if options != nil && options.Prefix != nil {
- reqQP.Set("prefix", *options.Prefix)
- }
reqQP.Set("delimiter", delimiter)
+ if options != nil && options.Include != nil {
+ reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+ }
if options != nil && options.Marker != nil {
reqQP.Set("marker", *options.Marker)
}
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
- if options != nil && options.Include != nil {
- reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+ if options != nil && options.Prefix != nil {
+ reqQP.Set("prefix", *options.Prefix)
}
+ reqQP.Set("restype", "container")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -1014,7 +1024,7 @@ func (client *ContainerClient) ListBlobHierarchySegmentHandleResponse(resp *http
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - leaseID - Specifies the current lease ID on the resource.
// - options - ContainerClientReleaseLeaseOptions contains the optional parameters for the ContainerClient.ReleaseLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -1048,20 +1058,20 @@ func (client *ContainerClient) releaseLeaseCreateRequest(ctx context.Context, le
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"release"}
- req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-lease-action"] = []string{"release"}
+ req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -1100,7 +1110,7 @@ func (client *ContainerClient) releaseLeaseHandleResponse(resp *http.Response) (
// Rename - Renames an existing container.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - sourceContainerName - Required. Specifies the name of the container to rename.
// - options - ContainerClientRenameOptions contains the optional parameters for the ContainerClient.Rename method.
func (client *ContainerClient) Rename(ctx context.Context, sourceContainerName string, options *ContainerClientRenameOptions) (ContainerClientRenameResponse, error) {
@@ -1128,13 +1138,13 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "rename")
+ reqQP.Set("restype", "container")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@@ -1142,7 +1152,7 @@ func (client *ContainerClient) renameCreateRequest(ctx context.Context, sourceCo
if options != nil && options.SourceLeaseID != nil {
req.Raw().Header["x-ms-source-lease-id"] = []string{*options.SourceLeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -1172,7 +1182,7 @@ func (client *ContainerClient) renameHandleResponse(resp *http.Response) (Contai
// to 60 seconds, or can be infinite
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - leaseID - Specifies the current lease ID on the resource.
// - options - ContainerClientRenewLeaseOptions contains the optional parameters for the ContainerClient.RenewLease method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -1206,20 +1216,20 @@ func (client *ContainerClient) renewLeaseCreateRequest(ctx context.Context, leas
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
- req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-lease-action"] = []string{"renew"}
+ req.Raw().Header["x-ms-lease-id"] = []string{leaseID}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -1261,7 +1271,7 @@ func (client *ContainerClient) renewLeaseHandleResponse(resp *http.Response) (Co
// Restore - Restores a previously-deleted container.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientRestoreOptions contains the optional parameters for the ContainerClient.Restore method.
func (client *ContainerClient) Restore(ctx context.Context, options *ContainerClientRestoreOptions) (ContainerClientRestoreResponse, error) {
var err error
@@ -1288,13 +1298,13 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "undelete")
+ reqQP.Set("restype", "container")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
@@ -1304,7 +1314,7 @@ func (client *ContainerClient) restoreCreateRequest(ctx context.Context, options
if options != nil && options.DeletedContainerVersion != nil {
req.Raw().Header["x-ms-deleted-container-version"] = []string{*options.DeletedContainerVersion}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -1334,7 +1344,7 @@ func (client *ContainerClient) restoreHandleResponse(resp *http.Response) (Conta
// may be accessed publicly.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - containerACL - the acls for the container
// - options - ContainerClientSetAccessPolicyOptions contains the optional parameters for the ContainerClient.SetAccessPolicy
// method.
@@ -1365,29 +1375,29 @@ func (client *ContainerClient) setAccessPolicyCreateRequest(ctx context.Context,
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "acl")
+ reqQP.Set("restype", "container")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
- }
- if options != nil && options.Access != nil {
- req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)}
- }
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ if options != nil && options.Access != nil {
+ req.Raw().Header["x-ms-blob-public-access"] = []string{string(*options.Access)}
+ }
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
type wrapper struct {
XMLName xml.Name `xml:"SignedIdentifiers"`
ContainerACL *[]*SignedIdentifier `xml:"SignedIdentifier"`
@@ -1433,7 +1443,7 @@ func (client *ContainerClient) setAccessPolicyHandleResponse(resp *http.Response
// SetMetadata - operation sets one or more user-defined name-value pairs for the specified container.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ContainerClientSetMetadataOptions contains the optional parameters for the ContainerClient.SetMetadata method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
// - ModifiedAccessConditions - ModifiedAccessConditions contains a group of parameters for the ContainerClient.Delete method.
@@ -1462,12 +1472,19 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "metadata")
+ reqQP.Set("restype", "container")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
@@ -1478,14 +1495,7 @@ func (client *ContainerClient) setMetadataCreateRequest(ctx context.Context, opt
}
}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -1524,7 +1534,7 @@ func (client *ContainerClient) setMetadataHandleResponse(resp *http.Response) (C
// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
// value: multipart/mixed; boundary=batch_
@@ -1555,20 +1565,20 @@ func (client *ContainerClient) submitBatchCreateRequest(ctx context.Context, con
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "container")
reqQP.Set("comp", "batch")
+ reqQP.Set("restype", "container")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
runtime.SkipBodyDownload(req)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
req.Raw().Header["Content-Type"] = []string{multipartContentType}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, multipartContentType); err != nil {
return nil, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
index 7251de839..803b2858e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -89,6 +86,9 @@ type BlobName struct {
type BlobPrefix struct {
// REQUIRED
Name *string `xml:"Name"`
+
+ // Properties of a blob
+ Properties *BlobProperties `xml:"Properties"`
}
// BlobProperties - Properties of a blob
@@ -98,6 +98,7 @@ type BlobProperties struct {
// REQUIRED
LastModified *time.Time `xml:"Last-Modified"`
+ ACL *string `xml:"Acl"`
AccessTier *AccessTier `xml:"AccessTier"`
AccessTierChangeTime *time.Time `xml:"AccessTierChangeTime"`
AccessTierInferred *bool `xml:"AccessTierInferred"`
@@ -127,6 +128,7 @@ type BlobProperties struct {
// The name of the encryption scope under which the blob is encrypted.
EncryptionScope *string `xml:"EncryptionScope"`
ExpiresOn *time.Time `xml:"Expiry-Time"`
+ Group *string `xml:"Group"`
ImmutabilityPolicyExpiresOn *time.Time `xml:"ImmutabilityPolicyUntilDate"`
ImmutabilityPolicyMode *ImmutabilityPolicyMode `xml:"ImmutabilityPolicyMode"`
IncrementalCopy *bool `xml:"IncrementalCopy"`
@@ -136,11 +138,14 @@ type BlobProperties struct {
LeaseState *LeaseStateType `xml:"LeaseState"`
LeaseStatus *LeaseStatusType `xml:"LeaseStatus"`
LegalHold *bool `xml:"LegalHold"`
+ Owner *string `xml:"Owner"`
+ Permissions *string `xml:"Permissions"`
// If an object is in rehydrate pending state then this header is returned with priority of rehydrate. Valid values are High
// and Standard.
RehydratePriority *RehydratePriority `xml:"RehydratePriority"`
RemainingRetentionDays *int32 `xml:"RemainingRetentionDays"`
+ ResourceType *string `xml:"ResourceType"`
ServerEncrypted *bool `xml:"ServerEncrypted"`
TagCount *int32 `xml:"TagCount"`
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
index 7e094db87..e2e64d6ff 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_models_serde.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -46,8 +43,12 @@ func (a *AccessPolicy) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) er
if err := dec.DecodeElement(aux, &start); err != nil {
return err
}
- a.Expiry = (*time.Time)(aux.Expiry)
- a.Start = (*time.Time)(aux.Start)
+ if aux.Expiry != nil && !(*time.Time)(aux.Expiry).IsZero() {
+ a.Expiry = (*time.Time)(aux.Expiry)
+ }
+ if aux.Start != nil && !(*time.Time)(aux.Start).IsZero() {
+ a.Start = (*time.Time)(aux.Start)
+ }
return nil
}
@@ -152,19 +153,35 @@ func (b *BlobProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElement)
if err := dec.DecodeElement(aux, &start); err != nil {
return err
}
- b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime)
+ if aux.AccessTierChangeTime != nil && !(*time.Time)(aux.AccessTierChangeTime).IsZero() {
+ b.AccessTierChangeTime = (*time.Time)(aux.AccessTierChangeTime)
+ }
if aux.ContentMD5 != nil {
if err := runtime.DecodeByteArray(*aux.ContentMD5, &b.ContentMD5, runtime.Base64StdFormat); err != nil {
return err
}
}
- b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime)
- b.CreationTime = (*time.Time)(aux.CreationTime)
- b.DeletedTime = (*time.Time)(aux.DeletedTime)
- b.ExpiresOn = (*time.Time)(aux.ExpiresOn)
- b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn)
- b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn)
- b.LastModified = (*time.Time)(aux.LastModified)
+ if aux.CopyCompletionTime != nil && !(*time.Time)(aux.CopyCompletionTime).IsZero() {
+ b.CopyCompletionTime = (*time.Time)(aux.CopyCompletionTime)
+ }
+ if aux.CreationTime != nil && !(*time.Time)(aux.CreationTime).IsZero() {
+ b.CreationTime = (*time.Time)(aux.CreationTime)
+ }
+ if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() {
+ b.DeletedTime = (*time.Time)(aux.DeletedTime)
+ }
+ if aux.ExpiresOn != nil && !(*time.Time)(aux.ExpiresOn).IsZero() {
+ b.ExpiresOn = (*time.Time)(aux.ExpiresOn)
+ }
+ if aux.ImmutabilityPolicyExpiresOn != nil && !(*time.Time)(aux.ImmutabilityPolicyExpiresOn).IsZero() {
+ b.ImmutabilityPolicyExpiresOn = (*time.Time)(aux.ImmutabilityPolicyExpiresOn)
+ }
+ if aux.LastAccessedOn != nil && !(*time.Time)(aux.LastAccessedOn).IsZero() {
+ b.LastAccessedOn = (*time.Time)(aux.LastAccessedOn)
+ }
+ if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() {
+ b.LastModified = (*time.Time)(aux.LastModified)
+ }
return nil
}
@@ -271,8 +288,12 @@ func (c *ContainerProperties) UnmarshalXML(dec *xml.Decoder, start xml.StartElem
if err := dec.DecodeElement(aux, &start); err != nil {
return err
}
- c.DeletedTime = (*time.Time)(aux.DeletedTime)
- c.LastModified = (*time.Time)(aux.LastModified)
+ if aux.DeletedTime != nil && !(*time.Time)(aux.DeletedTime).IsZero() {
+ c.DeletedTime = (*time.Time)(aux.DeletedTime)
+ }
+ if aux.LastModified != nil && !(*time.Time)(aux.LastModified).IsZero() {
+ c.LastModified = (*time.Time)(aux.LastModified)
+ }
return nil
}
@@ -316,7 +337,9 @@ func (g *GeoReplication) UnmarshalXML(dec *xml.Decoder, start xml.StartElement)
if err := dec.DecodeElement(aux, &start); err != nil {
return err
}
- g.LastSyncTime = (*time.Time)(aux.LastSyncTime)
+ if aux.LastSyncTime != nil && !(*time.Time)(aux.LastSyncTime).IsZero() {
+ g.LastSyncTime = (*time.Time)(aux.LastSyncTime)
+ }
return nil
}
@@ -436,8 +459,12 @@ func (u *UserDelegationKey) UnmarshalXML(dec *xml.Decoder, start xml.StartElemen
if err := dec.DecodeElement(aux, &start); err != nil {
return err
}
- u.SignedExpiry = (*time.Time)(aux.SignedExpiry)
- u.SignedStart = (*time.Time)(aux.SignedStart)
+ if aux.SignedExpiry != nil && !(*time.Time)(aux.SignedExpiry).IsZero() {
+ u.SignedExpiry = (*time.Time)(aux.SignedExpiry)
+ }
+ if aux.SignedStart != nil && !(*time.Time)(aux.SignedStart).IsZero() {
+ u.SignedStart = (*time.Time)(aux.SignedStart)
+ }
return nil
}
@@ -451,18 +478,8 @@ func populate(m map[string]any, k string, v any) {
}
}
-func populateAny(m map[string]any, k string, v any) {
- if v == nil {
- return
- } else if azcore.IsNullValue(v) {
- m[k] = nil
- } else {
- m[k] = v
- }
-}
-
func unpopulate(data json.RawMessage, fn string, v any) error {
- if data == nil {
+ if data == nil || string(data) == "null" {
return nil
}
if err := json.Unmarshal(data, v); err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
index 216f8b73a..01d1422d0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_options.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -46,6 +43,13 @@ type AppendBlobClientAppendBlockOptions struct {
// analytics logging is enabled.
RequestID *string
+ // Required if the request body is a structured message. Specifies the message schema version and properties.
+ StructuredBodyType *string
+
+ // Required if the request body is a structured message. Specifies the length of the blob/file content inside the message
+ // body. Will always be smaller than Content-Length.
+ StructuredContentLength *int64
+
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
@@ -239,9 +243,18 @@ type BlobClientDeleteImmutabilityPolicyOptions struct {
// analytics logging is enabled.
RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
+
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
}
// BlobClientDeleteOptions contains the optional parameters for the BlobClient.Delete method.
@@ -294,6 +307,10 @@ type BlobClientDownloadOptions struct {
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
Snapshot *string
+ // Specifies the response content should be returned as a structured message and specifies the message schema version and
+ // properties.
+ StructuredBodyType *string
+
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
@@ -305,7 +322,13 @@ type BlobClientDownloadOptions struct {
// BlobClientGetAccountInfoOptions contains the optional parameters for the BlobClient.GetAccountInfo method.
type BlobClientGetAccountInfoOptions struct {
- // placeholder for future optional parameters
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
}
// BlobClientGetPropertiesOptions contains the optional parameters for the BlobClient.GetProperties method.
@@ -426,9 +449,18 @@ type BlobClientSetImmutabilityPolicyOptions struct {
// analytics logging is enabled.
RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
+
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
}
// BlobClientSetLegalHoldOptions contains the optional parameters for the BlobClient.SetLegalHold method.
@@ -437,9 +469,18 @@ type BlobClientSetLegalHoldOptions struct {
// analytics logging is enabled.
RequestID *string
+ // The snapshot parameter is an opaque DateTime value that, when present, specifies the blob snapshot to retrieve. For more
+ // information on working with blob snapshots, see Creating a Snapshot of a Blob.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob]
+ Snapshot *string
+
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
+
+ // The version id parameter is an opaque DateTime value that, when present, specifies the version of the blob to operate on.
+ // It's for service version 2019-10-10 and newer.
+ VersionID *string
}
// BlobClientSetMetadataOptions contains the optional parameters for the BlobClient.SetMetadata method.
@@ -705,6 +746,13 @@ type BlockBlobClientStageBlockOptions struct {
// analytics logging is enabled.
RequestID *string
+ // Required if the request body is a structured message. Specifies the message schema version and properties.
+ StructuredBodyType *string
+
+ // Required if the request body is a structured message. Specifies the length of the blob/file content inside the message
+ // body. Will always be smaller than Content-Length.
+ StructuredContentLength *int64
+
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
@@ -742,6 +790,13 @@ type BlockBlobClientUploadOptions struct {
// analytics logging is enabled.
RequestID *string
+ // Required if the request body is a structured message. Specifies the message schema version and properties.
+ StructuredBodyType *string
+
+ // Required if the request body is a structured message. Specifies the length of the blob/file content inside the message
+ // body. Will always be smaller than Content-Length.
+ StructuredContentLength *int64
+
// Optional. Indicates the tier to be set on the blob.
Tier *AccessTier
@@ -876,7 +931,13 @@ type ContainerClientGetAccessPolicyOptions struct {
// ContainerClientGetAccountInfoOptions contains the optional parameters for the ContainerClient.GetAccountInfo method.
type ContainerClientGetAccountInfoOptions struct {
- // placeholder for future optional parameters
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
}
// ContainerClientGetPropertiesOptions contains the optional parameters for the ContainerClient.GetProperties method.
@@ -1307,6 +1368,13 @@ type PageBlobClientUploadPagesOptions struct {
// analytics logging is enabled.
RequestID *string
+ // Required if the request body is a structured message. Specifies the message schema version and properties.
+ StructuredBodyType *string
+
+ // Required if the request body is a structured message. Specifies the length of the blob/file content inside the message
+ // body. Will always be smaller than Content-Length.
+ StructuredContentLength *int64
+
// The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
// [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
Timeout *int32
@@ -1360,7 +1428,13 @@ type ServiceClientFilterBlobsOptions struct {
// ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
type ServiceClientGetAccountInfoOptions struct {
- // placeholder for future optional parameters
+ // Provides a client-generated, opaque value with a 1 KB character limit that is recorded in the analytics logs when storage
+ // analytics logging is enabled.
+ RequestID *string
+
+ // The timeout parameter is expressed in seconds. For more information, see Setting Timeouts for Blob Service Operations.
+ // [https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/setting-timeouts-for-blob-service-operations]
+ Timeout *int32
}
// ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
index cb6a19f7a..8d8c95347 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_pageblob_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -30,7 +27,7 @@ type PageBlobClient struct {
// ClearPages - The Clear Pages operation clears a set of pages from a page blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - options - PageBlobClientClearPagesOptions contains the optional parameters for the PageBlobClient.ClearPages method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -69,13 +66,25 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-page-write"] = []string{"clear"}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
- if options != nil && options.Range != nil {
- req.Raw().Header["x-ms-range"] = []string{*options.Range}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -83,41 +92,29 @@ func (client *PageBlobClient) clearPagesCreateRequest(ctx context.Context, conte
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
+ if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
+ req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
+ }
if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil {
req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)}
}
if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil {
req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)}
}
- if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
- req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-page-write"] = []string{"clear"}
+ if options != nil && options.Range != nil {
+ req.Raw().Header["x-ms-range"] = []string{*options.Range}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -181,7 +178,7 @@ func (client *PageBlobClient) clearPagesHandleResponse(resp *http.Response) (Pag
// 2016-05-31.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - copySource - Specifies the name of the source page blob snapshot. This value is a URL of up to 2 KB in length that specifies
// a page blob snapshot. The value should be URL-encoded as it would appear in a request
// URI. The source blob must either be public or must be authenticated via a shared access signature.
@@ -218,27 +215,27 @@ func (client *PageBlobClient) copyIncrementalCreateRequest(ctx context.Context,
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
+ req.Raw().Header["x-ms-copy-source"] = []string{copySource}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-copy-source"] = []string{copySource}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
return req, nil
}
@@ -283,7 +280,7 @@ func (client *PageBlobClient) copyIncrementalHandleResponse(resp *http.Response)
// Create - The Create operation creates a new page blob.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
// to a 512-byte boundary.
@@ -322,13 +319,28 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if options != nil && options.Tier != nil {
req.Raw().Header["x-ms-access-tier"] = []string{string(*options.Tier)}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
- req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
+ req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+ }
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
+ req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
}
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentEncoding != nil {
req.Raw().Header["x-ms-blob-content-encoding"] = []string{*blobHTTPHeaders.BlobContentEncoding}
@@ -336,24 +348,22 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentLanguage != nil {
req.Raw().Header["x-ms-blob-content-language"] = []string{*blobHTTPHeaders.BlobContentLanguage}
}
+ req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentMD5 != nil {
req.Raw().Header["x-ms-blob-content-md5"] = []string{base64.StdEncoding.EncodeToString(blobHTTPHeaders.BlobContentMD5)}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobCacheControl != nil {
- req.Raw().Header["x-ms-blob-cache-control"] = []string{*blobHTTPHeaders.BlobCacheControl}
+ if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentType != nil {
+ req.Raw().Header["x-ms-blob-content-type"] = []string{*blobHTTPHeaders.BlobContentType}
}
- if options != nil && options.Metadata != nil {
- for k, v := range options.Metadata {
- if v != nil {
- req.Raw().Header["x-ms-meta-"+k] = []string{*v}
- }
- }
+ if options != nil && options.BlobSequenceNumber != nil {
+ req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ req.Raw().Header["x-ms-blob-type"] = []string{"PageBlob"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- if blobHTTPHeaders != nil && blobHTTPHeaders.BlobContentDisposition != nil {
- req.Raw().Header["x-ms-blob-content-disposition"] = []string{*blobHTTPHeaders.BlobContentDisposition}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -361,48 +371,35 @@ func (client *PageBlobClient) createCreateRequest(ctx context.Context, contentLe
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
- if options != nil && options.BlobSequenceNumber != nil {
- req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
- }
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- if options != nil && options.BlobTagsString != nil {
- req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ if options != nil && options.ImmutabilityPolicyMode != nil {
+ req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
}
if options != nil && options.ImmutabilityPolicyExpiry != nil {
req.Raw().Header["x-ms-immutability-policy-until-date"] = []string{(*options.ImmutabilityPolicyExpiry).In(gmt).Format(time.RFC1123)}
}
- if options != nil && options.ImmutabilityPolicyMode != nil {
- req.Raw().Header["x-ms-immutability-policy-mode"] = []string{string(*options.ImmutabilityPolicyMode)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
if options != nil && options.LegalHold != nil {
req.Raw().Header["x-ms-legal-hold"] = []string{strconv.FormatBool(*options.LegalHold)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.Metadata != nil {
+ for k, v := range options.Metadata {
+ if v != nil {
+ req.Raw().Header["x-ms-meta-"+k] = []string{*v}
+ }
+ }
+ }
+ if options != nil && options.BlobTagsString != nil {
+ req.Raw().Header["x-ms-tags"] = []string{*options.BlobTagsString}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -464,7 +461,7 @@ func (client *PageBlobClient) createHandleResponse(resp *http.Response) (PageBlo
// NewGetPageRangesPager - The Get Page Ranges operation returns the list of valid page ranges for a page blob or snapshot
// of a page blob
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - PageBlobClientGetPageRangesOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesPager
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -498,45 +495,45 @@ func (client *PageBlobClient) GetPageRangesCreateRequest(ctx context.Context, op
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "pagelist")
- if options != nil && options.Snapshot != nil {
- reqQP.Set("snapshot", *options.Snapshot)
- }
- if options != nil && options.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
- }
if options != nil && options.Marker != nil {
reqQP.Set("marker", *options.Marker)
}
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- if options != nil && options.Range != nil {
- req.Raw().Header["x-ms-range"] = []string{*options.Range}
+ if options != nil && options.Snapshot != nil {
+ reqQP.Set("snapshot", *options.Snapshot)
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.Range != nil {
+ req.Raw().Header["x-ms-range"] = []string{*options.Range}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -585,7 +582,7 @@ func (client *PageBlobClient) GetPageRangesHandleResponse(resp *http.Response) (
// NewGetPageRangesDiffPager - The Get Page Ranges Diff operation returns the list of valid page ranges for a page blob that
// were changed between target blob and previous snapshot.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - PageBlobClientGetPageRangesDiffOptions contains the optional parameters for the PageBlobClient.NewGetPageRangesDiffPager
// method.
// - LeaseAccessConditions - LeaseAccessConditions contains a group of parameters for the ContainerClient.GetProperties method.
@@ -619,51 +616,51 @@ func (client *PageBlobClient) GetPageRangesDiffCreateRequest(ctx context.Context
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "pagelist")
- if options != nil && options.Snapshot != nil {
- reqQP.Set("snapshot", *options.Snapshot)
- }
- if options != nil && options.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
- }
- if options != nil && options.Prevsnapshot != nil {
- reqQP.Set("prevsnapshot", *options.Prevsnapshot)
- }
if options != nil && options.Marker != nil {
reqQP.Set("marker", *options.Marker)
}
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- if options != nil && options.PrevSnapshotURL != nil {
- req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL}
+ if options != nil && options.Prevsnapshot != nil {
+ reqQP.Set("prevsnapshot", *options.Prevsnapshot)
}
- if options != nil && options.Range != nil {
- req.Raw().Header["x-ms-range"] = []string{*options.Range}
+ if options != nil && options.Snapshot != nil {
+ reqQP.Set("snapshot", *options.Snapshot)
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.PrevSnapshotURL != nil {
+ req.Raw().Header["x-ms-previous-snapshot-url"] = []string{*options.PrevSnapshotURL}
+ }
+ if options != nil && options.Range != nil {
+ req.Raw().Header["x-ms-range"] = []string{*options.Range}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -712,7 +709,7 @@ func (client *PageBlobClient) GetPageRangesDiffHandleResponse(resp *http.Respons
// Resize - Resize the Blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - blobContentLength - This header specifies the maximum size for the page blob, up to 1 TB. The page blob size must be aligned
// to a 512-byte boundary.
// - options - PageBlobClientResizeOptions contains the optional parameters for the PageBlobClient.Resize method.
@@ -750,8 +747,25 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -759,33 +773,16 @@ func (client *PageBlobClient) resizeCreateRequest(ctx context.Context, blobConte
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- req.Raw().Header["x-ms-blob-content-length"] = []string{strconv.FormatInt(blobContentLength, 10)}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -831,7 +828,7 @@ func (client *PageBlobClient) resizeHandleResponse(resp *http.Response) (PageBlo
// UpdateSequenceNumber - Update the sequence number of the blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - sequenceNumberAction - Required if the x-ms-blob-sequence-number header is set for the request. This property applies to
// page blobs only. This property indicates how the service should modify the blob's sequence number
// - options - PageBlobClientUpdateSequenceNumberOptions contains the optional parameters for the PageBlobClient.UpdateSequenceNumber
@@ -868,33 +865,33 @@ func (client *PageBlobClient) updateSequenceNumberCreateRequest(ctx context.Cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
}
if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)}
if options != nil && options.BlobSequenceNumber != nil {
req.Raw().Header["x-ms-blob-sequence-number"] = []string{strconv.FormatInt(*options.BlobSequenceNumber, 10)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ }
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ }
+ req.Raw().Header["x-ms-sequence-number-action"] = []string{string(sequenceNumberAction)}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -940,7 +937,7 @@ func (client *PageBlobClient) updateSequenceNumberHandleResponse(resp *http.Resp
// UploadPages - The Upload Pages operation writes a range of pages to a page blob
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - body - Initial data
// - options - PageBlobClientUploadPagesOptions contains the optional parameters for the PageBlobClient.UploadPages method.
@@ -980,19 +977,31 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-page-write"] = []string{"update"}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
if options != nil && options.TransactionalContentMD5 != nil {
req.Raw().Header["Content-MD5"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentMD5)}
}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
if options != nil && options.TransactionalContentCRC64 != nil {
req.Raw().Header["x-ms-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.TransactionalContentCRC64)}
}
- if options != nil && options.Range != nil {
- req.Raw().Header["x-ms-range"] = []string{*options.Range}
- }
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
@@ -1000,41 +1009,35 @@ func (client *PageBlobClient) uploadPagesCreateRequest(ctx context.Context, cont
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
+ if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
+ req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
+ }
if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil {
req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)}
}
if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil {
req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)}
}
- if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
- req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
+ req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ req.Raw().Header["x-ms-page-write"] = []string{"update"}
+ if options != nil && options.Range != nil {
+ req.Raw().Header["x-ms-range"] = []string{*options.Range}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ if options != nil && options.StructuredBodyType != nil {
+ req.Raw().Header["x-ms-structured-body"] = []string{*options.StructuredBodyType}
}
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
- req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
+ if options != nil && options.StructuredContentLength != nil {
+ req.Raw().Header["x-ms-structured-content-length"] = []string{strconv.FormatInt(*options.StructuredContentLength, 10)}
}
req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- req.Raw().Header["Accept"] = []string{"application/xml"}
if err := req.SetBody(body, "application/octet-stream"); err != nil {
return nil, err
}
@@ -1101,6 +1104,9 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
if val := resp.Header.Get("x-ms-request-id"); val != "" {
result.RequestID = &val
}
+ if val := resp.Header.Get("x-ms-structured-body"); val != "" {
+ result.StructuredBodyType = &val
+ }
if val := resp.Header.Get("x-ms-version"); val != "" {
result.Version = &val
}
@@ -1111,7 +1117,7 @@ func (client *PageBlobClient) uploadPagesHandleResponse(resp *http.Response) (Pa
// a URL
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - sourceURL - Specify a URL to the copy source.
// - sourceRange - Bytes of source data in the specified range. The length of this range should match the ContentLength header
// and x-ms-range/Range destination range header.
@@ -1158,31 +1164,41 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-page-write"] = []string{"update"}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
+ req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
+ req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
+ req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
+ }
+ if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
+ req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ }
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
req.Raw().Header["x-ms-copy-source"] = []string{sourceURL}
- req.Raw().Header["x-ms-source-range"] = []string{sourceRange}
- if options != nil && options.SourceContentMD5 != nil {
- req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
+ if options != nil && options.CopySourceAuthorization != nil {
+ req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
}
- if options != nil && options.SourceContentcrc64 != nil {
- req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
+ if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
+ req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
}
- req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
- req.Raw().Header["x-ms-range"] = []string{rangeParam}
if cpkInfo != nil && cpkInfo.EncryptionKey != nil {
req.Raw().Header["x-ms-encryption-key"] = []string{*cpkInfo.EncryptionKey}
}
if cpkInfo != nil && cpkInfo.EncryptionKeySHA256 != nil {
req.Raw().Header["x-ms-encryption-key-sha256"] = []string{*cpkInfo.EncryptionKeySHA256}
}
- if cpkInfo != nil && cpkInfo.EncryptionAlgorithm != nil {
- req.Raw().Header["x-ms-encryption-algorithm"] = []string{string(*cpkInfo.EncryptionAlgorithm)}
- }
if cpkScopeInfo != nil && cpkScopeInfo.EncryptionScope != nil {
req.Raw().Header["x-ms-encryption-scope"] = []string{*cpkScopeInfo.EncryptionScope}
}
- if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
- req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
+ if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
+ req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
}
if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo != nil {
req.Raw().Header["x-ms-if-sequence-number-le"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThanOrEqualTo, 10)}
@@ -1190,44 +1206,34 @@ func (client *PageBlobClient) uploadPagesFromURLCreateRequest(ctx context.Contex
if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberLessThan != nil {
req.Raw().Header["x-ms-if-sequence-number-lt"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberLessThan, 10)}
}
- if sequenceNumberAccessConditions != nil && sequenceNumberAccessConditions.IfSequenceNumberEqualTo != nil {
- req.Raw().Header["x-ms-if-sequence-number-eq"] = []string{strconv.FormatInt(*sequenceNumberAccessConditions.IfSequenceNumberEqualTo, 10)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfModifiedSince != nil {
- req.Raw().Header["If-Modified-Since"] = []string{(*modifiedAccessConditions.IfModifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfUnmodifiedSince != nil {
- req.Raw().Header["If-Unmodified-Since"] = []string{(*modifiedAccessConditions.IfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfMatch != nil {
- req.Raw().Header["If-Match"] = []string{string(*modifiedAccessConditions.IfMatch)}
- }
- if modifiedAccessConditions != nil && modifiedAccessConditions.IfNoneMatch != nil {
- req.Raw().Header["If-None-Match"] = []string{string(*modifiedAccessConditions.IfNoneMatch)}
- }
if modifiedAccessConditions != nil && modifiedAccessConditions.IfTags != nil {
req.Raw().Header["x-ms-if-tags"] = []string{*modifiedAccessConditions.IfTags}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
- req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ if leaseAccessConditions != nil && leaseAccessConditions.LeaseID != nil {
+ req.Raw().Header["x-ms-lease-id"] = []string{*leaseAccessConditions.LeaseID}
}
- if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
- req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
+ req.Raw().Header["x-ms-page-write"] = []string{"update"}
+ req.Raw().Header["x-ms-range"] = []string{rangeParam}
+ if options != nil && options.SourceContentcrc64 != nil {
+ req.Raw().Header["x-ms-source-content-crc64"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentcrc64)}
+ }
+ if options != nil && options.SourceContentMD5 != nil {
+ req.Raw().Header["x-ms-source-content-md5"] = []string{base64.StdEncoding.EncodeToString(options.SourceContentMD5)}
}
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfMatch != nil {
req.Raw().Header["x-ms-source-if-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfMatch)}
}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfModifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-modified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfModifiedSince).In(gmt).Format(time.RFC1123)}
+ }
if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfNoneMatch != nil {
req.Raw().Header["x-ms-source-if-none-match"] = []string{string(*sourceModifiedAccessConditions.SourceIfNoneMatch)}
}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
- if options != nil && options.RequestID != nil {
- req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
- }
- if options != nil && options.CopySourceAuthorization != nil {
- req.Raw().Header["x-ms-copy-source-authorization"] = []string{*options.CopySourceAuthorization}
+ if sourceModifiedAccessConditions != nil && sourceModifiedAccessConditions.SourceIfUnmodifiedSince != nil {
+ req.Raw().Header["x-ms-source-if-unmodified-since"] = []string{(*sourceModifiedAccessConditions.SourceIfUnmodifiedSince).In(gmt).Format(time.RFC1123)}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-source-range"] = []string{sourceRange}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go
deleted file mode 100644
index 738d23c8f..000000000
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_response_types.go
+++ /dev/null
@@ -1,2016 +0,0 @@
-//go:build go1.18
-// +build go1.18
-
-// Copyright (c) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT License. See License.txt in the project root for license information.
-// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
-// Changes may cause incorrect behavior and will be lost if the code is regenerated.
-
-package generated
-
-import (
- "github.com/Azure/azure-sdk-for-go/sdk/azcore"
- "io"
- "time"
-)
-
-// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL.
-type AppendBlobClientAppendBlockFromURLResponse struct {
- // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response.
- BlobAppendOffset *string
-
- // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
- BlobCommittedBlockCount *int32
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock.
-type AppendBlobClientAppendBlockResponse struct {
- // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response.
- BlobAppendOffset *string
-
- // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
- BlobCommittedBlockCount *int32
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create.
-type AppendBlobClientCreateResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal.
-type AppendBlobClientSealResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // IsSealed contains the information returned from the x-ms-blob-sealed header response.
- IsSealed *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL.
-type BlobClientAbortCopyFromURLResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease.
-type BlobClientAcquireLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseID contains the information returned from the x-ms-lease-id header response.
- LeaseID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease.
-type BlobClientBreakLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseTime contains the information returned from the x-ms-lease-time header response.
- LeaseTime *int32
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease.
-type BlobClientChangeLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseID contains the information returned from the x-ms-lease-id header response.
- LeaseID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL.
-type BlobClientCopyFromURLResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // CopyID contains the information returned from the x-ms-copy-id header response.
- CopyID *string
-
- // CopyStatus contains the information returned from the x-ms-copy-status header response.
- CopyStatus *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot.
-type BlobClientCreateSnapshotResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Snapshot contains the information returned from the x-ms-snapshot header response.
- Snapshot *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy.
-type BlobClientDeleteImmutabilityPolicyResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientDeleteResponse contains the response from method BlobClient.Delete.
-type BlobClientDeleteResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientDownloadResponse contains the response from method BlobClient.Download.
-type BlobClientDownloadResponse struct {
- // AcceptRanges contains the information returned from the Accept-Ranges header response.
- AcceptRanges *string
-
- // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
- BlobCommittedBlockCount *int32
-
- // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response.
- BlobContentMD5 []byte
-
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // BlobType contains the information returned from the x-ms-blob-type header response.
- BlobType *BlobType
-
- // Body contains the streaming response.
- Body io.ReadCloser
-
- // CacheControl contains the information returned from the Cache-Control header response.
- CacheControl *string
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentDisposition contains the information returned from the Content-Disposition header response.
- ContentDisposition *string
-
- // ContentEncoding contains the information returned from the Content-Encoding header response.
- ContentEncoding *string
-
- // ContentLanguage contains the information returned from the Content-Language header response.
- ContentLanguage *string
-
- // ContentLength contains the information returned from the Content-Length header response.
- ContentLength *int64
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // ContentRange contains the information returned from the Content-Range header response.
- ContentRange *string
-
- // ContentType contains the information returned from the Content-Type header response.
- ContentType *string
-
- // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response.
- CopyCompletionTime *time.Time
-
- // CopyID contains the information returned from the x-ms-copy-id header response.
- CopyID *string
-
- // CopyProgress contains the information returned from the x-ms-copy-progress header response.
- CopyProgress *string
-
- // CopySource contains the information returned from the x-ms-copy-source header response.
- CopySource *string
-
- // CopyStatus contains the information returned from the x-ms-copy-status header response.
- CopyStatus *CopyStatusType
-
- // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response.
- CopyStatusDescription *string
-
- // CreationTime contains the information returned from the x-ms-creation-time header response.
- CreationTime *time.Time
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // ErrorCode contains the information returned from the x-ms-error-code header response.
- ErrorCode *string
-
- // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response.
- ImmutabilityPolicyExpiresOn *time.Time
-
- // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
- ImmutabilityPolicyMode *ImmutabilityPolicyMode
-
- // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response.
- IsCurrentVersion *bool
-
- // IsSealed contains the information returned from the x-ms-blob-sealed header response.
- IsSealed *bool
-
- // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastAccessed contains the information returned from the x-ms-last-access-time header response.
- LastAccessed *time.Time
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseDuration contains the information returned from the x-ms-lease-duration header response.
- LeaseDuration *LeaseDurationType
-
- // LeaseState contains the information returned from the x-ms-lease-state header response.
- LeaseState *LeaseStateType
-
- // LeaseStatus contains the information returned from the x-ms-lease-status header response.
- LeaseStatus *LeaseStatusType
-
- // LegalHold contains the information returned from the x-ms-legal-hold header response.
- LegalHold *bool
-
- // Metadata contains the information returned from the x-ms-meta header response.
- Metadata map[string]*string
-
- // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response.
- ObjectReplicationPolicyID *string
-
- // ObjectReplicationRules contains the information returned from the x-ms-or header response.
- ObjectReplicationRules map[string]*string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // TagCount contains the information returned from the x-ms-tag-count header response.
- TagCount *int64
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo.
-type BlobClientGetAccountInfoResponse struct {
- // AccountKind contains the information returned from the x-ms-account-kind header response.
- AccountKind *AccountKind
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // SKUName contains the information returned from the x-ms-sku-name header response.
- SKUName *SKUName
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties.
-type BlobClientGetPropertiesResponse struct {
- // AcceptRanges contains the information returned from the Accept-Ranges header response.
- AcceptRanges *string
-
- // AccessTier contains the information returned from the x-ms-access-tier header response.
- AccessTier *string
-
- // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response.
- AccessTierChangeTime *time.Time
-
- // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response.
- AccessTierInferred *bool
-
- // ArchiveStatus contains the information returned from the x-ms-archive-status header response.
- ArchiveStatus *string
-
- // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
- BlobCommittedBlockCount *int32
-
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // BlobType contains the information returned from the x-ms-blob-type header response.
- BlobType *BlobType
-
- // CacheControl contains the information returned from the Cache-Control header response.
- CacheControl *string
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentDisposition contains the information returned from the Content-Disposition header response.
- ContentDisposition *string
-
- // ContentEncoding contains the information returned from the Content-Encoding header response.
- ContentEncoding *string
-
- // ContentLanguage contains the information returned from the Content-Language header response.
- ContentLanguage *string
-
- // ContentLength contains the information returned from the Content-Length header response.
- ContentLength *int64
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // ContentType contains the information returned from the Content-Type header response.
- ContentType *string
-
- // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response.
- CopyCompletionTime *time.Time
-
- // CopyID contains the information returned from the x-ms-copy-id header response.
- CopyID *string
-
- // CopyProgress contains the information returned from the x-ms-copy-progress header response.
- CopyProgress *string
-
- // CopySource contains the information returned from the x-ms-copy-source header response.
- CopySource *string
-
- // CopyStatus contains the information returned from the x-ms-copy-status header response.
- CopyStatus *CopyStatusType
-
- // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response.
- CopyStatusDescription *string
-
- // CreationTime contains the information returned from the x-ms-creation-time header response.
- CreationTime *time.Time
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response.
- DestinationSnapshot *string
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // ExpiresOn contains the information returned from the x-ms-expiry-time header response.
- ExpiresOn *time.Time
-
- // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response.
- ImmutabilityPolicyExpiresOn *time.Time
-
- // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
- ImmutabilityPolicyMode *ImmutabilityPolicyMode
-
- // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response.
- IsCurrentVersion *bool
-
- // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response.
- IsIncrementalCopy *bool
-
- // IsSealed contains the information returned from the x-ms-blob-sealed header response.
- IsSealed *bool
-
- // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastAccessed contains the information returned from the x-ms-last-access-time header response.
- LastAccessed *time.Time
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseDuration contains the information returned from the x-ms-lease-duration header response.
- LeaseDuration *LeaseDurationType
-
- // LeaseState contains the information returned from the x-ms-lease-state header response.
- LeaseState *LeaseStateType
-
- // LeaseStatus contains the information returned from the x-ms-lease-status header response.
- LeaseStatus *LeaseStatusType
-
- // LegalHold contains the information returned from the x-ms-legal-hold header response.
- LegalHold *bool
-
- // Metadata contains the information returned from the x-ms-meta header response.
- Metadata map[string]*string
-
- // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response.
- ObjectReplicationPolicyID *string
-
- // ObjectReplicationRules contains the information returned from the x-ms-or header response.
- ObjectReplicationRules map[string]*string
-
- // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response.
- RehydratePriority *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // TagCount contains the information returned from the x-ms-tag-count header response.
- TagCount *int64
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags.
-type BlobClientGetTagsResponse struct {
- // Blob tags
- BlobTags
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientQueryResponse contains the response from method BlobClient.Query.
-type BlobClientQueryResponse struct {
- // AcceptRanges contains the information returned from the Accept-Ranges header response.
- AcceptRanges *string
-
- // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
- BlobCommittedBlockCount *int32
-
- // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response.
- BlobContentMD5 []byte
-
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // BlobType contains the information returned from the x-ms-blob-type header response.
- BlobType *BlobType
-
- // Body contains the streaming response.
- Body io.ReadCloser
-
- // CacheControl contains the information returned from the Cache-Control header response.
- CacheControl *string
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentDisposition contains the information returned from the Content-Disposition header response.
- ContentDisposition *string
-
- // ContentEncoding contains the information returned from the Content-Encoding header response.
- ContentEncoding *string
-
- // ContentLanguage contains the information returned from the Content-Language header response.
- ContentLanguage *string
-
- // ContentLength contains the information returned from the Content-Length header response.
- ContentLength *int64
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // ContentRange contains the information returned from the Content-Range header response.
- ContentRange *string
-
- // ContentType contains the information returned from the Content-Type header response.
- ContentType *string
-
- // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response.
- CopyCompletionTime *time.Time
-
- // CopyID contains the information returned from the x-ms-copy-id header response.
- CopyID *string
-
- // CopyProgress contains the information returned from the x-ms-copy-progress header response.
- CopyProgress *string
-
- // CopySource contains the information returned from the x-ms-copy-source header response.
- CopySource *string
-
- // CopyStatus contains the information returned from the x-ms-copy-status header response.
- CopyStatus *CopyStatusType
-
- // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response.
- CopyStatusDescription *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseDuration contains the information returned from the x-ms-lease-duration header response.
- LeaseDuration *LeaseDurationType
-
- // LeaseState contains the information returned from the x-ms-lease-state header response.
- LeaseState *LeaseStateType
-
- // LeaseStatus contains the information returned from the x-ms-lease-status header response.
- LeaseStatus *LeaseStatusType
-
- // Metadata contains the information returned from the x-ms-meta header response.
- Metadata map[string]*string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease.
-type BlobClientReleaseLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease.
-type BlobClientRenewLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseID contains the information returned from the x-ms-lease-id header response.
- LeaseID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry.
-type BlobClientSetExpiryResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders.
-type BlobClientSetHTTPHeadersResponse struct {
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy.
-type BlobClientSetImmutabilityPolicyResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response.
- ImmutabilityPolicyExpiry *time.Time
-
- // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
- ImmutabilityPolicyMode *ImmutabilityPolicyMode
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold.
-type BlobClientSetLegalHoldResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // LegalHold contains the information returned from the x-ms-legal-hold header response.
- LegalHold *bool
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata.
-type BlobClientSetMetadataResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags.
-type BlobClientSetTagsResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientSetTierResponse contains the response from method BlobClient.SetTier.
-type BlobClientSetTierResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL.
-type BlobClientStartCopyFromURLResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // CopyID contains the information returned from the x-ms-copy-id header response.
- CopyID *string
-
- // CopyStatus contains the information returned from the x-ms-copy-status header response.
- CopyStatus *CopyStatusType
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete.
-type BlobClientUndeleteResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList.
-type BlockBlobClientCommitBlockListResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList.
-type BlockBlobClientGetBlockListResponse struct {
- BlockList
-
- // BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
- BlobContentLength *int64
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentType contains the information returned from the Content-Type header response.
- ContentType *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL.
-type BlockBlobClientPutBlobFromURLResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL.
-type BlockBlobClientStageBlockFromURLResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock.
-type BlockBlobClientStageBlockResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload.
-type BlockBlobClientUploadResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease.
-type ContainerClientAcquireLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseID contains the information returned from the x-ms-lease-id header response.
- LeaseID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease.
-type ContainerClientBreakLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseTime contains the information returned from the x-ms-lease-time header response.
- LeaseTime *int32
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease.
-type ContainerClientChangeLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseID contains the information returned from the x-ms-lease-id header response.
- LeaseID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientCreateResponse contains the response from method ContainerClient.Create.
-type ContainerClientCreateResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete.
-type ContainerClientDeleteResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs.
-type ContainerClientFilterBlobsResponse struct {
- // The result of a Filter Blobs API call
- FilterBlobSegment
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy.
-type ContainerClientGetAccessPolicyResponse struct {
- // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response.
- BlobPublicAccess *PublicAccessType
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // a collection of signed identifiers
- SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"`
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo.
-type ContainerClientGetAccountInfoResponse struct {
- // AccountKind contains the information returned from the x-ms-account-kind header response.
- AccountKind *AccountKind
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // SKUName contains the information returned from the x-ms-sku-name header response.
- SKUName *SKUName
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties.
-type ContainerClientGetPropertiesResponse struct {
- // BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response.
- BlobPublicAccess *PublicAccessType
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response.
- DefaultEncryptionScope *string
-
- // DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response.
- DenyEncryptionScopeOverride *bool
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response.
- HasImmutabilityPolicy *bool
-
- // HasLegalHold contains the information returned from the x-ms-has-legal-hold header response.
- HasLegalHold *bool
-
- // IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled
- // header response.
- IsImmutableStorageWithVersioningEnabled *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseDuration contains the information returned from the x-ms-lease-duration header response.
- LeaseDuration *LeaseDurationType
-
- // LeaseState contains the information returned from the x-ms-lease-state header response.
- LeaseState *LeaseStateType
-
- // LeaseStatus contains the information returned from the x-ms-lease-status header response.
- LeaseStatus *LeaseStatusType
-
- // Metadata contains the information returned from the x-ms-meta header response.
- Metadata map[string]*string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager.
-type ContainerClientListBlobFlatSegmentResponse struct {
- // An enumeration of blobs
- ListBlobsFlatSegmentResponse
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentType contains the information returned from the Content-Type header response.
- ContentType *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager.
-type ContainerClientListBlobHierarchySegmentResponse struct {
- // An enumeration of blobs
- ListBlobsHierarchySegmentResponse
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentType contains the information returned from the Content-Type header response.
- ContentType *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease.
-type ContainerClientReleaseLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientRenameResponse contains the response from method ContainerClient.Rename.
-type ContainerClientRenameResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease.
-type ContainerClientRenewLeaseResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // LeaseID contains the information returned from the x-ms-lease-id header response.
- LeaseID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore.
-type ContainerClientRestoreResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy.
-type ContainerClientSetAccessPolicyResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata.
-type ContainerClientSetMetadataResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch.
-type ContainerClientSubmitBatchResponse struct {
- // Body contains the streaming response.
- Body io.ReadCloser
-
- // ContentType contains the information returned from the Content-Type header response.
- ContentType *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages.
-type PageBlobClientClearPagesResponse struct {
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental.
-type PageBlobClientCopyIncrementalResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // CopyID contains the information returned from the x-ms-copy-id header response.
- CopyID *string
-
- // CopyStatus contains the information returned from the x-ms-copy-status header response.
- CopyStatus *CopyStatusType
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create.
-type PageBlobClientCreateResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-
- // VersionID contains the information returned from the x-ms-version-id header response.
- VersionID *string
-}
-
-// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager.
-type PageBlobClientGetPageRangesDiffResponse struct {
- // the list of pages
- PageList
-
- // BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
- BlobContentLength *int64
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager.
-type PageBlobClientGetPageRangesResponse struct {
- // the list of pages
- PageList
-
- // BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
- BlobContentLength *int64
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize.
-type PageBlobClientResizeResponse struct {
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber.
-type PageBlobClientUpdateSequenceNumberResponse struct {
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL.
-type PageBlobClientUploadPagesFromURLResponse struct {
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages.
-type PageBlobClientUploadPagesResponse struct {
- // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
- BlobSequenceNumber *int64
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
- ContentCRC64 []byte
-
- // ContentMD5 contains the information returned from the Content-MD5 header response.
- ContentMD5 []byte
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // ETag contains the information returned from the ETag header response.
- ETag *azcore.ETag
-
- // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
- EncryptionKeySHA256 *string
-
- // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
- EncryptionScope *string
-
- // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
- IsServerEncrypted *bool
-
- // LastModified contains the information returned from the Last-Modified header response.
- LastModified *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs.
-type ServiceClientFilterBlobsResponse struct {
- // The result of a Filter Blobs API call
- FilterBlobSegment
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo.
-type ServiceClientGetAccountInfoResponse struct {
- // AccountKind contains the information returned from the x-ms-account-kind header response.
- AccountKind *AccountKind
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response.
- IsHierarchicalNamespaceEnabled *bool
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // SKUName contains the information returned from the x-ms-sku-name header response.
- SKUName *SKUName
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties.
-type ServiceClientGetPropertiesResponse struct {
- // Storage Service Properties.
- StorageServiceProperties
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics.
-type ServiceClientGetStatisticsResponse struct {
- // Stats for the storage service.
- StorageServiceStats
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey.
-type ServiceClientGetUserDelegationKeyResponse struct {
- // A user delegation key
- UserDelegationKey
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // Date contains the information returned from the Date header response.
- Date *time.Time
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager.
-type ServiceClientListContainersSegmentResponse struct {
- // An enumeration of containers
- ListContainersSegmentResponse
-
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties.
-type ServiceClientSetPropertiesResponse struct {
- // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
- ClientRequestID *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
-
-// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch.
-type ServiceClientSubmitBatchResponse struct {
- // Body contains the streaming response.
- Body io.ReadCloser
-
- // ContentType contains the information returned from the Content-Type header response.
- ContentType *string
-
- // RequestID contains the information returned from the x-ms-request-id header response.
- RequestID *string
-
- // Version contains the information returned from the x-ms-version header response.
- Version *string
-}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go
new file mode 100644
index 000000000..5ed22156d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_responses.go
@@ -0,0 +1,2040 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License. See License.txt in the project root for license information.
+// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+package generated
+
+import (
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore"
+ "io"
+ "time"
+)
+
+// AppendBlobClientAppendBlockFromURLResponse contains the response from method AppendBlobClient.AppendBlockFromURL.
+type AppendBlobClientAppendBlockFromURLResponse struct {
+ // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response.
+ BlobAppendOffset *string
+
+ // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
+ BlobCommittedBlockCount *int32
+
+ // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+ ContentCRC64 []byte
+
+ // ContentMD5 contains the information returned from the Content-MD5 header response.
+ ContentMD5 []byte
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+ EncryptionKeySHA256 *string
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// AppendBlobClientAppendBlockResponse contains the response from method AppendBlobClient.AppendBlock.
+type AppendBlobClientAppendBlockResponse struct {
+ // BlobAppendOffset contains the information returned from the x-ms-blob-append-offset header response.
+ BlobAppendOffset *string
+
+ // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
+ BlobCommittedBlockCount *int32
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+ ContentCRC64 []byte
+
+ // ContentMD5 contains the information returned from the Content-MD5 header response.
+ ContentMD5 []byte
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+ EncryptionKeySHA256 *string
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // StructuredBodyType contains the information returned from the x-ms-structured-body header response.
+ StructuredBodyType *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// AppendBlobClientCreateResponse contains the response from method AppendBlobClient.Create.
+type AppendBlobClientCreateResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // ContentMD5 contains the information returned from the Content-MD5 header response.
+ ContentMD5 []byte
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+ EncryptionKeySHA256 *string
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+
+ // VersionID contains the information returned from the x-ms-version-id header response.
+ VersionID *string
+}
+
+// AppendBlobClientSealResponse contains the response from method AppendBlobClient.Seal.
+type AppendBlobClientSealResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // IsSealed contains the information returned from the x-ms-blob-sealed header response.
+ IsSealed *bool
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientAbortCopyFromURLResponse contains the response from method BlobClient.AbortCopyFromURL.
+type BlobClientAbortCopyFromURLResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientAcquireLeaseResponse contains the response from method BlobClient.AcquireLease.
+type BlobClientAcquireLeaseResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // LeaseID contains the information returned from the x-ms-lease-id header response.
+ LeaseID *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientBreakLeaseResponse contains the response from method BlobClient.BreakLease.
+type BlobClientBreakLeaseResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // LeaseTime contains the information returned from the x-ms-lease-time header response.
+ LeaseTime *int32
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientChangeLeaseResponse contains the response from method BlobClient.ChangeLease.
+type BlobClientChangeLeaseResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // LeaseID contains the information returned from the x-ms-lease-id header response.
+ LeaseID *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientCopyFromURLResponse contains the response from method BlobClient.CopyFromURL.
+type BlobClientCopyFromURLResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+ ContentCRC64 []byte
+
+ // ContentMD5 contains the information returned from the Content-MD5 header response.
+ ContentMD5 []byte
+
+ // CopyID contains the information returned from the x-ms-copy-id header response.
+ CopyID *string
+
+ // CopyStatus contains the information returned from the x-ms-copy-status header response.
+ CopyStatus *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+
+ // VersionID contains the information returned from the x-ms-version-id header response.
+ VersionID *string
+}
+
+// BlobClientCreateSnapshotResponse contains the response from method BlobClient.CreateSnapshot.
+type BlobClientCreateSnapshotResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Snapshot contains the information returned from the x-ms-snapshot header response.
+ Snapshot *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+
+ // VersionID contains the information returned from the x-ms-version-id header response.
+ VersionID *string
+}
+
+// BlobClientDeleteImmutabilityPolicyResponse contains the response from method BlobClient.DeleteImmutabilityPolicy.
+type BlobClientDeleteImmutabilityPolicyResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientDeleteResponse contains the response from method BlobClient.Delete.
+type BlobClientDeleteResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientDownloadResponse contains the response from method BlobClient.Download.
+type BlobClientDownloadResponse struct {
+ // AcceptRanges contains the information returned from the Accept-Ranges header response.
+ AcceptRanges *string
+
+ // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
+ BlobCommittedBlockCount *int32
+
+ // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response.
+ BlobContentMD5 []byte
+
+ // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+ BlobSequenceNumber *int64
+
+ // BlobType contains the information returned from the x-ms-blob-type header response.
+ BlobType *BlobType
+
+ // Body contains the streaming response.
+ Body io.ReadCloser
+
+ // CacheControl contains the information returned from the Cache-Control header response.
+ CacheControl *string
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+ ContentCRC64 []byte
+
+ // ContentDisposition contains the information returned from the Content-Disposition header response.
+ ContentDisposition *string
+
+ // ContentEncoding contains the information returned from the Content-Encoding header response.
+ ContentEncoding *string
+
+ // ContentLanguage contains the information returned from the Content-Language header response.
+ ContentLanguage *string
+
+ // ContentLength contains the information returned from the Content-Length header response.
+ ContentLength *int64
+
+ // ContentMD5 contains the information returned from the Content-MD5 header response.
+ ContentMD5 []byte
+
+ // ContentRange contains the information returned from the Content-Range header response.
+ ContentRange *string
+
+ // ContentType contains the information returned from the Content-Type header response.
+ ContentType *string
+
+ // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response.
+ CopyCompletionTime *time.Time
+
+ // CopyID contains the information returned from the x-ms-copy-id header response.
+ CopyID *string
+
+ // CopyProgress contains the information returned from the x-ms-copy-progress header response.
+ CopyProgress *string
+
+ // CopySource contains the information returned from the x-ms-copy-source header response.
+ CopySource *string
+
+ // CopyStatus contains the information returned from the x-ms-copy-status header response.
+ CopyStatus *CopyStatusType
+
+ // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response.
+ CopyStatusDescription *string
+
+ // CreationTime contains the information returned from the x-ms-creation-time header response.
+ CreationTime *time.Time
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+ EncryptionKeySHA256 *string
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // ErrorCode contains the information returned from the x-ms-error-code header response.
+ ErrorCode *string
+
+ // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response.
+ ImmutabilityPolicyExpiresOn *time.Time
+
+ // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
+ ImmutabilityPolicyMode *ImmutabilityPolicyMode
+
+ // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response.
+ IsCurrentVersion *bool
+
+ // IsSealed contains the information returned from the x-ms-blob-sealed header response.
+ IsSealed *bool
+
+ // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastAccessed contains the information returned from the x-ms-last-access-time header response.
+ LastAccessed *time.Time
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // LeaseDuration contains the information returned from the x-ms-lease-duration header response.
+ LeaseDuration *LeaseDurationType
+
+ // LeaseState contains the information returned from the x-ms-lease-state header response.
+ LeaseState *LeaseStateType
+
+ // LeaseStatus contains the information returned from the x-ms-lease-status header response.
+ LeaseStatus *LeaseStatusType
+
+ // LegalHold contains the information returned from the x-ms-legal-hold header response.
+ LegalHold *bool
+
+ // Metadata contains the information returned from the x-ms-meta header response.
+ Metadata map[string]*string
+
+ // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response.
+ ObjectReplicationPolicyID *string
+
+ // ObjectReplicationRules contains the information returned from the x-ms-or header response.
+ ObjectReplicationRules map[string]*string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // StructuredBodyType contains the information returned from the x-ms-structured-body header response.
+ StructuredBodyType *string
+
+ // StructuredContentLength contains the information returned from the x-ms-structured-content-length header response.
+ StructuredContentLength *int64
+
+ // TagCount contains the information returned from the x-ms-tag-count header response.
+ TagCount *int64
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+
+ // VersionID contains the information returned from the x-ms-version-id header response.
+ VersionID *string
+}
+
+// BlobClientGetAccountInfoResponse contains the response from method BlobClient.GetAccountInfo.
+type BlobClientGetAccountInfoResponse struct {
+ // AccountKind contains the information returned from the x-ms-account-kind header response.
+ AccountKind *AccountKind
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response.
+ IsHierarchicalNamespaceEnabled *bool
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // SKUName contains the information returned from the x-ms-sku-name header response.
+ SKUName *SKUName
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientGetPropertiesResponse contains the response from method BlobClient.GetProperties.
+type BlobClientGetPropertiesResponse struct {
+ // AcceptRanges contains the information returned from the Accept-Ranges header response.
+ AcceptRanges *string
+
+ // AccessTier contains the information returned from the x-ms-access-tier header response.
+ AccessTier *string
+
+ // AccessTierChangeTime contains the information returned from the x-ms-access-tier-change-time header response.
+ AccessTierChangeTime *time.Time
+
+ // AccessTierInferred contains the information returned from the x-ms-access-tier-inferred header response.
+ AccessTierInferred *bool
+
+ // ArchiveStatus contains the information returned from the x-ms-archive-status header response.
+ ArchiveStatus *string
+
+ // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
+ BlobCommittedBlockCount *int32
+
+ // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+ BlobSequenceNumber *int64
+
+ // BlobType contains the information returned from the x-ms-blob-type header response.
+ BlobType *BlobType
+
+ // CacheControl contains the information returned from the Cache-Control header response.
+ CacheControl *string
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // ContentDisposition contains the information returned from the Content-Disposition header response.
+ ContentDisposition *string
+
+ // ContentEncoding contains the information returned from the Content-Encoding header response.
+ ContentEncoding *string
+
+ // ContentLanguage contains the information returned from the Content-Language header response.
+ ContentLanguage *string
+
+ // ContentLength contains the information returned from the Content-Length header response.
+ ContentLength *int64
+
+ // ContentMD5 contains the information returned from the Content-MD5 header response.
+ ContentMD5 []byte
+
+ // ContentType contains the information returned from the Content-Type header response.
+ ContentType *string
+
+ // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response.
+ CopyCompletionTime *time.Time
+
+ // CopyID contains the information returned from the x-ms-copy-id header response.
+ CopyID *string
+
+ // CopyProgress contains the information returned from the x-ms-copy-progress header response.
+ CopyProgress *string
+
+ // CopySource contains the information returned from the x-ms-copy-source header response.
+ CopySource *string
+
+ // CopyStatus contains the information returned from the x-ms-copy-status header response.
+ CopyStatus *CopyStatusType
+
+ // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response.
+ CopyStatusDescription *string
+
+ // CreationTime contains the information returned from the x-ms-creation-time header response.
+ CreationTime *time.Time
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // DestinationSnapshot contains the information returned from the x-ms-copy-destination-snapshot header response.
+ DestinationSnapshot *string
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+ EncryptionKeySHA256 *string
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // ExpiresOn contains the information returned from the x-ms-expiry-time header response.
+ ExpiresOn *time.Time
+
+ // ImmutabilityPolicyExpiresOn contains the information returned from the x-ms-immutability-policy-until-date header response.
+ ImmutabilityPolicyExpiresOn *time.Time
+
+ // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
+ ImmutabilityPolicyMode *ImmutabilityPolicyMode
+
+ // IsCurrentVersion contains the information returned from the x-ms-is-current-version header response.
+ IsCurrentVersion *bool
+
+ // IsIncrementalCopy contains the information returned from the x-ms-incremental-copy header response.
+ IsIncrementalCopy *bool
+
+ // IsSealed contains the information returned from the x-ms-blob-sealed header response.
+ IsSealed *bool
+
+ // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastAccessed contains the information returned from the x-ms-last-access-time header response.
+ LastAccessed *time.Time
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // LeaseDuration contains the information returned from the x-ms-lease-duration header response.
+ LeaseDuration *LeaseDurationType
+
+ // LeaseState contains the information returned from the x-ms-lease-state header response.
+ LeaseState *LeaseStateType
+
+ // LeaseStatus contains the information returned from the x-ms-lease-status header response.
+ LeaseStatus *LeaseStatusType
+
+ // LegalHold contains the information returned from the x-ms-legal-hold header response.
+ LegalHold *bool
+
+ // Metadata contains the information returned from the x-ms-meta header response.
+ Metadata map[string]*string
+
+ // ObjectReplicationPolicyID contains the information returned from the x-ms-or-policy-id header response.
+ ObjectReplicationPolicyID *string
+
+ // ObjectReplicationRules contains the information returned from the x-ms-or header response.
+ ObjectReplicationRules map[string]*string
+
+ // RehydratePriority contains the information returned from the x-ms-rehydrate-priority header response.
+ RehydratePriority *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // TagCount contains the information returned from the x-ms-tag-count header response.
+ TagCount *int64
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+
+ // VersionID contains the information returned from the x-ms-version-id header response.
+ VersionID *string
+}
+
+// BlobClientGetTagsResponse contains the response from method BlobClient.GetTags.
+type BlobClientGetTagsResponse struct {
+	// BlobTags contains the blob's tags returned in the response body.
+ BlobTags
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientQueryResponse contains the response from method BlobClient.Query.
+type BlobClientQueryResponse struct {
+ // AcceptRanges contains the information returned from the Accept-Ranges header response.
+ AcceptRanges *string
+
+ // BlobCommittedBlockCount contains the information returned from the x-ms-blob-committed-block-count header response.
+ BlobCommittedBlockCount *int32
+
+ // BlobContentMD5 contains the information returned from the x-ms-blob-content-md5 header response.
+ BlobContentMD5 []byte
+
+ // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+ BlobSequenceNumber *int64
+
+ // BlobType contains the information returned from the x-ms-blob-type header response.
+ BlobType *BlobType
+
+ // Body contains the streaming response.
+ Body io.ReadCloser
+
+ // CacheControl contains the information returned from the Cache-Control header response.
+ CacheControl *string
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+ ContentCRC64 []byte
+
+ // ContentDisposition contains the information returned from the Content-Disposition header response.
+ ContentDisposition *string
+
+ // ContentEncoding contains the information returned from the Content-Encoding header response.
+ ContentEncoding *string
+
+ // ContentLanguage contains the information returned from the Content-Language header response.
+ ContentLanguage *string
+
+ // ContentLength contains the information returned from the Content-Length header response.
+ ContentLength *int64
+
+ // ContentMD5 contains the information returned from the Content-MD5 header response.
+ ContentMD5 []byte
+
+ // ContentRange contains the information returned from the Content-Range header response.
+ ContentRange *string
+
+ // ContentType contains the information returned from the Content-Type header response.
+ ContentType *string
+
+ // CopyCompletionTime contains the information returned from the x-ms-copy-completion-time header response.
+ CopyCompletionTime *time.Time
+
+ // CopyID contains the information returned from the x-ms-copy-id header response.
+ CopyID *string
+
+ // CopyProgress contains the information returned from the x-ms-copy-progress header response.
+ CopyProgress *string
+
+ // CopySource contains the information returned from the x-ms-copy-source header response.
+ CopySource *string
+
+ // CopyStatus contains the information returned from the x-ms-copy-status header response.
+ CopyStatus *CopyStatusType
+
+ // CopyStatusDescription contains the information returned from the x-ms-copy-status-description header response.
+ CopyStatusDescription *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+ EncryptionKeySHA256 *string
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // IsServerEncrypted contains the information returned from the x-ms-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // LeaseDuration contains the information returned from the x-ms-lease-duration header response.
+ LeaseDuration *LeaseDurationType
+
+ // LeaseState contains the information returned from the x-ms-lease-state header response.
+ LeaseState *LeaseStateType
+
+ // LeaseStatus contains the information returned from the x-ms-lease-status header response.
+ LeaseStatus *LeaseStatusType
+
+ // Metadata contains the information returned from the x-ms-meta header response.
+ Metadata map[string]*string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientReleaseLeaseResponse contains the response from method BlobClient.ReleaseLease.
+type BlobClientReleaseLeaseResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientRenewLeaseResponse contains the response from method BlobClient.RenewLease.
+type BlobClientRenewLeaseResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // LeaseID contains the information returned from the x-ms-lease-id header response.
+ LeaseID *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientSetExpiryResponse contains the response from method BlobClient.SetExpiry.
+type BlobClientSetExpiryResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientSetHTTPHeadersResponse contains the response from method BlobClient.SetHTTPHeaders.
+type BlobClientSetHTTPHeadersResponse struct {
+ // BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+ BlobSequenceNumber *int64
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientSetImmutabilityPolicyResponse contains the response from method BlobClient.SetImmutabilityPolicy.
+type BlobClientSetImmutabilityPolicyResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ImmutabilityPolicyExpiry contains the information returned from the x-ms-immutability-policy-until-date header response.
+ ImmutabilityPolicyExpiry *time.Time
+
+ // ImmutabilityPolicyMode contains the information returned from the x-ms-immutability-policy-mode header response.
+ ImmutabilityPolicyMode *ImmutabilityPolicyMode
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientSetLegalHoldResponse contains the response from method BlobClient.SetLegalHold.
+type BlobClientSetLegalHoldResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // LegalHold contains the information returned from the x-ms-legal-hold header response.
+ LegalHold *bool
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientSetMetadataResponse contains the response from method BlobClient.SetMetadata.
+type BlobClientSetMetadataResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+ EncryptionKeySHA256 *string
+
+ // EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+ EncryptionScope *string
+
+ // IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+ IsServerEncrypted *bool
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+
+ // VersionID contains the information returned from the x-ms-version-id header response.
+ VersionID *string
+}
+
+// BlobClientSetTagsResponse contains the response from method BlobClient.SetTags.
+type BlobClientSetTagsResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientSetTierResponse contains the response from method BlobClient.SetTier.
+type BlobClientSetTierResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlobClientStartCopyFromURLResponse contains the response from method BlobClient.StartCopyFromURL.
+type BlobClientStartCopyFromURLResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // CopyID contains the information returned from the x-ms-copy-id header response.
+ CopyID *string
+
+ // CopyStatus contains the information returned from the x-ms-copy-status header response.
+ CopyStatus *CopyStatusType
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // ETag contains the information returned from the ETag header response.
+ ETag *azcore.ETag
+
+ // LastModified contains the information returned from the Last-Modified header response.
+ LastModified *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+
+ // VersionID contains the information returned from the x-ms-version-id header response.
+ VersionID *string
+}
+
+// BlobClientUndeleteResponse contains the response from method BlobClient.Undelete.
+type BlobClientUndeleteResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// BlockBlobClientCommitBlockListResponse contains the response from method BlockBlobClient.CommitBlockList. Every field is populated from an HTTP response header; pointer fields may be nil.
+type BlockBlobClientCommitBlockListResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+	ContentCRC64 []byte
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+
+	// VersionID contains the information returned from the x-ms-version-id header response.
+	VersionID *string
+}
+
+// BlockBlobClientGetBlockListResponse contains the response from method BlockBlobClient.GetBlockList. The embedded BlockList holds the parsed response body; the remaining fields come from response headers.
+type BlockBlobClientGetBlockListResponse struct {
+	BlockList
+
+	// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
+	BlobContentLength *int64
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentType contains the information returned from the Content-Type header response.
+	ContentType *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// BlockBlobClientPutBlobFromURLResponse contains the response from method BlockBlobClient.PutBlobFromURL. Every field is populated from an HTTP response header; pointer fields may be nil.
+type BlockBlobClientPutBlobFromURLResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+
+	// VersionID contains the information returned from the x-ms-version-id header response.
+	VersionID *string
+}
+
+// BlockBlobClientStageBlockFromURLResponse contains the response from method BlockBlobClient.StageBlockFromURL. Every field is populated from an HTTP response header; pointer fields may be nil.
+type BlockBlobClientStageBlockFromURLResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+	ContentCRC64 []byte
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// BlockBlobClientStageBlockResponse contains the response from method BlockBlobClient.StageBlock. Every field is populated from an HTTP response header; pointer fields may be nil.
+type BlockBlobClientStageBlockResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+	ContentCRC64 []byte
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// StructuredBodyType contains the information returned from the x-ms-structured-body header response.
+	StructuredBodyType *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// BlockBlobClientUploadResponse contains the response from method BlockBlobClient.Upload. Every field is populated from an HTTP response header; pointer fields may be nil.
+type BlockBlobClientUploadResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+	ContentCRC64 []byte
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// StructuredBodyType contains the information returned from the x-ms-structured-body header response.
+	StructuredBodyType *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+
+	// VersionID contains the information returned from the x-ms-version-id header response.
+	VersionID *string
+}
+
+// ContainerClientAcquireLeaseResponse contains the response from method ContainerClient.AcquireLease. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientAcquireLeaseResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// LeaseID contains the information returned from the x-ms-lease-id header response.
+	LeaseID *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientBreakLeaseResponse contains the response from method ContainerClient.BreakLease. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientBreakLeaseResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// LeaseTime contains the information returned from the x-ms-lease-time header response.
+	LeaseTime *int32
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientChangeLeaseResponse contains the response from method ContainerClient.ChangeLease. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientChangeLeaseResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// LeaseID contains the information returned from the x-ms-lease-id header response.
+	LeaseID *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientCreateResponse contains the response from method ContainerClient.Create. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientCreateResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientDeleteResponse contains the response from method ContainerClient.Delete. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientDeleteResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientFilterBlobsResponse contains the response from method ContainerClient.FilterBlobs. The embedded FilterBlobSegment holds the parsed response body; the remaining fields come from response headers.
+type ContainerClientFilterBlobsResponse struct {
+	// The result of a Filter Blobs API call
+	FilterBlobSegment
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientGetAccessPolicyResponse contains the response from method ContainerClient.GetAccessPolicy. SignedIdentifiers is decoded from the XML response body; the remaining fields come from response headers.
+type ContainerClientGetAccessPolicyResponse struct {
+	// BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response.
+	BlobPublicAccess *PublicAccessType
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// SignedIdentifiers is the collection of signed identifiers unmarshalled from the XML response body.
+	SignedIdentifiers []*SignedIdentifier `xml:"SignedIdentifier"`
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientGetAccountInfoResponse contains the response from method ContainerClient.GetAccountInfo. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientGetAccountInfoResponse struct {
+	// AccountKind contains the information returned from the x-ms-account-kind header response.
+	AccountKind *AccountKind
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response.
+	IsHierarchicalNamespaceEnabled *bool
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// SKUName contains the information returned from the x-ms-sku-name header response.
+	SKUName *SKUName
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientGetPropertiesResponse contains the response from method ContainerClient.GetProperties. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientGetPropertiesResponse struct {
+	// BlobPublicAccess contains the information returned from the x-ms-blob-public-access header response.
+	BlobPublicAccess *PublicAccessType
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// DefaultEncryptionScope contains the information returned from the x-ms-default-encryption-scope header response.
+	DefaultEncryptionScope *string
+
+	// DenyEncryptionScopeOverride contains the information returned from the x-ms-deny-encryption-scope-override header response.
+	DenyEncryptionScopeOverride *bool
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// HasImmutabilityPolicy contains the information returned from the x-ms-has-immutability-policy header response.
+	HasImmutabilityPolicy *bool
+
+	// HasLegalHold contains the information returned from the x-ms-has-legal-hold header response.
+	HasLegalHold *bool
+
+	// IsImmutableStorageWithVersioningEnabled contains the information returned from the x-ms-immutable-storage-with-versioning-enabled
+	// header response.
+	IsImmutableStorageWithVersioningEnabled *bool
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// LeaseDuration contains the information returned from the x-ms-lease-duration header response.
+	LeaseDuration *LeaseDurationType
+
+	// LeaseState contains the information returned from the x-ms-lease-state header response.
+	LeaseState *LeaseStateType
+
+	// LeaseStatus contains the information returned from the x-ms-lease-status header response.
+	LeaseStatus *LeaseStatusType
+
+	// Metadata contains the information returned from the x-ms-meta header response.
+	Metadata map[string]*string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientListBlobFlatSegmentResponse contains the response from method ContainerClient.NewListBlobFlatSegmentPager. The embedded ListBlobsFlatSegmentResponse holds the parsed response body; the remaining fields come from response headers.
+type ContainerClientListBlobFlatSegmentResponse struct {
+	// An enumeration of blobs
+	ListBlobsFlatSegmentResponse
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentType contains the information returned from the Content-Type header response.
+	ContentType *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientListBlobHierarchySegmentResponse contains the response from method ContainerClient.NewListBlobHierarchySegmentPager. The embedded ListBlobsHierarchySegmentResponse holds the parsed response body; the remaining fields come from response headers.
+type ContainerClientListBlobHierarchySegmentResponse struct {
+	// An enumeration of blobs
+	ListBlobsHierarchySegmentResponse
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentType contains the information returned from the Content-Type header response.
+	ContentType *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientReleaseLeaseResponse contains the response from method ContainerClient.ReleaseLease. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientReleaseLeaseResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientRenameResponse contains the response from method ContainerClient.Rename. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientRenameResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientRenewLeaseResponse contains the response from method ContainerClient.RenewLease. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientRenewLeaseResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// LeaseID contains the information returned from the x-ms-lease-id header response.
+	LeaseID *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientRestoreResponse contains the response from method ContainerClient.Restore. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientRestoreResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientSetAccessPolicyResponse contains the response from method ContainerClient.SetAccessPolicy. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientSetAccessPolicyResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientSetMetadataResponse contains the response from method ContainerClient.SetMetadata. Every field is populated from an HTTP response header; pointer fields may be nil.
+type ContainerClientSetMetadataResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ContainerClientSubmitBatchResponse contains the response from method ContainerClient.SubmitBatch. Body carries the streaming response payload; the remaining fields come from response headers.
+type ContainerClientSubmitBatchResponse struct {
+	// Body contains the streaming response. It is an io.ReadCloser, so the caller should close it when done.
+	Body io.ReadCloser
+
+	// ContentType contains the information returned from the Content-Type header response.
+	ContentType *string
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// PageBlobClientClearPagesResponse contains the response from method PageBlobClient.ClearPages. Every field is populated from an HTTP response header; pointer fields may be nil.
+type PageBlobClientClearPagesResponse struct {
+	// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+	BlobSequenceNumber *int64
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+	ContentCRC64 []byte
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// PageBlobClientCopyIncrementalResponse contains the response from method PageBlobClient.CopyIncremental. Every field is populated from an HTTP response header; pointer fields may be nil.
+type PageBlobClientCopyIncrementalResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// CopyID contains the information returned from the x-ms-copy-id header response.
+	CopyID *string
+
+	// CopyStatus contains the information returned from the x-ms-copy-status header response.
+	CopyStatus *CopyStatusType
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// PageBlobClientCreateResponse contains the response from method PageBlobClient.Create. Every field is populated from an HTTP response header; pointer fields may be nil.
+type PageBlobClientCreateResponse struct {
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+
+	// VersionID contains the information returned from the x-ms-version-id header response.
+	VersionID *string
+}
+
+// PageBlobClientGetPageRangesDiffResponse contains the response from method PageBlobClient.NewGetPageRangesDiffPager. The embedded PageList holds the parsed response body; the remaining fields come from response headers.
+type PageBlobClientGetPageRangesDiffResponse struct {
+	// the list of pages
+	PageList
+
+	// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
+	BlobContentLength *int64
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// PageBlobClientGetPageRangesResponse contains the response from method PageBlobClient.NewGetPageRangesPager. The embedded PageList holds the parsed response body; the remaining fields come from response headers.
+type PageBlobClientGetPageRangesResponse struct {
+	// the list of pages
+	PageList
+
+	// BlobContentLength contains the information returned from the x-ms-blob-content-length header response.
+	BlobContentLength *int64
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// PageBlobClientResizeResponse contains the response from method PageBlobClient.Resize. Every field is populated from an HTTP response header; pointer fields may be nil.
+type PageBlobClientResizeResponse struct {
+	// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+	BlobSequenceNumber *int64
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// PageBlobClientUpdateSequenceNumberResponse contains the response from method PageBlobClient.UpdateSequenceNumber. Every field is populated from an HTTP response header; pointer fields may be nil.
+type PageBlobClientUpdateSequenceNumberResponse struct {
+	// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+	BlobSequenceNumber *int64
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// PageBlobClientUploadPagesFromURLResponse contains the response from method PageBlobClient.UploadPagesFromURL. Every field is populated from an HTTP response header; pointer fields may be nil.
+type PageBlobClientUploadPagesFromURLResponse struct {
+	// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+	BlobSequenceNumber *int64
+
+	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+	ContentCRC64 []byte
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// PageBlobClientUploadPagesResponse contains the response from method PageBlobClient.UploadPages. Every field is populated from an HTTP response header; pointer fields may be nil.
+type PageBlobClientUploadPagesResponse struct {
+	// BlobSequenceNumber contains the information returned from the x-ms-blob-sequence-number header response.
+	BlobSequenceNumber *int64
+
+	// ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+	ClientRequestID *string
+
+	// ContentCRC64 contains the information returned from the x-ms-content-crc64 header response.
+	ContentCRC64 []byte
+
+	// ContentMD5 contains the information returned from the Content-MD5 header response.
+	ContentMD5 []byte
+
+	// Date contains the information returned from the Date header response.
+	Date *time.Time
+
+	// ETag contains the information returned from the ETag header response.
+	ETag *azcore.ETag
+
+	// EncryptionKeySHA256 contains the information returned from the x-ms-encryption-key-sha256 header response.
+	EncryptionKeySHA256 *string
+
+	// EncryptionScope contains the information returned from the x-ms-encryption-scope header response.
+	EncryptionScope *string
+
+	// IsServerEncrypted contains the information returned from the x-ms-request-server-encrypted header response.
+	IsServerEncrypted *bool
+
+	// LastModified contains the information returned from the Last-Modified header response.
+	LastModified *time.Time
+
+	// RequestID contains the information returned from the x-ms-request-id header response.
+	RequestID *string
+
+	// StructuredBodyType contains the information returned from the x-ms-structured-body header response.
+	StructuredBodyType *string
+
+	// Version contains the information returned from the x-ms-version header response.
+	Version *string
+}
+
+// ServiceClientFilterBlobsResponse contains the response from method ServiceClient.FilterBlobs.
+type ServiceClientFilterBlobsResponse struct {
+ // The result of a Filter Blobs API call
+ FilterBlobSegment
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// ServiceClientGetAccountInfoResponse contains the response from method ServiceClient.GetAccountInfo.
+type ServiceClientGetAccountInfoResponse struct {
+ // AccountKind contains the information returned from the x-ms-account-kind header response.
+ AccountKind *AccountKind
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // IsHierarchicalNamespaceEnabled contains the information returned from the x-ms-is-hns-enabled header response.
+ IsHierarchicalNamespaceEnabled *bool
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // SKUName contains the information returned from the x-ms-sku-name header response.
+ SKUName *SKUName
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// ServiceClientGetPropertiesResponse contains the response from method ServiceClient.GetProperties.
+type ServiceClientGetPropertiesResponse struct {
+ // Storage Service Properties.
+ StorageServiceProperties
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// ServiceClientGetStatisticsResponse contains the response from method ServiceClient.GetStatistics.
+type ServiceClientGetStatisticsResponse struct {
+ // Stats for the storage service.
+ StorageServiceStats
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// ServiceClientGetUserDelegationKeyResponse contains the response from method ServiceClient.GetUserDelegationKey.
+type ServiceClientGetUserDelegationKeyResponse struct {
+ // A user delegation key
+ UserDelegationKey
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // Date contains the information returned from the Date header response.
+ Date *time.Time
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// ServiceClientListContainersSegmentResponse contains the response from method ServiceClient.NewListContainersSegmentPager.
+type ServiceClientListContainersSegmentResponse struct {
+ // An enumeration of containers
+ ListContainersSegmentResponse
+
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// ServiceClientSetPropertiesResponse contains the response from method ServiceClient.SetProperties.
+type ServiceClientSetPropertiesResponse struct {
+ // ClientRequestID contains the information returned from the x-ms-client-request-id header response.
+ ClientRequestID *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
+
+// ServiceClientSubmitBatchResponse contains the response from method ServiceClient.SubmitBatch.
+type ServiceClientSubmitBatchResponse struct {
+ // Body contains the streaming response.
+ Body io.ReadCloser
+
+ // ContentType contains the information returned from the Content-Type header response.
+ ContentType *string
+
+ // RequestID contains the information returned from the x-ms-request-id header response.
+ RequestID *string
+
+ // Version contains the information returned from the x-ms-version header response.
+ Version *string
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
index c792fbf09..8764591b3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_service_client.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -33,7 +30,7 @@ type ServiceClient struct {
// be scoped within the expression to a single container.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - where - Filters the results to return only to return only blobs whose tags match the specified expression.
// - options - ServiceClientFilterBlobsOptions contains the optional parameters for the ServiceClient.FilterBlobs method.
func (client *ServiceClient) FilterBlobs(ctx context.Context, where string, options *ServiceClientFilterBlobsOptions) (ServiceClientFilterBlobsResponse, error) {
@@ -62,25 +59,25 @@ func (client *ServiceClient) filterBlobsCreateRequest(ctx context.Context, where
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "blobs")
- if options != nil && options.Timeout != nil {
- reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ if options != nil && options.Include != nil {
+ reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
}
- reqQP.Set("where", where)
if options != nil && options.Marker != nil {
reqQP.Set("marker", *options.Marker)
}
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
- if options != nil && options.Include != nil {
- reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
+ reqQP.Set("where", where)
req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -112,7 +109,7 @@ func (client *ServiceClient) filterBlobsHandleResponse(resp *http.Response) (Ser
// GetAccountInfo - Returns the sku name and account kind
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ServiceClientGetAccountInfoOptions contains the optional parameters for the ServiceClient.GetAccountInfo method.
func (client *ServiceClient) GetAccountInfo(ctx context.Context, options *ServiceClientGetAccountInfoOptions) (ServiceClientGetAccountInfoResponse, error) {
var err error
@@ -139,11 +136,17 @@ func (client *ServiceClient) getAccountInfoCreateRequest(ctx context.Context, op
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "account")
reqQP.Set("comp", "properties")
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ reqQP.Set("restype", "account")
+ if options != nil && options.Timeout != nil {
+ reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
+ }
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
req.Raw().Header["Accept"] = []string{"application/xml"}
+ if options != nil && options.RequestID != nil {
+ req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
+ }
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -186,7 +189,7 @@ func (client *ServiceClient) getAccountInfoHandleResponse(resp *http.Response) (
// CORS (Cross-Origin Resource Sharing) rules.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ServiceClientGetPropertiesOptions contains the optional parameters for the ServiceClient.GetProperties method.
func (client *ServiceClient) GetProperties(ctx context.Context, options *ServiceClientGetPropertiesOptions) (ServiceClientGetPropertiesResponse, error) {
var err error
@@ -213,17 +216,17 @@ func (client *ServiceClient) getPropertiesCreateRequest(ctx context.Context, opt
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "service")
reqQP.Set("comp", "properties")
+ reqQP.Set("restype", "service")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -249,7 +252,7 @@ func (client *ServiceClient) getPropertiesHandleResponse(resp *http.Response) (S
// location endpoint when read-access geo-redundant replication is enabled for the storage account.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ServiceClientGetStatisticsOptions contains the optional parameters for the ServiceClient.GetStatistics method.
func (client *ServiceClient) GetStatistics(ctx context.Context, options *ServiceClientGetStatisticsOptions) (ServiceClientGetStatisticsResponse, error) {
var err error
@@ -276,17 +279,17 @@ func (client *ServiceClient) getStatisticsCreateRequest(ctx context.Context, opt
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "service")
reqQP.Set("comp", "stats")
+ reqQP.Set("restype", "service")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -319,7 +322,7 @@ func (client *ServiceClient) getStatisticsHandleResponse(resp *http.Response) (S
// bearer token authentication.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - keyInfo - Key information
// - options - ServiceClientGetUserDelegationKeyOptions contains the optional parameters for the ServiceClient.GetUserDelegationKey
// method.
@@ -348,17 +351,17 @@ func (client *ServiceClient) getUserDelegationKeyCreateRequest(ctx context.Conte
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "service")
reqQP.Set("comp", "userdelegationkey")
+ reqQP.Set("restype", "service")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := runtime.MarshalAsXML(req, keyInfo); err != nil {
return nil, err
}
@@ -393,7 +396,7 @@ func (client *ServiceClient) getUserDelegationKeyHandleResponse(resp *http.Respo
// NewListContainersSegmentPager - The List Containers Segment operation returns a list of the containers under the specified
// account
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - options - ServiceClientListContainersSegmentOptions contains the optional parameters for the ServiceClient.NewListContainersSegmentPager
// method.
//
@@ -405,8 +408,8 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont
}
reqQP := req.Raw().URL.Query()
reqQP.Set("comp", "list")
- if options != nil && options.Prefix != nil {
- reqQP.Set("prefix", *options.Prefix)
+ if options != nil && options.Include != nil {
+ reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
}
if options != nil && options.Marker != nil {
reqQP.Set("marker", *options.Marker)
@@ -414,18 +417,18 @@ func (client *ServiceClient) ListContainersSegmentCreateRequest(ctx context.Cont
if options != nil && options.Maxresults != nil {
reqQP.Set("maxresults", strconv.FormatInt(int64(*options.Maxresults), 10))
}
- if options != nil && options.Include != nil {
- reqQP.Set("include", strings.Join(strings.Fields(strings.Trim(fmt.Sprint(options.Include), "[]")), ","))
+ if options != nil && options.Prefix != nil {
+ reqQP.Set("prefix", *options.Prefix)
}
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
return req, nil
}
@@ -451,7 +454,7 @@ func (client *ServiceClient) ListContainersSegmentHandleResponse(resp *http.Resp
// and CORS (Cross-Origin Resource Sharing) rules
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - storageServiceProperties - The StorageService properties.
// - options - ServiceClientSetPropertiesOptions contains the optional parameters for the ServiceClient.SetProperties method.
func (client *ServiceClient) SetProperties(ctx context.Context, storageServiceProperties StorageServiceProperties, options *ServiceClientSetPropertiesOptions) (ServiceClientSetPropertiesResponse, error) {
@@ -479,17 +482,17 @@ func (client *ServiceClient) setPropertiesCreateRequest(ctx context.Context, sto
return nil, err
}
reqQP := req.Raw().URL.Query()
- reqQP.Set("restype", "service")
reqQP.Set("comp", "properties")
+ reqQP.Set("restype", "service")
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := runtime.MarshalAsXML(req, storageServiceProperties); err != nil {
return nil, err
}
@@ -514,7 +517,7 @@ func (client *ServiceClient) setPropertiesHandleResponse(resp *http.Response) (S
// SubmitBatch - The Batch operation allows multiple API calls to be embedded into a single HTTP request.
// If the operation fails it returns an *azcore.ResponseError type.
//
-// Generated from API version 2023-11-03
+// Generated from API version 2025-01-05
// - contentLength - The length of the request.
// - multipartContentType - Required. The value of this header must be multipart/mixed with a batch boundary. Example header
// value: multipart/mixed; boundary=batch_
@@ -549,15 +552,15 @@ func (client *ServiceClient) submitBatchCreateRequest(ctx context.Context, conte
if options != nil && options.Timeout != nil {
reqQP.Set("timeout", strconv.FormatInt(int64(*options.Timeout), 10))
}
- req.Raw().URL.RawQuery = reqQP.Encode()
+ req.Raw().URL.RawQuery = strings.Replace(reqQP.Encode(), "+", "%20", -1)
runtime.SkipBodyDownload(req)
+ req.Raw().Header["Accept"] = []string{"application/xml"}
req.Raw().Header["Content-Length"] = []string{strconv.FormatInt(contentLength, 10)}
req.Raw().Header["Content-Type"] = []string{multipartContentType}
- req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if options != nil && options.RequestID != nil {
req.Raw().Header["x-ms-client-request-id"] = []string{*options.RequestID}
}
- req.Raw().Header["Accept"] = []string{"application/xml"}
+ req.Raw().Header["x-ms-version"] = []string{ServiceVersion}
if err := req.SetBody(body, multipartContentType); err != nil {
return nil, err
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go
index 586650329..ee3732ebc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc1123.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -36,7 +33,14 @@ func (t *dateTimeRFC1123) UnmarshalJSON(data []byte) error {
}
func (t *dateTimeRFC1123) UnmarshalText(data []byte) error {
+ if len(data) == 0 {
+ return nil
+ }
p, err := time.Parse(time.RFC1123, string(data))
*t = dateTimeRFC1123(p)
return err
}
+
+func (t dateTimeRFC1123) String() string {
+ return time.Time(t).Format(time.RFC1123)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go
index 82b370133..e9eac9bcb 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_time_rfc3339.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
@@ -15,12 +12,16 @@ import (
)
// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases.
-var tzOffsetRegex = regexp.MustCompile(`(Z|z|\+|-)(\d+:\d+)*"*$`)
+var tzOffsetRegex = regexp.MustCompile(`(?:Z|z|\+|-)(?:\d+:\d+)*"*$`)
const (
- utcDateTimeJSON = `"2006-01-02T15:04:05.999999999"`
- utcDateTime = "2006-01-02T15:04:05.999999999"
- dateTimeJSON = `"` + time.RFC3339Nano + `"`
+ utcDateTime = "2006-01-02T15:04:05.999999999"
+ utcDateTimeJSON = `"` + utcDateTime + `"`
+ utcDateTimeNoT = "2006-01-02 15:04:05.999999999"
+ utcDateTimeJSONNoT = `"` + utcDateTimeNoT + `"`
+ dateTimeNoT = `2006-01-02 15:04:05.999999999Z07:00`
+ dateTimeJSON = `"` + time.RFC3339Nano + `"`
+ dateTimeJSONNoT = `"` + dateTimeNoT + `"`
)
type dateTimeRFC3339 time.Time
@@ -36,17 +37,36 @@ func (t dateTimeRFC3339) MarshalText() ([]byte, error) {
}
func (t *dateTimeRFC3339) UnmarshalJSON(data []byte) error {
- layout := utcDateTimeJSON
- if tzOffsetRegex.Match(data) {
+ tzOffset := tzOffsetRegex.Match(data)
+ hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t")
+ var layout string
+ if tzOffset && hasT {
layout = dateTimeJSON
+ } else if tzOffset {
+ layout = dateTimeJSONNoT
+ } else if hasT {
+ layout = utcDateTimeJSON
+ } else {
+ layout = utcDateTimeJSONNoT
}
return t.Parse(layout, string(data))
}
func (t *dateTimeRFC3339) UnmarshalText(data []byte) error {
- layout := utcDateTime
- if tzOffsetRegex.Match(data) {
+ if len(data) == 0 {
+ return nil
+ }
+ tzOffset := tzOffsetRegex.Match(data)
+ hasT := strings.Contains(string(data), "T") || strings.Contains(string(data), "t")
+ var layout string
+ if tzOffset && hasT {
layout = time.RFC3339Nano
+ } else if tzOffset {
+ layout = dateTimeNoT
+ } else if hasT {
+ layout = utcDateTime
+ } else {
+ layout = utcDateTimeNoT
}
return t.Parse(layout, string(data))
}
@@ -56,3 +76,7 @@ func (t *dateTimeRFC3339) Parse(layout, value string) error {
*t = dateTimeRFC3339(p)
return err
}
+
+func (t dateTimeRFC3339) String() string {
+ return time.Time(t).Format(time.RFC3339Nano)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go
index 1bd0e4de0..355d0176b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated/zz_xml_helper.go
@@ -1,6 +1,3 @@
-//go:build go1.18
-// +build go1.18
-
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator. DO NOT EDIT.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go
index c1b3a3d27..5c44af34a 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/batch_transfer.go
@@ -19,7 +19,7 @@ const (
type BatchTransferOptions struct {
TransferSize int64
ChunkSize int64
- NumChunks uint16
+ NumChunks uint64
Concurrency uint16
Operation func(ctx context.Context, offset int64, chunkSize int64) error
OperationName string
@@ -44,7 +44,6 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
// Create the goroutines that process each operation (in parallel).
for g := uint16(0); g < o.Concurrency; g++ {
- //grIndex := g
go func() {
for f := range operationChannel {
err := f()
@@ -54,7 +53,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
}
// Add each chunk's operation to the channel.
- for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ {
+ for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ {
curChunkSize := o.ChunkSize
if chunkNum == o.NumChunks-1 { // Last chunk
@@ -69,7 +68,7 @@ func DoBatchTransfer(ctx context.Context, o *BatchTransferOptions) error {
// Wait for the operations to complete.
var firstErr error = nil
- for chunkNum := uint16(0); chunkNum < o.NumChunks; chunkNum++ {
+ for chunkNum := uint64(0); chunkNum < o.NumChunks; chunkNum++ {
responseError := <-operationResponseChannel
// record the first error (the original error which should cause the other chunks to fail with canceled context)
if responseError != nil && firstErr == nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go
index 1c81b9db9..fff61016c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/challenge_policy.go
@@ -8,11 +8,12 @@ package shared
import (
"errors"
+ "net/http"
+ "strings"
+
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
- "net/http"
- "strings"
)
type storageAuthorizer struct {
@@ -20,13 +21,14 @@ type storageAuthorizer struct {
tenantID string
}
-func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string) policy.Policy {
+func NewStorageChallengePolicy(cred azcore.TokenCredential, audience string, allowHTTP bool) policy.Policy {
s := storageAuthorizer{scopes: []string{audience}}
return runtime.NewBearerTokenPolicy(cred, []string{audience}, &policy.BearerTokenOptions{
AuthorizationHandler: policy.AuthorizationHandler{
OnRequest: s.onRequest,
OnChallenge: s.onChallenge,
},
+ InsecureAllowCredentialWithHTTP: allowHTTP,
})
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go
index cdcadf311..072fd27b1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared/mmf_unix.go
@@ -1,6 +1,6 @@
-//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix)
+//go:build go1.18 && (linux || darwin || dragonfly || freebsd || openbsd || netbsd || solaris || aix || zos)
// +build go1.18
-// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris aix zos
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
index ca196f2c8..63ceac979 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/pageblob/client.go
@@ -8,7 +8,6 @@ package pageblob
import (
"context"
- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
"io"
"net/http"
"net/url"
@@ -23,6 +22,7 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas"
)
// ClientOptions contains the optional parameters when creating a Client.
@@ -37,8 +37,8 @@ type Client base.CompositeClient[generated.BlobClient, generated.PageBlobClient]
// - options - client options; pass nil to accept the default values
func NewClient(blobURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
audience := base.GetAudience((*base.ClientOptions)(options))
- authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
+ authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
@@ -380,8 +380,8 @@ func (pb *Client) GetAccountInfo(ctx context.Context, o *blob.GetAccountInfoOpti
// SetHTTPHeaders changes a blob's HTTP headers.
// For more information, see https://docs.microsoft.com/rest/api/storageservices/set-blob-properties.
-func (pb *Client) SetHTTPHeaders(ctx context.Context, HTTPHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
- return pb.BlobClient().SetHTTPHeaders(ctx, HTTPHeaders, o)
+func (pb *Client) SetHTTPHeaders(ctx context.Context, httpHeaders blob.HTTPHeaders, o *blob.SetHTTPHeadersOptions) (blob.SetHTTPHeadersResponse, error) {
+ return pb.BlobClient().SetHTTPHeaders(ctx, httpHeaders, o)
}
// SetMetadata changes a blob's metadata.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
index 4c23208e2..20f9875a9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/query_params.go
@@ -8,6 +8,7 @@ package sas
import (
"errors"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"net"
"net/url"
"strings"
@@ -23,7 +24,7 @@ const (
var (
// Version is the default version encoded in the SAS token.
- Version = "2021-12-02"
+ Version = generated.ServiceVersion
)
// TimeFormats ISO 8601 format.
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go
index 45f730847..813fa77a9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/service.go
@@ -255,7 +255,7 @@ func (v BlobSignatureValues) SignWithUserDelegation(userDelegationCredential *Us
signature: signature,
}
- //User delegation SAS specific parameters
+ // User delegation SAS specific parameters
p.signedOID = *udk.SignedOID
p.signedTID = *udk.SignedTID
p.signedStart = *udk.SignedStart
@@ -272,7 +272,7 @@ func getCanonicalName(account string, containerName string, blobName string, dir
// Blob: "/blob/account/containername/blobname"
elements := []string{"/blob/", account, "/", containerName}
if blobName != "" {
- elements = append(elements, "/", strings.Replace(blobName, "\\", "/", -1))
+ elements = append(elements, "/", strings.ReplaceAll(blobName, "\\", "/"))
} else if directoryName != "" {
elements = append(elements, "/", directoryName)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go
index 57fe053f0..758739cb8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas/url_parts.go
@@ -117,7 +117,7 @@ func (up URLParts) String() string {
rawQuery := up.UnparsedParams
- //If no snapshot is initially provided, fill it in from the SAS query properties to help the user
+ // If no snapshot is initially provided, fill it in from the SAS query properties to help the user
if up.Snapshot == "" && !up.SAS.SnapshotTime().IsZero() {
up.Snapshot = up.SAS.SnapshotTime().Format(exported.SnapshotTimeFormat)
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go
index ccf4159c2..cf39c3d57 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service/client.go
@@ -11,9 +11,6 @@ import (
"context"
"errors"
"fmt"
- "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
- "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"net/http"
"strings"
"time"
@@ -21,8 +18,11 @@ import (
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
+ "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/bloberror"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/container"
+ "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/base"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/exported"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/generated"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/internal/shared"
@@ -41,8 +41,8 @@ type Client base.Client[generated.ServiceClient]
// - options - client options; pass nil to accept the default values
func NewClient(serviceURL string, cred azcore.TokenCredential, options *ClientOptions) (*Client, error) {
audience := base.GetAudience((*base.ClientOptions)(options))
- authPolicy := shared.NewStorageChallengePolicy(cred, audience)
conOptions := shared.GetClientOptions(options)
+ authPolicy := shared.NewStorageChallengePolicy(cred, audience, conOptions.InsecureAllowCredentialWithHTTP)
plOpts := runtime.PipelineOptions{PerRetry: []policy.Policy{authPolicy}}
azClient, err := azcore.NewClient(exported.ModuleName, exported.ModuleVersion, plOpts, &conOptions.ClientOptions)
@@ -280,7 +280,6 @@ func (s *Client) GetSASURL(resources sas.AccountResourceTypes, permissions sas.A
st := o.format()
qps, err := sas.AccountSignatureValues{
Version: sas.Version,
- Protocol: sas.ProtocolHTTPS,
Permissions: permissions.String(),
ResourceTypes: resources.String(),
StartTime: st,
@@ -320,7 +319,8 @@ func (s *Client) NewBatchBuilder() (*BatchBuilder, error) {
switch cred := s.credential().(type) {
case *azcore.TokenCredential:
- authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(s.getClientOptions()))
+ conOptions := s.getClientOptions()
+ authPolicy = shared.NewStorageChallengePolicy(*cred, base.GetAudience(conOptions), conOptions.InsecureAllowCredentialWithHTTP)
case *SharedKeyCredential:
authPolicy = exported.NewSharedKeyCredPolicy(cred)
case nil:
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
index f86286051..549d68ab9 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go
@@ -18,6 +18,8 @@ import (
"encoding/pem"
"errors"
"fmt"
+ "os"
+ "strings"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
@@ -63,6 +65,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
type Account = shared.Account
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
// CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file
// must contain the public certificate and the private key. If a PEM block is encrypted and
// password is not an empty string, it attempts to decrypt the PEM blocks using the password.
@@ -303,7 +312,9 @@ func WithInstanceDiscovery(enabled bool) Option {
// If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail.
func WithAzureRegion(val string) Option {
return func(o *clientOptions) {
- o.azureRegion = val
+ if val != "" {
+ o.azureRegion = val
+ }
}
}
@@ -315,16 +326,21 @@ func New(authority, clientID string, cred Credential, options ...Option) (Client
if err != nil {
return Client{}, err
}
-
+ autoEnabledRegion := os.Getenv("MSAL_FORCE_REGION")
opts := clientOptions{
authority: authority,
// if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache
disableInstanceDiscovery: cred.tokenProvider != nil,
httpClient: shared.DefaultClient,
+ azureRegion: autoEnabledRegion,
}
for _, o := range options {
o(&opts)
}
+ if strings.EqualFold(opts.azureRegion, "DisableMsalForceRegion") {
+ opts.azureRegion = ""
+ }
+
baseOpts := []base.Option{
base.WithCacheAccessor(opts.accessor),
base.WithClientCapabilities(opts.capabilities),
@@ -422,6 +438,7 @@ func WithClaims(claims string) interface {
AcquireByAuthCodeOption
AcquireByCredentialOption
AcquireOnBehalfOfOption
+ AcquireByUsernamePasswordOption
AcquireSilentOption
AuthCodeURLOption
options.CallOption
@@ -430,6 +447,7 @@ func WithClaims(claims string) interface {
AcquireByAuthCodeOption
AcquireByCredentialOption
AcquireOnBehalfOfOption
+ AcquireByUsernamePasswordOption
AcquireSilentOption
AuthCodeURLOption
options.CallOption
@@ -443,6 +461,8 @@ func WithClaims(claims string) interface {
t.claims = claims
case *acquireTokenOnBehalfOfOptions:
t.claims = claims
+ case *acquireTokenByUsernamePasswordOptions:
+ t.claims = claims
case *acquireTokenSilentOptions:
t.claims = claims
case *authCodeURLOptions:
@@ -489,6 +509,7 @@ func WithTenantID(tenantID string) interface {
AcquireByAuthCodeOption
AcquireByCredentialOption
AcquireOnBehalfOfOption
+ AcquireByUsernamePasswordOption
AcquireSilentOption
AuthCodeURLOption
options.CallOption
@@ -497,6 +518,7 @@ func WithTenantID(tenantID string) interface {
AcquireByAuthCodeOption
AcquireByCredentialOption
AcquireOnBehalfOfOption
+ AcquireByUsernamePasswordOption
AcquireSilentOption
AuthCodeURLOption
options.CallOption
@@ -510,6 +532,8 @@ func WithTenantID(tenantID string) interface {
t.tenantID = tenantID
case *acquireTokenOnBehalfOfOptions:
t.tenantID = tenantID
+ case *acquireTokenByUsernamePasswordOptions:
+ t.tenantID = tenantID
case *acquireTokenSilentOptions:
t.tenantID = tenantID
case *authCodeURLOptions:
@@ -585,6 +609,46 @@ func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts
return cca.base.AcquireTokenSilent(ctx, silentParameters)
}
+// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword
+type acquireTokenByUsernamePasswordOptions struct {
+ claims, tenantID string
+ authnScheme AuthenticationScheme
+}
+
+// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword
+type AcquireByUsernamePasswordOption interface {
+ acquireByUsernamePasswordOption()
+}
+
+// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication.
+// NOTE: this flow is NOT recommended.
+//
+// Options: [WithClaims], [WithTenantID]
+func (cca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) {
+ o := acquireTokenByUsernamePasswordOptions{}
+ if err := options.ApplyOptions(&o, opts); err != nil {
+ return AuthResult{}, err
+ }
+ authParams, err := cca.base.AuthParams.WithTenant(o.tenantID)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ authParams.Scopes = scopes
+ authParams.AuthorizationType = authority.ATUsernamePassword
+ authParams.Claims = o.claims
+ authParams.Username = username
+ authParams.Password = password
+ if o.authnScheme != nil {
+ authParams.AuthnScheme = o.authnScheme
+ }
+
+ token, err := cca.base.Token.UsernamePassword(ctx, authParams)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return cca.base.AuthResultFromToken(ctx, authParams, token)
+}
+
// acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
type acquireTokenByAuthCodeOptions struct {
challenge, claims, tenantID string
@@ -676,7 +740,7 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string,
if err != nil {
return AuthResult{}, err
}
- return cca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return cca.base.AuthResultFromToken(ctx, authParams, token)
}
// acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
index c9b8dbed0..b5cbb5721 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
@@ -64,11 +64,20 @@ type CallErr struct {
Err error
}
+type InvalidJsonErr struct {
+ Err error
+}
+
// Errors implements error.Error().
func (e CallErr) Error() string {
return e.Err.Error()
}
+// Errors implements error.Error().
+func (e InvalidJsonErr) Error() string {
+ return e.Err.Error()
+}
+
// Verbose prints a versbose error message with the request or response.
func (e CallErr) Verbose() string {
e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
index 09a0d92f5..61c1c4cec 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -5,16 +5,17 @@ package base
import (
"context"
- "errors"
"fmt"
"net/url"
"reflect"
"strings"
"sync"
+ "sync/atomic"
"time"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
@@ -89,14 +90,28 @@ type AuthResult struct {
ExpiresOn time.Time
GrantedScopes []string
DeclinedScopes []string
+ Metadata AuthResultMetadata
}
+// AuthResultMetadata which contains meta data for the AuthResult
+type AuthResultMetadata struct {
+ RefreshOn time.Time
+ TokenSource TokenSource
+}
+
+type TokenSource int
+
+// These are all the types of token flows.
+const (
+ TokenSourceIdentityProvider TokenSource = 0
+ TokenSourceCache TokenSource = 1
+)
+
// AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
if err := storageTokenResponse.AccessToken.Validate(); err != nil {
return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
}
-
account := storageTokenResponse.Account
accessToken := storageTokenResponse.AccessToken.Secret
grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
@@ -109,7 +124,18 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
}
}
- return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
+ return AuthResult{
+ Account: account,
+ IDToken: idToken,
+ AccessToken: accessToken,
+ ExpiresOn: storageTokenResponse.AccessToken.ExpiresOn.T,
+ GrantedScopes: grantedScopes,
+ DeclinedScopes: nil,
+ Metadata: AuthResultMetadata{
+ TokenSource: TokenSourceCache,
+ RefreshOn: storageTokenResponse.AccessToken.RefreshOn.T,
+ },
+ }, nil
}
// NewAuthResult creates an AuthResult.
@@ -121,8 +147,12 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco
Account: account,
IDToken: tokenResponse.IDToken,
AccessToken: tokenResponse.AccessToken,
- ExpiresOn: tokenResponse.ExpiresOn.T,
+ ExpiresOn: tokenResponse.ExpiresOn,
GrantedScopes: tokenResponse.GrantedScopes.Slice,
+ Metadata: AuthResultMetadata{
+ TokenSource: TokenSourceIdentityProvider,
+ RefreshOn: tokenResponse.RefreshOn.T,
+ },
}, nil
}
@@ -137,6 +167,8 @@ type Client struct {
AuthParams authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
cacheAccessor cache.ExportReplace
cacheAccessorMu *sync.RWMutex
+ canRefresh map[string]*atomic.Value
+ canRefreshMu *sync.Mutex
}
// Option is an optional argument to the New constructor.
@@ -213,6 +245,8 @@ func New(clientID string, authorityURI string, token *oauth.Client, options ...O
cacheAccessorMu: &sync.RWMutex{},
manager: storage.New(token),
pmanager: storage.NewPartitionedManager(token),
+ canRefresh: make(map[string]*atomic.Value),
+ canRefreshMu: &sync.Mutex{},
}
for _, o := range options {
if err = o(&client); err != nil {
@@ -317,6 +351,28 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if silent.Claims == "" {
ar, err = AuthResultFromStorage(storageTokenResponse)
if err == nil {
+ if rt := storageTokenResponse.AccessToken.RefreshOn.T; !rt.IsZero() && Now().After(rt) {
+ b.canRefreshMu.Lock()
+ refreshValue, ok := b.canRefresh[tenant]
+ if !ok {
+ refreshValue = &atomic.Value{}
+ refreshValue.Store(false)
+ b.canRefresh[tenant] = refreshValue
+ }
+ b.canRefreshMu.Unlock()
+ if refreshValue.CompareAndSwap(false, true) {
+ defer refreshValue.Store(false)
+ // Added a check to see if the token is still same because there is a chance
+ // that the token is already refreshed by another thread.
+ // If the token is not same, we don't need to refresh it.
+ // Which means it refreshed.
+ if str, err := m.Read(ctx, authParams); err == nil && str.AccessToken.Secret == ar.AccessToken {
+ if tr, er := b.Token.Credential(ctx, authParams, silent.Credential); er == nil {
+ return b.AuthResultFromToken(ctx, authParams, tr)
+ }
+ }
+ }
+ }
ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
return ar, err
}
@@ -334,7 +390,7 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
if err != nil {
return ar, err
}
- return b.AuthResultFromToken(ctx, authParams, token, true)
+ return b.AuthResultFromToken(ctx, authParams, token)
}
func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
@@ -363,7 +419,7 @@ func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams Acqui
return AuthResult{}, err
}
- return b.AuthResultFromToken(ctx, authParams, token, true)
+ return b.AuthResultFromToken(ctx, authParams, token)
}
// AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
@@ -392,15 +448,12 @@ func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams Acq
authParams.UserAssertion = onBehalfOfParams.UserAssertion
token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
if err == nil {
- ar, err = b.AuthResultFromToken(ctx, authParams, token, true)
+ ar, err = b.AuthResultFromToken(ctx, authParams, token)
}
return ar, err
}
-func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
- if !cacheWrite {
- return NewAuthResult(token, shared.Account{})
- }
+func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
var m manager = b.manager
if authParams.AuthorizationType == authority.ATOnBehalfOf {
m = b.pmanager
@@ -430,6 +483,10 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au
return ar, err
}
+// This function wraps time.Now() and is used for refreshing the application
+// was created to test the function against refreshin
+var Now = time.Now
+
func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) {
if b.cacheAccessor != nil {
b.cacheAccessorMu.RLock()
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
deleted file mode 100644
index 2221e60c4..000000000
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go
+++ /dev/null
@@ -1,583 +0,0 @@
-// Copyright (c) Microsoft Corporation.
-// Licensed under the MIT license.
-
-// Package storage holds all cached token information for MSAL. This storage can be
-// augmented with third-party extensions to provide persistent storage. In that case,
-// reads and writes in upper packages will call Marshal() to take the entire in-memory
-// representation and write it to storage and Unmarshal() to update the entire in-memory
-// storage with what was in the persistent storage. The persistent storage can only be
-// accessed in this way because multiple MSAL clients written in multiple languages can
-// access the same storage and must adhere to the same method that was defined
-// previously.
-package storage
-
-import (
- "context"
- "errors"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
- "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
-)
-
-// aadInstanceDiscoveryer allows faking in tests.
-// It is implemented in production by ops/authority.Client
-type aadInstanceDiscoveryer interface {
- AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error)
-}
-
-// TokenResponse mimics a token response that was pulled from the cache.
-type TokenResponse struct {
- RefreshToken accesstokens.RefreshToken
- IDToken IDToken // *Credential
- AccessToken AccessToken
- Account shared.Account
-}
-
-// Manager is an in-memory cache of access tokens, accounts and meta data. This data is
-// updated on read/write calls. Unmarshal() replaces all data stored here with whatever
-// was given to it on each call.
-type Manager struct {
- contract *Contract
- contractMu sync.RWMutex
- requests aadInstanceDiscoveryer // *oauth.Token
-
- aadCacheMu sync.RWMutex
- aadCache map[string]authority.InstanceDiscoveryMetadata
-}
-
-// New is the constructor for Manager.
-func New(requests *oauth.Client) *Manager {
- m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
- m.contract = NewContract()
- return m
-}
-
-func checkAlias(alias string, aliases []string) bool {
- for _, v := range aliases {
- if alias == v {
- return true
- }
- }
- return false
-}
-
-func isMatchingScopes(scopesOne []string, scopesTwo string) bool {
- newScopesTwo := strings.Split(scopesTwo, scopeSeparator)
- scopeCounter := 0
- for _, scope := range scopesOne {
- for _, otherScope := range newScopesTwo {
- if strings.EqualFold(scope, otherScope) {
- scopeCounter++
- continue
- }
- }
- }
- return scopeCounter == len(scopesOne)
-}
-
-// needsUpgrade returns true if the given key follows the v1.0 schema i.e.,
-// it contains an uppercase character (v1.1+ keys are all lowercase)
-func needsUpgrade(key string) bool {
- for _, r := range key {
- if 'A' <= r && r <= 'Z' {
- return true
- }
- }
- return false
-}
-
-// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting
-// the v1.0 item. Callers must hold an exclusive lock on m.
-func upgrade[T any](m map[string]T, k string) T {
- v1_1Key := strings.ToLower(k)
- v, ok := m[k]
- if !ok {
- // another goroutine did the upgrade while this one was waiting for the write lock
- return m[v1_1Key]
- }
- if v2, ok := m[v1_1Key]; ok {
- // cache has an equivalent v1.1+ item, which we prefer because we know it was added
- // by a newer version of the module and is therefore more likely to remain valid.
- // The v1.0 item may have expired because only v1.0 or earlier would update it.
- v = v2
- } else {
- // add an equivalent item according to the v1.1 schema
- m[v1_1Key] = v
- }
- delete(m, k)
- return v
-}
-
-// Read reads a storage token from the cache if it exists.
-func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
- tr := TokenResponse{}
- homeAccountID := authParameters.HomeAccountID
- realm := authParameters.AuthorityInfo.Tenant
- clientID := authParameters.ClientID
- scopes := authParameters.Scopes
- authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
- tokenType := authParameters.AuthnScheme.AccessTokenType()
-
- // fetch metadata if instanceDiscovery is enabled
- aliases := []string{authParameters.AuthorityInfo.Host}
- if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled {
- metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo)
- if err != nil {
- return TokenResponse{}, err
- }
- aliases = metadata.Aliases
- }
-
- accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID)
- tr.AccessToken = accessToken
-
- if homeAccountID == "" {
- // caller didn't specify a user, so there's no reason to search for an ID or refresh token
- return tr, nil
- }
- // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating
- // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token.
- idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID)
- if err == nil {
- tr.IDToken = idToken
- }
-
- if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil {
- // we need the family ID to identify the correct refresh token, if any
- familyID := appMetadata.FamilyID
- refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID)
- if err == nil {
- tr.RefreshToken = refreshToken
- }
- }
-
- account, err := m.readAccount(homeAccountID, aliases, realm)
- if err == nil {
- tr.Account = account
- }
- return tr, nil
-}
-
-const scopeSeparator = " "
-
-// Write writes a token response to the cache and returns the account information the token is stored with.
-func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) {
- homeAccountID := tokenResponse.HomeAccountID()
- environment := authParameters.AuthorityInfo.Host
- realm := authParameters.AuthorityInfo.Tenant
- clientID := authParameters.ClientID
- target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
- cachedAt := time.Now()
- authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
-
- var account shared.Account
-
- if len(tokenResponse.RefreshToken) > 0 {
- refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID)
- if err := m.writeRefreshToken(refreshToken); err != nil {
- return account, err
- }
- }
-
- if len(tokenResponse.AccessToken) > 0 {
- accessToken := NewAccessToken(
- homeAccountID,
- environment,
- realm,
- clientID,
- cachedAt,
- tokenResponse.ExpiresOn.T,
- tokenResponse.ExtExpiresOn.T,
- target,
- tokenResponse.AccessToken,
- tokenResponse.TokenType,
- authnSchemeKeyID,
- )
-
- // Since we have a valid access token, cache it before moving on.
- if err := accessToken.Validate(); err == nil {
- if err := m.writeAccessToken(accessToken); err != nil {
- return account, err
- }
- }
- }
-
- idTokenJwt := tokenResponse.IDToken
- if !idTokenJwt.IsZero() {
- idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken)
- if err := m.writeIDToken(idToken); err != nil {
- return shared.Account{}, err
- }
-
- localAccountID := idTokenJwt.LocalAccountID()
- authorityType := authParameters.AuthorityInfo.AuthorityType
-
- preferredUsername := idTokenJwt.UPN
- if idTokenJwt.PreferredUsername != "" {
- preferredUsername = idTokenJwt.PreferredUsername
- }
-
- account = shared.NewAccount(
- homeAccountID,
- environment,
- realm,
- localAccountID,
- authorityType,
- preferredUsername,
- )
- if err := m.writeAccount(account); err != nil {
- return shared.Account{}, err
- }
- }
-
- AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment)
-
- if err := m.writeAppMetaData(AppMetaData); err != nil {
- return shared.Account{}, err
- }
- return account, nil
-}
-
-func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- md, err := m.aadMetadataFromCache(ctx, authorityInfo)
- if err != nil {
- // not in the cache, retrieve it
- md, err = m.aadMetadata(ctx, authorityInfo)
- }
- return md, err
-}
-
-func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- m.aadCacheMu.RLock()
- defer m.aadCacheMu.RUnlock()
- metadata, ok := m.aadCache[authorityInfo.Host]
- if ok {
- return metadata, nil
- }
- return metadata, errors.New("not found")
-}
-
-func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
- m.aadCacheMu.Lock()
- defer m.aadCacheMu.Unlock()
- discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
- if err != nil {
- return authority.InstanceDiscoveryMetadata{}, err
- }
-
- for _, metadataEntry := range discoveryResponse.Metadata {
- for _, aliasedAuthority := range metadataEntry.Aliases {
- m.aadCache[aliasedAuthority] = metadataEntry
- }
- }
- if _, ok := m.aadCache[authorityInfo.Host]; !ok {
- m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{
- PreferredNetwork: authorityInfo.Host,
- PreferredCache: authorityInfo.Host,
- }
- }
- return m.aadCache[authorityInfo.Host], nil
-}
-
-func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken {
- m.contractMu.RLock()
- // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
- // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
- // an issue, however if it does become a problem then we know where to look.
- for k, at := range m.contract.AccessTokens {
- if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID {
- if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) {
- if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- at = upgrade(m.contract.AccessTokens, k)
- }
- return at
- }
- }
- }
- }
- m.contractMu.RUnlock()
- return AccessToken{}
-}
-
-func (m *Manager) writeAccessToken(accessToken AccessToken) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- key := accessToken.Key()
- m.contract.AccessTokens[key] = accessToken
- return nil
-}
-
-func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) {
- byFamily := func(rt accesstokens.RefreshToken) bool {
- return matchFamilyRefreshToken(rt, homeID, envAliases)
- }
- byClient := func(rt accesstokens.RefreshToken) bool {
- return matchClientIDRefreshToken(rt, homeID, envAliases, clientID)
- }
-
- var matchers []func(rt accesstokens.RefreshToken) bool
- if familyID == "" {
- matchers = []func(rt accesstokens.RefreshToken) bool{
- byClient, byFamily,
- }
- } else {
- matchers = []func(rt accesstokens.RefreshToken) bool{
- byFamily, byClient,
- }
- }
-
- // TODO(keegan): All the tests here pass, but Bogdan says this is
- // more complicated. I'm opening an issue for this to have him
- // review the tests and suggest tests that would break this so
- // we can re-write against good tests. His comments as follow:
- // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
- // The algorithm is:
- // If application is NOT part of the family, search by client_ID
- // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
- // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
- m.contractMu.RLock()
- for _, matcher := range matchers {
- for k, rt := range m.contract.RefreshTokens {
- if matcher(rt) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- rt = upgrade(m.contract.RefreshTokens, k)
- }
- return rt, nil
- }
- }
- }
-
- m.contractMu.RUnlock()
- return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
-}
-
-func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool {
- return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != ""
-}
-
-func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool {
- return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID
-}
-
-func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error {
- key := refreshToken.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.RefreshTokens[key] = refreshToken
- return nil
-}
-
-func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) {
- m.contractMu.RLock()
- for k, idt := range m.contract.IDTokens {
- if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID {
- if checkAlias(idt.Environment, envAliases) {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- idt = upgrade(m.contract.IDTokens, k)
- }
- return idt, nil
- }
- }
- }
- m.contractMu.RUnlock()
- return IDToken{}, fmt.Errorf("token not found")
-}
-
-func (m *Manager) writeIDToken(idToken IDToken) error {
- key := idToken.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.IDTokens[key] = idToken
- return nil
-}
-
-func (m *Manager) AllAccounts() []shared.Account {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- var accounts []shared.Account
- for _, v := range m.contract.Accounts {
- accounts = append(accounts, v)
- }
-
- return accounts
-}
-
-func (m *Manager) Account(homeAccountID string) shared.Account {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
-
- for _, v := range m.contract.Accounts {
- if v.HomeAccountID == homeAccountID {
- return v
- }
- }
-
- return shared.Account{}
-}
-
-func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) {
- m.contractMu.RLock()
-
- // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
- // We only use a map because the storage contract shared between all language implementations says use a map.
- // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing
- // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup
- // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
- // is really low (say 2). Each hash is more expensive than the entire iteration.
- for k, acc := range m.contract.Accounts {
- if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- acc = upgrade(m.contract.Accounts, k)
- }
- return acc, nil
- }
- }
- m.contractMu.RUnlock()
- return shared.Account{}, fmt.Errorf("account not found")
-}
-
-func (m *Manager) writeAccount(account shared.Account) error {
- key := account.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.Accounts[key] = account
- return nil
-}
-
-func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
- m.contractMu.RLock()
- for k, app := range m.contract.AppMetaData {
- if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
- m.contractMu.RUnlock()
- if needsUpgrade(k) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- app = upgrade(m.contract.AppMetaData, k)
- }
- return app, nil
- }
- }
- m.contractMu.RUnlock()
- return AppMetaData{}, fmt.Errorf("not found")
-}
-
-func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error {
- key := AppMetaData.Key()
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract.AppMetaData[key] = AppMetaData
- return nil
-}
-
-// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account.
-func (m *Manager) RemoveAccount(account shared.Account, clientID string) {
- m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID)
- m.removeAccessTokens(account.HomeAccountID, account.Environment)
- m.removeIDTokens(account.HomeAccountID, account.Environment)
- m.removeAccounts(account.HomeAccountID, account.Environment)
-}
-
-func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, rt := range m.contract.RefreshTokens {
- // Check for RTs associated with the account.
- if rt.HomeAccountID == homeID && rt.Environment == env {
- // Do RT's app ownership check as a precaution, in case family apps
- // and 3rd-party apps share same token cache, although they should not.
- if rt.ClientID == clientID || rt.FamilyID != "" {
- delete(m.contract.RefreshTokens, key)
- }
- }
- }
-}
-
-func (m *Manager) removeAccessTokens(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, at := range m.contract.AccessTokens {
- // Remove AT's associated with the account
- if at.HomeAccountID == homeID && at.Environment == env {
- // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check.
- // It means ATs for other apps will also be removed, it is OK because:
- // non-family apps are not supposed to share token cache to begin with;
- // Even if it happens, we keep other app's RT already, so SSO still works.
- delete(m.contract.AccessTokens, key)
- }
- }
-}
-
-func (m *Manager) removeIDTokens(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, idt := range m.contract.IDTokens {
- // Remove ID tokens associated with the account.
- if idt.HomeAccountID == homeID && idt.Environment == env {
- delete(m.contract.IDTokens, key)
- }
- }
-}
-
-func (m *Manager) removeAccounts(homeID string, env string) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- for key, acc := range m.contract.Accounts {
- // Remove the specified account.
- if acc.HomeAccountID == homeID && acc.Environment == env {
- delete(m.contract.Accounts, key)
- }
- }
-}
-
-// update updates the internal cache object. This is for use in tests, other uses are not
-// supported.
-func (m *Manager) update(cache *Contract) {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
- m.contract = cache
-}
-
-// Marshal implements cache.Marshaler.
-func (m *Manager) Marshal() ([]byte, error) {
- m.contractMu.RLock()
- defer m.contractMu.RUnlock()
- return json.Marshal(m.contract)
-}
-
-// Unmarshal implements cache.Unmarshaler.
-func (m *Manager) Unmarshal(b []byte) error {
- m.contractMu.Lock()
- defer m.contractMu.Unlock()
-
- contract := NewContract()
-
- err := json.Unmarshal(b, contract)
- if err != nil {
- return err
- }
-
- m.contract = contract
-
- return nil
-}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
similarity index 95%
rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
index f9be90276..7379e2233 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
@@ -72,6 +72,7 @@ type AccessToken struct {
ClientID string `json:"client_id,omitempty"`
Secret string `json:"secret,omitempty"`
Scopes string `json:"target,omitempty"`
+ RefreshOn internalTime.Unix `json:"refresh_on,omitempty"`
ExpiresOn internalTime.Unix `json:"expires_on,omitempty"`
ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
CachedAt internalTime.Unix `json:"cached_at,omitempty"`
@@ -83,7 +84,7 @@ type AccessToken struct {
}
// NewAccessToken is the constructor for AccessToken.
-func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
+func NewAccessToken(homeID, env, realm, clientID string, cachedAt, refreshOn, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
return AccessToken{
HomeAccountID: homeID,
Environment: env,
@@ -93,6 +94,7 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
Secret: token,
Scopes: scopes,
CachedAt: internalTime.Unix{T: cachedAt.UTC()},
+ RefreshOn: internalTime.Unix{T: refreshOn.UTC()},
ExpiresOn: internalTime.Unix{T: expiresOn.UTC()},
ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
TokenType: tokenType,
@@ -102,8 +104,9 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
// Key outputs the key that can be used to uniquely look up this entry in a map.
func (a AccessToken) Key() string {
+ ks := []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}
key := strings.Join(
- []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes},
+ ks,
shared.CacheKeySeparator,
)
// add token type to key for new access tokens types. skip for bearer token type to
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
similarity index 99%
rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
index c09318330..ff07d4b5a 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go
@@ -114,7 +114,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes
realm,
clientID,
cachedAt,
- tokenResponse.ExpiresOn.T,
+ tokenResponse.RefreshOn.T,
+ tokenResponse.ExpiresOn,
tokenResponse.ExtExpiresOn.T,
target,
tokenResponse.AccessToken,
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go
new file mode 100644
index 000000000..84a234967
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go
@@ -0,0 +1,589 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+// Package storage holds all cached token information for MSAL. This storage can be
+// augmented with third-party extensions to provide persistent storage. In that case,
+// reads and writes in upper packages will call Marshal() to take the entire in-memory
+// representation and write it to storage and Unmarshal() to update the entire in-memory
+// storage with what was in the persistent storage. The persistent storage can only be
+// accessed in this way because multiple MSAL clients written in multiple languages can
+// access the same storage and must adhere to the same method that was defined
+// previously.
+package storage
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// aadInstanceDiscoveryer allows faking in tests.
+// It is implemented in production by ops/authority.Client
+type aadInstanceDiscoveryer interface {
+ AADInstanceDiscovery(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryResponse, error)
+}
+
+// TokenResponse mimics a token response that was pulled from the cache.
+type TokenResponse struct {
+ RefreshToken accesstokens.RefreshToken
+ IDToken IDToken // *Credential
+ AccessToken AccessToken
+ Account shared.Account
+}
+
+// Manager is an in-memory cache of access tokens, accounts and meta data. This data is
+// updated on read/write calls. Unmarshal() replaces all data stored here with whatever
+// was given to it on each call.
+type Manager struct {
+ contract *Contract
+ contractMu sync.RWMutex
+ requests aadInstanceDiscoveryer // *oauth.Token
+
+ aadCacheMu sync.RWMutex
+ aadCache map[string]authority.InstanceDiscoveryMetadata
+}
+
+// New is the constructor for Manager.
+func New(requests *oauth.Client) *Manager {
+ m := &Manager{requests: requests, aadCache: make(map[string]authority.InstanceDiscoveryMetadata)}
+ m.contract = NewContract()
+ return m
+}
+
+func checkAlias(alias string, aliases []string) bool {
+ for _, v := range aliases {
+ if alias == v {
+ return true
+ }
+ }
+ return false
+}
+
+func isMatchingScopes(scopesOne []string, scopesTwo string) bool {
+ newScopesTwo := strings.Split(scopesTwo, scopeSeparator)
+ scopeCounter := 0
+ for _, scope := range scopesOne {
+ for _, otherScope := range newScopesTwo {
+ if strings.EqualFold(scope, otherScope) {
+ scopeCounter++
+ continue
+ }
+ }
+ }
+ return scopeCounter == len(scopesOne)
+}
+
+// needsUpgrade returns true if the given key follows the v1.0 schema i.e.,
+// it contains an uppercase character (v1.1+ keys are all lowercase)
+func needsUpgrade(key string) bool {
+ for _, r := range key {
+ if 'A' <= r && r <= 'Z' {
+ return true
+ }
+ }
+ return false
+}
+
+// upgrade a v1.0 cache item by adding a v1.1+ item having the same value and deleting
+// the v1.0 item. Callers must hold an exclusive lock on m.
+func upgrade[T any](m map[string]T, k string) T {
+ v1_1Key := strings.ToLower(k)
+ v, ok := m[k]
+ if !ok {
+ // another goroutine did the upgrade while this one was waiting for the write lock
+ return m[v1_1Key]
+ }
+ if v2, ok := m[v1_1Key]; ok {
+ // cache has an equivalent v1.1+ item, which we prefer because we know it was added
+ // by a newer version of the module and is therefore more likely to remain valid.
+ // The v1.0 item may have expired because only v1.0 or earlier would update it.
+ v = v2
+ } else {
+ // add an equivalent item according to the v1.1 schema
+ m[v1_1Key] = v
+ }
+ delete(m, k)
+ return v
+}
+
+// Read reads a storage token from the cache if it exists.
+func (m *Manager) Read(ctx context.Context, authParameters authority.AuthParams) (TokenResponse, error) {
+ tr := TokenResponse{}
+ homeAccountID := authParameters.HomeAccountID
+ realm := authParameters.AuthorityInfo.Tenant
+ clientID := authParameters.ClientID
+ scopes := authParameters.Scopes
+ authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
+ tokenType := authParameters.AuthnScheme.AccessTokenType()
+
+ // fetch metadata if instanceDiscovery is enabled
+ aliases := []string{authParameters.AuthorityInfo.Host}
+ if !authParameters.AuthorityInfo.InstanceDiscoveryDisabled {
+ metadata, err := m.getMetadataEntry(ctx, authParameters.AuthorityInfo)
+ if err != nil {
+ return TokenResponse{}, err
+ }
+ aliases = metadata.Aliases
+ }
+
+ accessToken := m.readAccessToken(homeAccountID, aliases, realm, clientID, scopes, tokenType, authnSchemeKeyID)
+ tr.AccessToken = accessToken
+
+ if homeAccountID == "" {
+ // caller didn't specify a user, so there's no reason to search for an ID or refresh token
+ return tr, nil
+ }
+ // errors returned by read* methods indicate a cache miss and are therefore non-fatal. We continue populating
+ // TokenResponse fields so that e.g. lack of an ID token doesn't prevent the caller from receiving a refresh token.
+ idToken, err := m.readIDToken(homeAccountID, aliases, realm, clientID)
+ if err == nil {
+ tr.IDToken = idToken
+ }
+
+ if appMetadata, err := m.readAppMetaData(aliases, clientID); err == nil {
+ // we need the family ID to identify the correct refresh token, if any
+ familyID := appMetadata.FamilyID
+ refreshToken, err := m.readRefreshToken(homeAccountID, aliases, familyID, clientID)
+ if err == nil {
+ tr.RefreshToken = refreshToken
+ }
+ }
+
+ account, err := m.readAccount(homeAccountID, aliases, realm)
+ if err == nil {
+ tr.Account = account
+ }
+ return tr, nil
+}
+
+const scopeSeparator = " "
+
+// Write writes a token response to the cache and returns the account information the token is stored with.
+func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse accesstokens.TokenResponse) (shared.Account, error) {
+ homeAccountID := tokenResponse.HomeAccountID()
+ environment := authParameters.AuthorityInfo.Host
+ realm := authParameters.AuthorityInfo.Tenant
+ clientID := authParameters.ClientID
+
+ target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator)
+ cachedAt := time.Now()
+ authnSchemeKeyID := authParameters.AuthnScheme.KeyID()
+
+ var account shared.Account
+
+ if len(tokenResponse.RefreshToken) > 0 {
+ refreshToken := accesstokens.NewRefreshToken(homeAccountID, environment, clientID, tokenResponse.RefreshToken, tokenResponse.FamilyID)
+ if err := m.writeRefreshToken(refreshToken); err != nil {
+ return account, err
+ }
+ }
+
+ if len(tokenResponse.AccessToken) > 0 {
+ accessToken := NewAccessToken(
+ homeAccountID,
+ environment,
+ realm,
+ clientID,
+ cachedAt,
+ tokenResponse.RefreshOn.T,
+ tokenResponse.ExpiresOn,
+ tokenResponse.ExtExpiresOn.T,
+ target,
+ tokenResponse.AccessToken,
+ tokenResponse.TokenType,
+ authnSchemeKeyID,
+ )
+
+ // Since we have a valid access token, cache it before moving on.
+ if err := accessToken.Validate(); err == nil {
+ if err := m.writeAccessToken(accessToken); err != nil {
+ return account, err
+ }
+ }
+ }
+
+ idTokenJwt := tokenResponse.IDToken
+ if !idTokenJwt.IsZero() {
+ idToken := NewIDToken(homeAccountID, environment, realm, clientID, idTokenJwt.RawToken)
+ if err := m.writeIDToken(idToken); err != nil {
+ return shared.Account{}, err
+ }
+
+ localAccountID := idTokenJwt.LocalAccountID()
+ authorityType := authParameters.AuthorityInfo.AuthorityType
+
+ preferredUsername := idTokenJwt.UPN
+ if idTokenJwt.PreferredUsername != "" {
+ preferredUsername = idTokenJwt.PreferredUsername
+ }
+
+ account = shared.NewAccount(
+ homeAccountID,
+ environment,
+ realm,
+ localAccountID,
+ authorityType,
+ preferredUsername,
+ )
+ if err := m.writeAccount(account); err != nil {
+ return shared.Account{}, err
+ }
+ }
+
+ AppMetaData := NewAppMetaData(tokenResponse.FamilyID, clientID, environment)
+
+ if err := m.writeAppMetaData(AppMetaData); err != nil {
+ return shared.Account{}, err
+ }
+ return account, nil
+}
+
+func (m *Manager) getMetadataEntry(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+ md, err := m.aadMetadataFromCache(ctx, authorityInfo)
+ if err != nil {
+ // not in the cache, retrieve it
+ md, err = m.aadMetadata(ctx, authorityInfo)
+ }
+ return md, err
+}
+
+func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+ m.aadCacheMu.RLock()
+ defer m.aadCacheMu.RUnlock()
+ metadata, ok := m.aadCache[authorityInfo.Host]
+ if ok {
+ return metadata, nil
+ }
+ return metadata, errors.New("not found")
+}
+
+func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) {
+ if m.requests == nil {
+ return authority.InstanceDiscoveryMetadata{}, fmt.Errorf("httpclient in oauth instance for fetching metadata is nil")
+ }
+ m.aadCacheMu.Lock()
+ defer m.aadCacheMu.Unlock()
+ discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo)
+ if err != nil {
+ return authority.InstanceDiscoveryMetadata{}, err
+ }
+
+ for _, metadataEntry := range discoveryResponse.Metadata {
+ for _, aliasedAuthority := range metadataEntry.Aliases {
+ m.aadCache[aliasedAuthority] = metadataEntry
+ }
+ }
+ if _, ok := m.aadCache[authorityInfo.Host]; !ok {
+ m.aadCache[authorityInfo.Host] = authority.InstanceDiscoveryMetadata{
+ PreferredNetwork: authorityInfo.Host,
+ PreferredCache: authorityInfo.Host,
+ }
+ }
+ return m.aadCache[authorityInfo.Host], nil
+}
+
+func (m *Manager) readAccessToken(homeID string, envAliases []string, realm, clientID string, scopes []string, tokenType, authnSchemeKeyID string) AccessToken {
+ m.contractMu.RLock()
+ // TODO: linear search (over a map no less) is slow for a large number (thousands) of tokens.
+ // this shows up as the dominating node in a profile. for real-world scenarios this likely isn't
+ // an issue, however if it does become a problem then we know where to look.
+ for k, at := range m.contract.AccessTokens {
+ if at.HomeAccountID == homeID && at.Realm == realm && at.ClientID == clientID {
+ if (strings.EqualFold(at.TokenType, tokenType) && at.AuthnSchemeKeyID == authnSchemeKeyID) || (at.TokenType == "" && (tokenType == "" || tokenType == "Bearer")) {
+ if checkAlias(at.Environment, envAliases) && isMatchingScopes(scopes, at.Scopes) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ at = upgrade(m.contract.AccessTokens, k)
+ }
+ return at
+ }
+ }
+ }
+ }
+ m.contractMu.RUnlock()
+ return AccessToken{}
+}
+
+func (m *Manager) writeAccessToken(accessToken AccessToken) error {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ key := accessToken.Key()
+ m.contract.AccessTokens[key] = accessToken
+ return nil
+}
+
+func (m *Manager) readRefreshToken(homeID string, envAliases []string, familyID, clientID string) (accesstokens.RefreshToken, error) {
+ byFamily := func(rt accesstokens.RefreshToken) bool {
+ return matchFamilyRefreshToken(rt, homeID, envAliases)
+ }
+ byClient := func(rt accesstokens.RefreshToken) bool {
+ return matchClientIDRefreshToken(rt, homeID, envAliases, clientID)
+ }
+
+ var matchers []func(rt accesstokens.RefreshToken) bool
+ if familyID == "" {
+ matchers = []func(rt accesstokens.RefreshToken) bool{
+ byClient, byFamily,
+ }
+ } else {
+ matchers = []func(rt accesstokens.RefreshToken) bool{
+ byFamily, byClient,
+ }
+ }
+
+ // TODO(keegan): All the tests here pass, but Bogdan says this is
+ // more complicated. I'm opening an issue for this to have him
+ // review the tests and suggest tests that would break this so
+ // we can re-write against good tests. His comments as follow:
+ // The algorithm is a bit more complex than this, I assume there are some tests covering everything. I would keep the order as is.
+ // The algorithm is:
+ // If application is NOT part of the family, search by client_ID
+ // If app is part of the family or if we DO NOT KNOW if it's part of the family, search by family ID, then by client_id (we will know if an app is part of the family after the first token response).
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/311fe8b16e7c293462806f397e189a6aa1159769/src/client/Microsoft.Identity.Client/Internal/Requests/Silent/CacheSilentStrategy.cs#L95
+ m.contractMu.RLock()
+ for _, matcher := range matchers {
+ for k, rt := range m.contract.RefreshTokens {
+ if matcher(rt) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ rt = upgrade(m.contract.RefreshTokens, k)
+ }
+ return rt, nil
+ }
+ }
+ }
+
+ m.contractMu.RUnlock()
+ return accesstokens.RefreshToken{}, fmt.Errorf("refresh token not found")
+}
+
+func matchFamilyRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string) bool {
+ return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.FamilyID != ""
+}
+
+func matchClientIDRefreshToken(rt accesstokens.RefreshToken, homeID string, envAliases []string, clientID string) bool {
+ return rt.HomeAccountID == homeID && checkAlias(rt.Environment, envAliases) && rt.ClientID == clientID
+}
+
+func (m *Manager) writeRefreshToken(refreshToken accesstokens.RefreshToken) error {
+ key := refreshToken.Key()
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract.RefreshTokens[key] = refreshToken
+ return nil
+}
+
+func (m *Manager) readIDToken(homeID string, envAliases []string, realm, clientID string) (IDToken, error) {
+ m.contractMu.RLock()
+ for k, idt := range m.contract.IDTokens {
+ if idt.HomeAccountID == homeID && idt.Realm == realm && idt.ClientID == clientID {
+ if checkAlias(idt.Environment, envAliases) {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ idt = upgrade(m.contract.IDTokens, k)
+ }
+ return idt, nil
+ }
+ }
+ }
+ m.contractMu.RUnlock()
+ return IDToken{}, fmt.Errorf("token not found")
+}
+
+func (m *Manager) writeIDToken(idToken IDToken) error {
+ key := idToken.Key()
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract.IDTokens[key] = idToken
+ return nil
+}
+
+func (m *Manager) AllAccounts() []shared.Account {
+ m.contractMu.RLock()
+ defer m.contractMu.RUnlock()
+
+ var accounts []shared.Account
+ for _, v := range m.contract.Accounts {
+ accounts = append(accounts, v)
+ }
+
+ return accounts
+}
+
+func (m *Manager) Account(homeAccountID string) shared.Account {
+ m.contractMu.RLock()
+ defer m.contractMu.RUnlock()
+
+ for _, v := range m.contract.Accounts {
+ if v.HomeAccountID == homeAccountID {
+ return v
+ }
+ }
+
+ return shared.Account{}
+}
+
+func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm string) (shared.Account, error) {
+ m.contractMu.RLock()
+
+ // You might ask why, if cache.Accounts is a map, we would loop through all of these instead of using a key.
+ // We only use a map because the storage contract shared between all language implementations says use a map.
+ // We can't change that. The other is because the keys are made using a specific "env", but here we are allowing
+ // a match in multiple envs (envAlias). That means we either need to hash each possible keyand do the lookup
+ // or just statically check. Since the design is to have a storage.Manager per user, the amount of keys stored
+ // is really low (say 2). Each hash is more expensive than the entire iteration.
+ for k, acc := range m.contract.Accounts {
+ if acc.HomeAccountID == homeAccountID && checkAlias(acc.Environment, envAliases) && acc.Realm == realm {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ acc = upgrade(m.contract.Accounts, k)
+ }
+ return acc, nil
+ }
+ }
+ m.contractMu.RUnlock()
+ return shared.Account{}, fmt.Errorf("account not found")
+}
+
+func (m *Manager) writeAccount(account shared.Account) error {
+ key := account.Key()
+
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract.Accounts[key] = account
+ return nil
+}
+
+func (m *Manager) readAppMetaData(envAliases []string, clientID string) (AppMetaData, error) {
+ m.contractMu.RLock()
+ for k, app := range m.contract.AppMetaData {
+ if checkAlias(app.Environment, envAliases) && app.ClientID == clientID {
+ m.contractMu.RUnlock()
+ if needsUpgrade(k) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ app = upgrade(m.contract.AppMetaData, k)
+ }
+ return app, nil
+ }
+ }
+ m.contractMu.RUnlock()
+ return AppMetaData{}, fmt.Errorf("not found")
+}
+
+func (m *Manager) writeAppMetaData(AppMetaData AppMetaData) error {
+ key := AppMetaData.Key()
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract.AppMetaData[key] = AppMetaData
+ return nil
+}
+
+// RemoveAccount removes all the associated ATs, RTs and IDTs from the cache associated with this account.
+func (m *Manager) RemoveAccount(account shared.Account, clientID string) {
+ m.removeRefreshTokens(account.HomeAccountID, account.Environment, clientID)
+ m.removeAccessTokens(account.HomeAccountID, account.Environment)
+ m.removeIDTokens(account.HomeAccountID, account.Environment)
+ m.removeAccounts(account.HomeAccountID, account.Environment)
+}
+
+func (m *Manager) removeRefreshTokens(homeID string, env string, clientID string) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ for key, rt := range m.contract.RefreshTokens {
+ // Check for RTs associated with the account.
+ if rt.HomeAccountID == homeID && rt.Environment == env {
+ // Do RT's app ownership check as a precaution, in case family apps
+ // and 3rd-party apps share same token cache, although they should not.
+ if rt.ClientID == clientID || rt.FamilyID != "" {
+ delete(m.contract.RefreshTokens, key)
+ }
+ }
+ }
+}
+
+func (m *Manager) removeAccessTokens(homeID string, env string) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ for key, at := range m.contract.AccessTokens {
+ // Remove AT's associated with the account
+ if at.HomeAccountID == homeID && at.Environment == env {
+ // # To avoid the complexity of locating sibling family app's AT, we skip AT's app ownership check.
+ // It means ATs for other apps will also be removed, it is OK because:
+ // non-family apps are not supposed to share token cache to begin with;
+ // Even if it happens, we keep other app's RT already, so SSO still works.
+ delete(m.contract.AccessTokens, key)
+ }
+ }
+}
+
+func (m *Manager) removeIDTokens(homeID string, env string) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ for key, idt := range m.contract.IDTokens {
+ // Remove ID tokens associated with the account.
+ if idt.HomeAccountID == homeID && idt.Environment == env {
+ delete(m.contract.IDTokens, key)
+ }
+ }
+}
+
+func (m *Manager) removeAccounts(homeID string, env string) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ for key, acc := range m.contract.Accounts {
+ // Remove the specified account.
+ if acc.HomeAccountID == homeID && acc.Environment == env {
+ delete(m.contract.Accounts, key)
+ }
+ }
+}
+
+// update updates the internal cache object. This is for use in tests, other uses are not
+// supported.
+func (m *Manager) update(cache *Contract) {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+ m.contract = cache
+}
+
+// Marshal implements cache.Marshaler.
+func (m *Manager) Marshal() ([]byte, error) {
+ m.contractMu.RLock()
+ defer m.contractMu.RUnlock()
+ return json.Marshal(m.contract)
+}
+
+// Unmarshal implements cache.Unmarshaler.
+func (m *Manager) Unmarshal(b []byte) error {
+ m.contractMu.Lock()
+ defer m.contractMu.Unlock()
+
+ contract := NewContract()
+
+ err := json.Unmarshal(b, contract)
+ if err != nil {
+ return err
+ }
+
+ m.contract = contract
+
+ return nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
index 7b673e3fe..de1bf381f 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go
@@ -31,4 +31,6 @@ type TokenProviderResult struct {
AccessToken string
// ExpiresInSeconds is the lifetime of the token in seconds
ExpiresInSeconds int
+ // RefreshInSeconds indicates the suggested time to refresh the token, if any
+ RefreshInSeconds int
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
index 2238521f5..2134e57c9 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go
@@ -18,10 +18,6 @@ import (
)
const addField = "AdditionalFields"
-const (
- marshalJSON = "MarshalJSON"
- unmarshalJSON = "UnmarshalJSON"
-)
var (
leftBrace = []byte("{")[0]
@@ -106,48 +102,38 @@ func delimIs(got json.Token, want rune) bool {
// hasMarshalJSON will determine if the value or a pointer to this value has
// the MarshalJSON method.
func hasMarshalJSON(v reflect.Value) bool {
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Marshaler)
- return ok
- }
-
- if v.Kind() == reflect.Ptr {
- v = v.Elem()
- } else {
- if !v.CanAddr() {
- return false
+ ok := false
+ if _, ok = v.Interface().(json.Marshaler); !ok {
+ var i any
+ if v.Kind() == reflect.Ptr {
+ i = v.Elem().Interface()
+ } else if v.CanAddr() {
+ i = v.Addr().Interface()
}
- v = v.Addr()
- }
-
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Marshaler)
- return ok
+ _, ok = i.(json.Marshaler)
}
- return false
+ return ok
}
// callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value.
// This will panic if the method is not defined.
func callMarshalJSON(v reflect.Value) ([]byte, error) {
- if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid {
- marsh := v.Interface().(json.Marshaler)
+ if marsh, ok := v.Interface().(json.Marshaler); ok {
return marsh.MarshalJSON()
}
if v.Kind() == reflect.Ptr {
- v = v.Elem()
+ if marsh, ok := v.Elem().Interface().(json.Marshaler); ok {
+ return marsh.MarshalJSON()
+ }
} else {
if v.CanAddr() {
- v = v.Addr()
+ if marsh, ok := v.Addr().Interface().(json.Marshaler); ok {
+ return marsh.MarshalJSON()
+ }
}
}
- if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
- marsh := v.Interface().(json.Marshaler)
- return marsh.MarshalJSON()
- }
-
panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface()))
}
@@ -162,12 +148,8 @@ func hasUnmarshalJSON(v reflect.Value) bool {
v = v.Addr()
}
- if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid {
- _, ok := v.Interface().(json.Unmarshaler)
- return ok
- }
-
- return false
+ _, ok := v.Interface().(json.Unmarshaler)
+ return ok
}
// hasOmitEmpty indicates if the field has instructed us to not output
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
index 04236ff31..cda678e33 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go
@@ -7,6 +7,7 @@ package local
import (
"context"
"fmt"
+ "html"
"net"
"net/http"
"strconv"
@@ -141,11 +142,12 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) {
headerErr := q.Get("error")
if headerErr != "" {
- desc := q.Get("error_description")
+ desc := html.EscapeString(q.Get("error_description"))
// Note: It is a little weird we handle some errors by not going to the failPage. If they all should,
// change this to s.error() and make s.error() write the failPage instead of an error code.
_, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc)))
- s.putResult(Result{Err: fmt.Errorf(desc)})
+ s.putResult(Result{Err: fmt.Errorf("%s", desc)})
+
return
}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
index ef8d908a4..738a29eb9 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go
@@ -10,6 +10,8 @@ import (
"io"
"time"
+ "github.com/google/uuid"
+
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported"
internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time"
@@ -18,7 +20,6 @@ import (
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs"
- "github.com/google/uuid"
)
// ResolveEndpointer contains the methods for resolving authority endpoints.
@@ -110,7 +111,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
Scopes: scopes,
TenantID: authParams.AuthorityInfo.Tenant,
}
- tr, err := cred.TokenProvider(ctx, params)
+ pr, err := cred.TokenProvider(ctx, params)
if err != nil {
if len(scopes) == 0 {
err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err)
@@ -118,14 +119,18 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams
}
return accesstokens.TokenResponse{}, err
}
- return accesstokens.TokenResponse{
- TokenType: authParams.AuthnScheme.AccessTokenType(),
- AccessToken: tr.AccessToken,
- ExpiresOn: internalTime.DurationTime{
- T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second),
- },
+ tr := accesstokens.TokenResponse{
+ TokenType: authParams.AuthnScheme.AccessTokenType(),
+ AccessToken: pr.AccessToken,
+ ExpiresOn: now.Add(time.Duration(pr.ExpiresInSeconds) * time.Second),
GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes},
- }, nil
+ }
+ if pr.RefreshInSeconds > 0 {
+ tr.RefreshOn = internalTime.DurationTime{
+ T: now.Add(time.Duration(pr.RefreshInSeconds) * time.Second),
+ }
+ }
+ return tr, nil
}
if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil {
@@ -331,7 +336,7 @@ func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams
func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error {
endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName)
if err != nil {
- return fmt.Errorf("unable to resolve an endpoint: %s", err)
+ return fmt.Errorf("unable to resolve an endpoint: %w", err)
}
authParams.Endpoints = endpoints
return nil
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
index a7b7b0742..d738c7591 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go
@@ -17,6 +17,7 @@ import (
/* #nosec */
"crypto/sha1"
+ "crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/json"
@@ -68,7 +69,7 @@ type DeviceCodeResponse struct {
UserCode string `json:"user_code"`
DeviceCode string `json:"device_code"`
- VerificationURL string `json:"verification_url"`
+ VerificationURL string `json:"verification_uri"`
ExpiresIn int `json:"expires_in"`
Interval int `json:"interval"`
Message string `json:"message"`
@@ -112,19 +113,31 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
}
return c.AssertionCallback(ctx, options)
}
-
- token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{
+ claims := jwt.MapClaims{
"aud": authParams.Endpoints.TokenEndpoint,
"exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)),
"iss": authParams.ClientID,
"jti": uuid.New().String(),
"nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)),
"sub": authParams.ClientID,
- })
+ }
+
+ isADFSorDSTS := authParams.AuthorityInfo.AuthorityType == authority.ADFS ||
+ authParams.AuthorityInfo.AuthorityType == authority.DSTS
+
+ var signingMethod jwt.SigningMethod = jwt.SigningMethodPS256
+ thumbprintKey := "x5t#S256"
+
+ if isADFSorDSTS {
+ signingMethod = jwt.SigningMethodRS256
+ thumbprintKey = "x5t"
+ }
+
+ token := jwt.NewWithClaims(signingMethod, claims)
token.Header = map[string]interface{}{
- "alg": "RS256",
- "typ": "JWT",
- "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)),
+ "alg": signingMethod.Alg(),
+ "typ": "JWT",
+ thumbprintKey: base64.StdEncoding.EncodeToString(thumbprint(c.Cert, signingMethod.Alg())),
}
if authParams.SendX5C {
@@ -133,17 +146,23 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) (
assertion, err := token.SignedString(c.Key)
if err != nil {
- return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err)
+ return "", fmt.Errorf("unable to sign JWT token: %w", err)
}
+
return assertion, nil
}
// thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT.
// https://tools.ietf.org/html/rfc7517#section-4.8
-func thumbprint(cert *x509.Certificate) []byte {
- /* #nosec */
- a := sha1.Sum(cert.Raw)
- return a[:]
+func thumbprint(cert *x509.Certificate, alg string) []byte {
+ switch alg {
+ case jwt.SigningMethodRS256.Name: // identity providers like ADFS don't support SHA256 assertions, so need to support this
+ hash := sha1.Sum(cert.Raw) /* #nosec */
+ return hash[:]
+ default:
+ hash := sha256.Sum256(cert.Raw)
+ return hash[:]
+ }
}
// Client represents the REST calls to get tokens from token generator backends.
@@ -262,11 +281,7 @@ func (c Client) FromClientSecret(ctx context.Context, authParameters authority.A
qv.Set(clientID, authParameters.ClientID)
addScopeQueryParam(qv, authParameters)
- token, err := c.doTokenResp(ctx, authParameters, qv)
- if err != nil {
- return token, fmt.Errorf("FromClientSecret(): %w", err)
- }
- return token, nil
+ return c.doTokenResp(ctx, authParameters, qv)
}
func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) {
@@ -281,11 +296,7 @@ func (c Client) FromAssertion(ctx context.Context, authParameters authority.Auth
qv.Set(clientInfo, clientInfoVal)
addScopeQueryParam(qv, authParameters)
- token, err := c.doTokenResp(ctx, authParameters, qv)
- if err != nil {
- return token, fmt.Errorf("FromAssertion(): %w", err)
- }
- return token, nil
+ return c.doTokenResp(ctx, authParameters, qv)
}
func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) {
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
index 3107b45c1..32dde7b76 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go
@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"reflect"
+ "strconv"
"strings"
"time"
@@ -173,14 +174,75 @@ type TokenResponse struct {
FamilyID string `json:"foci"`
IDToken IDToken `json:"id_token"`
ClientInfo ClientInfo `json:"client_info"`
- ExpiresOn internalTime.DurationTime `json:"expires_in"`
+ RefreshOn internalTime.DurationTime `json:"refresh_in,omitempty"`
+ ExpiresOn time.Time `json:"-"`
ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"`
GrantedScopes Scopes `json:"scope"`
DeclinedScopes []string // This is derived
AdditionalFields map[string]interface{}
+ scopesComputed bool
+}
+
+func (tr *TokenResponse) UnmarshalJSON(data []byte) error {
+ type Alias TokenResponse
+ aux := &struct {
+ ExpiresIn internalTime.DurationTime `json:"expires_in,omitempty"`
+ ExpiresOn any `json:"expires_on,omitempty"`
+ *Alias
+ }{
+ Alias: (*Alias)(tr),
+ }
+
+ // Unmarshal the JSON data into the aux struct
+ if err := json.Unmarshal(data, &aux); err != nil {
+ return err
+ }
+
+ // Function to parse different date formats
+ // This is a workaround for the issue described here:
+ // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/issues/4963
+ parseExpiresOn := func(expiresOn string) (time.Time, error) {
+ var formats = []string{
+ "01/02/2006 15:04:05", // MM/dd/yyyy HH:mm:ss
+ "2006-01-02 15:04:05", // yyyy-MM-dd HH:mm:ss
+ time.RFC3339Nano, // ISO 8601 (with nanosecond precision)
+ }
+
+ for _, format := range formats {
+ if t, err := time.Parse(format, expiresOn); err == nil {
+ return t, nil
+ }
+ }
+ return time.Time{}, fmt.Errorf("invalid ExpiresOn format: %s", expiresOn)
+ }
- scopesComputed bool
+ if expiresOnStr, ok := aux.ExpiresOn.(string); ok {
+ if ts, err := strconv.ParseInt(expiresOnStr, 10, 64); err == nil {
+ tr.ExpiresOn = time.Unix(ts, 0)
+ return nil
+ }
+ if expiresOnStr != "" {
+ if t, err := parseExpiresOn(expiresOnStr); err != nil {
+ return err
+ } else {
+ tr.ExpiresOn = t
+ return nil
+ }
+ }
+ }
+
+ // Check if ExpiresOn is a number (Unix timestamp or ISO 8601)
+ if expiresOnNum, ok := aux.ExpiresOn.(float64); ok {
+ tr.ExpiresOn = time.Unix(int64(expiresOnNum), 0)
+ return nil
+ }
+
+ if !aux.ExpiresIn.T.IsZero() {
+ tr.ExpiresOn = aux.ExpiresIn.T
+ return nil
+ }
+ return errors.New("expires_in and expires_on are both missing or invalid")
}
// ComputeScope computes the final scopes based on what was granted by the server and
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
index 9d60734f8..c3c4a96fc 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go
@@ -23,7 +23,7 @@ import (
const (
authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize"
- instanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
+ aadInstanceDiscoveryEndpoint = "https://%v/common/discovery/instance"
tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration"
regionName = "REGION_NAME"
defaultAPIVersion = "2021-10-01"
@@ -47,13 +47,12 @@ type jsonCaller interface {
}
var aadTrustedHostList = map[string]bool{
- "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
- "login.chinacloudapi.cn": true, // Microsoft Azure China
- "login.microsoftonline.de": true, // Microsoft Azure Blackforest
- "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy
- "login.microsoftonline.us": true, // Microsoft Azure US Government
- "login.microsoftonline.com": true, // Microsoft Azure Worldwide
- "login.cloudgovapi.us": true, // Microsoft Azure US Government
+ "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list
+ "login.partner.microsoftonline.cn": true, // Microsoft Azure China
+ "login.microsoftonline.de": true, // Microsoft Azure Blackforest
+ "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy
+ "login.microsoftonline.us": true, // Microsoft Azure US Government
+ "login.microsoftonline.com": true, // Microsoft Azure Worldwide
}
// TrustedHost checks if an AAD host is trusted/valid.
@@ -137,8 +136,12 @@ const (
const (
AAD = "MSSTS"
ADFS = "ADFS"
+ DSTS = "DSTS"
)
+// DSTSTenant is referenced throughout multiple files, let us use a const in case we ever need to change it.
+const DSTSTenant = "7a433bfc-2514-4697-b467-e0933190487f"
+
// AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens.
type AuthenticationScheme interface {
// Extra parameters that are added to the request to the /token endpoint.
@@ -236,23 +239,26 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams {
// - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint
// - the resulting authority URL is invalid
func (p AuthParams) WithTenant(ID string) (AuthParams, error) {
- switch ID {
- case "", p.AuthorityInfo.Tenant:
- // keep the default tenant because the caller didn't override it
+ if ID == "" || ID == p.AuthorityInfo.Tenant {
return p, nil
- case "common", "consumers", "organizations":
- if p.AuthorityInfo.AuthorityType == AAD {
+ }
+
+ var authority string
+ switch p.AuthorityInfo.AuthorityType {
+ case AAD:
+ if ID == "common" || ID == "consumers" || ID == "organizations" {
return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID)
}
- // else we'll return a better error below
- }
- if p.AuthorityInfo.AuthorityType != AAD {
- return p, errors.New("the authority doesn't support tenants")
- }
- if p.AuthorityInfo.Tenant == "consumers" {
- return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`)
+ if p.AuthorityInfo.Tenant == "consumers" {
+ return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`)
+ }
+ authority = "https://" + path.Join(p.AuthorityInfo.Host, ID)
+ case ADFS:
+ return p, errors.New("ADFS authority doesn't support tenants")
+ case DSTS:
+ return p, errors.New("dSTS authority doesn't support tenants")
}
- authority := "https://" + path.Join(p.AuthorityInfo.Host, ID)
+
info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled)
if err == nil {
info.Region = p.AuthorityInfo.Region
@@ -344,44 +350,57 @@ type Info struct {
Host string
CanonicalAuthorityURI string
AuthorityType string
- UserRealmURIPrefix string
ValidateAuthority bool
Tenant string
Region string
InstanceDiscoveryDisabled bool
}
-func firstPathSegment(u *url.URL) (string, error) {
- pathParts := strings.Split(u.EscapedPath(), "/")
- if len(pathParts) >= 2 {
- return pathParts[1], nil
- }
-
- return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
-}
-
// NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided.
func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) {
- u, err := url.Parse(strings.ToLower(authority))
- if err != nil || u.Scheme != "https" {
- return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`)
+
+ cannonicalAuthority := authority
+
+ // suffix authority with / if it doesn't have one
+ if !strings.HasSuffix(cannonicalAuthority, "/") {
+ cannonicalAuthority += "/"
}
- tenant, err := firstPathSegment(u)
+ u, err := url.Parse(strings.ToLower(cannonicalAuthority))
+
if err != nil {
- return Info{}, err
+ return Info{}, fmt.Errorf("couldn't parse authority url: %w", err)
+ }
+ if u.Scheme != "https" {
+ return Info{}, errors.New("authority url scheme must be https")
+ }
+
+ pathParts := strings.Split(u.EscapedPath(), "/")
+ if len(pathParts) < 3 {
+ return Info{}, errors.New(`authority must be an URL such as "https://login.microsoftonline.com/"`)
}
+
authorityType := AAD
- if tenant == "adfs" {
+ tenant := pathParts[1]
+ switch tenant {
+ case "adfs":
authorityType = ADFS
+ case "dstsv2":
+ if len(pathParts) != 4 {
+ return Info{}, fmt.Errorf("dSTS authority must be an https URL such as https:///dstsv2/%s", DSTSTenant)
+ }
+ if pathParts[2] != DSTSTenant {
+ return Info{}, fmt.Errorf("dSTS authority only accepts a single tenant %q", DSTSTenant)
+ }
+ authorityType = DSTS
+ tenant = DSTSTenant
}
// u.Host includes the port, if any, which is required for private cloud deployments
return Info{
Host: u.Host,
- CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant),
+ CanonicalAuthorityURI: cannonicalAuthority,
AuthorityType: authorityType,
- UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()),
ValidateAuthority: validateAuthority,
Tenant: tenant,
InstanceDiscoveryDisabled: instanceDiscoveryDisabled,
@@ -525,7 +544,7 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I
discoveryHost = authorityInfo.Host
}
- endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost)
+ endpoint := fmt.Sprintf(aadInstanceDiscoveryEndpoint, discoveryHost)
err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp)
}
return resp, err
@@ -543,17 +562,19 @@ func detectRegion(ctx context.Context) string {
client := http.Client{
Timeout: time.Duration(2 * time.Second),
}
- req, _ := http.NewRequest("GET", imdsEndpoint, nil)
+ req, _ := http.NewRequestWithContext(ctx, http.MethodGet, imdsEndpoint, nil)
req.Header.Set("Metadata", "true")
resp, err := client.Do(req)
+ if err == nil {
+ defer resp.Body.Close()
+ }
// If the request times out or there is an error, it is retried once
- if err != nil || resp.StatusCode != 200 {
+ if err != nil || resp.StatusCode != http.StatusOK {
resp, err = client.Do(req)
- if err != nil || resp.StatusCode != 200 {
+ if err != nil || resp.StatusCode != http.StatusOK {
return ""
}
}
- defer resp.Body.Close()
response, err := io.ReadAll(resp.Body)
if err != nil {
return ""
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
index 7d9ec7cd3..790680366 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go
@@ -18,10 +18,11 @@ import (
"strings"
"time"
+ "github.com/google/uuid"
+
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json"
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version"
- "github.com/google/uuid"
)
// HTTPClient represents an HTTP client.
@@ -70,15 +71,13 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
unmarshal = customJSON.Unmarshal
}
- u, err := url.Parse(endpoint)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil)
if err != nil {
- return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err)
+ return fmt.Errorf("could not create request: %w", err)
}
- u.RawQuery = qv.Encode()
addStdHeaders(headers)
-
- req := &http.Request{Method: http.MethodGet, URL: u, Header: headers}
+ req.Header = headers
if body != nil {
// Note: In case your wondering why we are not gzip encoding....
@@ -99,7 +98,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea
if resp != nil {
if err := unmarshal(data, resp); err != nil {
- return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))
+ return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))}
}
}
return nil
@@ -222,7 +221,7 @@ func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values
}
if resp != nil {
if err := unmarshal(data, resp); err != nil {
- return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))
+ return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))}
}
}
return nil
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
index 0ade41179..4030ec8d8 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go
@@ -18,9 +18,6 @@ import (
"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
)
-// ADFS is an active directory federation service authority type.
-const ADFS = "ADFS"
-
type cacheEntry struct {
Endpoints authority.Endpoints
ValidForDomainsInList map[string]bool
@@ -51,7 +48,7 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo
return endpoints, nil
}
- endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName)
+ endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo)
if err != nil {
return authority.Endpoints{}, err
}
@@ -83,7 +80,7 @@ func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPr
defer m.mu.Unlock()
if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
- if authorityInfo.AuthorityType == ADFS {
+ if authorityInfo.AuthorityType == authority.ADFS {
domain, err := adfsDomainFromUpn(userPrincipalName)
if err == nil {
if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok {
@@ -102,7 +99,7 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use
updatedCacheEntry := createcacheEntry(endpoints)
- if authorityInfo.AuthorityType == ADFS {
+ if authorityInfo.AuthorityType == authority.ADFS {
// Since we're here, we've made a call to the backend. We want to ensure we're caching
// the latest values from the server.
if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok {
@@ -119,9 +116,12 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use
m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry
}
-func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) {
- if authorityInfo.Tenant == "adfs" {
+func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info) (string, error) {
+ if authorityInfo.AuthorityType == authority.ADFS {
return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil
+ } else if authorityInfo.AuthorityType == authority.DSTS {
+ return fmt.Sprintf("https://%s/dstsv2/%s/v2.0/.well-known/openid-configuration", authorityInfo.Host, authority.DSTSTenant), nil
+
} else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) {
resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo)
if err != nil {
@@ -134,7 +134,6 @@ func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, aut
return "", err
}
return resp.TenantDiscoveryEndpoint, nil
-
}
return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
index eb16b405c..5e551abc8 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go
@@ -5,4 +5,4 @@
package version
// Version is the version of this client package that is communicated to the server.
-const Version = "1.2.0"
+const Version = "1.4.2"
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go
new file mode 100644
index 000000000..d7cffc295
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go
@@ -0,0 +1,28 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "net/http"
+ "os"
+)
+
+func createAzureMLAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, os.Getenv(msiEndpointEnvVar), nil)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("secret", os.Getenv(msiSecretEnvVar))
+ q := req.URL.Query()
+ q.Set(apiVersionQueryParameterName, azureMLAPIVersion)
+ q.Set(resourceQueryParameterName, resource)
+ q.Set("clientid", os.Getenv("DEFAULT_IDENTITY_CLIENT_ID"))
+ if cid, ok := id.(UserAssignedClientID); ok {
+ q.Set("clientid", string(cid))
+ }
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go
new file mode 100644
index 000000000..be9a0bca3
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go
@@ -0,0 +1,37 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+func createCloudShellAuthRequest(ctx context.Context, resource string) (*http.Request, error) {
+ msiEndpoint := os.Getenv(msiEndpointEnvVar)
+ msiEndpointParsed, err := url.Parse(msiEndpoint)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", msiEndpoint, err)
+ }
+
+ data := url.Values{}
+ data.Set(resourceQueryParameterName, resource)
+ msiDataEncoded := data.Encode()
+ body := io.NopCloser(strings.NewReader(msiDataEncoded))
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, msiEndpointParsed.String(), body)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+
+ req.Header.Set(metaHTTPHeaderName, "true")
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ return req, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go
new file mode 100644
index 000000000..ca3de4325
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go
@@ -0,0 +1,717 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+/*
+Package managedidentity provides a client for retrieval of Managed Identity applications.
+The Managed Identity Client is used to acquire a token for managed identity assigned to
+an azure resource such as Azure function, app service, virtual machine, etc. to acquire a token
+without using credentials.
+*/
+package managedidentity
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
+ "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared"
+)
+
+// AuthResult contains the results of one token acquisition operation.
+// For details see https://aka.ms/msal-net-authenticationresult
+type AuthResult = base.AuthResult
+
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
+const (
+ // DefaultToIMDS indicates that the source is defaulted to IMDS when no environment variables are set.
+ DefaultToIMDS Source = "DefaultToIMDS"
+ AzureArc Source = "AzureArc"
+ ServiceFabric Source = "ServiceFabric"
+ CloudShell Source = "CloudShell"
+ AzureML Source = "AzureML"
+ AppService Source = "AppService"
+
+ // General request query parameter names
+ metaHTTPHeaderName = "Metadata"
+ apiVersionQueryParameterName = "api-version"
+ resourceQueryParameterName = "resource"
+ wwwAuthenticateHeaderName = "www-authenticate"
+
+ // UAMI query parameter name
+ miQueryParameterClientId = "client_id"
+ miQueryParameterObjectId = "object_id"
+ miQueryParameterPrincipalId = "principal_id"
+ miQueryParameterResourceIdIMDS = "msi_res_id"
+ miQueryParameterResourceId = "mi_res_id"
+
+ // IMDS
+ imdsDefaultEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+ imdsAPIVersion = "2018-02-01"
+ systemAssignedManagedIdentity = "system_assigned_managed_identity"
+
+ // Azure Arc
+ azureArcEndpoint = "http://127.0.0.1:40342/metadata/identity/oauth2/token"
+ azureArcAPIVersion = "2020-06-01"
+ azureArcFileExtension = ".key"
+ azureArcMaxFileSizeBytes int64 = 4096
+ linuxTokenPath = "/var/opt/azcmagent/tokens" // #nosec G101
+ linuxHimdsPath = "/opt/azcmagent/bin/himds"
+ azureConnectedMachine = "AzureConnectedMachineAgent"
+ himdsExecutableName = "himds.exe"
+ tokenName = "Tokens"
+
+ // App Service
+ appServiceAPIVersion = "2019-08-01"
+
+ // AzureML
+ azureMLAPIVersion = "2017-09-01"
+ // Service Fabric
+ serviceFabricAPIVersion = "2019-07-01-preview"
+
+ // Environment Variables
+ identityEndpointEnvVar = "IDENTITY_ENDPOINT"
+ identityHeaderEnvVar = "IDENTITY_HEADER"
+ azurePodIdentityAuthorityHostEnvVar = "AZURE_POD_IDENTITY_AUTHORITY_HOST"
+ imdsEndVar = "IMDS_ENDPOINT"
+ msiEndpointEnvVar = "MSI_ENDPOINT"
+ msiSecretEnvVar = "MSI_SECRET"
+ identityServerThumbprintEnvVar = "IDENTITY_SERVER_THUMBPRINT"
+
+ defaultRetryCount = 3
+)
+
+var retryCodesForIMDS = []int{
+ http.StatusNotFound, // 404
+ http.StatusGone, // 410
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusNotImplemented, // 501
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+ http.StatusHTTPVersionNotSupported, // 505
+ http.StatusVariantAlsoNegotiates, // 506
+ http.StatusInsufficientStorage, // 507
+ http.StatusLoopDetected, // 508
+ http.StatusNotExtended, // 510
+ http.StatusNetworkAuthenticationRequired, // 511
+}
+
+var retryStatusCodes = []int{
+ http.StatusRequestTimeout, // 408
+ http.StatusTooManyRequests, // 429
+ http.StatusInternalServerError, // 500
+ http.StatusBadGateway, // 502
+ http.StatusServiceUnavailable, // 503
+ http.StatusGatewayTimeout, // 504
+}
+
+var getAzureArcPlatformPath = func(platform string) string {
+ switch platform {
+ case "windows":
+ return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, tokenName)
+ case "linux":
+ return linuxTokenPath
+ default:
+ return ""
+ }
+}
+
+var getAzureArcHimdsFilePath = func(platform string) string {
+ switch platform {
+ case "windows":
+ return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, himdsExecutableName)
+ case "linux":
+ return linuxHimdsPath
+ default:
+ return ""
+ }
+}
+
+type Source string
+
+type ID interface {
+ value() string
+}
+
+type systemAssignedValue string // deliberately unexported so callers must use SystemAssigned(), keeping the input consistent.
+type UserAssignedClientID string
+type UserAssignedObjectID string
+type UserAssignedResourceID string
+
+func (s systemAssignedValue) value() string { return string(s) }
+func (c UserAssignedClientID) value() string { return string(c) }
+func (o UserAssignedObjectID) value() string { return string(o) }
+func (r UserAssignedResourceID) value() string { return string(r) }
+func SystemAssigned() ID {
+ return systemAssignedValue(systemAssignedManagedIdentity)
+}
+
+// The cache never uses an HTTP client because instance discovery is always disabled.
+var cacheManager *storage.Manager = storage.New(nil)
+
+type Client struct {
+ httpClient ops.HTTPClient
+ miType ID
+ source Source
+ authParams authority.AuthParams
+ retryPolicyEnabled bool
+ canRefresh *atomic.Value
+}
+
+type AcquireTokenOptions struct {
+ claims string
+}
+
+type ClientOption func(*Client)
+
+type AcquireTokenOption func(o *AcquireTokenOptions)
+
+// WithClaims sets additional claims to request for the token, such as those required by token revocation or conditional access policies.
+// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded.
+func WithClaims(claims string) AcquireTokenOption {
+ return func(o *AcquireTokenOptions) {
+ o.claims = claims
+ }
+}
+
+// WithHTTPClient allows for a custom HTTP client to be set.
+func WithHTTPClient(httpClient ops.HTTPClient) ClientOption {
+ return func(c *Client) {
+ c.httpClient = httpClient
+ }
+}
+
+func WithRetryPolicyDisabled() ClientOption {
+ return func(c *Client) {
+ c.retryPolicyEnabled = false
+ }
+}
+
+// Client to be used to acquire tokens for managed identity.
+// ID: [SystemAssigned], [UserAssignedClientID], [UserAssignedResourceID], [UserAssignedObjectID]
+//
+// Options: [WithHTTPClient]
+func New(id ID, options ...ClientOption) (Client, error) {
+ source, err := GetSource()
+ if err != nil {
+ return Client{}, err
+ }
+
+ // Check for user-assigned restrictions based on the source
+ switch source {
+ case AzureArc:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Azure Arc doesn't support user-assigned managed identities")
+ }
+ case AzureML:
+ switch id.(type) {
+ case UserAssignedObjectID, UserAssignedResourceID:
+ return Client{}, errors.New("Azure ML supports specifying a user-assigned managed identity by client ID only")
+ }
+ case CloudShell:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Cloud Shell doesn't support user-assigned managed identities")
+ }
+ case ServiceFabric:
+ switch id.(type) {
+ case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID:
+ return Client{}, errors.New("Service Fabric API doesn't support specifying a user-assigned identity. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi")
+ }
+ }
+
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case UserAssignedResourceID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case UserAssignedObjectID:
+ if len(string(t)) == 0 {
+ return Client{}, fmt.Errorf("empty %T", t)
+ }
+ case systemAssignedValue:
+ default:
+ return Client{}, fmt.Errorf("unsupported type %T", id)
+ }
+ zero := atomic.Value{}
+ zero.Store(false)
+ client := Client{
+ miType: id,
+ httpClient: shared.DefaultClient,
+ retryPolicyEnabled: true,
+ source: source,
+ canRefresh: &zero,
+ }
+ for _, option := range options {
+ option(&client)
+ }
+ fakeAuthInfo, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/managed_identity", false, true)
+ if err != nil {
+ return Client{}, err
+ }
+ client.authParams = authority.NewAuthParams(client.miType.value(), fakeAuthInfo)
+ return client, nil
+}
+
+// GetSource detects and returns the managed identity source available on the environment.
+func GetSource() (Source, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ identityHeader := os.Getenv(identityHeaderEnvVar)
+ identityServerThumbprint := os.Getenv(identityServerThumbprintEnvVar)
+ msiEndpoint := os.Getenv(msiEndpointEnvVar)
+ msiSecret := os.Getenv(msiSecretEnvVar)
+ imdsEndpoint := os.Getenv(imdsEndVar)
+
+ if identityEndpoint != "" && identityHeader != "" {
+ if identityServerThumbprint != "" {
+ return ServiceFabric, nil
+ }
+ return AppService, nil
+ } else if msiEndpoint != "" {
+ if msiSecret != "" {
+ return AzureML, nil
+ } else {
+ return CloudShell, nil
+ }
+ } else if isAzureArcEnvironment(identityEndpoint, imdsEndpoint) {
+ return AzureArc, nil
+ }
+
+ return DefaultToIMDS, nil
+}
+
+// now wraps time.Now so the token-refresh logic can be tested against a
+// stubbed clock; production code always observes the real current time.
+var now = time.Now
+
+// Acquires tokens from the configured managed identity on an azure resource.
+//
+// Resource: scopes application is requesting access to
+// Options: [WithClaims]
+func (c Client) AcquireToken(ctx context.Context, resource string, options ...AcquireTokenOption) (AuthResult, error) {
+ resource = strings.TrimSuffix(resource, "/.default")
+ o := AcquireTokenOptions{}
+ for _, option := range options {
+ option(&o)
+ }
+ c.authParams.Scopes = []string{resource}
+
+ // ignore cached access tokens when given claims
+ if o.claims == "" {
+ stResp, err := cacheManager.Read(ctx, c.authParams)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ ar, err := base.AuthResultFromStorage(stResp)
+ if err == nil {
+ if !stResp.AccessToken.RefreshOn.T.IsZero() && !stResp.AccessToken.RefreshOn.T.After(now()) && c.canRefresh.CompareAndSwap(false, true) {
+ defer c.canRefresh.Store(false)
+ if tr, er := c.getToken(ctx, resource); er == nil {
+ return tr, nil
+ }
+ }
+ ar.AccessToken, err = c.authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
+ return ar, err
+ }
+ }
+ return c.getToken(ctx, resource)
+}
+
+func (c Client) getToken(ctx context.Context, resource string) (AuthResult, error) {
+ switch c.source {
+ case AzureArc:
+ return c.acquireTokenForAzureArc(ctx, resource)
+ case AzureML:
+ return c.acquireTokenForAzureML(ctx, resource)
+ case CloudShell:
+ return c.acquireTokenForCloudShell(ctx, resource)
+ case DefaultToIMDS:
+ return c.acquireTokenForIMDS(ctx, resource)
+ case AppService:
+ return c.acquireTokenForAppService(ctx, resource)
+ case ServiceFabric:
+ return c.acquireTokenForServiceFabric(ctx, resource)
+ default:
+ return AuthResult{}, fmt.Errorf("unsupported source %q", c.source)
+ }
+}
+
+func (c Client) acquireTokenForAppService(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAppServiceAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForIMDS(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createIMDSAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForCloudShell(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createCloudShellAuthRequest(ctx, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForAzureML(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAzureMLAuthRequest(ctx, c.miType, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForServiceFabric(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createServiceFabricAuthRequest(ctx, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ tokenResponse, err := c.getTokenForRequest(req, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func (c Client) acquireTokenForAzureArc(ctx context.Context, resource string) (AuthResult, error) {
+ req, err := createAzureArcAuthRequest(ctx, resource, "")
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ response, err := c.httpClient.Do(req)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ defer response.Body.Close()
+
+ if response.StatusCode != http.StatusUnauthorized {
+ return AuthResult{}, fmt.Errorf("expected a 401 response, received %d", response.StatusCode)
+ }
+
+ secret, err := c.getAzureArcSecretKey(response, runtime.GOOS)
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ secondRequest, err := createAzureArcAuthRequest(ctx, resource, string(secret))
+ if err != nil {
+ return AuthResult{}, err
+ }
+
+ tokenResponse, err := c.getTokenForRequest(secondRequest, resource)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ return authResultFromToken(c.authParams, tokenResponse)
+}
+
+func authResultFromToken(authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
+ if cacheManager == nil {
+ return AuthResult{}, errors.New("cache instance is nil")
+ }
+ account, err := cacheManager.Write(authParams, token)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ // if refreshOn is not set, set it to half of the time until expiry if expiry is more than 2 hours away
+ if token.RefreshOn.T.IsZero() {
+ if lifetime := time.Until(token.ExpiresOn); lifetime > 2*time.Hour {
+ token.RefreshOn.T = time.Now().Add(lifetime / 2)
+ }
+ }
+ ar, err := base.NewAuthResult(token, account)
+ if err != nil {
+ return AuthResult{}, err
+ }
+ ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
+ return ar, err
+}
+
+// contains checks if the element is present in the list.
+func contains[T comparable](list []T, element T) bool {
+ for _, v := range list {
+ if v == element {
+ return true
+ }
+ }
+ return false
+}
+
+// retry performs an HTTP request with retries based on the provided options.
+func (c Client) retry(maxRetries int, req *http.Request) (*http.Response, error) {
+ var resp *http.Response
+ var err error
+ for attempt := 0; attempt < maxRetries; attempt++ {
+ tryCtx, tryCancel := context.WithTimeout(req.Context(), time.Minute)
+ defer tryCancel()
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+ }
+ cloneReq := req.Clone(tryCtx)
+ resp, err = c.httpClient.Do(cloneReq)
+ retrylist := retryStatusCodes
+ if c.source == DefaultToIMDS {
+ retrylist = retryCodesForIMDS
+ }
+ if err == nil && !contains(retrylist, resp.StatusCode) {
+ return resp, nil
+ }
+ select {
+ case <-time.After(time.Second):
+ case <-req.Context().Done():
+ err = req.Context().Err()
+ return resp, err
+ }
+ }
+ return resp, err
+}
+
+func (c Client) getTokenForRequest(req *http.Request, resource string) (accesstokens.TokenResponse, error) {
+ r := accesstokens.TokenResponse{}
+ var resp *http.Response
+ var err error
+
+ if c.retryPolicyEnabled {
+ resp, err = c.retry(defaultRetryCount, req)
+ } else {
+ resp, err = c.httpClient.Do(req)
+ }
+ if err != nil {
+ return r, err
+ }
+ responseBytes, err := io.ReadAll(resp.Body)
+ defer resp.Body.Close()
+ if err != nil {
+ return r, err
+ }
+ switch resp.StatusCode {
+ case http.StatusOK, http.StatusAccepted:
+ default:
+ sd := strings.TrimSpace(string(responseBytes))
+ if sd != "" {
+ return r, errors.CallErr{
+ Req: req,
+ Resp: resp,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s",
+ req.URL.String(),
+ req.Method,
+ resp.StatusCode,
+ sd),
+ }
+ }
+ return r, errors.CallErr{
+ Req: req,
+ Resp: resp,
+ Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, resp.StatusCode),
+ }
+ }
+
+ err = json.Unmarshal(responseBytes, &r)
+ if err != nil {
+ return r, errors.InvalidJsonErr{
+ Err: fmt.Errorf("error parsing the json error: %s", err),
+ }
+ }
+ r.GrantedScopes.Slice = append(r.GrantedScopes.Slice, resource)
+
+ return r, err
+}
+
+func createAppServiceAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeaderEnvVar))
+ q := req.URL.Query()
+ q.Set("api-version", appServiceAPIVersion)
+ q.Set("resource", resource)
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ q.Set(miQueryParameterClientId, string(t))
+ case UserAssignedResourceID:
+ q.Set(miQueryParameterResourceId, string(t))
+ case UserAssignedObjectID:
+ q.Set(miQueryParameterObjectId, string(t))
+ case systemAssignedValue:
+ default:
+ return nil, fmt.Errorf("unsupported type %T", id)
+ }
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
+
+func createIMDSAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) {
+ msiEndpoint, err := url.Parse(imdsDefaultEndpoint)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", imdsDefaultEndpoint, err)
+ }
+ msiParameters := msiEndpoint.Query()
+ msiParameters.Set(apiVersionQueryParameterName, imdsAPIVersion)
+ msiParameters.Set(resourceQueryParameterName, resource)
+
+ switch t := id.(type) {
+ case UserAssignedClientID:
+ msiParameters.Set(miQueryParameterClientId, string(t))
+ case UserAssignedResourceID:
+ msiParameters.Set(miQueryParameterResourceIdIMDS, string(t))
+ case UserAssignedObjectID:
+ msiParameters.Set(miQueryParameterObjectId, string(t))
+ case systemAssignedValue: // not adding anything
+ default:
+ return nil, fmt.Errorf("unsupported type %T", id)
+ }
+
+ msiEndpoint.RawQuery = msiParameters.Encode()
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+ req.Header.Set(metaHTTPHeaderName, "true")
+ return req, nil
+}
+
+func createAzureArcAuthRequest(ctx context.Context, resource string, key string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ if identityEndpoint == "" {
+ identityEndpoint = azureArcEndpoint
+ }
+ msiEndpoint, parseErr := url.Parse(identityEndpoint)
+
+ if parseErr != nil {
+ return nil, fmt.Errorf("couldn't parse %q: %s", identityEndpoint, parseErr)
+ }
+
+ msiParameters := msiEndpoint.Query()
+ msiParameters.Set(apiVersionQueryParameterName, azureArcAPIVersion)
+ msiParameters.Set(resourceQueryParameterName, resource)
+
+ msiEndpoint.RawQuery = msiParameters.Encode()
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil)
+ if err != nil {
+ return nil, fmt.Errorf("error creating http request %s", err)
+ }
+ req.Header.Set(metaHTTPHeaderName, "true")
+
+ if key != "" {
+ req.Header.Set("Authorization", fmt.Sprintf("Basic %s", key))
+ }
+
+ return req, nil
+}
+
+func isAzureArcEnvironment(identityEndpoint, imdsEndpoint string) bool {
+ if identityEndpoint != "" && imdsEndpoint != "" {
+ return true
+ }
+ himdsFilePath := getAzureArcHimdsFilePath(runtime.GOOS)
+ if himdsFilePath != "" {
+ if _, err := os.Stat(himdsFilePath); err == nil {
+ return true
+ }
+ }
+ return false
+}
+
+func (c *Client) getAzureArcSecretKey(response *http.Response, platform string) (string, error) {
+ wwwAuthenticateHeader := response.Header.Get(wwwAuthenticateHeaderName)
+
+ if len(wwwAuthenticateHeader) == 0 {
+ return "", errors.New("response has no www-authenticate header")
+ }
+
+ // check if the platform is supported
+ expectedSecretFilePath := getAzureArcPlatformPath(platform)
+ if expectedSecretFilePath == "" {
+ return "", errors.New("platform not supported, expected linux or windows")
+ }
+
+ parts := strings.Split(wwwAuthenticateHeader, "Basic realm=")
+ if len(parts) < 2 {
+ return "", fmt.Errorf("basic realm= not found in the string, instead found: %s", wwwAuthenticateHeader)
+ }
+
+ secretFilePath := parts
+
+ // check that the file in the file path is a .key file
+ fileName := filepath.Base(secretFilePath[1])
+ if !strings.HasSuffix(fileName, azureArcFileExtension) {
+ return "", fmt.Errorf("invalid file extension, expected %s, got %s", azureArcFileExtension, filepath.Ext(fileName))
+ }
+
+ // check that file path from header matches the expected file path for the platform
+ if expectedSecretFilePath != filepath.Dir(secretFilePath[1]) {
+ return "", fmt.Errorf("invalid file path, expected %s, got %s", expectedSecretFilePath, filepath.Dir(secretFilePath[1]))
+ }
+
+ fileInfo, err := os.Stat(secretFilePath[1])
+ if err != nil {
+ return "", fmt.Errorf("failed to get metadata for %s due to error: %s", secretFilePath[1], err)
+ }
+
+ // Throw an error if the secret file's size is greater than 4096 bytes
+ if s := fileInfo.Size(); s > azureArcMaxFileSizeBytes {
+ return "", fmt.Errorf("invalid secret file size, expected %d, file size was %d", azureArcMaxFileSizeBytes, s)
+ }
+
+ // Attempt to read the contents of the secret file
+ secret, err := os.ReadFile(secretFilePath[1])
+ if err != nil {
+ return "", fmt.Errorf("failed to read %q due to error: %s", secretFilePath[1], err)
+ }
+
+ return string(secret), nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go
new file mode 100644
index 000000000..535065e9d
--- /dev/null
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go
@@ -0,0 +1,25 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT license.
+
+package managedidentity
+
+import (
+ "context"
+ "net/http"
+ "os"
+)
+
+func createServiceFabricAuthRequest(ctx context.Context, resource string) (*http.Request, error) {
+ identityEndpoint := os.Getenv(identityEndpointEnvVar)
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil)
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Accept", "application/json")
+ req.Header.Set("Secret", os.Getenv(identityHeaderEnvVar))
+ q := req.URL.Query()
+ q.Set("api-version", serviceFabricAPIVersion)
+ q.Set("resource", resource)
+ req.URL.RawQuery = q.Encode()
+ return req, nil
+}
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
index 392e5e43f..7beed2617 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go
@@ -51,6 +51,13 @@ type AuthenticationScheme = authority.AuthenticationScheme
type Account = shared.Account
+type TokenSource = base.TokenSource
+
+const (
+ TokenSourceIdentityProvider = base.TokenSourceIdentityProvider
+ TokenSourceCache = base.TokenSourceCache
+)
+
var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid")
// clientOptions configures the Client's behavior.
@@ -387,7 +394,7 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s
if err != nil {
return AuthResult{}, err
}
- return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return pca.base.AuthResultFromToken(ctx, authParams, token)
}
type DeviceCodeResult = accesstokens.DeviceCodeResult
@@ -412,7 +419,7 @@ func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error
if err != nil {
return AuthResult{}, err
}
- return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true)
+ return d.client.base.AuthResultFromToken(ctx, d.authParams, token)
}
// acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode
@@ -687,7 +694,7 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string,
return AuthResult{}, err
}
- return pca.base.AuthResultFromToken(ctx, authParams, token, true)
+ return pca.base.AuthResultFromToken(ctx, authParams, token)
}
type interactiveAuthResult struct {
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
new file mode 100644
index 000000000..9515ee520
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/README.md
@@ -0,0 +1,3 @@
+# GCP Resource detection library
+
+This is a library intended to be used by Upstream OpenTelemetry resource detectors. It exists within this repository to allow for integration testing of the detection functions in real GCP environments.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
new file mode 100644
index 000000000..9ce7d96fe
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/app_engine.go
@@ -0,0 +1,78 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import "context"
+
+const (
+ // See https://cloud.google.com/appengine/docs/flexible/python/migrating#modules
+ // for the environment variables available in GAE environments.
+ gaeServiceEnv = "GAE_SERVICE"
+ gaeVersionEnv = "GAE_VERSION"
+ gaeInstanceEnv = "GAE_INSTANCE"
+ gaeEnv = "GAE_ENV"
+ gaeStandard = "standard"
+)
+
+func (d *Detector) onAppEngineStandard() bool {
+ // See https://cloud.google.com/appengine/docs/standard/go111/runtime#environment_variables.
+ env, found := d.os.LookupEnv(gaeEnv)
+ return found && env == gaeStandard
+}
+
+func (d *Detector) onAppEngine() bool {
+ _, found := d.os.LookupEnv(gaeServiceEnv)
+ return found
+}
+
+// AppEngineServiceName returns the service name of the app engine service.
+func (d *Detector) AppEngineServiceName() (string, error) {
+ if name, found := d.os.LookupEnv(gaeServiceEnv); found {
+ return name, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineServiceVersion returns the service version of the app engine service.
+func (d *Detector) AppEngineServiceVersion() (string, error) {
+ if version, found := d.os.LookupEnv(gaeVersionEnv); found {
+ return version, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineServiceInstance returns the service instance of the app engine service.
+func (d *Detector) AppEngineServiceInstance() (string, error) {
+ if instanceID, found := d.os.LookupEnv(gaeInstanceEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// AppEngineFlexAvailabilityZoneAndRegion returns the zone and region in which this program is running.
+func (d *Detector) AppEngineFlexAvailabilityZoneAndRegion() (string, string, error) {
+ // The GCE metadata server is available on App Engine Flex.
+ return d.GCEAvailabilityZoneAndRegion()
+}
+
+// AppEngineStandardAvailabilityZone returns the zone the app engine service is running in.
+func (d *Detector) AppEngineStandardAvailabilityZone() (string, error) {
+ return d.metadata.ZoneWithContext(context.TODO())
+}
+
+// AppEngineStandardCloudRegion returns the region the app engine service is running in.
+func (d *Detector) AppEngineStandardCloudRegion() (string, error) {
+ return d.FaaSCloudRegion()
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
new file mode 100644
index 000000000..d3992a4f7
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/bms.go
@@ -0,0 +1,55 @@
+// Copyright 2024 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+const (
+ bmsProjectIDEnv = "BMS_PROJECT_ID"
+ bmsRegionEnv = "BMS_REGION"
+ bmsInstanceIDEnv = "BMS_INSTANCE_ID"
+)
+
+// onBareMetalSolution checks if the code is running on a Google Cloud Bare Metal Solution (BMS) by verifying
+// the presence and non-empty values of BMS_PROJECT_ID, BMS_REGION, and BMS_INSTANCE_ID environment variables.
+// For more information on Google Cloud Bare Metal Solution, see: https://cloud.google.com/bare-metal/docs
+func (d *Detector) onBareMetalSolution() bool {
+ projectID, projectIDExists := d.os.LookupEnv(bmsProjectIDEnv)
+ region, regionExists := d.os.LookupEnv(bmsRegionEnv)
+ instanceID, instanceIDExists := d.os.LookupEnv(bmsInstanceIDEnv)
+ return projectIDExists && regionExists && instanceIDExists && projectID != "" && region != "" && instanceID != ""
+}
+
+// BareMetalSolutionInstanceID returns the instance ID from the BMS_INSTANCE_ID environment variable.
+func (d *Detector) BareMetalSolutionInstanceID() (string, error) {
+ if instanceID, found := d.os.LookupEnv(bmsInstanceIDEnv); found {
+ return instanceID, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionCloudRegion returns the region from the BMS_REGION environment variable.
+func (d *Detector) BareMetalSolutionCloudRegion() (string, error) {
+ if region, found := d.os.LookupEnv(bmsRegionEnv); found {
+ return region, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// BareMetalSolutionProjectID returns the project ID from the BMS_PROJECT_ID environment variable.
+func (d *Detector) BareMetalSolutionProjectID() (string, error) {
+ if project, found := d.os.LookupEnv(bmsProjectIDEnv); found {
+ return project, nil
+ }
+ return "", errEnvVarNotFound
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
new file mode 100644
index 000000000..4eac3c74b
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/detector.go
@@ -0,0 +1,101 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "context"
+ "errors"
+ "os"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+var errEnvVarNotFound = errors.New("environment variable not found")
+
+// NewDetector returns a *Detector which can get detect the platform,
+// and fetch attributes of the platform on which it is running.
+func NewDetector() *Detector {
+ return &Detector{metadata: metadata.NewClient(nil), os: realOSProvider{}}
+}
+
+type Platform int64
+
+const (
+ UnknownPlatform Platform = iota
+ GKE
+ GCE
+ CloudRun
+ CloudRunJob
+ CloudFunctions
+ AppEngineStandard
+ AppEngineFlex
+ BareMetalSolution
+)
+
+// CloudPlatform returns the platform on which this program is running.
+func (d *Detector) CloudPlatform() Platform {
+ switch {
+ case d.onBareMetalSolution():
+ return BareMetalSolution
+ case d.onGKE():
+ return GKE
+ case d.onCloudFunctions():
+ return CloudFunctions
+ case d.onCloudRun():
+ return CloudRun
+ case d.onCloudRunJob():
+ return CloudRunJob
+ case d.onAppEngineStandard():
+ return AppEngineStandard
+ case d.onAppEngine():
+ return AppEngineFlex
+ case d.onGCE():
+ return GCE
+ }
+ return UnknownPlatform
+}
+
+// ProjectID returns the ID of the project in which this program is running.
+func (d *Detector) ProjectID() (string, error) {
+ // N.B. d.metadata.ProjectIDWithContext(context.TODO()) is cached globally, so if we use it here it's untestable.
+ s, err := d.metadata.GetWithContext(context.TODO(), "project/project-id")
+ return strings.TrimSpace(s), err
+}
+
+// instanceID returns the ID of the project in which this program is running.
+func (d *Detector) instanceID() (string, error) {
+ // N.B. d.metadata.InstanceIDWithContext(context.TODO()) is cached globally, so if we use it here it's untestable.
+ s, err := d.metadata.GetWithContext(context.TODO(), "instance/id")
+ return strings.TrimSpace(s), err
+}
+
+// Detector collects resource information for all GCP platforms.
+type Detector struct {
+ metadata *metadata.Client
+ os osProvider
+}
+
+// osProvider contains the subset of the os package functions used by.
+type osProvider interface {
+ LookupEnv(string) (string, bool)
+}
+
+// realOSProvider uses the os package to lookup env vars.
+type realOSProvider struct{}
+
+func (realOSProvider) LookupEnv(env string) (string, bool) {
+ return os.LookupEnv(env)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
new file mode 100644
index 000000000..f137b1fae
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/faas.go
@@ -0,0 +1,106 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "context"
+ "strings"
+)
+
+const (
+ // Cloud Functions env vars:
+ // https://cloud.google.com/functions/docs/configuring/env-var#newer_runtimes
+ //
+ // Cloud Run env vars:
+ // https://cloud.google.com/run/docs/container-contract#services-env-vars
+ //
+ // Cloud Run jobs env vars:
+ // https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ cloudFunctionsTargetEnv = "FUNCTION_TARGET"
+ cloudRunConfigurationEnv = "K_CONFIGURATION"
+ cloudRunJobsEnv = "CLOUD_RUN_JOB"
+ faasServiceEnv = "K_SERVICE"
+ faasRevisionEnv = "K_REVISION"
+ cloudRunJobExecutionEnv = "CLOUD_RUN_EXECUTION"
+ cloudRunJobTaskIndexEnv = "CLOUD_RUN_TASK_INDEX"
+ regionMetadataAttr = "instance/region"
+)
+
+func (d *Detector) onCloudFunctions() bool {
+ _, found := d.os.LookupEnv(cloudFunctionsTargetEnv)
+ return found
+}
+
+func (d *Detector) onCloudRun() bool {
+ _, found := d.os.LookupEnv(cloudRunConfigurationEnv)
+ return found
+}
+
+func (d *Detector) onCloudRunJob() bool {
+ _, found := d.os.LookupEnv(cloudRunJobsEnv)
+ return found
+}
+
+// FaaSName returns the name of the Cloud Run, Cloud Run jobs or Cloud Functions service.
+func (d *Detector) FaaSName() (string, error) {
+ if name, found := d.os.LookupEnv(faasServiceEnv); found {
+ return name, nil
+ }
+ if name, found := d.os.LookupEnv(cloudRunJobsEnv); found {
+ return name, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// FaaSVersion returns the revision of the Cloud Run or Cloud Functions service.
+func (d *Detector) FaaSVersion() (string, error) {
+ if version, found := d.os.LookupEnv(faasRevisionEnv); found {
+ return version, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// CloudRunJobExecution returns the execution id of the Cloud Run jobs.
+func (d *Detector) CloudRunJobExecution() (string, error) {
+ if eid, found := d.os.LookupEnv(cloudRunJobExecutionEnv); found {
+ return eid, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// CloudRunJobTaskIndex returns the task index for the execution of the Cloud Run jobs.
+func (d *Detector) CloudRunJobTaskIndex() (string, error) {
+ if tidx, found := d.os.LookupEnv(cloudRunJobTaskIndexEnv); found {
+ return tidx, nil
+ }
+ return "", errEnvVarNotFound
+}
+
+// FaaSID returns the instance id of the Cloud Run or Cloud Function.
+func (d *Detector) FaaSID() (string, error) {
+ return d.instanceID()
+}
+
+// FaaSCloudRegion detects region from the metadata server.
+// It is in the format /projects//regions/.
+//
+// https://cloud.google.com/run/docs/reference/container-contract#metadata-server
+func (d *Detector) FaaSCloudRegion() (string, error) {
+ region, err := d.metadata.GetWithContext(context.TODO(), regionMetadataAttr)
+ if err != nil {
+ return "", err
+ }
+ return region[strings.LastIndex(region, "/")+1:], nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
new file mode 100644
index 000000000..794cfdf03
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gce.go
@@ -0,0 +1,117 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "cloud.google.com/go/compute/metadata"
+)
+
+// See the available GCE instance metadata:
+// https://cloud.google.com/compute/docs/metadata/predefined-metadata-keys#instance-metadata
+const machineTypeMetadataAttr = "instance/machine-type"
+
+// https://cloud.google.com/compute/docs/instance-groups/getting-info-about-migs#checking_if_a_vm_instance_is_part_of_a_mig
+const createdByInstanceAttr = "created-by"
+
+func (d *Detector) onGCE() bool {
+ _, err := d.metadata.GetWithContext(context.TODO(), machineTypeMetadataAttr)
+ return err == nil
+}
+
+// GCEHostType returns the machine type of the instance on which this program is running.
+func (d *Detector) GCEHostType() (string, error) {
+ return d.metadata.GetWithContext(context.TODO(), machineTypeMetadataAttr)
+}
+
+// GCEHostID returns the instance ID of the instance on which this program is running.
+func (d *Detector) GCEHostID() (string, error) {
+ return d.instanceID()
+}
+
+// GCEHostName returns the instance name of the instance on which this program is running.
+// Recommended to use GCEInstanceName() or GCEInstanceHostname() to more accurately reflect which
+// value is returned.
+func (d *Detector) GCEHostName() (string, error) {
+ return d.metadata.InstanceNameWithContext(context.TODO())
+}
+
+// GCEInstanceName returns the instance name of the instance on which this program is running.
+// This is the value visible in the Cloud Console UI, and the prefix for the default hostname
+// of the instance as defined by the default internal DNS name (see https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func (d *Detector) GCEInstanceName() (string, error) {
+ return d.metadata.InstanceNameWithContext(context.TODO())
+}
+
+// GCEInstanceHostname returns the full value of the default or custom hostname of the instance
+// on which this program is running. See https://cloud.google.com/compute/docs/instances/custom-hostname-vm.
+func (d *Detector) GCEInstanceHostname() (string, error) {
+ return d.metadata.HostnameWithContext(context.TODO())
+}
+
+// GCEAvailabilityZoneAndRegion returns the zone and region in which this program is running.
+func (d *Detector) GCEAvailabilityZoneAndRegion() (string, string, error) {
+ zone, err := d.metadata.ZoneWithContext(context.TODO())
+ if err != nil {
+ return "", "", err
+ }
+ if zone == "" {
+ return "", "", fmt.Errorf("no zone detected from GCE metadata server")
+ }
+ splitZone := strings.SplitN(zone, "-", 3)
+ if len(splitZone) != 3 {
+ return "", "", fmt.Errorf("zone was not in the expected format: country-region-zone. Got %v", zone)
+ }
+ return zone, strings.Join(splitZone[0:2], "-"), nil
+}
+
+type ManagedInstanceGroup struct {
+ Name string
+ Location string
+ Type LocationType
+}
+
+var createdByMIGRE = regexp.MustCompile(`^projects/[^/]+/(zones|regions)/([^/]+)/instanceGroupManagers/([^/]+)$`)
+
+func (d *Detector) GCEManagedInstanceGroup() (ManagedInstanceGroup, error) {
+ createdBy, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), createdByInstanceAttr)
+ if _, ok := err.(metadata.NotDefinedError); ok {
+ return ManagedInstanceGroup{}, nil
+ } else if err != nil {
+ return ManagedInstanceGroup{}, err
+ }
+ matches := createdByMIGRE.FindStringSubmatch(createdBy)
+ if matches == nil {
+ // The "created-by" key exists, but it doesn't describe a MIG.
+ // Something else must have created this VM.
+ return ManagedInstanceGroup{}, nil
+ }
+
+ mig := ManagedInstanceGroup{
+ Name: matches[3],
+ Location: matches[2],
+ }
+ switch matches[1] {
+ case "zones":
+ mig.Type = Zone
+ case "regions":
+ mig.Type = Region
+ }
+ return mig, nil
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
new file mode 100644
index 000000000..734d44cc0
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp/gke.go
@@ -0,0 +1,78 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gcp
+
+import (
+ "context"
+ "fmt"
+ "strings"
+)
+
+const (
+ // If the kubernetes.default.svc service exists in the cluster,
+ // then the KUBERNETES_SERVICE_HOST env var will be populated.
+ // Use this as an indication that we are running on kubernetes.
+ k8sServiceHostEnv = "KUBERNETES_SERVICE_HOST"
+ // See the available GKE metadata:
+ // https://cloud.google.com/kubernetes-engine/docs/concepts/workload-identity#instance_metadata
+ clusterNameMetadataAttr = "cluster-name"
+ clusterLocationMetadataAttr = "cluster-location"
+)
+
+func (d *Detector) onGKE() bool {
+ // Check if we are on k8s first
+ _, found := d.os.LookupEnv(k8sServiceHostEnv)
+ if !found {
+ return false
+ }
+ // If we are on k8s, make sure that we are actually on GKE, and not a
+ // different managed k8s platform.
+ _, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterLocationMetadataAttr)
+ return err == nil
+}
+
+// GKEHostID returns the instance ID of the instance on which this program is running.
+func (d *Detector) GKEHostID() (string, error) {
+ return d.GCEHostID()
+}
+
+// GKEClusterName returns the name if the GKE cluster in which this program is running.
+func (d *Detector) GKEClusterName() (string, error) {
+ return d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterNameMetadataAttr)
+}
+
+type LocationType int64
+
+const (
+ UndefinedLocation LocationType = iota
+ Zone
+ Region
+)
+
+// GKEAvailabilityZoneOrRegion returns the location of the cluster and whether the cluster is zonal or regional.
+func (d *Detector) GKEAvailabilityZoneOrRegion() (string, LocationType, error) {
+ clusterLocation, err := d.metadata.InstanceAttributeValueWithContext(context.TODO(), clusterLocationMetadataAttr)
+ if err != nil {
+ return "", UndefinedLocation, err
+ }
+ switch strings.Count(clusterLocation, "-") {
+ case 1:
+ return clusterLocation, Region, nil
+ case 2:
+ return clusterLocation, Zone, nil
+ default:
+ return "", UndefinedLocation, fmt.Errorf("unrecognized format for cluster location: %v", clusterLocation)
+ }
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
new file mode 100644
index 000000000..ea391705f
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/README.md
@@ -0,0 +1,44 @@
+# OpenTelemetry Google Cloud Monitoring Exporter
+
+[](https://pkg.go.dev/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric)
+[![Apache License][license-image]][license-url]
+
+OpenTelemetry Google Cloud Monitoring Exporter allows the user to send collected metrics to Google Cloud.
+
+To get started with instrumentation in Google Cloud, see [Generate traces and metrics with
+Go](https://cloud.google.com/stackdriver/docs/instrumentation/setup/go).
+
+To learn more about instrumentation and observability, including opinionated recommendations
+for Google Cloud Observability, visit [Instrumentation and
+observability](https://cloud.google.com/stackdriver/docs/instrumentation/overview).
+
+[Google Cloud Monitoring](https://cloud.google.com/monitoring) provides visibility into the performance, uptime, and overall health of cloud-powered applications. It collects metrics, events, and metadata from Google Cloud, Amazon Web Services, hosted uptime probes, application instrumentation, and a variety of common application components including Cassandra, Nginx, Apache Web Server, Elasticsearch, and many others. Operations ingests that data and generates insights via dashboards, charts, and alerts. Cloud Monitoring alerting helps you collaborate by integrating with Slack, PagerDuty, and more.
+
+## Setup
+
+Google Cloud Monitoring is a managed service provided by Google Cloud Platform. Google Cloud Monitoring requires to set up "Workspace" in advance. The guide to create a new Workspace is available on [the official document](https://cloud.google.com/monitoring/workspaces/create).
+
+## Authentication
+
+The Google Cloud Monitoring exporter depends upon [`google.FindDefaultCredentials`](https://pkg.go.dev/golang.org/x/oauth2/google?tab=doc#FindDefaultCredentials), so the service account is automatically detected by default, but also the custom credential file (so called `service_account_key.json`) can be detected with specific conditions. Quoting from the document of `google.FindDefaultCredentials`:
+
+* A JSON file whose path is specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable.
+* A JSON file in a location known to the gcloud command-line tool. On Windows, this is `%APPDATA%/gcloud/application_default_credentials.json`. On other systems, `$HOME/.config/gcloud/application_default_credentials.json`.
+
+When running code locally, you may need to specify a Google Project ID in addition to `GOOGLE_APPLICATION_CREDENTIALS`. This is best done using an environment variable (e.g. `GOOGLE_CLOUD_PROJECT`) and the `metric.WithProjectID` method, e.g.:
+
+```golang
+projectID := os.Getenv("GOOGLE_CLOUD_PROJECT")
+opts := []mexporter.Option{
+ mexporter.WithProjectID(projectID),
+}
+```
+
+## Useful links
+
+* For more information on OpenTelemetry, visit: https://opentelemetry.io/
+* For more about OpenTelemetry Go, visit: https://github.com/open-telemetry/opentelemetry-go
+* Learn more about Google Cloud Monitoring at https://cloud.google.com/monitoring
+
+[license-url]: https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/main/LICENSE
+[license-image]: https://img.shields.io/badge/license-Apache_2.0-green.svg?style=flat
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go
new file mode 100644
index 000000000..90dfcb344
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/cloudmonitoring.go
@@ -0,0 +1,49 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+
+ monitoring "cloud.google.com/go/monitoring/apiv3/v2"
+ "golang.org/x/oauth2/google"
+)
+
+// New creates a new Exporter thats implements metric.Exporter.
+func New(opts ...Option) (sdkmetric.Exporter, error) {
+ o := options{
+ context: context.Background(),
+ resourceAttributeFilter: DefaultResourceAttributesFilter,
+ }
+ for _, opt := range opts {
+ opt(&o)
+ }
+
+ if o.projectID == "" {
+ creds, err := google.FindDefaultCredentials(o.context, monitoring.DefaultAuthScopes()...)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find Google Cloud credentials: %v", err)
+ }
+ if creds.ProjectID == "" {
+ return nil, errors.New("google cloud monitoring: no project found with application default credentials")
+ }
+ o.projectID = creds.ProjectID
+ }
+ return newMetricExporter(&o)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go
new file mode 100644
index 000000000..57329a4bd
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/constants.go
@@ -0,0 +1,97 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// TODO: remove this file when the constants are ready in the Go SDK
+
+// Mappings for the well-known OpenTelemetry resource label keys
+// to applicable Monitored Resource label keys.
+// A uniquely identifying name for the Kubernetes cluster. Kubernetes
+// does not have cluster names as an internal concept so this may be
+// set to any meaningful value within the environment. For example,
+// GKE clusters have a name which can be used for this label.
+const (
+ // Deprecated: use semconv.CloudProviderKey instead.
+ CloudKeyProvider = "cloud.provider"
+ // Deprecated: use semconv.CloudAccountIDKey instead.
+ CloudKeyAccountID = "cloud.account.id"
+ // Deprecated: use semconv.CloudRegionKey instead.
+ CloudKeyRegion = "cloud.region"
+ // Deprecated: use semconv.CloudAvailabilityZoneKey instead.
+ CloudKeyZone = "cloud.availability_zone"
+
+ // Deprecated: use semconv.ServiceNamespaceKey instead.
+ ServiceKeyNamespace = "service.namespace"
+ // Deprecated: use semconv.ServiceInstanceIDKey instead.
+ ServiceKeyInstanceID = "service.instance.id"
+ // Deprecated: use semconv.ServiceNameKey instead.
+ ServiceKeyName = "service.name"
+
+ // Deprecated: HostType is not needed.
+ HostType = "host"
+ // A uniquely identifying name for the host.
+ // Deprecated: use semconv.HostNameKey instead.
+ HostKeyName = "host.name"
+ // A hostname as returned by the 'hostname' command on host machine.
+ // Deprecated: HostKeyHostName is not needed.
+ HostKeyHostName = "host.hostname"
+ // Deprecated: use semconv.HostIDKey instead.
+ HostKeyID = "host.id"
+ // Deprecated: use semconv.HostTypeKey instead.
+ HostKeyType = "host.type"
+
+ // A uniquely identifying name for the Container.
+ // Deprecated: use semconv.ContainerNameKey instead.
+ ContainerKeyName = "container.name"
+ // Deprecated: use semconv.ContainerImageNameKey instead.
+ ContainerKeyImageName = "container.image.name"
+ // Deprecated: use semconv.ContainerImageTagKey instead.
+ ContainerKeyImageTag = "container.image.tag"
+
+ // Cloud Providers
+ // Deprecated: use semconv.CloudProviderAWS instead.
+ CloudProviderAWS = "aws"
+ // Deprecated: use semconv.CloudProviderGCP instead.
+ CloudProviderGCP = "gcp"
+ // Deprecated: use semconv.CloudProviderAzure instead.
+ CloudProviderAZURE = "azure"
+
+ // Deprecated: Use "k8s" instead. This should not be needed.
+ K8S = "k8s"
+ // Deprecated: use semconv.K8SClusterNameKey instead.
+ K8SKeyClusterName = "k8s.cluster.name"
+ // Deprecated: use semconv.K8SNamespaceNameKey instead.
+ K8SKeyNamespaceName = "k8s.namespace.name"
+ // Deprecated: use semconv.K8SPodNameKey instead.
+ K8SKeyPodName = "k8s.pod.name"
+ // Deprecated: use semconv.K8SDeploymentNameKey instead.
+ K8SKeyDeploymentName = "k8s.deployment.name"
+
+ // Monitored Resources types
+ // Deprecated: Use "k8s_container" instead.
+ K8SContainer = "k8s_container"
+ // Deprecated: Use "k8s_node" instead.
+ K8SNode = "k8s_node"
+ // Deprecated: Use "k8s_pod" instead.
+ K8SPod = "k8s_pod"
+ // Deprecated: Use "k8s_cluster" instead.
+ K8SCluster = "k8s_cluster"
+ // Deprecated: Use "gce_instance" instead.
+ GCEInstance = "gce_instance"
+ // Deprecated: Use "aws_ec2_instance" instead.
+ AWSEC2Instance = "aws_ec2_instance"
+ // Deprecated: Use "generic_task" instead.
+ GenericTask = "generic_task"
+)
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go
new file mode 100644
index 000000000..974c0af95
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/error.go
@@ -0,0 +1,32 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ errBlankProjectID = errors.New("expecting a non-blank ProjectID")
+)
+
+type errUnexpectedAggregationKind struct {
+ kind string
+}
+
+func (e errUnexpectedAggregationKind) Error() string {
+ return fmt.Sprintf("the metric kind is unexpected: %v", e.kind)
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go
new file mode 100644
index 000000000..b0ab713c6
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/metric.go
@@ -0,0 +1,890 @@
+// Copyright 2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "math"
+ "net/url"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+ "unicode"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+
+ monitoring "cloud.google.com/go/monitoring/apiv3/v2"
+ "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb"
+ "github.com/googleapis/gax-go/v2"
+ "google.golang.org/api/option"
+ "google.golang.org/genproto/googleapis/api/distribution"
+ "google.golang.org/genproto/googleapis/api/label"
+ googlemetricpb "google.golang.org/genproto/googleapis/api/metric"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/encoding/gzip"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/protobuf/types/known/anypb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping"
+)
+
+const (
+ // The number of timeserieses to send to GCM in a single request. This
+ // is a hard limit in the GCM API, so we never want to exceed 200.
+ sendBatchSize = 200
+
+ cloudMonitoringMetricDescriptorNameFormat = "workload.googleapis.com/%s"
+ platformMappingMonitoredResourceKey = "gcp.resource_type"
+)
+
+// key is used to judge the uniqueness of the record descriptor.
+type key struct {
+ name string
+ libraryname string
+}
+
+func keyOf(metrics metricdata.Metrics, library instrumentation.Scope) key {
+ return key{
+ name: metrics.Name,
+ libraryname: library.Name,
+ }
+}
+
+// metricExporter is the implementation of OpenTelemetry metric exporter for
+// Google Cloud Monitoring.
+type metricExporter struct {
+ o *options
+ shutdown chan struct{}
+ // mdCache is the cache to hold MetricDescriptor to avoid creating duplicate MD.
+ mdCache map[key]*googlemetricpb.MetricDescriptor
+ client *monitoring.MetricClient
+ mdLock sync.RWMutex
+ shutdownOnce sync.Once
+}
+
+// ForceFlush does nothing, the exporter holds no state.
+func (e *metricExporter) ForceFlush(ctx context.Context) error { return ctx.Err() }
+
+// Shutdown shuts down the client connections.
+func (e *metricExporter) Shutdown(ctx context.Context) error {
+ err := errShutdown
+ e.shutdownOnce.Do(func() {
+ close(e.shutdown)
+ err = errors.Join(ctx.Err(), e.client.Close())
+ })
+ return err
+}
+
+// newMetricExporter returns an exporter that uploads OTel metric data to Google Cloud Monitoring.
+func newMetricExporter(o *options) (*metricExporter, error) {
+ if strings.TrimSpace(o.projectID) == "" {
+ return nil, errBlankProjectID
+ }
+
+ clientOpts := append([]option.ClientOption{option.WithGRPCDialOption(grpc.WithUserAgent(userAgent))}, o.monitoringClientOptions...)
+ ctx := o.context
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ client, err := monitoring.NewMetricClient(ctx, clientOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ if o.compression == "gzip" {
+ client.CallOptions.GetMetricDescriptor = append(client.CallOptions.GetMetricDescriptor,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateMetricDescriptor = append(client.CallOptions.CreateMetricDescriptor,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateTimeSeries = append(client.CallOptions.CreateTimeSeries,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ client.CallOptions.CreateServiceTimeSeries = append(client.CallOptions.CreateServiceTimeSeries,
+ gax.WithGRPCOptions(grpc.UseCompressor(gzip.Name)))
+ }
+
+ cache := map[key]*googlemetricpb.MetricDescriptor{}
+ e := &metricExporter{
+ o: o,
+ mdCache: cache,
+ client: client,
+ shutdown: make(chan struct{}),
+ }
+ return e, nil
+}
+
+var errShutdown = fmt.Errorf("exporter is shutdown")
+
+// Export exports OpenTelemetry Metrics to Google Cloud Monitoring.
+func (me *metricExporter) Export(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ select {
+ case <-me.shutdown:
+ return errShutdown
+ default:
+ }
+
+ if me.o.destinationProjectQuota {
+ ctx = metadata.NewOutgoingContext(ctx, metadata.New(map[string]string{"x-goog-user-project": strings.TrimPrefix(me.o.projectID, "projects/")}))
+ }
+ return errors.Join(
+ me.exportMetricDescriptor(ctx, rm),
+ me.exportTimeSeries(ctx, rm),
+ )
+}
+
+// Temporality returns the Temporality to use for an instrument kind.
+func (me *metricExporter) Temporality(ik metric.InstrumentKind) metricdata.Temporality {
+ return metric.DefaultTemporalitySelector(ik)
+}
+
+// Aggregation returns the Aggregation to use for an instrument kind.
+func (me *metricExporter) Aggregation(ik metric.InstrumentKind) metric.Aggregation {
+ return metric.DefaultAggregationSelector(ik)
+}
+
+// exportMetricDescriptor create MetricDescriptor from the record
+// if the descriptor is not registered in Cloud Monitoring yet.
+func (me *metricExporter) exportMetricDescriptor(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ // We only send metric descriptors if we're configured *and* we're not sending service timeseries.
+ if me.o.disableCreateMetricDescriptors {
+ return nil
+ }
+
+ me.mdLock.Lock()
+ defer me.mdLock.Unlock()
+ mds := make(map[key]*googlemetricpb.MetricDescriptor)
+ extraLabels := me.extraLabelsFromResource(rm.Resource)
+ for _, scope := range rm.ScopeMetrics {
+ for _, metrics := range scope.Metrics {
+ k := keyOf(metrics, scope.Scope)
+
+ if _, ok := me.mdCache[k]; ok {
+ continue
+ }
+
+ if _, localok := mds[k]; !localok {
+ md := me.recordToMdpb(metrics, extraLabels)
+ mds[k] = md
+ }
+ }
+ }
+
+ // TODO: This process is synchronous and blocks longer time if records in cps
+ // have many different descriptors. In the cps.ForEach above, it should spawn
+ // goroutines to send CreateMetricDescriptorRequest asynchronously in the case
+ // the descriptor does not exist in global cache (me.mdCache).
+ // See details in #26.
+ var errs []error
+ for kmd, md := range mds {
+ err := me.createMetricDescriptorIfNeeded(ctx, md)
+ if err == nil {
+ me.mdCache[kmd] = md
+ }
+ errs = append(errs, err)
+ }
+ return errors.Join(errs...)
+}
+
+func (me *metricExporter) createMetricDescriptorIfNeeded(ctx context.Context, md *googlemetricpb.MetricDescriptor) error {
+ mdReq := &monitoringpb.GetMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", me.o.projectID, md.Type),
+ }
+ _, err := me.client.GetMetricDescriptor(ctx, mdReq)
+ if err == nil {
+ // If the metric descriptor already exists, skip the CreateMetricDescriptor call.
+ // Metric descriptors cannot be updated without deleting them first, so there
+ // isn't anything we can do here:
+ // https://cloud.google.com/monitoring/custom-metrics/creating-metrics#md-modify
+ return nil
+ }
+ req := &monitoringpb.CreateMetricDescriptorRequest{
+ Name: fmt.Sprintf("projects/%s", me.o.projectID),
+ MetricDescriptor: md,
+ }
+ _, err = me.client.CreateMetricDescriptor(ctx, req)
+ return err
+}
+
+// exportTimeSeries create TimeSeries from the records in cps.
+// res should be the common resource among all TimeSeries, such as instance id, application name and so on.
+func (me *metricExporter) exportTimeSeries(ctx context.Context, rm *metricdata.ResourceMetrics) error {
+ tss, err := me.recordsToTspbs(rm)
+ if len(tss) == 0 {
+ return err
+ }
+
+ name := fmt.Sprintf("projects/%s", me.o.projectID)
+
+ errs := []error{err}
+ for i := 0; i < len(tss); i += sendBatchSize {
+ j := i + sendBatchSize
+ if j >= len(tss) {
+ j = len(tss)
+ }
+
+ // TODO: When this exporter is rewritten, support writing to multiple
+ // projects based on the "gcp.project.id" resource.
+ req := &monitoringpb.CreateTimeSeriesRequest{
+ Name: name,
+ TimeSeries: tss[i:j],
+ }
+ if me.o.createServiceTimeSeries {
+ errs = append(errs, me.client.CreateServiceTimeSeries(ctx, req))
+ } else {
+ errs = append(errs, me.client.CreateTimeSeries(ctx, req))
+ }
+ }
+
+ return errors.Join(errs...)
+}
+
+func (me *metricExporter) extraLabelsFromResource(res *resource.Resource) *attribute.Set {
+ set, _ := attribute.NewSetWithFiltered(res.Attributes(), me.o.resourceAttributeFilter)
+ return &set
+}
+
+// descToMetricType converts descriptor to MetricType proto type.
+// Basically this returns default value ("workload.googleapis.com/[metric type]").
+func (me *metricExporter) descToMetricType(desc metricdata.Metrics) string {
+ if formatter := me.o.metricDescriptorTypeFormatter; formatter != nil {
+ return formatter(desc)
+ }
+ return fmt.Sprintf(cloudMonitoringMetricDescriptorNameFormat, desc.Name)
+}
+
+// metricTypeToDisplayName takes a GCM metric type, like (workload.googleapis.com/MyCoolMetric) and returns the display name.
+func metricTypeToDisplayName(mURL string) string {
+ // strip domain, keep path after domain.
+ u, err := url.Parse(fmt.Sprintf("metrics://%s", mURL))
+ if err != nil || u.Path == "" {
+ return mURL
+ }
+ return strings.TrimLeft(u.Path, "/")
+}
+
+// recordToMdpb extracts data and converts them to googlemetricpb.MetricDescriptor.
+func (me *metricExporter) recordToMdpb(metrics metricdata.Metrics, extraLabels *attribute.Set) *googlemetricpb.MetricDescriptor {
+ name := metrics.Name
+ typ := me.descToMetricType(metrics)
+ kind, valueType := recordToMdpbKindType(metrics.Data)
+
+ // Detailed explanations on MetricDescriptor proto is not documented on
+ // generated Go packages. Refer to the original proto file.
+ // https://github.com/googleapis/googleapis/blob/50af053/google/api/metric.proto#L33
+ return &googlemetricpb.MetricDescriptor{
+ Name: name,
+ DisplayName: metricTypeToDisplayName(typ),
+ Type: typ,
+ MetricKind: kind,
+ ValueType: valueType,
+ Unit: string(metrics.Unit),
+ Description: metrics.Description,
+ Labels: labelDescriptors(metrics, extraLabels),
+ }
+}
+
+func labelDescriptors(metrics metricdata.Metrics, extraLabels *attribute.Set) []*label.LabelDescriptor {
+ labels := []*label.LabelDescriptor{}
+ seenKeys := map[string]struct{}{}
+ addAttributes := func(attr *attribute.Set) {
+ iter := attr.Iter()
+ for iter.Next() {
+ kv := iter.Attribute()
+ // Skip keys that have already been set
+ if _, ok := seenKeys[normalizeLabelKey(string(kv.Key))]; ok {
+ continue
+ }
+ labels = append(labels, &label.LabelDescriptor{
+ Key: normalizeLabelKey(string(kv.Key)),
+ })
+ seenKeys[normalizeLabelKey(string(kv.Key))] = struct{}{}
+ }
+ }
+ addAttributes(extraLabels)
+ switch a := metrics.Data.(type) {
+ case metricdata.Gauge[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Gauge[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Sum[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Sum[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Histogram[float64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ case metricdata.Histogram[int64]:
+ for _, pt := range a.DataPoints {
+ addAttributes(&pt.Attributes)
+ }
+ }
+ return labels
+}
+
+type attributes struct {
+ attrs attribute.Set
+}
+
+func (attrs *attributes) GetString(key string) (string, bool) {
+ value, ok := attrs.attrs.Value(attribute.Key(key))
+ return value.AsString(), ok
+}
+
+// resourceToMonitoredResourcepb converts resource in OTel to MonitoredResource
+// proto type for Cloud Monitoring.
+//
+// https://cloud.google.com/monitoring/api/ref_v3/rest/v3/projects.monitoredResourceDescriptors
+func (me *metricExporter) resourceToMonitoredResourcepb(res *resource.Resource) *monitoredrespb.MonitoredResource {
+ platformMrType, platformMappingRequested := res.Set().Value(platformMappingMonitoredResourceKey)
+
+ // check if platform mapping is requested and possible
+ if platformMappingRequested && platformMrType.AsString() == me.o.monitoredResourceDescription.mrType {
+ // assemble attributes required to construct this MR
+ attributeMap := make(map[string]string)
+ for expectedLabel := range me.o.monitoredResourceDescription.mrLabels {
+ value, found := res.Set().Value(attribute.Key(expectedLabel))
+ if found {
+ attributeMap[expectedLabel] = value.AsString()
+ }
+ }
+ return &monitoredrespb.MonitoredResource{
+ Type: platformMrType.AsString(),
+ Labels: attributeMap,
+ }
+ }
+
+ gmr := resourcemapping.ResourceAttributesToMonitoringMonitoredResource(&attributes{
+ attrs: attribute.NewSet(res.Attributes()...),
+ })
+ newLabels := make(map[string]string, len(gmr.Labels))
+ for k, v := range gmr.Labels {
+ newLabels[k] = sanitizeUTF8(v)
+ }
+ mr := &monitoredrespb.MonitoredResource{
+ Type: gmr.Type,
+ Labels: newLabels,
+ }
+ return mr
+}
+
+// recordToMdpbKindType return the mapping from OTel's record descriptor to
+// Cloud Monitoring's MetricKind and ValueType.
+func recordToMdpbKindType(a metricdata.Aggregation) (googlemetricpb.MetricDescriptor_MetricKind, googlemetricpb.MetricDescriptor_ValueType) {
+ switch agg := a.(type) {
+ case metricdata.Gauge[int64]:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+ case metricdata.Gauge[float64]:
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+ case metricdata.Sum[int64]:
+ if agg.IsMonotonic {
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_INT64
+ }
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_INT64
+ case metricdata.Sum[float64]:
+ if agg.IsMonotonic {
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DOUBLE
+ }
+ return googlemetricpb.MetricDescriptor_GAUGE, googlemetricpb.MetricDescriptor_DOUBLE
+ case metricdata.Histogram[int64], metricdata.Histogram[float64]:
+ return googlemetricpb.MetricDescriptor_CUMULATIVE, googlemetricpb.MetricDescriptor_DISTRIBUTION
+ default:
+ return googlemetricpb.MetricDescriptor_METRIC_KIND_UNSPECIFIED, googlemetricpb.MetricDescriptor_VALUE_TYPE_UNSPECIFIED
+ }
+}
+
+// recordToMpb converts data from records to Metric proto type for Cloud Monitoring.
+func (me *metricExporter) recordToMpb(metrics metricdata.Metrics, attributes attribute.Set, library instrumentation.Scope, extraLabels *attribute.Set) *googlemetricpb.Metric {
+ me.mdLock.RLock()
+ defer me.mdLock.RUnlock()
+ k := keyOf(metrics, library)
+ md, ok := me.mdCache[k]
+ if !ok {
+ md = me.recordToMdpb(metrics, extraLabels)
+ }
+
+ labels := make(map[string]string)
+ addAttributes := func(attr *attribute.Set) {
+ iter := attr.Iter()
+ for iter.Next() {
+ kv := iter.Attribute()
+ labels[normalizeLabelKey(string(kv.Key))] = sanitizeUTF8(kv.Value.Emit())
+ }
+ }
+ addAttributes(extraLabels)
+ addAttributes(&attributes)
+
+ return &googlemetricpb.Metric{
+ Type: md.Type,
+ Labels: labels,
+ }
+}
+
+// recordToTspb converts record to TimeSeries proto type with common resource.
+// ref. https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries
+func (me *metricExporter) recordToTspb(m metricdata.Metrics, mr *monitoredrespb.MonitoredResource, library instrumentation.Scope, extraLabels *attribute.Set) ([]*monitoringpb.TimeSeries, error) {
+ var tss []*monitoringpb.TimeSeries
+ var errs []error
+ if m.Data == nil {
+ return nil, nil
+ }
+ switch a := m.Data.(type) {
+ case metricdata.Gauge[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := gaugeToTimeSeries[int64](point, m, mr)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Gauge[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := gaugeToTimeSeries[float64](point, m, mr)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Sum[int64]:
+ for _, point := range a.DataPoints {
+ var ts *monitoringpb.TimeSeries
+ var err error
+ if a.IsMonotonic {
+ ts, err = sumToTimeSeries[int64](point, m, mr)
+ } else {
+ // Send non-monotonic sums as gauges
+ ts, err = gaugeToTimeSeries[int64](point, m, mr)
+ }
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Sum[float64]:
+ for _, point := range a.DataPoints {
+ var ts *monitoringpb.TimeSeries
+ var err error
+ if a.IsMonotonic {
+ ts, err = sumToTimeSeries[float64](point, m, mr)
+ } else {
+ // Send non-monotonic sums as gauges
+ ts, err = gaugeToTimeSeries[float64](point, m, mr)
+ }
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Histogram[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.Histogram[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := histogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.ExponentialHistogram[int64]:
+ for _, point := range a.DataPoints {
+ ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ case metricdata.ExponentialHistogram[float64]:
+ for _, point := range a.DataPoints {
+ ts, err := expHistogramToTimeSeries(point, m, mr, me.o.enableSumOfSquaredDeviation, me.o.projectID)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ ts.Metric = me.recordToMpb(m, point.Attributes, library, extraLabels)
+ tss = append(tss, ts)
+ }
+ default:
+ errs = append(errs, errUnexpectedAggregationKind{kind: reflect.TypeOf(m.Data).String()})
+ }
+ return tss, errors.Join(errs...)
+}
+
+func (me *metricExporter) recordsToTspbs(rm *metricdata.ResourceMetrics) ([]*monitoringpb.TimeSeries, error) {
+ mr := me.resourceToMonitoredResourcepb(rm.Resource)
+ extraLabels := me.extraLabelsFromResource(rm.Resource)
+
+ var (
+ tss []*monitoringpb.TimeSeries
+ errs []error
+ )
+ for _, scope := range rm.ScopeMetrics {
+ for _, metrics := range scope.Metrics {
+ ts, err := me.recordToTspb(metrics, mr, scope.Scope, extraLabels)
+ errs = append(errs, err)
+ tss = append(tss, ts...)
+ }
+ }
+
+ return tss, errors.Join(errs...)
+}
+
+func sanitizeUTF8(s string) string {
+ return strings.ToValidUTF8(s, "�")
+}
+
+func gaugeToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) {
+ value, valueType := numberDataPointToValue(point)
+ timestamp := timestamppb.New(point.Time)
+ if err := timestamp.CheckValid(); err != nil {
+ return nil, err
+ }
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_GAUGE,
+ ValueType: valueType,
+ Points: []*monitoringpb.Point{{
+ Interval: &monitoringpb.TimeInterval{
+ EndTime: timestamp,
+ },
+ Value: value,
+ }},
+ }, nil
+}
+
+func sumToTimeSeries[N int64 | float64](point metricdata.DataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ value, valueType := numberDataPointToValue[N](point)
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: valueType,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: value,
+ }},
+ }, nil
+}
+
+// TODO(@dashpole): Refactor to pass control-coupling lint check.
+//
+//nolint:revive
+func histogramToTimeSeries[N int64 | float64](point metricdata.HistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ distributionValue := histToDistribution(point, projectID)
+ if enableSOSD {
+ setSumOfSquaredDeviation(point, distributionValue)
+ }
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: distributionValue,
+ },
+ },
+ }},
+ }, nil
+}
+
+func expHistogramToTimeSeries[N int64 | float64](point metricdata.ExponentialHistogramDataPoint[N], metrics metricdata.Metrics, mr *monitoredrespb.MonitoredResource, enableSOSD bool, projectID string) (*monitoringpb.TimeSeries, error) {
+ interval, err := toNonemptyTimeIntervalpb(point.StartTime, point.Time)
+ if err != nil {
+ return nil, err
+ }
+ distributionValue := expHistToDistribution(point, projectID)
+ // TODO: Implement "setSumOfSquaredDeviationExpHist" for parameter "enableSOSD" functionality.
+ return &monitoringpb.TimeSeries{
+ Resource: mr,
+ Unit: string(metrics.Unit),
+ MetricKind: googlemetricpb.MetricDescriptor_CUMULATIVE,
+ ValueType: googlemetricpb.MetricDescriptor_DISTRIBUTION,
+ Points: []*monitoringpb.Point{{
+ Interval: interval,
+ Value: &monitoringpb.TypedValue{
+ Value: &monitoringpb.TypedValue_DistributionValue{
+ DistributionValue: distributionValue,
+ },
+ },
+ }},
+ }, nil
+}
+
+func toNonemptyTimeIntervalpb(start, end time.Time) (*monitoringpb.TimeInterval, error) {
+ // The end time of a new interval must be at least a millisecond after the end time of the
+ // previous interval, for all non-gauge types.
+ // https://cloud.google.com/monitoring/api/ref_v3/rpc/google.monitoring.v3#timeinterval
+ if end.Sub(start).Milliseconds() <= 1 {
+ end = start.Add(time.Millisecond)
+ }
+ startpb := timestamppb.New(start)
+ endpb := timestamppb.New(end)
+ err := errors.Join(
+ startpb.CheckValid(),
+ endpb.CheckValid(),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &monitoringpb.TimeInterval{
+ StartTime: startpb,
+ EndTime: endpb,
+ }, nil
+}
+
+func histToDistribution[N int64 | float64](hist metricdata.HistogramDataPoint[N], projectID string) *distribution.Distribution {
+ counts := make([]int64, len(hist.BucketCounts))
+ for i, v := range hist.BucketCounts {
+ counts[i] = int64(v)
+ }
+ var mean float64
+ if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero
+ mean = float64(hist.Sum) / float64(hist.Count)
+ }
+ return &distribution.Distribution{
+ Count: int64(hist.Count),
+ Mean: mean,
+ BucketCounts: counts,
+ BucketOptions: &distribution.Distribution_BucketOptions{
+ Options: &distribution.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{
+ Bounds: hist.Bounds,
+ },
+ },
+ },
+ Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID),
+ }
+}
+
+func expHistToDistribution[N int64 | float64](hist metricdata.ExponentialHistogramDataPoint[N], projectID string) *distribution.Distribution {
+ // First calculate underflow bucket with all negatives + zeros.
+ underflow := hist.ZeroCount
+ negativeBuckets := hist.NegativeBucket.Counts
+ for i := 0; i < len(negativeBuckets); i++ {
+ underflow += negativeBuckets[i]
+ }
+
+ // Next, pull in remaining buckets.
+ counts := make([]int64, len(hist.PositiveBucket.Counts)+2)
+ bucketOptions := &distribution.Distribution_BucketOptions{}
+ counts[0] = int64(underflow)
+ positiveBuckets := hist.PositiveBucket.Counts
+ for i := 0; i < len(positiveBuckets); i++ {
+ counts[i+1] = int64(positiveBuckets[i])
+ }
+ // Overflow bucket is always empty
+ counts[len(counts)-1] = 0
+
+ if len(hist.PositiveBucket.Counts) == 0 {
+ // We cannot send exponential distributions with no positive buckets,
+ // instead we send a simple overflow/underflow histogram.
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExplicitBuckets{
+ ExplicitBuckets: &distribution.Distribution_BucketOptions_Explicit{
+ Bounds: []float64{0},
+ },
+ }
+ } else {
+ // Exponential histogram
+ growth := math.Exp2(math.Exp2(-float64(hist.Scale)))
+ scale := math.Pow(growth, float64(hist.PositiveBucket.Offset))
+ bucketOptions.Options = &distribution.Distribution_BucketOptions_ExponentialBuckets{
+ ExponentialBuckets: &distribution.Distribution_BucketOptions_Exponential{
+ GrowthFactor: growth,
+ Scale: scale,
+ NumFiniteBuckets: int32(len(counts) - 2),
+ },
+ }
+ }
+
+ var mean float64
+ if !math.IsNaN(float64(hist.Sum)) && hist.Count > 0 { // Avoid divide-by-zero
+ mean = float64(hist.Sum) / float64(hist.Count)
+ }
+
+ return &distribution.Distribution{
+ Count: int64(hist.Count),
+ Mean: mean,
+ BucketCounts: counts,
+ BucketOptions: bucketOptions,
+ Exemplars: toDistributionExemplar[N](hist.Exemplars, projectID),
+ }
+}
+
+func toDistributionExemplar[N int64 | float64](Exemplars []metricdata.Exemplar[N], projectID string) []*distribution.Distribution_Exemplar {
+ var exemplars []*distribution.Distribution_Exemplar
+ for _, e := range Exemplars {
+ attachments := []*anypb.Any{}
+ if hasValidSpanContext(e) {
+ sctx, err := anypb.New(&monitoringpb.SpanContext{
+ SpanName: fmt.Sprintf("projects/%s/traces/%s/spans/%s", projectID, hex.EncodeToString(e.TraceID[:]), hex.EncodeToString(e.SpanID[:])),
+ })
+ if err == nil {
+ attachments = append(attachments, sctx)
+ }
+ }
+ if len(e.FilteredAttributes) > 0 {
+ attr, err := anypb.New(&monitoringpb.DroppedLabels{
+ Label: attributesToLabels(e.FilteredAttributes),
+ })
+ if err == nil {
+ attachments = append(attachments, attr)
+ }
+ }
+ exemplars = append(exemplars, &distribution.Distribution_Exemplar{
+ Value: float64(e.Value),
+ Timestamp: timestamppb.New(e.Time),
+ Attachments: attachments,
+ })
+ }
+ sort.Slice(exemplars, func(i, j int) bool {
+ return exemplars[i].Value < exemplars[j].Value
+ })
+ return exemplars
+}
+
+func attributesToLabels(attrs []attribute.KeyValue) map[string]string {
+ labels := make(map[string]string, len(attrs))
+ for _, attr := range attrs {
+ labels[normalizeLabelKey(string(attr.Key))] = sanitizeUTF8(attr.Value.Emit())
+ }
+ return labels
+}
+
+var (
+ nilTraceID trace.TraceID
+ nilSpanID trace.SpanID
+)
+
+func hasValidSpanContext[N int64 | float64](e metricdata.Exemplar[N]) bool {
+ return !bytes.Equal(e.TraceID[:], nilTraceID[:]) && !bytes.Equal(e.SpanID[:], nilSpanID[:])
+}
+
+func setSumOfSquaredDeviation[N int64 | float64](hist metricdata.HistogramDataPoint[N], dist *distribution.Distribution) {
+ var prevBound float64
+ // Calculate the sum of squared deviation.
+ for i := 0; i < len(hist.Bounds); i++ {
+ // Assume all points in the bucket occur at the middle of the bucket range
+ middleOfBucket := (prevBound + hist.Bounds[i]) / 2
+ dist.SumOfSquaredDeviation += float64(dist.BucketCounts[i]) * (middleOfBucket - dist.Mean) * (middleOfBucket - dist.Mean)
+ prevBound = hist.Bounds[i]
+ }
+ // The infinity bucket is an implicit +Inf bound after the list of explicit bounds.
+ // Assume points in the infinity bucket are at the top of the previous bucket
+ middleOfInfBucket := prevBound
+ if len(dist.BucketCounts) > 0 {
+ dist.SumOfSquaredDeviation += float64(dist.BucketCounts[len(dist.BucketCounts)-1]) * (middleOfInfBucket - dist.Mean) * (middleOfInfBucket - dist.Mean)
+ }
+}
+
+func numberDataPointToValue[N int64 | float64](
+ point metricdata.DataPoint[N],
+) (*monitoringpb.TypedValue, googlemetricpb.MetricDescriptor_ValueType) {
+ switch v := any(point.Value).(type) {
+ case int64:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{
+ Int64Value: v,
+ }},
+ googlemetricpb.MetricDescriptor_INT64
+ case float64:
+ return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{
+ DoubleValue: v,
+ }},
+ googlemetricpb.MetricDescriptor_DOUBLE
+ }
+ // It is impossible to reach this statement
+ return nil, googlemetricpb.MetricDescriptor_INT64
+}
+
+// https://github.com/googleapis/googleapis/blob/c4c562f89acce603fb189679836712d08c7f8584/google/api/metric.proto#L149
+//
+// > The label key name must follow:
+// >
+// > * Only upper and lower-case letters, digits and underscores (_) are
+// > allowed.
+// > * Label name must start with a letter or digit.
+// > * The maximum length of a label name is 100 characters.
+//
+// Note: this does not truncate if a label is too long.
+func normalizeLabelKey(s string) string {
+ if len(s) == 0 {
+ return s
+ }
+ s = strings.Map(sanitizeRune, s)
+ if unicode.IsDigit(rune(s[0])) {
+ s = "key_" + s
+ }
+ return s
+}
+
+// converts anything that is not a letter or digit to an underscore.
+func sanitizeRune(r rune) rune {
+ if unicode.IsLetter(r) || unicode.IsDigit(r) {
+ return r
+ }
+ // Everything else turns into an underscore
+ return '_'
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go
new file mode 100644
index 000000000..701b10b10
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/option.go
@@ -0,0 +1,201 @@
+// Copyright 2020-2021 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+
+ apioption "google.golang.org/api/option"
+)
+
+var userAgent = fmt.Sprintf("opentelemetry-go %s; google-cloud-metric-exporter %s", otel.Version(), Version())
+
+// MonitoredResourceDescription is the struct which holds information required to map OTel resource to specific
+// Google Cloud MonitoredResource.
+type MonitoredResourceDescription struct {
+ mrLabels map[string]struct{}
+ mrType string
+}
+
+// Option is function type that is passed to the exporter initialization function.
+type Option func(*options)
+
+// options is the struct to hold options for metricExporter and its client instance.
+type options struct {
+ // context allows you to provide a custom context for API calls.
+ //
+ // This context will be used several times: first, to create Cloud Monitoring
+ // clients, and then every time a new batch of metrics needs to be uploaded.
+ //
+ // If unset, context.Background() will be used.
+ context context.Context
+ // metricDescriptorTypeFormatter is the custom formatter for the MetricDescriptor.Type.
+ // By default, the format string is "workload.googleapis.com/[metric name]".
+ metricDescriptorTypeFormatter func(metricdata.Metrics) string
+ // resourceAttributeFilter determines which resource attributes to
+ // add to metrics as metric labels. By default, it adds service.name,
+ // service.namespace, and service.instance.id.
+ resourceAttributeFilter attribute.Filter
+ // monitoredResourceDescription sets whether to attempt mapping the OTel Resource to a specific
+ // Google Cloud Monitored Resource. When provided, the exporter attempts to map only to the provided
+ // monitored resource type.
+ monitoredResourceDescription MonitoredResourceDescription
+ // projectID is the identifier of the Cloud Monitoring
+ // project the user is uploading the stats data to.
+ // If not set, this will default to your "Application Default Credentials".
+ // For details see: https://developers.google.com/accounts/docs/application-default-credentials.
+ //
+ // It will be used in the project_id label of a Google Cloud Monitoring monitored
+ // resource if the resource does not inherently belong to a specific
+ // project, e.g. on-premise resource like k8s_container or generic_task.
+ projectID string
+ // compression enables gzip compression on gRPC calls.
+ compression string
+ // monitoringClientOptions are additional options to be passed
+ // to the underlying Stackdriver Monitoring API client.
+ // Optional.
+ monitoringClientOptions []apioption.ClientOption
+ // destinationProjectQuota sets whether the request should use quota from
+ // the destination project for the request.
+ destinationProjectQuota bool
+
+ // disableCreateMetricDescriptors disables automatic MetricDescriptor creation
+ disableCreateMetricDescriptors bool
+
+ // enableSumOfSquaredDeviation enables calculation of an estimated sum of squared
+ // deviation. It isn't correct, so we don't send it by default.
+ enableSumOfSquaredDeviation bool
+
+ // createServiceTimeSeries sets whether to create timeseries using `CreateServiceTimeSeries`.
+ // Implicitly, this sets `disableCreateMetricDescriptors` to true.
+ createServiceTimeSeries bool
+}
+
+// WithProjectID sets Google Cloud Platform project as projectID.
+// Without using this option, it automatically detects the project ID
+// from the default credential detection process.
+// Please find the detailed order of the default credential detection process on the doc:
+// https://godoc.org/golang.org/x/oauth2/google#FindDefaultCredentials
+func WithProjectID(id string) func(o *options) {
+ return func(o *options) {
+ o.projectID = id
+ }
+}
+
+// WithDestinationProjectQuota enables per-request usage of the destination
+// project's quota. For example, when setting gcp.project.id on a metric.
+func WithDestinationProjectQuota() func(o *options) {
+ return func(o *options) {
+ o.destinationProjectQuota = true
+ }
+}
+
+// WithMonitoringClientOptions add the options for Cloud Monitoring client instance.
+// Available options are defined in the google.golang.org/api/option package.
+func WithMonitoringClientOptions(opts ...apioption.ClientOption) func(o *options) {
+ return func(o *options) {
+ o.monitoringClientOptions = append(o.monitoringClientOptions, opts...)
+ }
+}
+
+// WithMetricDescriptorTypeFormatter sets the custom formatter for MetricDescriptor.
+// Note that the format has to follow the convention defined in the official document.
+// The default is "workload.googleapis.com/[metric name]".
+// ref. https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom_metric_names
+func WithMetricDescriptorTypeFormatter(f func(metricdata.Metrics) string) func(o *options) {
+ return func(o *options) {
+ o.metricDescriptorTypeFormatter = f
+ }
+}
+
+// WithFilteredResourceAttributes determines which resource attributes to
+// add to metrics as metric labels. By default, it adds service.name,
+// service.namespace, and service.instance.id. This is recommended to avoid
+// writing duplicate timeseries against the same monitored resource. Use
+// WithFilteredResourceAttributes(NoAttributes()) to disable the addition of
+// resource attributes to metric labels.
+func WithFilteredResourceAttributes(filter attribute.Filter) func(o *options) {
+ return func(o *options) {
+ o.resourceAttributeFilter = filter
+ }
+}
+
+// DefaultResourceAttributesFilter is the default filter applied to resource
+// attributes.
+func DefaultResourceAttributesFilter(kv attribute.KeyValue) bool {
+ return (kv.Key == semconv.ServiceNameKey ||
+ kv.Key == semconv.ServiceNamespaceKey ||
+ kv.Key == semconv.ServiceInstanceIDKey) && len(kv.Value.AsString()) > 0
+}
+
+// NoAttributes can be passed to WithFilteredResourceAttributes to disable
+// adding resource attributes as metric labels.
+func NoAttributes(attribute.KeyValue) bool {
+ return false
+}
+
+// WithDisableCreateMetricDescriptors will disable the automatic creation of
+// MetricDescriptors when an unknown metric is set to be exported.
+func WithDisableCreateMetricDescriptors() func(o *options) {
+ return func(o *options) {
+ o.disableCreateMetricDescriptors = true
+ }
+}
+
+// WithCompression sets the compression to use for gRPC requests.
+func WithCompression(c string) func(o *options) {
+ return func(o *options) {
+ o.compression = c
+ }
+}
+
+// WithSumOfSquaredDeviation sets the SumOfSquaredDeviation field on histograms.
+// It is an estimate, and is not the actual sum of squared deviations.
+func WithSumOfSquaredDeviation() func(o *options) {
+ return func(o *options) {
+ o.enableSumOfSquaredDeviation = true
+ }
+}
+
+// WithCreateServiceTimeSeries configures the exporter to use `CreateServiceTimeSeries` for creating timeseries.
+// If this is used, metric descriptors are not exported.
+func WithCreateServiceTimeSeries() func(o *options) {
+ return func(o *options) {
+ o.createServiceTimeSeries = true
+ o.disableCreateMetricDescriptors = true
+ }
+}
+
+// WithMonitoredResourceDescription configures the exporter to attempt to map the OpenTelemetry Resource to the provided
+// Google MonitoredResource. The provided mrLabels would be searched for in the OpenTelemetry Resource Attributes and if
+// found, would be included in the MonitoredResource labels.
+func WithMonitoredResourceDescription(mrType string, mrLabels []string) func(o *options) {
+ return func(o *options) {
+ mrLabelSet := make(map[string]struct{})
+ for _, label := range mrLabels {
+ mrLabelSet[label] = struct{}{}
+ }
+ o.monitoredResourceDescription = MonitoredResourceDescription{
+ mrType: mrType,
+ mrLabels: mrLabelSet,
+ }
+ }
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go
new file mode 100644
index 000000000..57dfab0c9
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric/version.go
@@ -0,0 +1,21 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric
+
+// Version is the current release version of the OpenTelemetry
+// Operations Metric Exporter in use.
+func Version() string {
+ return "0.50.0"
+}
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go
new file mode 100644
index 000000000..510391b82
--- /dev/null
+++ b/vendor/github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping/resourcemapping.go
@@ -0,0 +1,285 @@
+// Copyright 2022 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resourcemapping
+
+import (
+ "strings"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
+ monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres"
+)
+
+const (
+ ProjectIDAttributeKey = "gcp.project.id"
+
+ awsAccount = "aws_account"
+ awsEc2Instance = "aws_ec2_instance"
+ clusterName = "cluster_name"
+ containerName = "container_name"
+ gceInstance = "gce_instance"
+ genericNode = "generic_node"
+ genericTask = "generic_task"
+ instanceID = "instance_id"
+ job = "job"
+ k8sCluster = "k8s_cluster"
+ k8sContainer = "k8s_container"
+ k8sNode = "k8s_node"
+ k8sPod = "k8s_pod"
+ location = "location"
+ namespace = "namespace"
+ namespaceName = "namespace_name"
+ nodeID = "node_id"
+ nodeName = "node_name"
+ podName = "pod_name"
+ region = "region"
+ taskID = "task_id"
+ zone = "zone"
+ gaeInstance = "gae_instance"
+ gaeApp = "gae_app"
+ gaeModuleID = "module_id"
+ gaeVersionID = "version_id"
+ cloudRunRevision = "cloud_run_revision"
+ cloudFunction = "cloud_function"
+ cloudFunctionName = "function_name"
+ serviceName = "service_name"
+ configurationName = "configuration_name"
+ revisionName = "revision_name"
+ bmsInstance = "baremetalsolution.googleapis.com/Instance"
+ unknownServicePrefix = "unknown_service"
+)
+
+var (
+ // monitoredResourceMappings contains mappings of GCM resource label keys onto mapping config from OTel
+ // resource for a given monitored resource type.
+ monitoredResourceMappings = map[string]map[string]struct {
+ // If none of the otelKeys are present in the Resource, fallback to this literal value
+ fallbackLiteral string
+ // OTel resource keys to try and populate the resource label from. For entries with
+ // multiple OTel resource keys, the keys' values will be coalesced in order until there
+ // is a non-empty value.
+ otelKeys []string
+ }{
+ gceInstance: {
+ zone: {otelKeys: []string{string(semconv.CloudAvailabilityZoneKey)}},
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ },
+ k8sContainer: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}},
+ podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}},
+ containerName: {otelKeys: []string{string(semconv.K8SContainerNameKey)}},
+ },
+ k8sPod: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ namespaceName: {otelKeys: []string{string(semconv.K8SNamespaceNameKey)}},
+ podName: {otelKeys: []string{string(semconv.K8SPodNameKey)}},
+ },
+ k8sNode: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ nodeName: {otelKeys: []string{string(semconv.K8SNodeNameKey)}},
+ },
+ k8sCluster: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ clusterName: {otelKeys: []string{string(semconv.K8SClusterNameKey)}},
+ },
+ gaeInstance: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}},
+ gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}},
+ instanceID: {otelKeys: []string{string(semconv.FaaSInstanceKey)}},
+ },
+ gaeApp: {
+ location: {otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ }},
+ gaeModuleID: {otelKeys: []string{string(semconv.FaaSNameKey)}},
+ gaeVersionID: {otelKeys: []string{string(semconv.FaaSVersionKey)}},
+ },
+ awsEc2Instance: {
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ region: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ },
+ awsAccount: {otelKeys: []string{string(semconv.CloudAccountIDKey)}},
+ },
+ bmsInstance: {
+ location: {otelKeys: []string{string(semconv.CloudRegionKey)}},
+ instanceID: {otelKeys: []string{string(semconv.HostIDKey)}},
+ },
+ genericTask: {
+ location: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ fallbackLiteral: "global",
+ },
+ namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}},
+ job: {otelKeys: []string{string(semconv.ServiceNameKey), string(semconv.FaaSNameKey)}},
+ taskID: {otelKeys: []string{string(semconv.ServiceInstanceIDKey), string(semconv.FaaSInstanceKey)}},
+ },
+ genericNode: {
+ location: {
+ otelKeys: []string{
+ string(semconv.CloudAvailabilityZoneKey),
+ string(semconv.CloudRegionKey),
+ },
+ fallbackLiteral: "global",
+ },
+ namespace: {otelKeys: []string{string(semconv.ServiceNamespaceKey)}},
+ nodeID: {otelKeys: []string{string(semconv.HostIDKey), string(semconv.HostNameKey)}},
+ },
+ }
+)
+
+// ReadOnlyAttributes is an interface to abstract between pulling attributes from PData library or OTEL SDK.
+type ReadOnlyAttributes interface {
+ GetString(string) (string, bool)
+}
+
+// ResourceAttributesToLoggingMonitoredResource converts from a set of OTEL resource attributes into a
+// GCP monitored resource type and label set for Cloud Logging.
+// E.g.
+// This may output `gce_instance` type with appropriate labels.
+func ResourceAttributesToLoggingMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey))
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPAppEngine.Value.AsString():
+ return createMonitoredResource(gaeApp, attrs)
+ default:
+ return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs)
+ }
+}
+
+// ResourceAttributesToMonitoringMonitoredResource converts from a set of OTEL resource attributes into a
+// GCP monitored resource type and label set for Cloud Monitoring
+// E.g.
+// This may output `gce_instance` type with appropriate labels.
+func ResourceAttributesToMonitoringMonitoredResource(attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ cloudPlatform, _ := attrs.GetString(string(semconv.CloudPlatformKey))
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPAppEngine.Value.AsString():
+ return createMonitoredResource(gaeInstance, attrs)
+ default:
+ return commonResourceAttributesToMonitoredResource(cloudPlatform, attrs)
+ }
+}
+
+func commonResourceAttributesToMonitoredResource(cloudPlatform string, attrs ReadOnlyAttributes) *monitoredrespb.MonitoredResource {
+ switch cloudPlatform {
+ case semconv.CloudPlatformGCPComputeEngine.Value.AsString():
+ return createMonitoredResource(gceInstance, attrs)
+ case semconv.CloudPlatformAWSEC2.Value.AsString():
+ return createMonitoredResource(awsEc2Instance, attrs)
+ // TODO(alex-basinov): replace this string literal with semconv.CloudPlatformGCPBareMetalSolution
+ // once https://github.com/open-telemetry/semantic-conventions/pull/64 makes its way
+ // into the semconv module.
+ case "gcp_bare_metal_solution":
+ return createMonitoredResource(bmsInstance, attrs)
+ default:
+ // if k8s.cluster.name is set, pattern match for various k8s resources.
+ // this will also match non-cloud k8s platforms like minikube.
+ if _, ok := attrs.GetString(string(semconv.K8SClusterNameKey)); ok {
+ // Try for most to least specific k8s_container, k8s_pod, etc
+ if _, ok := attrs.GetString(string(semconv.K8SContainerNameKey)); ok {
+ return createMonitoredResource(k8sContainer, attrs)
+ } else if _, ok := attrs.GetString(string(semconv.K8SPodNameKey)); ok {
+ return createMonitoredResource(k8sPod, attrs)
+ } else if _, ok := attrs.GetString(string(semconv.K8SNodeNameKey)); ok {
+ return createMonitoredResource(k8sNode, attrs)
+ }
+ return createMonitoredResource(k8sCluster, attrs)
+ }
+
+ // Fallback to generic_task
+ _, hasServiceName := attrs.GetString(string(semconv.ServiceNameKey))
+ _, hasFaaSName := attrs.GetString(string(semconv.FaaSNameKey))
+ _, hasServiceInstanceID := attrs.GetString(string(semconv.ServiceInstanceIDKey))
+ _, hasFaaSInstance := attrs.GetString(string(semconv.FaaSInstanceKey))
+ if (hasServiceName && hasServiceInstanceID) || (hasFaaSInstance && hasFaaSName) {
+ return createMonitoredResource(genericTask, attrs)
+ }
+
+ // Everything else fallback to generic_node
+ return createMonitoredResource(genericNode, attrs)
+ }
+}
+
+func createMonitoredResource(
+ monitoredResourceType string,
+ resourceAttrs ReadOnlyAttributes,
+) *monitoredrespb.MonitoredResource {
+ mappings := monitoredResourceMappings[monitoredResourceType]
+ mrLabels := make(map[string]string, len(mappings))
+
+ for mrKey, mappingConfig := range mappings {
+ mrValue := ""
+ ok := false
+ // Coalesce the possible keys in order
+ for _, otelKey := range mappingConfig.otelKeys {
+ mrValue, ok = resourceAttrs.GetString(otelKey)
+ if mrValue != "" && !strings.HasPrefix(mrValue, unknownServicePrefix) {
+ break
+ }
+ }
+ if mrValue == "" && contains(mappingConfig.otelKeys, string(semconv.ServiceNameKey)) {
+ // the service name started with unknown_service, and was ignored above
+ mrValue, ok = resourceAttrs.GetString(string(semconv.ServiceNameKey))
+ }
+ if !ok || mrValue == "" {
+ mrValue = mappingConfig.fallbackLiteral
+ }
+ mrLabels[mrKey] = sanitizeUTF8(mrValue)
+ }
+ return &monitoredrespb.MonitoredResource{
+ Type: monitoredResourceType,
+ Labels: mrLabels,
+ }
+}
+
+func contains(list []string, element string) bool {
+ for _, item := range list {
+ if item == element {
+ return true
+ }
+ }
+ return false
+}
+
+func sanitizeUTF8(s string) string {
+ return strings.ToValidUTF8(s, "�")
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/.travis.yml b/vendor/github.com/alecthomas/kingpin/v2/.travis.yml
new file mode 100644
index 000000000..9c45bacc0
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/.travis.yml
@@ -0,0 +1,14 @@
+sudo: false
+language: go
+install: go get -t -v ./...
+go:
+ - 1.2.x
+ - 1.3.x
+ - 1.4.x
+ - 1.5.x
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
diff --git a/vendor/github.com/alecthomas/kingpin/v2/COPYING b/vendor/github.com/alecthomas/kingpin/v2/COPYING
new file mode 100644
index 000000000..2993ec085
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/COPYING
@@ -0,0 +1,19 @@
+Copyright (C) 2014 Alec Thomas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/alecthomas/kingpin/v2/README.md b/vendor/github.com/alecthomas/kingpin/v2/README.md
new file mode 100644
index 000000000..5495ca191
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/README.md
@@ -0,0 +1,709 @@
+# CONTRIBUTIONS ONLY
+
+**What does this mean?** I do not have time to fix issues myself. The only way fixes or new features will be added is by people submitting PRs. If you are interested in taking over maintenance and have a history of contributions to Kingpin, please let me know.
+
+**Current status.** Kingpin is largely feature stable. There hasn't been a need to add new features in a while, but there are some bugs that should be fixed.
+
+**Why?** I no longer use Kingpin personally (I now use [kong](https://github.com/alecthomas/kong)). Rather than leave the project in a limbo of people filing issues and wondering why they're not being worked on, I believe this notice will more clearly set expectations.
+
+# Kingpin - A Go (golang) command line and flag parser
+[](http://godoc.org/github.com/alecthomas/kingpin) [](https://github.com/alecthomas/kingpin/actions/workflows/ci.yml)
+
+
+
+
+
+
+
+- [Overview](#overview)
+- [Features](#features)
+- [User-visible changes between v1 and v2](#user-visible-changes-between-v1-and-v2)
+ - [Flags can be used at any point after their definition.](#flags-can-be-used-at-any-point-after-their-definition)
+ - [Short flags can be combined with their parameters](#short-flags-can-be-combined-with-their-parameters)
+- [API changes between v1 and v2](#api-changes-between-v1-and-v2)
+- [Versions](#versions)
+ - [V2 is the current stable version](#v2-is-the-current-stable-version)
+ - [V1 is the OLD stable version](#v1-is-the-old-stable-version)
+- [Change History](#change-history)
+- [Examples](#examples)
+ - [Simple Example](#simple-example)
+ - [Complex Example](#complex-example)
+- [Reference Documentation](#reference-documentation)
+ - [Displaying errors and usage information](#displaying-errors-and-usage-information)
+ - [Sub-commands](#sub-commands)
+ - [Custom Parsers](#custom-parsers)
+ - [Repeatable flags](#repeatable-flags)
+ - [Boolean Values](#boolean-values)
+ - [Default Values](#default-values)
+ - [Place-holders in Help](#place-holders-in-help)
+ - [Consuming all remaining arguments](#consuming-all-remaining-arguments)
+ - [Bash/ZSH Shell Completion](#bashzsh-shell-completion)
+ - [Supporting -h for help](#supporting--h-for-help)
+ - [Custom help](#custom-help)
+
+
+
+## Overview
+
+Kingpin is a [fluent-style](http://en.wikipedia.org/wiki/Fluent_interface),
+type-safe command-line parser. It supports flags, nested commands, and
+positional arguments.
+
+Install it with:
+
+ $ go get github.com/alecthomas/kingpin/v2
+
+It looks like this:
+
+```go
+var (
+ verbose = kingpin.Flag("verbose", "Verbose mode.").Short('v').Bool()
+ name = kingpin.Arg("name", "Name of user.").Required().String()
+)
+
+func main() {
+ kingpin.Parse()
+ fmt.Printf("%v, %s\n", *verbose, *name)
+}
+```
+
+More [examples](https://github.com/alecthomas/kingpin/tree/master/_examples) are available.
+
+Second to parsing, providing the user with useful help is probably the most
+important thing a command-line parser does. Kingpin tries to provide detailed
+contextual help if `--help` is encountered at any point in the command line
+(excluding after `--`).
+
+## Features
+
+- Help output that isn't as ugly as sin.
+- Fully [customisable help](#custom-help), via Go templates.
+- Parsed, type-safe flags (`kingpin.Flag("f", "help").Int()`)
+- Parsed, type-safe positional arguments (`kingpin.Arg("a", "help").Int()`).
+- Parsed, type-safe, arbitrarily deep commands (`kingpin.Command("c", "help")`).
+- Support for required flags and required positional arguments (`kingpin.Flag("f", "").Required().Int()`).
+- Support for arbitrarily nested default commands (`command.Default()`).
+- Callbacks per command, flag and argument (`kingpin.Command("c", "").Action(myAction)`).
+- POSIX-style short flag combining (`-a -b` -> `-ab`).
+- Short-flag+parameter combining (`-a parm` -> `-aparm`).
+- Read command-line from files (`@<file>`).
+- Automatically generate man pages (`--help-man`).
+
+## User-visible changes between v1 and v2
+
+### Flags can be used at any point after their definition.
+
+Flags can be specified at any point after their definition, not just
+*immediately after their associated command*. From the chat example below, the
+following used to be required:
+
+```
+$ chat --server=chat.server.com:8080 post --image=~/Downloads/owls.jpg pics
+```
+
+But the following will now work:
+
+```
+$ chat post --server=chat.server.com:8080 --image=~/Downloads/owls.jpg pics
+```
+
+### Short flags can be combined with their parameters
+
+Previously, if a short flag was used, any argument to that flag would have to
+be separated by a space. That is no longer the case.
+
+## API changes between v1 and v2
+
+- `ParseWithFileExpansion()` is gone. The new parser directly supports expanding `@`.
+- Added `FatalUsage()` and `FatalUsageContext()` for displaying an error + usage and terminating.
+- `Dispatch()` renamed to `Action()`.
+- Added `ParseContext()` for parsing a command line into its intermediate context form without executing.
+- Added `Terminate()` function to override the termination function.
+- Added `UsageForContextWithTemplate()` for printing usage via a custom template.
+- Added `UsageTemplate()` for overriding the default template to use. Two templates are included:
+ 1. `DefaultUsageTemplate` - default template.
+ 2. `CompactUsageTemplate` - compact command template for larger applications.
+
+## Versions
+
+The current stable version is [github.com/alecthomas/kingpin/v2](https://github.com/alecthomas/kingpin/v2). The previous version, [gopkg.in/alecthomas/kingpin.v1](https://gopkg.in/alecthomas/kingpin.v1), is deprecated and in maintenance mode.
+
+### [V2](https://github.com/alecthomas/kingpin/v2) is the current stable version
+
+Installation:
+
+```sh
+$ go get github.com/alecthomas/kingpin/v2
+```
+
+### [V1](https://gopkg.in/alecthomas/kingpin.v1) is the OLD stable version
+
+Installation:
+
+```sh
+$ go get gopkg.in/alecthomas/kingpin.v1
+```
+
+## Change History
+
+- *2015-09-19* -- Stable v2.1.0 release.
+ - Added `command.Default()` to specify a default command to use if no other
+ command matches. This allows for convenient user shortcuts.
+ - Exposed `HelpFlag` and `VersionFlag` for further customisation.
+ - `Action()` and `PreAction()` added and both now support an arbitrary
+ number of callbacks.
+ - `kingpin.SeparateOptionalFlagsUsageTemplate`.
+ - `--help-long` and `--help-man` (hidden by default) flags.
+ - Flags are "interspersed" by default, but can be disabled with `app.Interspersed(false)`.
+ - Added flags for all simple builtin types (int8, uint16, etc.) and slice variants.
+ - Use `app.Writer(os.Writer)` to specify the default writer for all output functions.
+ - Dropped `os.Writer` prefix from all printf-like functions.
+
+- *2015-05-22* -- Stable v2.0.0 release.
+ - Initial stable release of v2.0.0.
+ - Fully supports interspersed flags, commands and arguments.
+ - Flags can be present at any point after their logical definition.
+ - Application.Parse() terminates if commands are present and a command is not parsed.
+ - Dispatch() -> Action().
+ - Actions are dispatched after all values are populated.
+ - Override termination function (defaults to os.Exit).
+ - Override output stream (defaults to os.Stderr).
+ - Templatised usage help, with default and compact templates.
+ - Make error/usage functions more consistent.
+ - Support argument expansion from files by default (with @).
+ - Fully public data model is available via .Model().
+ - Parser has been completely refactored.
+ - Parsing and execution has been split into distinct stages.
+ - Use `go generate` to generate repeated flags.
+ - Support combined short-flag+argument: -fARG.
+
+- *2015-01-23* -- Stable v1.3.4 release.
+ - Support "--" for separating flags from positional arguments.
+ - Support loading flags from files (ParseWithFileExpansion()). Use @FILE as an argument.
+ - Add post-app and post-cmd validation hooks. This allows arbitrary validation to be added.
+ - A bunch of improvements to help usage and formatting.
+ - Support arbitrarily nested sub-commands.
+
+- *2014-07-08* -- Stable v1.2.0 release.
+ - Pass any value through to `Strings()` when final argument.
+ Allows for values that look like flags to be processed.
+ - Allow `--help` to be used with commands.
+ - Support `Hidden()` flags.
+ - Parser for [units.Base2Bytes](https://github.com/alecthomas/units)
+ type. Allows for flags like `--ram=512MB` or `--ram=1GB`.
+ - Add an `Enum()` value, allowing only one of a set of values
+ to be selected. eg. `Flag(...).Enum("debug", "info", "warning")`.
+
+- *2014-06-27* -- Stable v1.1.0 release.
+ - Bug fixes.
+ - Always return an error (rather than panicking) when misconfigured.
+ - `OpenFile(flag, perm)` value type added, for finer control over opening files.
+ - Significantly improved usage formatting.
+
+- *2014-06-19* -- Stable v1.0.0 release.
+ - Support [cumulative positional](#consuming-all-remaining-arguments) arguments.
+ - Return error rather than panic when there are fatal errors not caught by
+ the type system. eg. when a default value is invalid.
+ - Use gopkg.in.
+
+- *2014-06-10* -- Place-holder streamlining.
+ - Renamed `MetaVar` to `PlaceHolder`.
+ - Removed `MetaVarFromDefault`. Kingpin now uses [heuristics](#place-holders-in-help)
+ to determine what to display.
+
+## Examples
+
+### Simple Example
+
+Kingpin can be used for simple flag+arg applications like so:
+
+```
+$ ping --help
+usage: ping [<flags>] <ip> [<count>]
+
+Flags:
+ --debug Enable debug mode.
+ --help Show help.
+ -t, --timeout=5s Timeout waiting for ping.
+
+Args:
+ IP address to ping.
+ [] Number of packets to send
+$ ping 1.2.3.4 5
+Would ping: 1.2.3.4 with timeout 5s and count 5
+```
+
+From the following source:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/alecthomas/kingpin/v2"
+)
+
+var (
+ debug = kingpin.Flag("debug", "Enable debug mode.").Bool()
+ timeout = kingpin.Flag("timeout", "Timeout waiting for ping.").Default("5s").Envar("PING_TIMEOUT").Short('t').Duration()
+ ip = kingpin.Arg("ip", "IP address to ping.").Required().IP()
+ count = kingpin.Arg("count", "Number of packets to send").Int()
+)
+
+func main() {
+ kingpin.Version("0.0.1")
+ kingpin.Parse()
+ fmt.Printf("Would ping: %s with timeout %s and count %d\n", *ip, *timeout, *count)
+}
+```
+
+#### Reading arguments from a file
+Kingpin supports reading arguments from a file.
+Create a file with the corresponding arguments:
+```
+echo -t=5\n > args
+```
+And now supply it:
+```
+$ ping @args
+```
+
+### Complex Example
+
+Kingpin can also produce complex command-line applications with global flags,
+subcommands, and per-subcommand flags, like this:
+
+```
+$ chat --help
+usage: chat [] [] [ ...]
+
+A command-line chat application.
+
+Flags:
+ --help Show help.
+ --debug Enable debug mode.
+ --server=127.0.0.1 Server address.
+
+Commands:
+ help []
+ Show help for a command.
+
+ register
+ Register a new user.
+
+ post [] []
+ Post a message to a channel.
+
+$ chat help post
+usage: chat [] post [] []
+
+Post a message to a channel.
+
+Flags:
+ --image=IMAGE Image to post.
+
+Args:
+ Channel to post to.
+ [] Text to post.
+
+$ chat post --image=~/Downloads/owls.jpg pics
+...
+```
+
+From this code:
+
+```go
+package main
+
+import (
+ "os"
+ "strings"
+ "github.com/alecthomas/kingpin/v2"
+)
+
+var (
+ app = kingpin.New("chat", "A command-line chat application.")
+ debug = app.Flag("debug", "Enable debug mode.").Bool()
+ serverIP = app.Flag("server", "Server address.").Default("127.0.0.1").IP()
+
+ register = app.Command("register", "Register a new user.")
+ registerNick = register.Arg("nick", "Nickname for user.").Required().String()
+ registerName = register.Arg("name", "Name of user.").Required().String()
+
+ post = app.Command("post", "Post a message to a channel.")
+ postImage = post.Flag("image", "Image to post.").File()
+ postChannel = post.Arg("channel", "Channel to post to.").Required().String()
+ postText = post.Arg("text", "Text to post.").Strings()
+)
+
+func main() {
+ switch kingpin.MustParse(app.Parse(os.Args[1:])) {
+ // Register user
+ case register.FullCommand():
+ println(*registerNick)
+
+ // Post message
+ case post.FullCommand():
+ if *postImage != nil {
+ }
+ text := strings.Join(*postText, " ")
+ println("Post:", text)
+ }
+}
+```
+
+## Reference Documentation
+
+### Displaying errors and usage information
+
+Kingpin exports a set of functions to provide consistent errors and usage
+information to the user.
+
+Error messages look something like this:
+
+ : error:
+
+The functions on `Application` are:
+
+Function | Purpose
+---------|--------------
+`Errorf(format, args)` | Display a printf formatted error to the user.
+`Fatalf(format, args)` | As with Errorf, but also call the termination handler.
+`FatalUsage(format, args)` | As with Fatalf, but also print contextual usage information.
+`FatalUsageContext(context, format, args)` | As with Fatalf, but also print contextual usage information from a `ParseContext`.
+`FatalIfError(err, format, args)` | Conditionally print an error prefixed with format+args, then call the termination handler
+
+There are equivalent global functions in the kingpin namespace for the default
+`kingpin.CommandLine` instance.
+
+### Sub-commands
+
+Kingpin supports nested sub-commands, with separate flag and positional
+arguments per sub-command. Note that positional arguments may only occur after
+sub-commands.
+
+For example:
+
+```go
+var (
+ deleteCommand = kingpin.Command("delete", "Delete an object.")
+ deleteUserCommand = deleteCommand.Command("user", "Delete a user.")
+ deleteUserUIDFlag = deleteUserCommand.Flag("uid", "Delete user by UID rather than username.")
+ deleteUserUsername = deleteUserCommand.Arg("username", "Username to delete.")
+ deletePostCommand = deleteCommand.Command("post", "Delete a post.")
+)
+
+func main() {
+ switch kingpin.Parse() {
+ case deleteUserCommand.FullCommand():
+ case deletePostCommand.FullCommand():
+ }
+}
+```
+
+### Custom Parsers
+
+Kingpin supports both flag and positional argument parsers for converting to
+Go types. For example, some included parsers are `Int()`, `Float()`,
+`Duration()` and `ExistingFile()` (see [parsers.go](./parsers.go) for a complete list of included parsers).
+
+Parsers conform to Go's [`flag.Value`](http://godoc.org/flag#Value)
+interface, so any existing implementations will work.
+
+For example, a parser for accumulating HTTP header values might look like this:
+
+```go
+type HTTPHeaderValue http.Header
+
+func (h *HTTPHeaderValue) Set(value string) error {
+ parts := strings.SplitN(value, ":", 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("expected HEADER:VALUE got '%s'", value)
+ }
+ (*http.Header)(h).Add(parts[0], parts[1])
+ return nil
+}
+
+func (h *HTTPHeaderValue) String() string {
+ return ""
+}
+```
+
+As a convenience, I would recommend something like this:
+
+```go
+func HTTPHeader(s Settings) (target *http.Header) {
+ target = &http.Header{}
+ s.SetValue((*HTTPHeaderValue)(target))
+ return
+}
+```
+
+You would use it like so:
+
+```go
+headers = HTTPHeader(kingpin.Flag("header", "Add a HTTP header to the request.").Short('H'))
+```
+
+### Repeatable flags
+
+Depending on the `Value` they hold, some flags may be repeated. The
+`IsCumulative() bool` function on `Value` tells if it's safe to call `Set()`
+multiple times or if an error should be raised if several values are passed.
+
+The built-in `Value`s returning slices and maps, as well as `Counter` are
+examples of `Value`s that make a flag repeatable.
+
+### Boolean values
+
+Boolean values are uniquely managed by Kingpin. Each boolean flag will have a negative complement:
+`--` and `--no-`.
+
+### Default Values
+
+The default value is the zero value for a type. This can be overridden with
+the `Default(value...)` function on flags and arguments. This function accepts
+one or several strings, which are parsed by the value itself, so they *must*
+be compliant with the format expected.
+
+### Place-holders in Help
+
+The place-holder value for a flag is the value used in the help to describe
+the value of a non-boolean flag.
+
+The value provided to PlaceHolder() is used if provided, then the value
+provided by Default() if provided, then finally the capitalised flag name is
+used.
+
+Here are some examples of flags with various permutations:
+
+ --name=NAME // Flag(...).String()
+ --name="Harry" // Flag(...).Default("Harry").String()
+ --name=FULL-NAME // Flag(...).PlaceHolder("FULL-NAME").Default("Harry").String()
+
+### Consuming all remaining arguments
+
+A common command-line idiom is to use all remaining arguments for some
+purpose. eg. The following command accepts an arbitrary number of
+IP addresses as positional arguments:
+
+ ./cmd ping 10.1.1.1 192.168.1.1
+
+Such arguments are similar to [repeatable flags](#repeatable-flags), but for
+arguments. Therefore they use the same `IsCumulative() bool` function on the
+underlying `Value`, so the built-in `Value`s for which the `Set()` function
+can be called several times will consume multiple arguments.
+
+To implement the above example with a custom `Value`, we might do something
+like this:
+
+```go
+type ipList []net.IP
+
+func (i *ipList) Set(value string) error {
+ if ip := net.ParseIP(value); ip == nil {
+ return fmt.Errorf("'%s' is not an IP address", value)
+ } else {
+ *i = append(*i, ip)
+ return nil
+ }
+}
+
+func (i *ipList) String() string {
+ return ""
+}
+
+func (i *ipList) IsCumulative() bool {
+ return true
+}
+
+func IPList(s Settings) (target *[]net.IP) {
+ target = new([]net.IP)
+ s.SetValue((*ipList)(target))
+ return
+}
+```
+
+And use it like so:
+
+```go
+ips := IPList(kingpin.Arg("ips", "IP addresses to ping."))
+```
+
+### Bash/ZSH Shell Completion
+
+By default, all flags and commands/subcommands generate completions
+internally.
+
+Out of the box, CLI tools using kingpin should be able to take advantage
+of completion hinting for flags and commands. By specifying
+`--completion-bash` as the first argument, your CLI tool will show
+possible subcommands. By ending your argv with `--`, hints for flags
+will be shown.
+
+To allow your end users to take advantage you must package a
+`/etc/bash_completion.d` script with your distribution (or the equivalent
+for your target platform/shell). An alternative is to instruct your end
+user to source a script from their `bash_profile` (or equivalent).
+
+Fortunately Kingpin makes it easy to generate or source a script for use
+with end users shells. `./yourtool --completion-script-bash` and
+`./yourtool --completion-script-zsh` will generate these scripts for you.
+
+**Installation by Package**
+
+For the best user experience, you should bundle your pre-created
+completion script with your CLI tool and install it inside
+`/etc/bash_completion.d` (or equivalent). A good suggestion is to add
+this as an automated step to your build pipeline, so the script stays
+current as the implementation is improved or bugs are fixed.
+
+**Installation by `bash_profile`**
+
+Alternatively, instruct your users to add an additional statement to
+their `bash_profile` (or equivalent):
+
+```
+eval "$(your-cli-tool --completion-script-bash)"
+```
+
+Or for ZSH
+
+```
+eval "$(your-cli-tool --completion-script-zsh)"
+```
+
+#### Additional API
+To provide more flexibility, a completion option API has been
+exposed for flags to allow user defined completion options, to extend
+completions further than just EnumVar/Enum.
+
+
+**Provide Static Options**
+
+When using an `Enum` or `EnumVar`, users are limited to only the options
+given. Maybe we wish to hint possible options to the user, but also
+allow them to provide their own custom option. `HintOptions` gives
+this functionality to flags.
+
+```
+app := kingpin.New("completion", "My application with bash completion.")
+app.Flag("port", "Provide a port to connect to").
+ Required().
+ HintOptions("80", "443", "8080").
+ IntVar(&c.port)
+```
+
+**Provide Dynamic Options**
+Consider the case that you needed to read a local database or a file to
+provide suggestions. You can dynamically generate the options
+
+```
+func listHosts() []string {
+ // Provide a dynamic list of hosts from a hosts file or otherwise
+ // for bash completion. In this example we simply return static slice.
+
+ // You could use this functionality to reach into a hosts file to provide
+ // completion for a list of known hosts.
+ return []string{"sshhost.example", "webhost.example", "ftphost.example"}
+}
+
+app := kingpin.New("completion", "My application with bash completion.")
+app.Flag("flag-1", "").HintAction(listHosts).String()
+```
+
+**EnumVar/Enum**
+When using `Enum` or `EnumVar`, any provided options will be automatically
+used for bash autocompletion. However, if you wish to provide a subset or
+different options, you can use `HintOptions` or `HintAction` which will override
+the default completion options for `Enum`/`EnumVar`.
+
+
+**Examples**
+You can see an in depth example of the completion API within
+`examples/completion/main.go`
+
+
+### Supporting -h for help
+
+`kingpin.CommandLine.HelpFlag.Short('h')`
+
+Short help is also available when creating a more complicated app:
+
+```go
+var (
+ app = kingpin.New("chat", "A command-line chat application.")
+ // ...
+)
+
+func main() {
+ app.HelpFlag.Short('h')
+ switch kingpin.MustParse(app.Parse(os.Args[1:])) {
+ // ...
+ }
+}
+```
+
+### Custom help
+
+Kingpin v2 supports templatised help using the text/template library (actually, [a fork](https://github.com/alecthomas/template)).
+
+You can specify the template to use with the [Application.UsageTemplate()](http://godoc.org/github.com/alecthomas/kingpin/v2#Application.UsageTemplate) function.
+
+There are four included templates: `kingpin.DefaultUsageTemplate` is the default,
+`kingpin.CompactUsageTemplate` provides a more compact representation for more complex command-line structures,
+`kingpin.SeparateOptionalFlagsUsageTemplate` looks like the default template, but splits required
+and optional command flags into separate lists, and `kingpin.ManPageTemplate` is used to generate man pages.
+
+See the above templates for examples of usage, and the the function [UsageForContextWithTemplate()](https://github.com/alecthomas/kingpin/blob/master/usage.go#L198) method for details on the context.
+
+#### Default help template
+
+```
+$ go run ./examples/curl/curl.go --help
+usage: curl [] [ ...]
+
+An example implementation of curl.
+
+Flags:
+ --help Show help.
+ -t, --timeout=5s Set connection timeout.
+ -H, --headers=HEADER=VALUE
+ Add HTTP headers to the request.
+
+Commands:
+ help [...]
+ Show help.
+
+ get url
+ Retrieve a URL.
+
+ get file
+ Retrieve a file.
+
+ post []
+ POST a resource.
+```
+
+#### Compact help template
+
+```
+$ go run ./examples/curl/curl.go --help
+usage: curl [] [ ...]
+
+An example implementation of curl.
+
+Flags:
+ --help Show help.
+ -t, --timeout=5s Set connection timeout.
+ -H, --headers=HEADER=VALUE
+ Add HTTP headers to the request.
+
+Commands:
+ help [...]
+ get []
+ url
+ file
+ post []
+```
diff --git a/vendor/github.com/alecthomas/kingpin/v2/actions.go b/vendor/github.com/alecthomas/kingpin/v2/actions.go
new file mode 100644
index 000000000..72d6cbd40
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/actions.go
@@ -0,0 +1,42 @@
+package kingpin
+
+// Action callback executed at various stages after all values are populated.
+// The application, commands, arguments and flags all have corresponding
+// actions.
+type Action func(*ParseContext) error
+
+type actionMixin struct {
+ actions []Action
+ preActions []Action
+}
+
+type actionApplier interface {
+ applyActions(*ParseContext) error
+ applyPreActions(*ParseContext) error
+}
+
+func (a *actionMixin) addAction(action Action) {
+ a.actions = append(a.actions, action)
+}
+
+func (a *actionMixin) addPreAction(action Action) {
+ a.preActions = append(a.preActions, action)
+}
+
+func (a *actionMixin) applyActions(context *ParseContext) error {
+ for _, action := range a.actions {
+ if err := action(context); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (a *actionMixin) applyPreActions(context *ParseContext) error {
+ for _, preAction := range a.preActions {
+ if err := preAction(context); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/app.go b/vendor/github.com/alecthomas/kingpin/v2/app.go
new file mode 100644
index 000000000..4f1f31be2
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/app.go
@@ -0,0 +1,703 @@
+package kingpin
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "strings"
+ "text/template"
+)
+
+var (
+ ErrCommandNotSpecified = fmt.Errorf("command not specified")
+)
+
+var (
+ envarTransformRegexp = regexp.MustCompile(`[^a-zA-Z0-9_]+`)
+)
+
+type ApplicationValidator func(*Application) error
+
+// An Application contains the definitions of flags, arguments and commands
+// for an application.
+type Application struct {
+ cmdMixin
+ initialized bool
+
+ Name string
+ Help string
+
+ author string
+ version string
+ errorWriter io.Writer // Destination for errors.
+ usageWriter io.Writer // Destination for usage
+ usageTemplate string
+ usageFuncs template.FuncMap
+ validator ApplicationValidator
+ terminate func(status int) // See Terminate()
+ noInterspersed bool // can flags be interspersed with args (or must they come first)
+ defaultEnvars bool
+ completion bool
+
+ // Help flag. Exposed for user customisation.
+ HelpFlag *FlagClause
+ // Help command. Exposed for user customisation. May be nil.
+ HelpCommand *CmdClause
+ // Version flag. Exposed for user customisation. May be nil.
+ VersionFlag *FlagClause
+}
+
+// New creates a new Kingpin application instance.
+func New(name, help string) *Application {
+ a := &Application{
+ Name: name,
+ Help: help,
+ errorWriter: os.Stderr, // Left for backwards compatibility purposes.
+ usageWriter: os.Stderr,
+ usageTemplate: DefaultUsageTemplate,
+ terminate: os.Exit,
+ }
+ a.flagGroup = newFlagGroup()
+ a.argGroup = newArgGroup()
+ a.cmdGroup = newCmdGroup(a)
+ a.HelpFlag = a.Flag("help", "Show context-sensitive help (also try --help-long and --help-man).")
+ a.HelpFlag.Bool()
+ a.Flag("help-long", "Generate long help.").Hidden().PreAction(a.generateLongHelp).Bool()
+ a.Flag("help-man", "Generate a man page.").Hidden().PreAction(a.generateManPage).Bool()
+ a.Flag("completion-bash", "Output possible completions for the given args.").Hidden().BoolVar(&a.completion)
+ a.Flag("completion-script-bash", "Generate completion script for bash.").Hidden().PreAction(a.generateBashCompletionScript).Bool()
+ a.Flag("completion-script-zsh", "Generate completion script for ZSH.").Hidden().PreAction(a.generateZSHCompletionScript).Bool()
+
+ return a
+}
+
+func (a *Application) generateLongHelp(c *ParseContext) error {
+ a.Writer(os.Stdout)
+ if err := a.UsageForContextWithTemplate(c, 2, LongHelpTemplate); err != nil {
+ return err
+ }
+ a.terminate(0)
+ return nil
+}
+
+func (a *Application) generateManPage(c *ParseContext) error {
+ a.Writer(os.Stdout)
+ if err := a.UsageForContextWithTemplate(c, 2, ManPageTemplate); err != nil {
+ return err
+ }
+ a.terminate(0)
+ return nil
+}
+
+func (a *Application) generateBashCompletionScript(c *ParseContext) error {
+ a.Writer(os.Stdout)
+ if err := a.UsageForContextWithTemplate(c, 2, BashCompletionTemplate); err != nil {
+ return err
+ }
+ a.terminate(0)
+ return nil
+}
+
+func (a *Application) generateZSHCompletionScript(c *ParseContext) error {
+ a.Writer(os.Stdout)
+ if err := a.UsageForContextWithTemplate(c, 2, ZshCompletionTemplate); err != nil {
+ return err
+ }
+ a.terminate(0)
+ return nil
+}
+
+// DefaultEnvars configures all flags (that do not already have an associated
+// envar) to use a default environment variable in the form "<app>_<flag>".
+//
+// For example, if the application is named "foo" and a flag is named "bar-
+// waz" the environment variable: "FOO_BAR_WAZ".
+func (a *Application) DefaultEnvars() *Application {
+ a.defaultEnvars = true
+ return a
+}
+
+// Terminate specifies the termination handler. Defaults to os.Exit(status).
+// If nil is passed, a no-op function will be used.
+func (a *Application) Terminate(terminate func(int)) *Application {
+ if terminate == nil {
+ terminate = func(int) {}
+ }
+ a.terminate = terminate
+ return a
+}
+
+// Writer specifies the writer to use for usage and errors. Defaults to os.Stderr.
+// DEPRECATED: See ErrorWriter and UsageWriter.
+func (a *Application) Writer(w io.Writer) *Application {
+ a.errorWriter = w
+ a.usageWriter = w
+ return a
+}
+
+// ErrorWriter sets the io.Writer to use for errors.
+func (a *Application) ErrorWriter(w io.Writer) *Application {
+ a.errorWriter = w
+ return a
+}
+
+// UsageWriter sets the io.Writer to use for usage.
+func (a *Application) UsageWriter(w io.Writer) *Application {
+ a.usageWriter = w
+ return a
+}
+
+// UsageTemplate specifies the text template to use when displaying usage
+// information. The default is UsageTemplate.
+func (a *Application) UsageTemplate(template string) *Application {
+ a.usageTemplate = template
+ return a
+}
+
+// UsageFuncs adds extra functions that can be used in the usage template.
+func (a *Application) UsageFuncs(funcs template.FuncMap) *Application {
+ a.usageFuncs = funcs
+ return a
+}
+
+// Validate sets a validation function to run when parsing.
+func (a *Application) Validate(validator ApplicationValidator) *Application {
+ a.validator = validator
+ return a
+}
+
+// ParseContext parses the given command line and returns the fully populated
+// ParseContext.
+func (a *Application) ParseContext(args []string) (*ParseContext, error) {
+ return a.parseContext(false, args)
+}
+
+func (a *Application) parseContext(ignoreDefault bool, args []string) (*ParseContext, error) {
+ if err := a.init(); err != nil {
+ return nil, err
+ }
+ context := tokenize(args, ignoreDefault)
+ err := parse(context, a)
+ return context, err
+}
+
+// Parse parses command-line arguments. It returns the selected command and an
+// error. The selected command will be a space separated subcommand, if
+// subcommands have been configured.
+//
+// This will populate all flag and argument values, call all callbacks, and so
+// on.
+func (a *Application) Parse(args []string) (command string, err error) {
+
+ context, parseErr := a.ParseContext(args)
+ selected := []string{}
+ var setValuesErr error
+
+ if context == nil {
+ // Since we do not throw error immediately, there could be a case
+ // where a context returns nil. Protect against that.
+ return "", parseErr
+ }
+
+ if err = a.setDefaults(context); err != nil {
+ return "", err
+ }
+
+ selected, setValuesErr = a.setValues(context)
+
+ if err = a.applyPreActions(context, !a.completion); err != nil {
+ return "", err
+ }
+
+ if a.completion {
+ a.generateBashCompletion(context)
+ a.terminate(0)
+ } else {
+ if parseErr != nil {
+ return "", parseErr
+ }
+
+ a.maybeHelp(context)
+ if !context.EOL() {
+ return "", fmt.Errorf("unexpected argument '%s'", context.Peek())
+ }
+
+ if setValuesErr != nil {
+ return "", setValuesErr
+ }
+
+ command, err = a.execute(context, selected)
+ if err == ErrCommandNotSpecified {
+ a.writeUsage(context, nil)
+ }
+ }
+ return command, err
+}
+
+func (a *Application) writeUsage(context *ParseContext, err error) {
+ if err != nil {
+ a.Errorf("%s", err)
+ }
+ if err := a.UsageForContext(context); err != nil {
+ panic(err)
+ }
+ if err != nil {
+ a.terminate(1)
+ } else {
+ a.terminate(0)
+ }
+}
+
+func (a *Application) maybeHelp(context *ParseContext) {
+ for _, element := range context.Elements {
+ if flag, ok := element.Clause.(*FlagClause); ok && flag == a.HelpFlag {
+ // Re-parse the command-line ignoring defaults, so that help works correctly.
+ context, _ = a.parseContext(true, context.rawArgs)
+ a.writeUsage(context, nil)
+ }
+ }
+}
+
+// Version adds a --version flag for displaying the application version.
+func (a *Application) Version(version string) *Application {
+ a.version = version
+ a.VersionFlag = a.Flag("version", "Show application version.").PreAction(func(*ParseContext) error {
+ fmt.Fprintln(a.usageWriter, version)
+ a.terminate(0)
+ return nil
+ })
+ a.VersionFlag.Bool()
+ return a
+}
+
+// Author sets the author output by some help templates.
+func (a *Application) Author(author string) *Application {
+ a.author = author
+ return a
+}
+
+// Action callback to call when all values are populated and parsing is
+// complete, but before any command, flag or argument actions.
+//
+// All Action() callbacks are called in the order they are encountered on the
+// command line.
+func (a *Application) Action(action Action) *Application {
+ a.addAction(action)
+ return a
+}
+
+// Action called after parsing completes but before validation and execution.
+func (a *Application) PreAction(action Action) *Application {
+ a.addPreAction(action)
+ return a
+}
+
+// Command adds a new top-level command.
+func (a *Application) Command(name, help string) *CmdClause {
+ return a.addCommand(name, help)
+}
+
+// Interspersed control if flags can be interspersed with positional arguments
+//
+// true (the default) means that they can, false means that all the flags must appear before the first positional arguments.
+func (a *Application) Interspersed(interspersed bool) *Application {
+ a.noInterspersed = !interspersed
+ return a
+}
+
+func (a *Application) defaultEnvarPrefix() string {
+ if a.defaultEnvars {
+ return a.Name
+ }
+ return ""
+}
+
+func (a *Application) init() error {
+ if a.initialized {
+ return nil
+ }
+ if a.cmdGroup.have() && a.argGroup.have() {
+ return fmt.Errorf("can't mix top-level Arg()s with Command()s")
+ }
+
+ // If we have subcommands, add a help command at the top-level.
+ if a.cmdGroup.have() {
+ var command []string
+ a.HelpCommand = a.Command("help", "Show help.").PreAction(func(context *ParseContext) error {
+ a.Usage(command)
+ a.terminate(0)
+ return nil
+ })
+ a.HelpCommand.Arg("command", "Show help on command.").StringsVar(&command)
+ // Make help first command.
+ l := len(a.commandOrder)
+ a.commandOrder = append(a.commandOrder[l-1:l], a.commandOrder[:l-1]...)
+ }
+
+ if err := a.flagGroup.init(a.defaultEnvarPrefix()); err != nil {
+ return err
+ }
+ if err := a.cmdGroup.init(); err != nil {
+ return err
+ }
+ if err := a.argGroup.init(); err != nil {
+ return err
+ }
+ for _, cmd := range a.commands {
+ if err := cmd.init(); err != nil {
+ return err
+ }
+ }
+ flagGroups := []*flagGroup{a.flagGroup}
+ for _, cmd := range a.commandOrder {
+ if err := checkDuplicateFlags(cmd, flagGroups); err != nil {
+ return err
+ }
+ }
+ a.initialized = true
+ return nil
+}
+
+// Recursively check commands for duplicate flags.
+func checkDuplicateFlags(current *CmdClause, flagGroups []*flagGroup) error {
+ // Check for duplicates.
+ for _, flags := range flagGroups {
+ for _, flag := range current.flagOrder {
+ if flag.shorthand != 0 {
+ if _, ok := flags.short[string(flag.shorthand)]; ok {
+ return fmt.Errorf("duplicate short flag -%c", flag.shorthand)
+ }
+ }
+ if _, ok := flags.long[flag.name]; ok {
+ return fmt.Errorf("duplicate long flag --%s", flag.name)
+ }
+ }
+ }
+ flagGroups = append(flagGroups, current.flagGroup)
+ // Check subcommands.
+ for _, subcmd := range current.commandOrder {
+ if err := checkDuplicateFlags(subcmd, flagGroups); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (a *Application) execute(context *ParseContext, selected []string) (string, error) {
+ var err error
+
+ if err = a.validateRequired(context); err != nil {
+ return "", err
+ }
+
+ if err = a.applyValidators(context); err != nil {
+ return "", err
+ }
+
+ if err = a.applyActions(context); err != nil {
+ return "", err
+ }
+
+ command := strings.Join(selected, " ")
+ if command == "" && a.cmdGroup.have() {
+ return "", ErrCommandNotSpecified
+ }
+ return command, err
+}
+
+func (a *Application) setDefaults(context *ParseContext) error {
+ flagElements := map[string]*ParseElement{}
+ for _, element := range context.Elements {
+ if flag, ok := element.Clause.(*FlagClause); ok {
+ if flag.name == "help" {
+ return nil
+ }
+
+ if flag.name == "version" {
+ return nil
+ }
+ flagElements[flag.name] = element
+ }
+ }
+
+ argElements := map[string]*ParseElement{}
+ for _, element := range context.Elements {
+ if arg, ok := element.Clause.(*ArgClause); ok {
+ argElements[arg.name] = element
+ }
+ }
+
+ // Check required flags and set defaults.
+ for _, flag := range context.flags.long {
+ if flagElements[flag.name] == nil {
+ if err := flag.setDefault(); err != nil {
+ return err
+ }
+ }
+ }
+
+ for _, arg := range context.arguments.args {
+ if argElements[arg.name] == nil {
+ if err := arg.setDefault(); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (a *Application) validateRequired(context *ParseContext) error {
+ flagElements := map[string]*ParseElement{}
+ for _, element := range context.Elements {
+ if flag, ok := element.Clause.(*FlagClause); ok {
+ flagElements[flag.name] = element
+ }
+ }
+
+ argElements := map[string]*ParseElement{}
+ for _, element := range context.Elements {
+ if arg, ok := element.Clause.(*ArgClause); ok {
+ argElements[arg.name] = element
+ }
+ }
+
+ // Check required flags and set defaults.
+ var missingFlags []string
+ for _, flag := range context.flags.long {
+ if flagElements[flag.name] == nil {
+ // Check required flags were provided.
+ if flag.needsValue() {
+ missingFlags = append(missingFlags, fmt.Sprintf("'--%s'", flag.name))
+ }
+ }
+ }
+ if len(missingFlags) != 0 {
+ return fmt.Errorf("required flag(s) %s not provided", strings.Join(missingFlags, ", "))
+ }
+
+ for _, arg := range context.arguments.args {
+ if argElements[arg.name] == nil {
+ if arg.needsValue() {
+ return fmt.Errorf("required argument '%s' not provided", arg.name)
+ }
+ }
+ }
+ return nil
+}
+
+func (a *Application) setValues(context *ParseContext) (selected []string, err error) {
+ // Set all arg and flag values.
+ var (
+ lastCmd *CmdClause
+ flagSet = map[string]struct{}{}
+ )
+ for _, element := range context.Elements {
+ switch clause := element.Clause.(type) {
+ case *FlagClause:
+ if _, ok := flagSet[clause.name]; ok {
+ if v, ok := clause.value.(repeatableFlag); !ok || !v.IsCumulative() {
+ return nil, fmt.Errorf("flag '%s' cannot be repeated", clause.name)
+ }
+ }
+ if err = clause.value.Set(*element.Value); err != nil {
+ return
+ }
+ flagSet[clause.name] = struct{}{}
+
+ case *ArgClause:
+ if err = clause.value.Set(*element.Value); err != nil {
+ return
+ }
+
+ case *CmdClause:
+ selected = append(selected, clause.name)
+ lastCmd = clause
+ }
+ }
+
+ if lastCmd != nil && len(lastCmd.commands) > 0 {
+ return nil, fmt.Errorf("must select a subcommand of '%s'", lastCmd.FullCommand())
+ }
+
+ return
+}
+
+func (a *Application) applyValidators(context *ParseContext) (err error) {
+ // Call command validation functions.
+ for _, element := range context.Elements {
+ if cmd, ok := element.Clause.(*CmdClause); ok && cmd.validator != nil {
+ if err = cmd.validator(cmd); err != nil {
+ return err
+ }
+ }
+ }
+
+ if a.validator != nil {
+ err = a.validator(a)
+ }
+ return err
+}
+
+func (a *Application) applyPreActions(context *ParseContext, dispatch bool) error {
+ if err := a.actionMixin.applyPreActions(context); err != nil {
+ return err
+ }
+ // Dispatch to actions.
+ if dispatch {
+ for _, element := range context.Elements {
+ if applier, ok := element.Clause.(actionApplier); ok {
+ if err := applier.applyPreActions(context); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ return nil
+}
+
+func (a *Application) applyActions(context *ParseContext) error {
+ if err := a.actionMixin.applyActions(context); err != nil {
+ return err
+ }
+ // Dispatch to actions.
+ for _, element := range context.Elements {
+ if applier, ok := element.Clause.(actionApplier); ok {
+ if err := applier.applyActions(context); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Errorf prints an error message to w in the format ": error: ".
+func (a *Application) Errorf(format string, args ...interface{}) {
+ fmt.Fprintf(a.errorWriter, a.Name+": error: "+format+"\n", args...)
+}
+
+// Fatalf writes a formatted error to w then terminates with exit status 1.
+func (a *Application) Fatalf(format string, args ...interface{}) {
+ a.Errorf(format, args...)
+ a.terminate(1)
+}
+
+// FatalUsage prints an error message followed by usage information, then
+// exits with a non-zero status.
+func (a *Application) FatalUsage(format string, args ...interface{}) {
+ a.Errorf(format, args...)
+ // Force usage to go to error output.
+ a.usageWriter = a.errorWriter
+ a.Usage([]string{})
+ a.terminate(1)
+}
+
+// FatalUsageContext writes a printf formatted error message to w, then usage
+// information for the given ParseContext, before exiting.
+func (a *Application) FatalUsageContext(context *ParseContext, format string, args ...interface{}) {
+ a.Errorf(format, args...)
+ if err := a.UsageForContext(context); err != nil {
+ panic(err)
+ }
+ a.terminate(1)
+}
+
+// FatalIfError prints an error and exits if err is not nil. The error is printed
+// with the given formatted string, if any.
+func (a *Application) FatalIfError(err error, format string, args ...interface{}) {
+ if err != nil {
+ prefix := ""
+ if format != "" {
+ prefix = fmt.Sprintf(format, args...) + ": "
+ }
+ a.Errorf(prefix+"%s", err)
+ a.terminate(1)
+ }
+}
+
+func (a *Application) completionOptions(context *ParseContext) []string {
+ args := context.rawArgs
+
+ var (
+ currArg string
+ prevArg string
+ target cmdMixin
+ )
+
+ numArgs := len(args)
+ if numArgs > 1 {
+ args = args[1:]
+ currArg = args[len(args)-1]
+ }
+ if numArgs > 2 {
+ prevArg = args[len(args)-2]
+ }
+
+ target = a.cmdMixin
+ if context.SelectedCommand != nil {
+ // A subcommand was in use. We will use it as the target
+ target = context.SelectedCommand.cmdMixin
+ }
+
+ if (currArg != "" && strings.HasPrefix(currArg, "--")) || strings.HasPrefix(prevArg, "--") {
+ if context.argsOnly {
+ return nil
+ }
+
+ // Perform completion for A flag. The last/current argument started with "-"
+ var (
+ flagName string // The name of a flag if given (could be half complete)
+ flagValue string // The value assigned to a flag (if given) (could be half complete)
+ )
+
+ if strings.HasPrefix(prevArg, "--") && !strings.HasPrefix(currArg, "--") {
+ // Matches: ./myApp --flag value
+ // Wont Match: ./myApp --flag --
+ flagName = prevArg[2:] // Strip the "--"
+ flagValue = currArg
+ } else if strings.HasPrefix(currArg, "--") {
+ // Matches: ./myApp --flag --
+ // Matches: ./myApp --flag somevalue --
+ // Matches: ./myApp --
+ flagName = currArg[2:] // Strip the "--"
+ }
+
+ options, flagMatched, valueMatched := target.FlagCompletion(flagName, flagValue)
+ if valueMatched {
+ // Value Matched. Show cmdCompletions
+ return target.CmdCompletion(context)
+ }
+
+ // Add top level flags if we're not at the top level and no match was found.
+ if context.SelectedCommand != nil && !flagMatched {
+ topOptions, topFlagMatched, topValueMatched := a.FlagCompletion(flagName, flagValue)
+ if topValueMatched {
+ // Value Matched. Back to cmdCompletions
+ return target.CmdCompletion(context)
+ }
+
+ if topFlagMatched {
+				// Top level had a flag which matched the input. Return its options.
+ options = topOptions
+ } else {
+ // Add top level flags
+ options = append(options, topOptions...)
+ }
+ }
+ return options
+ }
+
+ // Perform completion for sub commands and arguments.
+ return target.CmdCompletion(context)
+}
+
+func (a *Application) generateBashCompletion(context *ParseContext) {
+ options := a.completionOptions(context)
+ fmt.Printf("%s", strings.Join(options, "\n"))
+}
+
+func envarTransform(name string) string {
+ return strings.ToUpper(envarTransformRegexp.ReplaceAllString(name, "_"))
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/args.go b/vendor/github.com/alecthomas/kingpin/v2/args.go
new file mode 100644
index 000000000..54e410719
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/args.go
@@ -0,0 +1,205 @@
+package kingpin
+
+import (
+ "fmt"
+)
+
+type argGroup struct {
+ args []*ArgClause
+}
+
+func newArgGroup() *argGroup {
+ return &argGroup{}
+}
+
+func (a *argGroup) have() bool {
+ return len(a.args) > 0
+}
+
+// GetArg gets an argument definition.
+//
+// This allows existing arguments to be modified after definition but before parsing. Useful for
+// modular applications.
+func (a *argGroup) GetArg(name string) *ArgClause {
+ for _, arg := range a.args {
+ if arg.name == name {
+ return arg
+ }
+ }
+ return nil
+}
+
+func (a *argGroup) Arg(name, help string) *ArgClause {
+ arg := newArg(name, help)
+ a.args = append(a.args, arg)
+ return arg
+}
+
+func (a *argGroup) init() error {
+ required := 0
+ seen := map[string]struct{}{}
+ previousArgMustBeLast := false
+ for i, arg := range a.args {
+ if previousArgMustBeLast {
+ return fmt.Errorf("Args() can't be followed by another argument '%s'", arg.name)
+ }
+ if arg.consumesRemainder() {
+ previousArgMustBeLast = true
+ }
+ if _, ok := seen[arg.name]; ok {
+ return fmt.Errorf("duplicate argument '%s'", arg.name)
+ }
+ seen[arg.name] = struct{}{}
+ if arg.required && required != i {
+ return fmt.Errorf("required arguments found after non-required")
+ }
+ if arg.required {
+ required++
+ }
+ if err := arg.init(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type ArgClause struct {
+ actionMixin
+ parserMixin
+ completionsMixin
+ envarMixin
+ name string
+ help string
+ defaultValues []string
+ placeholder string
+ hidden bool
+ required bool
+}
+
+func newArg(name, help string) *ArgClause {
+ a := &ArgClause{
+ name: name,
+ help: help,
+ }
+ return a
+}
+
+func (a *ArgClause) setDefault() error {
+ if a.HasEnvarValue() {
+ if v, ok := a.value.(remainderArg); !ok || !v.IsCumulative() {
+ // Use the value as-is
+ return a.value.Set(a.GetEnvarValue())
+ }
+ for _, value := range a.GetSplitEnvarValue() {
+ if err := a.value.Set(value); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ if len(a.defaultValues) > 0 {
+ for _, defaultValue := range a.defaultValues {
+ if err := a.value.Set(defaultValue); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ return nil
+}
+
+func (a *ArgClause) needsValue() bool {
+ haveDefault := len(a.defaultValues) > 0
+ return a.required && !(haveDefault || a.HasEnvarValue())
+}
+
+func (a *ArgClause) consumesRemainder() bool {
+ if r, ok := a.value.(remainderArg); ok {
+ return r.IsCumulative()
+ }
+ return false
+}
+
+// Hidden hides the argument from usage but still allows it to be used.
+func (a *ArgClause) Hidden() *ArgClause {
+ a.hidden = true
+ return a
+}
+
+// PlaceHolder sets the place-holder string used for arg values in the help. The
+// default behaviour is to use the arg name between < > brackets.
+func (a *ArgClause) PlaceHolder(value string) *ArgClause {
+ a.placeholder = value
+ return a
+}
+
+// Required arguments must be input by the user. They can not have a Default() value provided.
+func (a *ArgClause) Required() *ArgClause {
+ a.required = true
+ return a
+}
+
+// Default values for this argument. They *must* be parseable by the value of the argument.
+func (a *ArgClause) Default(values ...string) *ArgClause {
+ a.defaultValues = values
+ return a
+}
+
+// Envar overrides the default value(s) for a flag from an environment variable,
+// if it is set. Several default values can be provided by using new lines to
+// separate them.
+func (a *ArgClause) Envar(name string) *ArgClause {
+ a.envar = name
+ a.noEnvar = false
+ return a
+}
+
+// NoEnvar forces environment variable defaults to be disabled for this flag.
+// Most useful in conjunction with app.DefaultEnvars().
+func (a *ArgClause) NoEnvar() *ArgClause {
+ a.envar = ""
+ a.noEnvar = true
+ return a
+}
+
+func (a *ArgClause) Action(action Action) *ArgClause {
+ a.addAction(action)
+ return a
+}
+
+func (a *ArgClause) PreAction(action Action) *ArgClause {
+ a.addPreAction(action)
+ return a
+}
+
+// HintAction registers a HintAction (function) for the arg to provide completions
+func (a *ArgClause) HintAction(action HintAction) *ArgClause {
+ a.addHintAction(action)
+ return a
+}
+
+// HintOptions registers any number of options for the flag to provide completions
+func (a *ArgClause) HintOptions(options ...string) *ArgClause {
+ a.addHintAction(func() []string {
+ return options
+ })
+ return a
+}
+
+// Help sets the help message.
+func (a *ArgClause) Help(help string) *ArgClause {
+ a.help = help
+ return a
+}
+
+func (a *ArgClause) init() error {
+ if a.required && len(a.defaultValues) > 0 {
+ return fmt.Errorf("required argument '%s' with unusable default value", a.name)
+ }
+ if a.value == nil {
+ return fmt.Errorf("no parser defined for arg '%s'", a.name)
+ }
+ return nil
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/cmd.go b/vendor/github.com/alecthomas/kingpin/v2/cmd.go
new file mode 100644
index 000000000..cd7e6120b
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/cmd.go
@@ -0,0 +1,325 @@
+package kingpin
+
+import (
+ "fmt"
+ "strings"
+)
+
+type cmdMixin struct {
+ *flagGroup
+ *argGroup
+ *cmdGroup
+ actionMixin
+}
+
+// CmdCompletion returns completion options for arguments, if that's where
+// parsing left off, or commands if there aren't any unsatisfied args.
+func (c *cmdMixin) CmdCompletion(context *ParseContext) []string {
+ var options []string
+
+ // Count args already satisfied - we won't complete those, and add any
+ // default commands' alternatives, since they weren't listed explicitly
+ // and the user may want to explicitly list something else.
+ argsSatisfied := 0
+ allSatisfied := false
+ElementLoop:
+ for _, el := range context.Elements {
+ switch clause := el.Clause.(type) {
+ case *ArgClause:
+ // Each new element should reset the previous state
+ allSatisfied = false
+ options = nil
+
+ if el.Value != nil && *el.Value != "" {
+ // Get the list of valid options for the last argument
+ validOptions := c.argGroup.args[argsSatisfied].resolveCompletions()
+ if len(validOptions) == 0 {
+ // If there are no options for this argument,
+ // mark is as allSatisfied as we can't suggest anything
+ if !clause.consumesRemainder() {
+ argsSatisfied++
+ allSatisfied = true
+ }
+ continue ElementLoop
+ }
+
+ for _, opt := range validOptions {
+ if opt == *el.Value {
+ // We have an exact match
+ // We don't need to suggest any option
+ if !clause.consumesRemainder() {
+ argsSatisfied++
+ }
+ continue ElementLoop
+ }
+ if strings.HasPrefix(opt, *el.Value) {
+ // If the option match the partially entered argument, add it to the list
+ options = append(options, opt)
+ }
+ }
+ // Avoid further completion as we have done everything we could
+ if !clause.consumesRemainder() {
+ argsSatisfied++
+ allSatisfied = true
+ }
+ }
+ case *CmdClause:
+ options = append(options, clause.completionAlts...)
+ default:
+ }
+ }
+
+ if argsSatisfied < len(c.argGroup.args) && !allSatisfied {
+ // Since not all args have been satisfied, show options for the current one
+ options = append(options, c.argGroup.args[argsSatisfied].resolveCompletions()...)
+ } else {
+ // If all args are satisfied, then go back to completing commands
+ for _, cmd := range c.cmdGroup.commandOrder {
+ if !cmd.hidden {
+ options = append(options, cmd.name)
+ }
+ }
+ }
+
+ return options
+}
+
+func (c *cmdMixin) FlagCompletion(flagName string, flagValue string) (choices []string, flagMatch bool, optionMatch bool) {
+ // Check if flagName matches a known flag.
+ // If it does, show the options for the flag
+ // Otherwise, show all flags
+
+ options := []string{}
+
+ for _, flag := range c.flagGroup.flagOrder {
+ // Loop through each flag and determine if a match exists
+ if flag.name == flagName {
+ // User typed entire flag. Need to look for flag options.
+ options = flag.resolveCompletions()
+ if len(options) == 0 {
+ // No Options to Choose From, Assume Match.
+ return options, true, true
+ }
+
+ // Loop options to find if the user specified value matches
+ isPrefix := false
+ matched := false
+
+ for _, opt := range options {
+ if flagValue == opt {
+ matched = true
+ } else if strings.HasPrefix(opt, flagValue) {
+ isPrefix = true
+ }
+ }
+
+ // Matched Flag Directly
+ // Flag Value Not Prefixed, and Matched Directly
+ return options, true, !isPrefix && matched
+ }
+
+ if !flag.hidden {
+ options = append(options, "--"+flag.name)
+ }
+ }
+ // No Flag directly matched.
+ return options, false, false
+
+}
+
+type cmdGroup struct {
+ app *Application
+ parent *CmdClause
+ commands map[string]*CmdClause
+ commandOrder []*CmdClause
+}
+
+func (c *cmdGroup) defaultSubcommand() *CmdClause {
+ for _, cmd := range c.commandOrder {
+ if cmd.isDefault {
+ return cmd
+ }
+ }
+ return nil
+}
+
+func (c *cmdGroup) cmdNames() []string {
+ names := make([]string, 0, len(c.commandOrder))
+ for _, cmd := range c.commandOrder {
+ names = append(names, cmd.name)
+ }
+ return names
+}
+
+// GetArg gets a command definition.
+//
+// This allows existing commands to be modified after definition but before parsing. Useful for
+// modular applications.
+func (c *cmdGroup) GetCommand(name string) *CmdClause {
+ return c.commands[name]
+}
+
+func newCmdGroup(app *Application) *cmdGroup {
+ return &cmdGroup{
+ app: app,
+ commands: make(map[string]*CmdClause),
+ }
+}
+
+func (c *cmdGroup) flattenedCommands() (out []*CmdClause) {
+ for _, cmd := range c.commandOrder {
+ if len(cmd.commands) == 0 {
+ out = append(out, cmd)
+ }
+ out = append(out, cmd.flattenedCommands()...)
+ }
+ return
+}
+
+func (c *cmdGroup) addCommand(name, help string) *CmdClause {
+ cmd := newCommand(c.app, name, help)
+ c.commands[name] = cmd
+ c.commandOrder = append(c.commandOrder, cmd)
+ return cmd
+}
+
+func (c *cmdGroup) init() error {
+ seen := map[string]bool{}
+ if c.defaultSubcommand() != nil && !c.have() {
+ return fmt.Errorf("default subcommand %q provided but no subcommands defined", c.defaultSubcommand().name)
+ }
+ defaults := []string{}
+ for _, cmd := range c.commandOrder {
+ if cmd.isDefault {
+ defaults = append(defaults, cmd.name)
+ }
+ if seen[cmd.name] {
+ return fmt.Errorf("duplicate command %q", cmd.name)
+ }
+ seen[cmd.name] = true
+ for _, alias := range cmd.aliases {
+ if seen[alias] {
+ return fmt.Errorf("alias duplicates existing command %q", alias)
+ }
+ c.commands[alias] = cmd
+ }
+ if err := cmd.init(); err != nil {
+ return err
+ }
+ }
+ if len(defaults) > 1 {
+ return fmt.Errorf("more than one default subcommand exists: %s", strings.Join(defaults, ", "))
+ }
+ return nil
+}
+
+func (c *cmdGroup) have() bool {
+ return len(c.commands) > 0
+}
+
+type CmdClauseValidator func(*CmdClause) error
+
+// A CmdClause is a single top-level command. It encapsulates a set of flags
+// and either subcommands or positional arguments.
+type CmdClause struct {
+ cmdMixin
+ app *Application
+ name string
+ aliases []string
+ help string
+ helpLong string
+ isDefault bool
+ validator CmdClauseValidator
+ hidden bool
+ completionAlts []string
+}
+
+func newCommand(app *Application, name, help string) *CmdClause {
+ c := &CmdClause{
+ app: app,
+ name: name,
+ help: help,
+ }
+ c.flagGroup = newFlagGroup()
+ c.argGroup = newArgGroup()
+ c.cmdGroup = newCmdGroup(app)
+ return c
+}
+
+// Add an Alias for this command.
+func (c *CmdClause) Alias(name string) *CmdClause {
+ c.aliases = append(c.aliases, name)
+ return c
+}
+
+// Validate sets a validation function to run when parsing.
+func (c *CmdClause) Validate(validator CmdClauseValidator) *CmdClause {
+ c.validator = validator
+ return c
+}
+
+func (c *CmdClause) FullCommand() string {
+ out := []string{c.name}
+ for p := c.parent; p != nil; p = p.parent {
+ out = append([]string{p.name}, out...)
+ }
+ return strings.Join(out, " ")
+}
+
+// Command adds a new sub-command.
+func (c *CmdClause) Command(name, help string) *CmdClause {
+ cmd := c.addCommand(name, help)
+ cmd.parent = c
+ return cmd
+}
+
+// Default makes this command the default if commands don't match.
+func (c *CmdClause) Default() *CmdClause {
+ c.isDefault = true
+ return c
+}
+
+func (c *CmdClause) Action(action Action) *CmdClause {
+ c.addAction(action)
+ return c
+}
+
+func (c *CmdClause) PreAction(action Action) *CmdClause {
+ c.addPreAction(action)
+ return c
+}
+
+// Help sets the help message.
+func (c *CmdClause) Help(help string) *CmdClause {
+ c.help = help
+ return c
+}
+
+func (c *CmdClause) init() error {
+ if err := c.flagGroup.init(c.app.defaultEnvarPrefix()); err != nil {
+ return err
+ }
+ if c.argGroup.have() && c.cmdGroup.have() {
+ return fmt.Errorf("can't mix Arg()s with Command()s")
+ }
+ if err := c.argGroup.init(); err != nil {
+ return err
+ }
+ if err := c.cmdGroup.init(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *CmdClause) Hidden() *CmdClause {
+ c.hidden = true
+ return c
+}
+
+// HelpLong adds a long help text, which can be used in usage templates.
+// For example, to use a longer help text in the command-specific help
+// than in the apps root help.
+func (c *CmdClause) HelpLong(help string) *CmdClause {
+ c.helpLong = help
+ return c
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/completions.go b/vendor/github.com/alecthomas/kingpin/v2/completions.go
new file mode 100644
index 000000000..6e7b409fe
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/completions.go
@@ -0,0 +1,33 @@
+package kingpin
+
+// HintAction is a function type who is expected to return a slice of possible
+// command line arguments.
+type HintAction func() []string
+type completionsMixin struct {
+ hintActions []HintAction
+ builtinHintActions []HintAction
+}
+
+func (a *completionsMixin) addHintAction(action HintAction) {
+ a.hintActions = append(a.hintActions, action)
+}
+
+// Allow adding of HintActions which are added internally, ie, EnumVar
+func (a *completionsMixin) addHintActionBuiltin(action HintAction) {
+ a.builtinHintActions = append(a.builtinHintActions, action)
+}
+
+func (a *completionsMixin) resolveCompletions() []string {
+ var hints []string
+
+ options := a.builtinHintActions
+ if len(a.hintActions) > 0 {
+ // User specified their own hintActions. Use those instead.
+ options = a.hintActions
+ }
+
+ for _, hintAction := range options {
+ hints = append(hints, hintAction()...)
+ }
+ return hints
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/doc.go b/vendor/github.com/alecthomas/kingpin/v2/doc.go
new file mode 100644
index 000000000..8a72729d7
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/doc.go
@@ -0,0 +1,68 @@
+// Package kingpin provides command line interfaces like this:
+//
+// $ chat
+// usage: chat [<flags>] <command> [<args> ...]
+//
+// Flags:
+// --debug enable debug mode
+// --help Show help.
+// --server=127.0.0.1 server address
+//
+// Commands:
+// help [<command>]
+// Show help for a command.
+//
+// post [<flags>] <channel> [<text>]
+// Post a message to a channel.
+//
+// register <nick> <name>
+// Register a new user.
+//
+// $ chat help post
+// usage: chat [<flags>] post [<flags>] <channel> [<text>]
+//
+// Post a message to a channel.
+//
+// Flags:
+// --image=IMAGE image to post
+//
+// Args:
+// <channel> channel to post to
+// [<text>] text to post
+// $ chat post --image=~/Downloads/owls.jpg pics
+//
+// From code like this:
+//
+// package main
+//
+// import "github.com/alecthomas/kingpin/v2"
+//
+// var (
+// debug = kingpin.Flag("debug", "enable debug mode").Default("false").Bool()
+// serverIP = kingpin.Flag("server", "server address").Default("127.0.0.1").IP()
+//
+// register = kingpin.Command("register", "Register a new user.")
+// registerNick = register.Arg("nick", "nickname for user").Required().String()
+// registerName = register.Arg("name", "name of user").Required().String()
+//
+// post = kingpin.Command("post", "Post a message to a channel.")
+// postImage = post.Flag("image", "image to post").ExistingFile()
+// postChannel = post.Arg("channel", "channel to post to").Required().String()
+// postText = post.Arg("text", "text to post").String()
+// )
+//
+// func main() {
+// switch kingpin.Parse() {
+// // Register user
+// case "register":
+// println(*registerNick)
+//
+// // Post message
+// case "post":
+// if *postImage != nil {
+// }
+// if *postText != "" {
+// }
+// }
+// }
+package kingpin
diff --git a/vendor/github.com/alecthomas/kingpin/v2/envar.go b/vendor/github.com/alecthomas/kingpin/v2/envar.go
new file mode 100644
index 000000000..44e16de3b
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/envar.go
@@ -0,0 +1,40 @@
+package kingpin
+
+import (
+ "os"
+ "regexp"
+)
+
+var (
+ envVarValuesSeparator = "\r?\n"
+ envVarValuesTrimmer = regexp.MustCompile(envVarValuesSeparator + "$")
+ envVarValuesSplitter = regexp.MustCompile(envVarValuesSeparator)
+)
+
+type envarMixin struct {
+ envar string
+ noEnvar bool
+}
+
+func (e *envarMixin) HasEnvarValue() bool {
+ return e.GetEnvarValue() != ""
+}
+
+func (e *envarMixin) GetEnvarValue() string {
+ if e.noEnvar || e.envar == "" {
+ return ""
+ }
+ return os.Getenv(e.envar)
+}
+
+func (e *envarMixin) GetSplitEnvarValue() []string {
+ envarValue := e.GetEnvarValue()
+ if envarValue == "" {
+ return []string{}
+ }
+
+ // Split by new line to extract multiple values, if any.
+ trimmed := envVarValuesTrimmer.ReplaceAllString(envarValue, "")
+
+ return envVarValuesSplitter.Split(trimmed, -1)
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/flags.go b/vendor/github.com/alecthomas/kingpin/v2/flags.go
new file mode 100644
index 000000000..2b2938b48
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/flags.go
@@ -0,0 +1,332 @@
+package kingpin
+
+import (
+ "fmt"
+ "strings"
+)
+
+type flagGroup struct {
+ short map[string]*FlagClause
+ long map[string]*FlagClause
+ flagOrder []*FlagClause
+}
+
+func newFlagGroup() *flagGroup {
+ return &flagGroup{
+ short: map[string]*FlagClause{},
+ long: map[string]*FlagClause{},
+ }
+}
+
+// GetFlag gets a flag definition.
+//
+// This allows existing flags to be modified after definition but before parsing. Useful for
+// modular applications.
+func (f *flagGroup) GetFlag(name string) *FlagClause {
+ return f.long[name]
+}
+
+// Flag defines a new flag with the given long name and help.
+func (f *flagGroup) Flag(name, help string) *FlagClause {
+ flag := newFlag(name, help)
+ f.long[name] = flag
+ f.flagOrder = append(f.flagOrder, flag)
+ return flag
+}
+
+func (f *flagGroup) init(defaultEnvarPrefix string) error {
+ if err := f.checkDuplicates(); err != nil {
+ return err
+ }
+ for _, flag := range f.long {
+ if defaultEnvarPrefix != "" && !flag.noEnvar && flag.envar == "" {
+ flag.envar = envarTransform(defaultEnvarPrefix + "_" + flag.name)
+ }
+ if err := flag.init(); err != nil {
+ return err
+ }
+ if flag.shorthand != 0 {
+ f.short[string(flag.shorthand)] = flag
+ }
+ }
+ return nil
+}
+
+func (f *flagGroup) checkDuplicates() error {
+ seenShort := map[rune]bool{}
+ seenLong := map[string]bool{}
+ for _, flag := range f.flagOrder {
+ if flag.shorthand != 0 {
+ if _, ok := seenShort[flag.shorthand]; ok {
+ return fmt.Errorf("duplicate short flag -%c", flag.shorthand)
+ }
+ seenShort[flag.shorthand] = true
+ }
+ if _, ok := seenLong[flag.name]; ok {
+ return fmt.Errorf("duplicate long flag --%s", flag.name)
+ }
+ seenLong[flag.name] = true
+ }
+ return nil
+}
+
+func (f *flagGroup) parse(context *ParseContext) (*FlagClause, error) {
+ var token *Token
+
+loop:
+ for {
+ token = context.Peek()
+ switch token.Type {
+ case TokenEOL:
+ break loop
+
+ case TokenLong, TokenShort:
+ flagToken := token
+ defaultValue := ""
+ var flag *FlagClause
+ var ok bool
+ invert := false
+
+ name := token.Value
+ if token.Type == TokenLong {
+ flag, ok = f.long[name]
+ if !ok {
+ if strings.HasPrefix(name, "no-") {
+ name = name[3:]
+ invert = true
+ }
+ flag, ok = f.long[name]
+ }
+ if !ok {
+ return nil, fmt.Errorf("unknown long flag '%s'", flagToken)
+ }
+ } else {
+ flag, ok = f.short[name]
+ if !ok {
+ return nil, fmt.Errorf("unknown short flag '%s'", flagToken)
+ }
+ }
+
+ context.Next()
+
+ flag.isSetByUser()
+
+ fb, ok := flag.value.(boolFlag)
+ if ok && fb.IsBoolFlag() {
+ if invert {
+ defaultValue = "false"
+ } else {
+ defaultValue = "true"
+ }
+ } else {
+ if invert {
+ context.Push(token)
+ return nil, fmt.Errorf("unknown long flag '%s'", flagToken)
+ }
+ token = context.Peek()
+ if token.Type != TokenArg {
+ context.Push(token)
+ return nil, fmt.Errorf("expected argument for flag '%s'", flagToken)
+ }
+ context.Next()
+ defaultValue = token.Value
+ }
+
+ context.matchedFlag(flag, defaultValue)
+ return flag, nil
+
+ default:
+ break loop
+ }
+ }
+ return nil, nil
+}
+
+// FlagClause is a fluid interface used to build flags.
+type FlagClause struct {
+ parserMixin
+ actionMixin
+ completionsMixin
+ envarMixin
+ name string
+ shorthand rune
+ help string
+ defaultValues []string
+ placeholder string
+ hidden bool
+ setByUser *bool
+}
+
+func newFlag(name, help string) *FlagClause {
+ f := &FlagClause{
+ name: name,
+ help: help,
+ }
+ return f
+}
+
+func (f *FlagClause) setDefault() error {
+ if f.HasEnvarValue() {
+ if v, ok := f.value.(repeatableFlag); !ok || !v.IsCumulative() {
+ // Use the value as-is
+ return f.value.Set(f.GetEnvarValue())
+ } else {
+ for _, value := range f.GetSplitEnvarValue() {
+ if err := f.value.Set(value); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ }
+
+ if len(f.defaultValues) > 0 {
+ for _, defaultValue := range f.defaultValues {
+ if err := f.value.Set(defaultValue); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ return nil
+}
+
+func (f *FlagClause) isSetByUser() {
+ if f.setByUser != nil {
+ *f.setByUser = true
+ }
+}
+
+func (f *FlagClause) needsValue() bool {
+ haveDefault := len(f.defaultValues) > 0
+ return f.required && !(haveDefault || f.HasEnvarValue())
+}
+
+func (f *FlagClause) init() error {
+ if f.required && len(f.defaultValues) > 0 {
+ return fmt.Errorf("required flag '--%s' with default value that will never be used", f.name)
+ }
+ if f.value == nil {
+ return fmt.Errorf("no type defined for --%s (eg. .String())", f.name)
+ }
+ if v, ok := f.value.(repeatableFlag); (!ok || !v.IsCumulative()) && len(f.defaultValues) > 1 {
+ return fmt.Errorf("invalid default for '--%s', expecting single value", f.name)
+ }
+ return nil
+}
+
+// Dispatch to the given function after the flag is parsed and validated.
+func (f *FlagClause) Action(action Action) *FlagClause {
+ f.addAction(action)
+ return f
+}
+
+func (f *FlagClause) PreAction(action Action) *FlagClause {
+ f.addPreAction(action)
+ return f
+}
+
+// HintAction registers a HintAction (function) for the flag to provide completions
+func (a *FlagClause) HintAction(action HintAction) *FlagClause {
+ a.addHintAction(action)
+ return a
+}
+
+// HintOptions registers any number of options for the flag to provide completions
+func (a *FlagClause) HintOptions(options ...string) *FlagClause {
+ a.addHintAction(func() []string {
+ return options
+ })
+ return a
+}
+
+func (a *FlagClause) EnumVar(target *string, options ...string) {
+ a.parserMixin.EnumVar(target, options...)
+ a.addHintActionBuiltin(func() []string {
+ return options
+ })
+}
+
+func (a *FlagClause) Enum(options ...string) (target *string) {
+ a.addHintActionBuiltin(func() []string {
+ return options
+ })
+ return a.parserMixin.Enum(options...)
+}
+
+// IsSetByUser let to know if the flag was set by the user
+func (f *FlagClause) IsSetByUser(setByUser *bool) *FlagClause {
+ if setByUser != nil {
+ *setByUser = false
+ }
+ f.setByUser = setByUser
+ return f
+}
+
+// Default values for this flag. They *must* be parseable by the value of the flag.
+func (f *FlagClause) Default(values ...string) *FlagClause {
+ f.defaultValues = values
+ return f
+}
+
+// DEPRECATED: Use Envar(name) instead.
+func (f *FlagClause) OverrideDefaultFromEnvar(envar string) *FlagClause {
+ return f.Envar(envar)
+}
+
+// Envar overrides the default value(s) for a flag from an environment variable,
+// if it is set. Several default values can be provided by using new lines to
+// separate them.
+func (f *FlagClause) Envar(name string) *FlagClause {
+ f.envar = name
+ f.noEnvar = false
+ return f
+}
+
+// NoEnvar forces environment variable defaults to be disabled for this flag.
+// Most useful in conjunction with app.DefaultEnvars().
+func (f *FlagClause) NoEnvar() *FlagClause {
+ f.envar = ""
+ f.noEnvar = true
+ return f
+}
+
+// PlaceHolder sets the place-holder string used for flag values in the help. The
+// default behaviour is to use the value provided by Default() if provided,
+// then fall back on the capitalized flag name.
+func (f *FlagClause) PlaceHolder(placeholder string) *FlagClause {
+ f.placeholder = placeholder
+ return f
+}
+
+// Hidden hides a flag from usage but still allows it to be used.
+func (f *FlagClause) Hidden() *FlagClause {
+ f.hidden = true
+ return f
+}
+
+// Required makes the flag required. You can not provide a Default() value to a Required() flag.
+func (f *FlagClause) Required() *FlagClause {
+ f.required = true
+ return f
+}
+
+// Short sets the short flag name.
+func (f *FlagClause) Short(name rune) *FlagClause {
+ f.shorthand = name
+ return f
+}
+
+// Help sets the help message.
+func (f *FlagClause) Help(help string) *FlagClause {
+ f.help = help
+ return f
+}
+
+// Bool makes this flag a boolean flag.
+func (f *FlagClause) Bool() (target *bool) {
+ target = new(bool)
+ f.SetValue(newBoolValue(target))
+ return
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/global.go b/vendor/github.com/alecthomas/kingpin/v2/global.go
new file mode 100644
index 000000000..4d073eabb
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/global.go
@@ -0,0 +1,96 @@
+package kingpin
+
+import (
+ "os"
+ "path/filepath"
+)
+
+var (
+ // CommandLine is the default Kingpin parser.
+ CommandLine = New(filepath.Base(os.Args[0]), "")
+ // Global help flag. Exposed for user customisation.
+ HelpFlag = CommandLine.HelpFlag
+ // Top-level help command. Exposed for user customisation. May be nil.
+ HelpCommand = CommandLine.HelpCommand
+ // Global version flag. Exposed for user customisation. May be nil.
+ VersionFlag = CommandLine.VersionFlag
+ // Whether to file expansion with '@' is enabled.
+ EnableFileExpansion = true
+)
+
+// Command adds a new command to the default parser.
+func Command(name, help string) *CmdClause {
+ return CommandLine.Command(name, help)
+}
+
+// Flag adds a new flag to the default parser.
+func Flag(name, help string) *FlagClause {
+ return CommandLine.Flag(name, help)
+}
+
+// Arg adds a new argument to the top-level of the default parser.
+func Arg(name, help string) *ArgClause {
+ return CommandLine.Arg(name, help)
+}
+
+// Parse and return the selected command. Will call the termination handler if
+// an error is encountered.
+func Parse() string {
+ selected := MustParse(CommandLine.Parse(os.Args[1:]))
+ if selected == "" && CommandLine.cmdGroup.have() {
+ Usage()
+ CommandLine.terminate(0)
+ }
+ return selected
+}
+
+// Errorf prints an error message to stderr.
+func Errorf(format string, args ...interface{}) {
+ CommandLine.Errorf(format, args...)
+}
+
+// Fatalf prints an error message to stderr and exits.
+func Fatalf(format string, args ...interface{}) {
+ CommandLine.Fatalf(format, args...)
+}
+
+// FatalIfError prints an error and exits if err is not nil. The error is printed
+// with the given prefix.
+func FatalIfError(err error, format string, args ...interface{}) {
+ CommandLine.FatalIfError(err, format, args...)
+}
+
+// FatalUsage prints an error message followed by usage information, then
+// exits with a non-zero status.
+func FatalUsage(format string, args ...interface{}) {
+ CommandLine.FatalUsage(format, args...)
+}
+
+// FatalUsageContext writes a printf formatted error message to stderr, then
+// usage information for the given ParseContext, before exiting.
+func FatalUsageContext(context *ParseContext, format string, args ...interface{}) {
+ CommandLine.FatalUsageContext(context, format, args...)
+}
+
+// Usage prints usage to stderr.
+func Usage() {
+ CommandLine.Usage(os.Args[1:])
+}
+
+// Set global usage template to use (defaults to DefaultUsageTemplate).
+func UsageTemplate(template string) *Application {
+ return CommandLine.UsageTemplate(template)
+}
+
+// MustParse can be used with app.Parse(args) to exit with an error if parsing fails.
+func MustParse(command string, err error) string {
+ if err != nil {
+ Fatalf("%s, try --help", err)
+ }
+ return command
+}
+
+// Version adds a flag for displaying the application version number.
+func Version(version string) *Application {
+ return CommandLine.Version(version)
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/guesswidth.go b/vendor/github.com/alecthomas/kingpin/v2/guesswidth.go
new file mode 100644
index 000000000..a269531c8
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/guesswidth.go
@@ -0,0 +1,9 @@
+// +build appengine !linux,!freebsd,!darwin,!dragonfly,!netbsd,!openbsd
+
+package kingpin
+
+import "io"
+
+func guessWidth(w io.Writer) int {
+ return 80
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/guesswidth_unix.go b/vendor/github.com/alecthomas/kingpin/v2/guesswidth_unix.go
new file mode 100644
index 000000000..ad8163f55
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/guesswidth_unix.go
@@ -0,0 +1,38 @@
+// +build !appengine,linux freebsd darwin dragonfly netbsd openbsd
+
+package kingpin
+
+import (
+ "io"
+ "os"
+ "strconv"
+ "syscall"
+ "unsafe"
+)
+
+func guessWidth(w io.Writer) int {
+ // check if COLUMNS env is set to comply with
+ // http://pubs.opengroup.org/onlinepubs/009604499/basedefs/xbd_chap08.html
+ colsStr := os.Getenv("COLUMNS")
+ if colsStr != "" {
+ if cols, err := strconv.Atoi(colsStr); err == nil {
+ return cols
+ }
+ }
+
+ if t, ok := w.(*os.File); ok {
+ fd := t.Fd()
+ var dimensions [4]uint16
+
+ if _, _, err := syscall.Syscall6(
+ syscall.SYS_IOCTL,
+ uintptr(fd),
+ uintptr(syscall.TIOCGWINSZ),
+ uintptr(unsafe.Pointer(&dimensions)),
+ 0, 0, 0,
+ ); err == 0 {
+ return int(dimensions[1])
+ }
+ }
+ return 80
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/model.go b/vendor/github.com/alecthomas/kingpin/v2/model.go
new file mode 100644
index 000000000..382616c5d
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/model.go
@@ -0,0 +1,273 @@
+package kingpin
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// Data model for Kingpin command-line structure.
+
+var (
+ ignoreInCount = map[string]bool{
+ "help": true,
+ "help-long": true,
+ "help-man": true,
+ "completion-bash": true,
+ "completion-script-bash": true,
+ "completion-script-zsh": true,
+ }
+)
+
+type FlagGroupModel struct {
+ Flags []*FlagModel
+}
+
+func (f *FlagGroupModel) FlagSummary() string {
+ out := []string{}
+ count := 0
+
+ for _, flag := range f.Flags {
+
+ if !ignoreInCount[flag.Name] {
+ count++
+ }
+
+ if flag.Required {
+ if flag.IsBoolFlag() {
+ out = append(out, fmt.Sprintf("--[no-]%s", flag.Name))
+ } else {
+ out = append(out, fmt.Sprintf("--%s=%s", flag.Name, flag.FormatPlaceHolder()))
+ }
+ }
+ }
+ if count != len(out) {
+ out = append(out, "[<flags>]")
+ }
+ return strings.Join(out, " ")
+}
+
+type FlagModel struct {
+ Name string
+ Help string
+ Short rune
+ Default []string
+ Envar string
+ PlaceHolder string
+ Required bool
+ Hidden bool
+ Value Value
+}
+
+func (f *FlagModel) String() string {
+ if f.Value == nil {
+ return ""
+ }
+ return f.Value.String()
+}
+
+func (f *FlagModel) IsBoolFlag() bool {
+ if fl, ok := f.Value.(boolFlag); ok {
+ return fl.IsBoolFlag()
+ }
+ return false
+}
+
+func (f *FlagModel) FormatPlaceHolder() string {
+ if f.PlaceHolder != "" {
+ return f.PlaceHolder
+ }
+ if len(f.Default) > 0 {
+ ellipsis := ""
+ if len(f.Default) > 1 {
+ ellipsis = "..."
+ }
+ if _, ok := f.Value.(*stringValue); ok {
+ return strconv.Quote(f.Default[0]) + ellipsis
+ }
+ return f.Default[0] + ellipsis
+ }
+ return strings.ToUpper(f.Name)
+}
+
+func (f *FlagModel) HelpWithEnvar() string {
+ if f.Envar == "" {
+ return f.Help
+ }
+ return fmt.Sprintf("%s ($%s)", f.Help, f.Envar)
+}
+
+type ArgGroupModel struct {
+ Args []*ArgModel
+}
+
+func (a *ArgGroupModel) ArgSummary() string {
+ depth := 0
+ out := []string{}
+ for _, arg := range a.Args {
+ var h string
+ if arg.PlaceHolder != "" {
+ h = arg.PlaceHolder
+ } else {
+ h = "<" + arg.Name + ">"
+ }
+ if !arg.Required {
+ h = "[" + h
+ depth++
+ }
+ out = append(out, h)
+ }
+ out[len(out)-1] = out[len(out)-1] + strings.Repeat("]", depth)
+ return strings.Join(out, " ")
+}
+
+func (a *ArgModel) HelpWithEnvar() string {
+ if a.Envar == "" {
+ return a.Help
+ }
+ return fmt.Sprintf("%s ($%s)", a.Help, a.Envar)
+}
+
+type ArgModel struct {
+ Name string
+ Help string
+ Default []string
+ Envar string
+ PlaceHolder string
+ Required bool
+ Hidden bool
+ Value Value
+}
+
+func (a *ArgModel) String() string {
+ if a.Value == nil {
+ return ""
+ }
+
+ return a.Value.String()
+}
+
+type CmdGroupModel struct {
+ Commands []*CmdModel
+}
+
+func (c *CmdGroupModel) FlattenedCommands() (out []*CmdModel) {
+ for _, cmd := range c.Commands {
+ if len(cmd.Commands) == 0 {
+ out = append(out, cmd)
+ }
+ out = append(out, cmd.FlattenedCommands()...)
+ }
+ return
+}
+
+type CmdModel struct {
+ Name string
+ Aliases []string
+ Help string
+ HelpLong string
+ FullCommand string
+ Depth int
+ Hidden bool
+ Default bool
+ *FlagGroupModel
+ *ArgGroupModel
+ *CmdGroupModel
+}
+
+func (c *CmdModel) String() string {
+ return c.FullCommand
+}
+
+type ApplicationModel struct {
+ Name string
+ Help string
+ Version string
+ Author string
+ *ArgGroupModel
+ *CmdGroupModel
+ *FlagGroupModel
+}
+
+func (a *Application) Model() *ApplicationModel {
+ return &ApplicationModel{
+ Name: a.Name,
+ Help: a.Help,
+ Version: a.version,
+ Author: a.author,
+ FlagGroupModel: a.flagGroup.Model(),
+ ArgGroupModel: a.argGroup.Model(),
+ CmdGroupModel: a.cmdGroup.Model(),
+ }
+}
+
+func (a *argGroup) Model() *ArgGroupModel {
+ m := &ArgGroupModel{}
+ for _, arg := range a.args {
+ m.Args = append(m.Args, arg.Model())
+ }
+ return m
+}
+
+func (a *ArgClause) Model() *ArgModel {
+ return &ArgModel{
+ Name: a.name,
+ Help: a.help,
+ Default: a.defaultValues,
+ Envar: a.envar,
+ PlaceHolder: a.placeholder,
+ Required: a.required,
+ Hidden: a.hidden,
+ Value: a.value,
+ }
+}
+
+func (f *flagGroup) Model() *FlagGroupModel {
+ m := &FlagGroupModel{}
+ for _, fl := range f.flagOrder {
+ m.Flags = append(m.Flags, fl.Model())
+ }
+ return m
+}
+
+func (f *FlagClause) Model() *FlagModel {
+ return &FlagModel{
+ Name: f.name,
+ Help: f.help,
+ Short: rune(f.shorthand),
+ Default: f.defaultValues,
+ Envar: f.envar,
+ PlaceHolder: f.placeholder,
+ Required: f.required,
+ Hidden: f.hidden,
+ Value: f.value,
+ }
+}
+
+func (c *cmdGroup) Model() *CmdGroupModel {
+ m := &CmdGroupModel{}
+ for _, cm := range c.commandOrder {
+ m.Commands = append(m.Commands, cm.Model())
+ }
+ return m
+}
+
+func (c *CmdClause) Model() *CmdModel {
+ depth := 0
+ for i := c; i != nil; i = i.parent {
+ depth++
+ }
+ return &CmdModel{
+ Name: c.name,
+ Aliases: c.aliases,
+ Help: c.help,
+ HelpLong: c.helpLong,
+ Depth: depth,
+ Hidden: c.hidden,
+ Default: c.isDefault,
+ FullCommand: c.FullCommand(),
+ FlagGroupModel: c.flagGroup.Model(),
+ ArgGroupModel: c.argGroup.Model(),
+ CmdGroupModel: c.cmdGroup.Model(),
+ }
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/parser.go b/vendor/github.com/alecthomas/kingpin/v2/parser.go
new file mode 100644
index 000000000..5f28c78dc
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/parser.go
@@ -0,0 +1,396 @@
+package kingpin
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strings"
+ "unicode/utf8"
+)
+
+type TokenType int
+
+// Token types.
+const (
+ TokenShort TokenType = iota
+ TokenLong
+ TokenArg
+ TokenError
+ TokenEOL
+)
+
+func (t TokenType) String() string {
+ switch t {
+ case TokenShort:
+ return "short flag"
+ case TokenLong:
+ return "long flag"
+ case TokenArg:
+ return "argument"
+ case TokenError:
+ return "error"
+ case TokenEOL:
+ return ""
+ }
+ return "?"
+}
+
+var (
+ TokenEOLMarker = Token{-1, TokenEOL, ""}
+)
+
+type Token struct {
+ Index int
+ Type TokenType
+ Value string
+}
+
+func (t *Token) Equal(o *Token) bool {
+ return t.Index == o.Index
+}
+
+func (t *Token) IsFlag() bool {
+ return t.Type == TokenShort || t.Type == TokenLong
+}
+
+func (t *Token) IsEOF() bool {
+ return t.Type == TokenEOL
+}
+
+func (t *Token) String() string {
+ switch t.Type {
+ case TokenShort:
+ return "-" + t.Value
+ case TokenLong:
+ return "--" + t.Value
+ case TokenArg:
+ return t.Value
+ case TokenError:
+ return "error: " + t.Value
+ case TokenEOL:
+ return ""
+ default:
+ panic("unhandled type")
+ }
+}
+
+// A union of possible elements in a parse stack.
+type ParseElement struct {
+ // Clause is either *CmdClause, *ArgClause or *FlagClause.
+ Clause interface{}
+ // Value is corresponding value for an ArgClause or FlagClause (if any).
+ Value *string
+}
+
+// ParseContext holds the current context of the parser. When passed to
+// Action() callbacks Elements will be fully populated with *FlagClause,
+// *ArgClause and *CmdClause values and their corresponding arguments (if
+// any).
+type ParseContext struct {
+ SelectedCommand *CmdClause
+ ignoreDefault bool
+ argsOnly bool
+ peek []*Token
+ argi int // Index of current command-line arg we're processing.
+ args []string
+ rawArgs []string
+ flags *flagGroup
+ arguments *argGroup
+ argumenti int // Cursor into arguments
+ // Flags, arguments and commands encountered and collected during parse.
+ Elements []*ParseElement
+}
+
+func (p *ParseContext) nextArg() *ArgClause {
+ if p.argumenti >= len(p.arguments.args) {
+ return nil
+ }
+ arg := p.arguments.args[p.argumenti]
+ if !arg.consumesRemainder() {
+ p.argumenti++
+ }
+ return arg
+}
+
+func (p *ParseContext) next() {
+ p.argi++
+ p.args = p.args[1:]
+}
+
+// HasTrailingArgs returns true if there are unparsed command-line arguments.
+// This can occur if the parser can not match remaining arguments.
+func (p *ParseContext) HasTrailingArgs() bool {
+ return len(p.args) > 0
+}
+
+func tokenize(args []string, ignoreDefault bool) *ParseContext {
+ return &ParseContext{
+ ignoreDefault: ignoreDefault,
+ args: args,
+ rawArgs: args,
+ flags: newFlagGroup(),
+ arguments: newArgGroup(),
+ }
+}
+
+func (p *ParseContext) mergeFlags(flags *flagGroup) {
+ for _, flag := range flags.flagOrder {
+ if flag.shorthand != 0 {
+ p.flags.short[string(flag.shorthand)] = flag
+ }
+ p.flags.long[flag.name] = flag
+ p.flags.flagOrder = append(p.flags.flagOrder, flag)
+ }
+}
+
+func (p *ParseContext) mergeArgs(args *argGroup) {
+ p.arguments.args = append(p.arguments.args, args.args...)
+}
+
+func (p *ParseContext) EOL() bool {
+ return p.Peek().Type == TokenEOL
+}
+
+func (p *ParseContext) Error() bool {
+ return p.Peek().Type == TokenError
+}
+
+// Next token in the parse context.
+func (p *ParseContext) Next() *Token {
+ if len(p.peek) > 0 {
+ return p.pop()
+ }
+
+ // End of tokens.
+ if len(p.args) == 0 {
+ return &Token{Index: p.argi, Type: TokenEOL}
+ }
+
+ if p.argi > 0 && p.argi <= len(p.rawArgs) && p.rawArgs[p.argi-1] == "--" {
+ // If the previous argument was a --, from now on only arguments are parsed.
+ p.argsOnly = true
+ }
+ arg := p.args[0]
+ p.next()
+
+ if p.argsOnly {
+ return &Token{p.argi, TokenArg, arg}
+ }
+
+ if arg == "--" {
+ return p.Next()
+ }
+
+ if strings.HasPrefix(arg, "--") {
+ parts := strings.SplitN(arg[2:], "=", 2)
+ token := &Token{p.argi, TokenLong, parts[0]}
+ if len(parts) == 2 {
+ p.Push(&Token{p.argi, TokenArg, parts[1]})
+ }
+ return token
+ }
+
+ if strings.HasPrefix(arg, "-") {
+ if len(arg) == 1 {
+ return &Token{Index: p.argi, Type: TokenArg}
+ }
+ shortRune, size := utf8.DecodeRuneInString(arg[1:])
+ short := string(shortRune)
+ flag, ok := p.flags.short[short]
+ // Not a known short flag, we'll just return it anyway.
+ if !ok {
+ } else if fb, ok := flag.value.(boolFlag); ok && fb.IsBoolFlag() {
+ // Bool short flag.
+ } else {
+ // Short flag with combined argument: -fARG
+ token := &Token{p.argi, TokenShort, short}
+ if len(arg) > size+1 {
+ p.Push(&Token{p.argi, TokenArg, arg[size+1:]})
+ }
+ return token
+ }
+
+ if len(arg) > size+1 {
+ p.args = append([]string{"-" + arg[size+1:]}, p.args...)
+ }
+ return &Token{p.argi, TokenShort, short}
+ } else if EnableFileExpansion && strings.HasPrefix(arg, "@") {
+ expanded, err := ExpandArgsFromFile(arg[1:])
+ if err != nil {
+ return &Token{p.argi, TokenError, err.Error()}
+ }
+ if len(p.args) == 0 {
+ p.args = expanded
+ } else {
+ p.args = append(expanded, p.args...)
+ }
+ return p.Next()
+ }
+
+ return &Token{p.argi, TokenArg, arg}
+}
+
+func (p *ParseContext) Peek() *Token {
+ if len(p.peek) == 0 {
+ return p.Push(p.Next())
+ }
+ return p.peek[len(p.peek)-1]
+}
+
+func (p *ParseContext) Push(token *Token) *Token {
+ p.peek = append(p.peek, token)
+ return token
+}
+
+func (p *ParseContext) pop() *Token {
+ end := len(p.peek) - 1
+ token := p.peek[end]
+ p.peek = p.peek[0:end]
+ return token
+}
+
+func (p *ParseContext) String() string {
+ return p.SelectedCommand.FullCommand()
+}
+
+func (p *ParseContext) matchedFlag(flag *FlagClause, value string) {
+ p.Elements = append(p.Elements, &ParseElement{Clause: flag, Value: &value})
+}
+
+func (p *ParseContext) matchedArg(arg *ArgClause, value string) {
+ p.Elements = append(p.Elements, &ParseElement{Clause: arg, Value: &value})
+}
+
+func (p *ParseContext) matchedCmd(cmd *CmdClause) {
+ p.Elements = append(p.Elements, &ParseElement{Clause: cmd})
+ p.mergeFlags(cmd.flagGroup)
+ p.mergeArgs(cmd.argGroup)
+ p.SelectedCommand = cmd
+}
+
+// Expand arguments from a file. Lines starting with # will be treated as comments.
+func ExpandArgsFromFile(filename string) (out []string, err error) {
+ if filename == "" {
+ return nil, fmt.Errorf("expected @ file to expand arguments from")
+ }
+ r, err := os.Open(filename)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open arguments file %q: %s", filename, err)
+ }
+ defer r.Close()
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "#") || strings.TrimSpace(line) == "" {
+ continue
+ }
+ out = append(out, line)
+ }
+ err = scanner.Err()
+ if err != nil {
+ return nil, fmt.Errorf("failed to read arguments from %q: %s", filename, err)
+ }
+ return
+}
+
+func parse(context *ParseContext, app *Application) (err error) {
+ context.mergeFlags(app.flagGroup)
+ context.mergeArgs(app.argGroup)
+
+ cmds := app.cmdGroup
+ ignoreDefault := context.ignoreDefault
+
+loop:
+ for !context.EOL() && !context.Error() {
+ token := context.Peek()
+
+ switch token.Type {
+ case TokenLong, TokenShort:
+ if flag, err := context.flags.parse(context); err != nil {
+ if !ignoreDefault {
+ if cmd := cmds.defaultSubcommand(); cmd != nil {
+ cmd.completionAlts = cmds.cmdNames()
+ context.matchedCmd(cmd)
+ cmds = cmd.cmdGroup
+ break
+ }
+ }
+ return err
+ } else if flag == HelpFlag {
+ ignoreDefault = true
+ }
+
+ case TokenArg:
+ if cmds.have() {
+ selectedDefault := false
+ cmd, ok := cmds.commands[token.String()]
+ if !ok {
+ if !ignoreDefault {
+ if cmd = cmds.defaultSubcommand(); cmd != nil {
+ cmd.completionAlts = cmds.cmdNames()
+ selectedDefault = true
+ }
+ }
+ if cmd == nil {
+ return fmt.Errorf("expected command but got %q", token)
+ }
+ }
+ if cmd == HelpCommand {
+ ignoreDefault = true
+ }
+ cmd.completionAlts = nil
+ context.matchedCmd(cmd)
+ cmds = cmd.cmdGroup
+ if !selectedDefault {
+ context.Next()
+ }
+ } else if context.arguments.have() {
+ if app.noInterspersed {
+ // no more flags
+ context.argsOnly = true
+ }
+ arg := context.nextArg()
+ if arg == nil {
+ break loop
+ }
+ context.matchedArg(arg, token.String())
+ context.Next()
+ } else {
+ break loop
+ }
+
+ case TokenEOL:
+ break loop
+ }
+ }
+
+ // Move to innermost default command.
+ for !ignoreDefault {
+ if cmd := cmds.defaultSubcommand(); cmd != nil {
+ cmd.completionAlts = cmds.cmdNames()
+ context.matchedCmd(cmd)
+ cmds = cmd.cmdGroup
+ } else {
+ break
+ }
+ }
+
+ if context.Error() {
+ return fmt.Errorf("%s", context.Peek().Value)
+ }
+
+ if !context.EOL() {
+ return fmt.Errorf("unexpected %s", context.Peek())
+ }
+
+ // Set defaults for all remaining args.
+ for arg := context.nextArg(); arg != nil && !arg.consumesRemainder(); arg = context.nextArg() {
+ for _, defaultValue := range arg.defaultValues {
+ if err := arg.value.Set(defaultValue); err != nil {
+ return fmt.Errorf("invalid default value '%s' for argument '%s'", defaultValue, arg.name)
+ }
+ }
+ }
+
+ return
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/parsers.go b/vendor/github.com/alecthomas/kingpin/v2/parsers.go
new file mode 100644
index 000000000..5a0688215
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/parsers.go
@@ -0,0 +1,216 @@
+package kingpin
+
+import (
+ "net"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/alecthomas/units"
+)
+
+type Settings interface {
+ SetValue(value Value)
+}
+
+type parserMixin struct {
+ value Value
+ required bool
+}
+
+func (p *parserMixin) SetText(text Text) {
+ p.value = &wrapText{text}
+}
+
+func (p *parserMixin) SetValue(value Value) {
+ p.value = value
+}
+
+// StringMap provides key=value parsing into a map.
+func (p *parserMixin) StringMap() (target *map[string]string) {
+ target = &(map[string]string{})
+ p.StringMapVar(target)
+ return
+}
+
+// Duration sets the parser to a time.Duration parser.
+func (p *parserMixin) Duration() (target *time.Duration) {
+ target = new(time.Duration)
+ p.DurationVar(target)
+ return
+}
+
+// Bytes parses numeric byte units. eg. 1.5KB
+func (p *parserMixin) Bytes() (target *units.Base2Bytes) {
+ target = new(units.Base2Bytes)
+ p.BytesVar(target)
+ return
+}
+
+// IP sets the parser to a net.IP parser.
+func (p *parserMixin) IP() (target *net.IP) {
+ target = new(net.IP)
+ p.IPVar(target)
+ return
+}
+
+// TCP (host:port) address.
+func (p *parserMixin) TCP() (target **net.TCPAddr) {
+ target = new(*net.TCPAddr)
+ p.TCPVar(target)
+ return
+}
+
+// TCPVar (host:port) address.
+func (p *parserMixin) TCPVar(target **net.TCPAddr) {
+ p.SetValue(newTCPAddrValue(target))
+}
+
+// ExistingFile sets the parser to one that requires and returns an existing file.
+func (p *parserMixin) ExistingFile() (target *string) {
+ target = new(string)
+ p.ExistingFileVar(target)
+ return
+}
+
+// ExistingDir sets the parser to one that requires and returns an existing directory.
+func (p *parserMixin) ExistingDir() (target *string) {
+ target = new(string)
+ p.ExistingDirVar(target)
+ return
+}
+
+// ExistingFileOrDir sets the parser to one that requires and returns an existing file OR directory.
+func (p *parserMixin) ExistingFileOrDir() (target *string) {
+ target = new(string)
+ p.ExistingFileOrDirVar(target)
+ return
+}
+
+// File returns an os.File against an existing file.
+func (p *parserMixin) File() (target **os.File) {
+ target = new(*os.File)
+ p.FileVar(target)
+ return
+}
+
+// File attempts to open a File with os.OpenFile(flag, perm).
+func (p *parserMixin) OpenFile(flag int, perm os.FileMode) (target **os.File) {
+ target = new(*os.File)
+ p.OpenFileVar(target, flag, perm)
+ return
+}
+
+// URL provides a valid, parsed url.URL.
+func (p *parserMixin) URL() (target **url.URL) {
+ target = new(*url.URL)
+ p.URLVar(target)
+ return
+}
+
+// StringMap provides key=value parsing into a map.
+func (p *parserMixin) StringMapVar(target *map[string]string) {
+ p.SetValue(newStringMapValue(target))
+}
+
+// Float sets the parser to a float64 parser.
+func (p *parserMixin) Float() (target *float64) {
+ return p.Float64()
+}
+
+// Float sets the parser to a float64 parser.
+func (p *parserMixin) FloatVar(target *float64) {
+ p.Float64Var(target)
+}
+
+// Duration sets the parser to a time.Duration parser.
+func (p *parserMixin) DurationVar(target *time.Duration) {
+ p.SetValue(newDurationValue(target))
+}
+
+// BytesVar parses numeric byte units. eg. 1.5KB
+func (p *parserMixin) BytesVar(target *units.Base2Bytes) {
+ p.SetValue(newBytesValue(target))
+}
+
+// IP sets the parser to a net.IP parser.
+func (p *parserMixin) IPVar(target *net.IP) {
+ p.SetValue(newIPValue(target))
+}
+
+// ExistingFile sets the parser to one that requires and returns an existing file.
+func (p *parserMixin) ExistingFileVar(target *string) {
+ p.SetValue(newExistingFileValue(target))
+}
+
+// ExistingDir sets the parser to one that requires and returns an existing directory.
+func (p *parserMixin) ExistingDirVar(target *string) {
+ p.SetValue(newExistingDirValue(target))
+}
+
+// ExistingDir sets the parser to one that requires and returns an existing directory.
+func (p *parserMixin) ExistingFileOrDirVar(target *string) {
+ p.SetValue(newExistingFileOrDirValue(target))
+}
+
+// FileVar opens an existing file.
+func (p *parserMixin) FileVar(target **os.File) {
+ p.SetValue(newFileValue(target, os.O_RDONLY, 0))
+}
+
+// OpenFileVar calls os.OpenFile(flag, perm)
+func (p *parserMixin) OpenFileVar(target **os.File, flag int, perm os.FileMode) {
+ p.SetValue(newFileValue(target, flag, perm))
+}
+
+// URL provides a valid, parsed url.URL.
+func (p *parserMixin) URLVar(target **url.URL) {
+ p.SetValue(newURLValue(target))
+}
+
+// URLList provides a parsed list of url.URL values.
+func (p *parserMixin) URLList() (target *[]*url.URL) {
+ target = new([]*url.URL)
+ p.URLListVar(target)
+ return
+}
+
+// URLListVar provides a parsed list of url.URL values.
+func (p *parserMixin) URLListVar(target *[]*url.URL) {
+ p.SetValue(newURLListValue(target))
+}
+
+// Enum allows a value from a set of options.
+func (p *parserMixin) Enum(options ...string) (target *string) {
+ target = new(string)
+ p.EnumVar(target, options...)
+ return
+}
+
+// EnumVar allows a value from a set of options.
+func (p *parserMixin) EnumVar(target *string, options ...string) {
+ p.SetValue(newEnumFlag(target, options...))
+}
+
+// Enums allows a set of values from a set of options.
+func (p *parserMixin) Enums(options ...string) (target *[]string) {
+ target = new([]string)
+ p.EnumsVar(target, options...)
+ return
+}
+
+// EnumVar allows a value from a set of options.
+func (p *parserMixin) EnumsVar(target *[]string, options ...string) {
+ p.SetValue(newEnumsFlag(target, options...))
+}
+
+// A Counter increments a number each time it is encountered.
+func (p *parserMixin) Counter() (target *int) {
+ target = new(int)
+ p.CounterVar(target)
+ return
+}
+
+func (p *parserMixin) CounterVar(target *int) {
+ p.SetValue(newCounterValue(target))
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/templates.go b/vendor/github.com/alecthomas/kingpin/v2/templates.go
new file mode 100644
index 000000000..703c2cda7
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/templates.go
@@ -0,0 +1,262 @@
+package kingpin
+
+// Default usage template.
+var DefaultUsageTemplate = `{{define "FormatCommand" -}}
+{{if .FlagSummary}} {{.FlagSummary}}{{end -}}
+{{range .Args}}{{if not .Hidden}} {{if not .Required}}[{{end}}{{if .PlaceHolder}}{{.PlaceHolder}}{{else}}<{{.Name}}>{{end}}{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}{{end -}}
+{{end -}}
+
+{{define "FormatCommands" -}}
+{{range .FlattenedCommands -}}
+{{if not .Hidden -}}
+ {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
+{{.Help|Wrap 4}}
+{{end -}}
+{{end -}}
+{{end -}}
+
+{{define "FormatUsage" -}}
+{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
+{{if .Help}}
+{{.Help|Wrap 0 -}}
+{{end -}}
+
+{{end -}}
+
+{{if .Context.SelectedCommand -}}
+usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
+{{ else -}}
+usage: {{.App.Name}}{{template "FormatUsage" .App}}
+{{end}}
+{{if .Context.Flags -}}
+Flags:
+{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .Context.Args -}}
+Args:
+{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .Context.SelectedCommand -}}
+{{if len .Context.SelectedCommand.Commands -}}
+Subcommands:
+{{template "FormatCommands" .Context.SelectedCommand}}
+{{end -}}
+{{else if .App.Commands -}}
+Commands:
+{{template "FormatCommands" .App}}
+{{end -}}
+`
+
+// Usage template where command's optional flags are listed separately
+var SeparateOptionalFlagsUsageTemplate = `{{define "FormatCommand" -}}
+{{if .FlagSummary}} {{.FlagSummary}}{{end -}}
+{{range .Args}}{{if not .Hidden}} {{if not .Required}}[{{end}}{{if .PlaceHolder}}{{.PlaceHolder}}{{else}}<{{.Name}}>{{end}}{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}{{end -}}
+{{end -}}
+
+{{define "FormatCommands" -}}
+{{range .FlattenedCommands -}}
+{{if not .Hidden -}}
+ {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
+{{.Help|Wrap 4}}
+{{end -}}
+{{end -}}
+{{end -}}
+
+{{define "FormatUsage" -}}
+{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
+{{if .Help}}
+{{.Help|Wrap 0 -}}
+{{end -}}
+
+{{end -}}
+{{if .Context.SelectedCommand -}}
+usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
+{{else -}}
+usage: {{.App.Name}}{{template "FormatUsage" .App}}
+{{end -}}
+
+{{if .Context.Flags|RequiredFlags -}}
+Required flags:
+{{.Context.Flags|RequiredFlags|FlagsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .Context.Flags|OptionalFlags -}}
+Optional flags:
+{{.Context.Flags|OptionalFlags|FlagsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .Context.Args -}}
+Args:
+{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .Context.SelectedCommand -}}
+Subcommands:
+{{if .Context.SelectedCommand.Commands -}}
+{{template "FormatCommands" .Context.SelectedCommand}}
+{{end -}}
+{{else if .App.Commands -}}
+Commands:
+{{template "FormatCommands" .App}}
+{{end -}}
+`
+
+// Usage template with compactly formatted commands.
+var CompactUsageTemplate = `{{define "FormatCommand" -}}
+{{if .FlagSummary}} {{.FlagSummary}}{{end -}}
+{{range .Args}}{{if not .Hidden}} {{if not .Required}}[{{end}}{{if .PlaceHolder}}{{.PlaceHolder}}{{else}}<{{.Name}}>{{end}}{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}{{end -}}
+{{end -}}
+
+{{define "FormatCommandList" -}}
+{{range . -}}
+{{if not .Hidden -}}
+{{.Depth|Indent}}{{.Name}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
+{{end -}}
+{{template "FormatCommandList" .Commands -}}
+{{end -}}
+{{end -}}
+
+{{define "FormatUsage" -}}
+{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
+{{if .Help}}
+{{.Help|Wrap 0 -}}
+{{end -}}
+
+{{end -}}
+
+{{if .Context.SelectedCommand -}}
+usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
+{{else -}}
+usage: {{.App.Name}}{{template "FormatUsage" .App}}
+{{end -}}
+{{if .Context.Flags -}}
+Flags:
+{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .Context.Args -}}
+Args:
+{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .Context.SelectedCommand -}}
+{{if .Context.SelectedCommand.Commands -}}
+Commands:
+ {{.Context.SelectedCommand}}
+{{template "FormatCommandList" .Context.SelectedCommand.Commands}}
+{{end -}}
+{{else if .App.Commands -}}
+Commands:
+{{template "FormatCommandList" .App.Commands}}
+{{end -}}
+`
+
+var ManPageTemplate = `{{define "FormatFlags" -}}
+{{range .Flags -}}
+{{if not .Hidden -}}
+.TP
+\fB{{if .Short}}-{{.Short|Char}}, {{end}}--{{.Name}}{{if not .IsBoolFlag}}={{.FormatPlaceHolder}}{{end -}}\fR
+{{.Help}}
+{{end -}}
+{{end -}}
+{{end -}}
+
+{{define "FormatCommand" -}}
+{{if .FlagSummary}} {{.FlagSummary}}{{end -}}
+{{range .Args}}{{if not .Hidden}} {{if not .Required}}[{{end}}{{if .PlaceHolder}}{{.PlaceHolder}}{{else}}<{{.Name}}>{{end}}{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}{{end -}}
+{{end -}}
+
+{{define "FormatCommands" -}}
+{{range .FlattenedCommands -}}
+{{if not .Hidden -}}
+.SS
+\fB{{.FullCommand}}{{template "FormatCommand" . -}}\fR
+.PP
+{{.Help}}
+{{template "FormatFlags" . -}}
+{{end -}}
+{{end -}}
+{{end -}}
+
+{{define "FormatUsage" -}}
+{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end -}}\fR
+{{end -}}
+
+.TH {{.App.Name}} 1 {{.App.Version}} "{{.App.Author}}"
+.SH "NAME"
+{{.App.Name}}
+.SH "SYNOPSIS"
+.TP
+\fB{{.App.Name}}{{template "FormatUsage" .App}}
+.SH "DESCRIPTION"
+{{.App.Help}}
+.SH "OPTIONS"
+{{template "FormatFlags" .App -}}
+{{if .App.Commands -}}
+.SH "COMMANDS"
+{{template "FormatCommands" .App -}}
+{{end -}}
+`
+
+// Default usage template.
+var LongHelpTemplate = `{{define "FormatCommand" -}}
+{{if .FlagSummary}} {{.FlagSummary}}{{end -}}
+{{range .Args}}{{if not .Hidden}} {{if not .Required}}[{{end}}{{if .PlaceHolder}}{{.PlaceHolder}}{{else}}<{{.Name}}>{{end}}{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}{{end -}}
+{{end -}}
+
+{{define "FormatCommands" -}}
+{{range .FlattenedCommands -}}
+{{if not .Hidden -}}
+ {{.FullCommand}}{{template "FormatCommand" .}}
+{{.Help|Wrap 4}}
+{{with .Flags|FlagsToTwoColumns}}{{FormatTwoColumnsWithIndent . 4 2}}{{end}}
+{{end -}}
+{{end -}}
+{{end -}}
+
+{{define "FormatUsage" -}}
+{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
+{{if .Help}}
+{{.Help|Wrap 0 -}}
+{{end -}}
+
+{{end -}}
+
+usage: {{.App.Name}}{{template "FormatUsage" .App}}
+{{if .Context.Flags -}}
+Flags:
+{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .Context.Args -}}
+Args:
+{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
+{{end -}}
+{{if .App.Commands -}}
+Commands:
+{{template "FormatCommands" .App}}
+{{end -}}
+`
+
+var BashCompletionTemplate = `
+_{{.App.Name}}_bash_autocomplete() {
+ local cur prev opts base
+ COMPREPLY=()
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ opts=$( ${COMP_WORDS[0]} --completion-bash "${COMP_WORDS[@]:1:$COMP_CWORD}" )
+ COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
+ return 0
+}
+complete -F _{{.App.Name}}_bash_autocomplete -o default {{.App.Name}}
+
+`
+
+var ZshCompletionTemplate = `#compdef {{.App.Name}}
+
+_{{.App.Name}}() {
+ local matches=($(${words[1]} --completion-bash "${(@)words[2,$CURRENT]}"))
+ compadd -a matches
+
+ if [[ $compstate[nmatches] -eq 0 && $words[$CURRENT] != -* ]]; then
+ _files
+ fi
+}
+
+if [[ "$(basename -- ${(%):-%x})" != "_{{.App.Name}}" ]]; then
+ compdef _{{.App.Name}} {{.App.Name}}
+fi
+`
diff --git a/vendor/github.com/alecthomas/kingpin/v2/usage.go b/vendor/github.com/alecthomas/kingpin/v2/usage.go
new file mode 100644
index 000000000..9b3dd731f
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/usage.go
@@ -0,0 +1,225 @@
+package kingpin
+
+import (
+ "bytes"
+ "fmt"
+ "go/doc"
+ "io"
+ "strings"
+ "text/template"
+)
+
+var (
+ preIndent = " "
+)
+
+func formatTwoColumns(w io.Writer, indent, padding, width int, rows [][2]string) {
+ // Find size of first column.
+ s := 0
+ for _, row := range rows {
+ if c := len(row[0]); c > s && c < 30 {
+ s = c
+ }
+ }
+
+ indentStr := strings.Repeat(" ", indent)
+ offsetStr := strings.Repeat(" ", s+padding)
+
+ for _, row := range rows {
+ buf := bytes.NewBuffer(nil)
+ doc.ToText(buf, row[1], "", preIndent, width-s-padding-indent)
+ lines := strings.Split(strings.TrimRight(buf.String(), "\n"), "\n")
+ fmt.Fprintf(w, "%s%-*s%*s", indentStr, s, row[0], padding, "")
+ if len(row[0]) >= 30 {
+ fmt.Fprintf(w, "\n%s%s", indentStr, offsetStr)
+ }
+ fmt.Fprintf(w, "%s\n", lines[0])
+ for _, line := range lines[1:] {
+ fmt.Fprintf(w, "%s%s%s\n", indentStr, offsetStr, line)
+ }
+ }
+}
+
+// Usage writes application usage to w. It parses args to determine
+// appropriate help context, such as which command to show help for.
+func (a *Application) Usage(args []string) {
+ context, err := a.parseContext(true, args)
+ a.FatalIfError(err, "")
+ if err := a.UsageForContextWithTemplate(context, 2, a.usageTemplate); err != nil {
+ panic(err)
+ }
+}
+
+func formatAppUsage(app *ApplicationModel) string {
+ s := []string{app.Name}
+ if len(app.Flags) > 0 {
+ s = append(s, app.FlagSummary())
+ }
+ if len(app.Args) > 0 {
+ s = append(s, app.ArgSummary())
+ }
+ return strings.Join(s, " ")
+}
+
+func formatCmdUsage(app *ApplicationModel, cmd *CmdModel) string {
+ s := []string{app.Name, cmd.String()}
+ if len(cmd.Flags) > 0 {
+ s = append(s, cmd.FlagSummary())
+ }
+ if len(cmd.Args) > 0 {
+ s = append(s, cmd.ArgSummary())
+ }
+ return strings.Join(s, " ")
+}
+
+func formatFlag(haveShort bool, flag *FlagModel) string {
+ flagString := ""
+ flagName := flag.Name
+ if flag.IsBoolFlag() {
+ flagName = "[no-]" + flagName
+ }
+ if flag.Short != 0 {
+ flagString += fmt.Sprintf("-%c, --%s", flag.Short, flagName)
+ } else {
+ if haveShort {
+ flagString += fmt.Sprintf(" --%s", flagName)
+ } else {
+ flagString += fmt.Sprintf("--%s", flagName)
+ }
+ }
+ if !flag.IsBoolFlag() {
+ flagString += fmt.Sprintf("=%s", flag.FormatPlaceHolder())
+ }
+ if v, ok := flag.Value.(repeatableFlag); ok && v.IsCumulative() {
+ flagString += " ..."
+ }
+ return flagString
+}
+
+type templateParseContext struct {
+ SelectedCommand *CmdModel
+ *FlagGroupModel
+ *ArgGroupModel
+}
+
+type templateContext struct {
+ App *ApplicationModel
+ Width int
+ Context *templateParseContext
+}
+
+// UsageForContext displays usage information from a ParseContext (obtained from
+// Application.ParseContext() or Action(f) callbacks).
+func (a *Application) UsageForContext(context *ParseContext) error {
+ return a.UsageForContextWithTemplate(context, 2, a.usageTemplate)
+}
+
+// UsageForContextWithTemplate is the base usage function. You generally don't need to use this.
+func (a *Application) UsageForContextWithTemplate(context *ParseContext, indent int, tmpl string) error {
+ width := guessWidth(a.usageWriter)
+ funcs := template.FuncMap{
+ "Indent": func(level int) string {
+ return strings.Repeat(" ", level*indent)
+ },
+ "Wrap": func(indent int, s string) string {
+ buf := bytes.NewBuffer(nil)
+ indentText := strings.Repeat(" ", indent)
+ doc.ToText(buf, s, indentText, " "+indentText, width-indent)
+ return buf.String()
+ },
+ "FormatFlag": formatFlag,
+ "FlagsToTwoColumns": func(f []*FlagModel) [][2]string {
+ rows := [][2]string{}
+ haveShort := false
+ for _, flag := range f {
+ if flag.Short != 0 {
+ haveShort = true
+ break
+ }
+ }
+ for _, flag := range f {
+ if !flag.Hidden {
+ rows = append(rows, [2]string{formatFlag(haveShort, flag), flag.HelpWithEnvar()})
+ }
+ }
+ return rows
+ },
+ "RequiredFlags": func(f []*FlagModel) []*FlagModel {
+ requiredFlags := []*FlagModel{}
+ for _, flag := range f {
+ if flag.Required {
+ requiredFlags = append(requiredFlags, flag)
+ }
+ }
+ return requiredFlags
+ },
+ "OptionalFlags": func(f []*FlagModel) []*FlagModel {
+ optionalFlags := []*FlagModel{}
+ for _, flag := range f {
+ if !flag.Required {
+ optionalFlags = append(optionalFlags, flag)
+ }
+ }
+ return optionalFlags
+ },
+ "ArgsToTwoColumns": func(a []*ArgModel) [][2]string {
+ rows := [][2]string{}
+ for _, arg := range a {
+ if !arg.Hidden {
+ var s string
+ if arg.PlaceHolder != "" {
+ s = arg.PlaceHolder
+ } else {
+ s = "<" + arg.Name + ">"
+ }
+ if !arg.Required {
+ s = "[" + s + "]"
+ }
+ rows = append(rows, [2]string{s, arg.HelpWithEnvar()})
+ }
+ }
+ return rows
+ },
+ "FormatTwoColumns": func(rows [][2]string) string {
+ buf := bytes.NewBuffer(nil)
+ formatTwoColumns(buf, indent, indent, width, rows)
+ return buf.String()
+ },
+ "FormatTwoColumnsWithIndent": func(rows [][2]string, indent, padding int) string {
+ buf := bytes.NewBuffer(nil)
+ formatTwoColumns(buf, indent, padding, width, rows)
+ return buf.String()
+ },
+ "FormatAppUsage": formatAppUsage,
+ "FormatCommandUsage": formatCmdUsage,
+ "IsCumulative": func(value Value) bool {
+ r, ok := value.(remainderArg)
+ return ok && r.IsCumulative()
+ },
+ "Char": func(c rune) string {
+ return string(c)
+ },
+ }
+ for k, v := range a.usageFuncs {
+ funcs[k] = v
+ }
+
+ t, err := template.New("usage").Funcs(funcs).Parse(tmpl)
+ if err != nil {
+ return err
+ }
+ var selectedCommand *CmdModel
+ if context.SelectedCommand != nil {
+ selectedCommand = context.SelectedCommand.Model()
+ }
+ ctx := templateContext{
+ App: a.Model(),
+ Width: width,
+ Context: &templateParseContext{
+ SelectedCommand: selectedCommand,
+ FlagGroupModel: context.flags.Model(),
+ ArgGroupModel: context.arguments.Model(),
+ },
+ }
+ return t.Execute(a.usageWriter, ctx)
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/values.go b/vendor/github.com/alecthomas/kingpin/v2/values.go
new file mode 100644
index 000000000..e6e05bc21
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/values.go
@@ -0,0 +1,489 @@
+package kingpin
+
+//go:generate go run ./cmd/genvalues/main.go
+
+import (
+ "encoding"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+ "reflect"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/alecthomas/units"
+ "github.com/xhit/go-str2duration/v2"
+)
+
+// NOTE: Most of the base type values were lifted from:
+// http://golang.org/src/pkg/flag/flag.go?s=20146:20222
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+//
+// If a Value has an IsBoolFlag() bool method returning true, the command-line
+// parser makes --name equivalent to -name=true rather than using the next
+// command-line argument, and adds a --no-name counterpart for negating the
+// flag.
+type Value interface {
+ String() string
+ Set(string) error
+}
+
+// Getter is an interface that allows the contents of a Value to be retrieved.
+// It wraps the Value interface, rather than being part of it, because it
+// appeared after Go 1 and its compatibility rules. All Value types provided
+// by this package satisfy the Getter interface.
+type Getter interface {
+ Value
+ Get() interface{}
+}
+
+// Optional interface to indicate boolean flags that don't accept a value, and
+// implicitly have a --no- negation counterpart.
+type boolFlag interface {
+ IsBoolFlag() bool
+}
+
+// Optional interface for arguments that cumulatively consume all remaining
+// input.
+type remainderArg interface {
+ IsCumulative() bool
+}
+
+// Optional interface for flags that can be repeated.
+type repeatableFlag interface {
+ IsCumulative() bool
+}
+
+// Text is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Text interface {
+ encoding.TextMarshaler
+ encoding.TextUnmarshaler
+}
+
+type wrapText struct {
+ text Text
+}
+
+func (w wrapText) String() string {
+ buf, _ := w.text.MarshalText()
+ return string(buf)
+}
+
+func (w *wrapText) Set(s string) error {
+ return w.text.UnmarshalText([]byte(s))
+}
+
+type accumulator struct {
+ element func(value interface{}) Value
+ typ reflect.Type
+ slice reflect.Value
+}
+
+// Use reflection to accumulate values into a slice.
+//
+// target := []string{}
+// newAccumulator(&target, func (value interface{}) Value {
+// return newStringValue(value.(*string))
+// })
+func newAccumulator(slice interface{}, element func(value interface{}) Value) *accumulator {
+ typ := reflect.TypeOf(slice)
+ if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Slice {
+ panic("expected a pointer to a slice")
+ }
+ return &accumulator{
+ element: element,
+ typ: typ.Elem().Elem(),
+ slice: reflect.ValueOf(slice),
+ }
+}
+
+func (a *accumulator) String() string {
+ out := []string{}
+ s := a.slice.Elem()
+ for i := 0; i < s.Len(); i++ {
+ out = append(out, a.element(s.Index(i).Addr().Interface()).String())
+ }
+ return strings.Join(out, ",")
+}
+
+func (a *accumulator) Set(value string) error {
+ e := reflect.New(a.typ)
+ if err := a.element(e.Interface()).Set(value); err != nil {
+ return err
+ }
+ slice := reflect.Append(a.slice.Elem(), e.Elem())
+ a.slice.Elem().Set(slice)
+ return nil
+}
+
+func (a *accumulator) Get() interface{} {
+ return a.slice.Interface()
+}
+
+func (a *accumulator) IsCumulative() bool {
+ return true
+}
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(p *time.Duration) *durationValue {
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := str2duration.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Get() interface{} { return time.Duration(*d) }
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+// -- map[string]string Value
+type stringMapValue map[string]string
+
+func newStringMapValue(p *map[string]string) *stringMapValue {
+ return (*stringMapValue)(p)
+}
+
+var stringMapRegex = regexp.MustCompile("[:=]")
+
+func (s *stringMapValue) Set(value string) error {
+ parts := stringMapRegex.Split(value, 2)
+ if len(parts) != 2 {
+ return fmt.Errorf("expected KEY=VALUE got '%s'", value)
+ }
+ (*s)[parts[0]] = parts[1]
+ return nil
+}
+
+func (s *stringMapValue) Get() interface{} {
+ return (map[string]string)(*s)
+}
+
+func (s *stringMapValue) String() string {
+ return fmt.Sprintf("%s", map[string]string(*s))
+}
+
+func (s *stringMapValue) IsCumulative() bool {
+ return true
+}
+
+// -- net.IP Value
+type ipValue net.IP
+
+func newIPValue(p *net.IP) *ipValue {
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) Set(value string) error {
+ if ip := net.ParseIP(value); ip == nil {
+ return fmt.Errorf("'%s' is not an IP address", value)
+ } else {
+ *i = *(*ipValue)(&ip)
+ return nil
+ }
+}
+
+func (i *ipValue) Get() interface{} {
+ return (net.IP)(*i)
+}
+
+func (i *ipValue) String() string {
+ return (*net.IP)(i).String()
+}
+
+// -- *net.TCPAddr Value
+type tcpAddrValue struct {
+ addr **net.TCPAddr
+}
+
+func newTCPAddrValue(p **net.TCPAddr) *tcpAddrValue {
+ return &tcpAddrValue{p}
+}
+
+func (i *tcpAddrValue) Set(value string) error {
+ if addr, err := net.ResolveTCPAddr("tcp", value); err != nil {
+ return fmt.Errorf("'%s' is not a valid TCP address: %s", value, err)
+ } else {
+ *i.addr = addr
+ return nil
+ }
+}
+
+func (t *tcpAddrValue) Get() interface{} {
+ return (*net.TCPAddr)(*t.addr)
+}
+
+func (i *tcpAddrValue) String() string {
+ return (*i.addr).String()
+}
+
+// -- existingFile Value
+
+type fileStatValue struct {
+ path *string
+ predicate func(os.FileInfo) error
+}
+
+func newFileStatValue(p *string, predicate func(os.FileInfo) error) *fileStatValue {
+ return &fileStatValue{
+ path: p,
+ predicate: predicate,
+ }
+}
+
+func (e *fileStatValue) Set(value string) error {
+ if s, err := os.Stat(value); os.IsNotExist(err) {
+ return fmt.Errorf("path '%s' does not exist", value)
+ } else if err != nil {
+ return err
+ } else if err := e.predicate(s); err != nil {
+ return err
+ }
+ *e.path = value
+ return nil
+}
+
+func (f *fileStatValue) Get() interface{} {
+ return (string)(*f.path)
+}
+
+func (e *fileStatValue) String() string {
+ return *e.path
+}
+
+// -- os.File value
+
+type fileValue struct {
+ f **os.File
+ flag int
+ perm os.FileMode
+}
+
+func newFileValue(p **os.File, flag int, perm os.FileMode) *fileValue {
+ return &fileValue{p, flag, perm}
+}
+
+func (f *fileValue) Set(value string) error {
+ if fd, err := os.OpenFile(value, f.flag, f.perm); err != nil {
+ return err
+ } else {
+ *f.f = fd
+ return nil
+ }
+}
+
+func (f *fileValue) Get() interface{} {
+ return (*os.File)(*f.f)
+}
+
+func (f *fileValue) String() string {
+ if *f.f == nil {
+ return ""
+ }
+ return (*f.f).Name()
+}
+
+// -- url.URL Value
+type urlValue struct {
+ u **url.URL
+}
+
+func newURLValue(p **url.URL) *urlValue {
+ return &urlValue{p}
+}
+
+func (u *urlValue) Set(value string) error {
+ if url, err := url.Parse(value); err != nil {
+ return fmt.Errorf("invalid URL: %s", err)
+ } else {
+ *u.u = url
+ return nil
+ }
+}
+
+func (u *urlValue) Get() interface{} {
+ return (*url.URL)(*u.u)
+}
+
+func (u *urlValue) String() string {
+ if *u.u == nil {
+ return ""
+ }
+ return (*u.u).String()
+}
+
+// -- []*url.URL Value
+type urlListValue []*url.URL
+
+func newURLListValue(p *[]*url.URL) *urlListValue {
+ return (*urlListValue)(p)
+}
+
+func (u *urlListValue) Set(value string) error {
+ if url, err := url.Parse(value); err != nil {
+ return fmt.Errorf("invalid URL: %s", err)
+ } else {
+ *u = append(*u, url)
+ return nil
+ }
+}
+
+func (u *urlListValue) Get() interface{} {
+ return ([]*url.URL)(*u)
+}
+
+func (u *urlListValue) String() string {
+ out := []string{}
+ for _, url := range *u {
+ out = append(out, url.String())
+ }
+ return strings.Join(out, ",")
+}
+
+func (u *urlListValue) IsCumulative() bool {
+ return true
+}
+
+// A flag whose value must be in a set of options.
+type enumValue struct {
+ value *string
+ options []string
+}
+
+func newEnumFlag(target *string, options ...string) *enumValue {
+ return &enumValue{
+ value: target,
+ options: options,
+ }
+}
+
+func (a *enumValue) String() string {
+ return *a.value
+}
+
+func (a *enumValue) Set(value string) error {
+ for _, v := range a.options {
+ if v == value {
+ *a.value = value
+ return nil
+ }
+ }
+ return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(a.options, ","), value)
+}
+
+func (e *enumValue) Get() interface{} {
+ return (string)(*e.value)
+}
+
+// -- []string Enum Value
+type enumsValue struct {
+ value *[]string
+ options []string
+}
+
+func newEnumsFlag(target *[]string, options ...string) *enumsValue {
+ return &enumsValue{
+ value: target,
+ options: options,
+ }
+}
+
+func (s *enumsValue) Set(value string) error {
+ for _, v := range s.options {
+ if v == value {
+ *s.value = append(*s.value, value)
+ return nil
+ }
+ }
+ return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(s.options, ","), value)
+}
+
+func (e *enumsValue) Get() interface{} {
+ return ([]string)(*e.value)
+}
+
+func (s *enumsValue) String() string {
+ return strings.Join(*s.value, ",")
+}
+
+func (s *enumsValue) IsCumulative() bool {
+ return true
+}
+
+// -- units.Base2Bytes Value
+type bytesValue units.Base2Bytes
+
+func newBytesValue(p *units.Base2Bytes) *bytesValue {
+ return (*bytesValue)(p)
+}
+
+func (d *bytesValue) Set(s string) error {
+ v, err := units.ParseBase2Bytes(s)
+ *d = bytesValue(v)
+ return err
+}
+
+func (d *bytesValue) Get() interface{} { return units.Base2Bytes(*d) }
+
+func (d *bytesValue) String() string { return (*units.Base2Bytes)(d).String() }
+
+func newExistingFileValue(target *string) *fileStatValue {
+ return newFileStatValue(target, func(s os.FileInfo) error {
+ if s.IsDir() {
+ return fmt.Errorf("'%s' is a directory", s.Name())
+ }
+ return nil
+ })
+}
+
+func newExistingDirValue(target *string) *fileStatValue {
+ return newFileStatValue(target, func(s os.FileInfo) error {
+ if !s.IsDir() {
+ return fmt.Errorf("'%s' is a file", s.Name())
+ }
+ return nil
+ })
+}
+
+func newExistingFileOrDirValue(target *string) *fileStatValue {
+ return newFileStatValue(target, func(s os.FileInfo) error { return nil })
+}
+
+type counterValue int
+
+func newCounterValue(n *int) *counterValue {
+ return (*counterValue)(n)
+}
+
+func (c *counterValue) Set(s string) error {
+ *c++
+ return nil
+}
+
+func (c *counterValue) Get() interface{} { return (int)(*c) }
+func (c *counterValue) IsBoolFlag() bool { return true }
+func (c *counterValue) String() string { return fmt.Sprintf("%d", *c) }
+func (c *counterValue) IsCumulative() bool { return true }
+
+func resolveHost(value string) (net.IP, error) {
+ if ip := net.ParseIP(value); ip != nil {
+ return ip, nil
+ } else {
+ if addr, err := net.ResolveIPAddr("ip", value); err != nil {
+ return nil, err
+ } else {
+ return addr.IP, nil
+ }
+ }
+}
diff --git a/vendor/github.com/alecthomas/kingpin/v2/values.json b/vendor/github.com/alecthomas/kingpin/v2/values.json
new file mode 100644
index 000000000..23c67448e
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/values.json
@@ -0,0 +1,25 @@
+[
+ {"type": "bool", "parser": "strconv.ParseBool(s)"},
+ {"type": "string", "parser": "s, error(nil)", "format": "string(*f.v)", "plural": "Strings"},
+ {"type": "uint", "parser": "strconv.ParseUint(s, 0, 64)", "plural": "Uints"},
+ {"type": "uint8", "parser": "strconv.ParseUint(s, 0, 8)"},
+ {"type": "uint16", "parser": "strconv.ParseUint(s, 0, 16)"},
+ {"type": "uint32", "parser": "strconv.ParseUint(s, 0, 32)"},
+ {"type": "uint64", "parser": "strconv.ParseUint(s, 0, 64)"},
+ {"type": "int", "parser": "strconv.ParseFloat(s, 64)", "plural": "Ints"},
+ {"type": "int8", "parser": "strconv.ParseInt(s, 0, 8)"},
+ {"type": "int16", "parser": "strconv.ParseInt(s, 0, 16)"},
+ {"type": "int32", "parser": "strconv.ParseInt(s, 0, 32)"},
+ {"type": "int64", "parser": "strconv.ParseInt(s, 0, 64)"},
+ {"type": "float64", "parser": "strconv.ParseFloat(s, 64)"},
+ {"type": "float32", "parser": "strconv.ParseFloat(s, 32)"},
+ {"name": "Duration", "type": "time.Duration", "no_value_parser": true},
+ {"name": "IP", "type": "net.IP", "no_value_parser": true},
+ {"name": "TCPAddr", "Type": "*net.TCPAddr", "plural": "TCPList", "no_value_parser": true},
+ {"name": "ExistingFile", "Type": "string", "plural": "ExistingFiles", "no_value_parser": true},
+ {"name": "ExistingDir", "Type": "string", "plural": "ExistingDirs", "no_value_parser": true},
+ {"name": "ExistingFileOrDir", "Type": "string", "plural": "ExistingFilesOrDirs", "no_value_parser": true},
+ {"name": "Regexp", "Type": "*regexp.Regexp", "parser": "regexp.Compile(s)"},
+ {"name": "ResolvedIP", "Type": "net.IP", "parser": "resolveHost(s)", "help": "Resolve a hostname or IP to an IP."},
+ {"name": "HexBytes", "Type": "[]byte", "parser": "hex.DecodeString(s)", "help": "Bytes as a hex string."}
+]
diff --git a/vendor/github.com/alecthomas/kingpin/v2/values_generated.go b/vendor/github.com/alecthomas/kingpin/v2/values_generated.go
new file mode 100644
index 000000000..8d492bf9c
--- /dev/null
+++ b/vendor/github.com/alecthomas/kingpin/v2/values_generated.go
@@ -0,0 +1,821 @@
+package kingpin
+
+import (
+ "encoding/hex"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "time"
+)
+
+// This file is autogenerated by "go generate .". Do not modify.
+
+// -- bool Value
+type boolValue struct{ v *bool }
+
+func newBoolValue(p *bool) *boolValue {
+ return &boolValue{p}
+}
+
+func (f *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ if err == nil {
+ *f.v = (bool)(v)
+ }
+ return err
+}
+
+func (f *boolValue) Get() interface{} { return (bool)(*f.v) }
+
+func (f *boolValue) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Bool parses the next command-line value as bool.
+func (p *parserMixin) Bool() (target *bool) {
+ target = new(bool)
+ p.BoolVar(target)
+ return
+}
+
+func (p *parserMixin) BoolVar(target *bool) {
+ p.SetValue(newBoolValue(target))
+}
+
+// BoolList accumulates bool values into a slice.
+func (p *parserMixin) BoolList() (target *[]bool) {
+ target = new([]bool)
+ p.BoolListVar(target)
+ return
+}
+
+func (p *parserMixin) BoolListVar(target *[]bool) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newBoolValue(v.(*bool))
+ }))
+}
+
+// -- string Value
+type stringValue struct{ v *string }
+
+func newStringValue(p *string) *stringValue {
+ return &stringValue{p}
+}
+
+func (f *stringValue) Set(s string) error {
+ v, err := s, error(nil)
+ if err == nil {
+ *f.v = (string)(v)
+ }
+ return err
+}
+
+func (f *stringValue) Get() interface{} { return (string)(*f.v) }
+
+func (f *stringValue) String() string { return string(*f.v) }
+
+// String parses the next command-line value as string.
+func (p *parserMixin) String() (target *string) {
+ target = new(string)
+ p.StringVar(target)
+ return
+}
+
+func (p *parserMixin) StringVar(target *string) {
+ p.SetValue(newStringValue(target))
+}
+
+// Strings accumulates string values into a slice.
+func (p *parserMixin) Strings() (target *[]string) {
+ target = new([]string)
+ p.StringsVar(target)
+ return
+}
+
+func (p *parserMixin) StringsVar(target *[]string) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newStringValue(v.(*string))
+ }))
+}
+
+// -- uint Value
+type uintValue struct{ v *uint }
+
+func newUintValue(p *uint) *uintValue {
+ return &uintValue{p}
+}
+
+func (f *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ if err == nil {
+ *f.v = (uint)(v)
+ }
+ return err
+}
+
+func (f *uintValue) Get() interface{} { return (uint)(*f.v) }
+
+func (f *uintValue) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Uint parses the next command-line value as uint.
+func (p *parserMixin) Uint() (target *uint) {
+ target = new(uint)
+ p.UintVar(target)
+ return
+}
+
+func (p *parserMixin) UintVar(target *uint) {
+ p.SetValue(newUintValue(target))
+}
+
+// Uints accumulates uint values into a slice.
+func (p *parserMixin) Uints() (target *[]uint) {
+ target = new([]uint)
+ p.UintsVar(target)
+ return
+}
+
+func (p *parserMixin) UintsVar(target *[]uint) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newUintValue(v.(*uint))
+ }))
+}
+
+// -- uint8 Value
+type uint8Value struct{ v *uint8 }
+
+func newUint8Value(p *uint8) *uint8Value {
+ return &uint8Value{p}
+}
+
+func (f *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ if err == nil {
+ *f.v = (uint8)(v)
+ }
+ return err
+}
+
+func (f *uint8Value) Get() interface{} { return (uint8)(*f.v) }
+
+func (f *uint8Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Uint8 parses the next command-line value as uint8.
+func (p *parserMixin) Uint8() (target *uint8) {
+ target = new(uint8)
+ p.Uint8Var(target)
+ return
+}
+
+func (p *parserMixin) Uint8Var(target *uint8) {
+ p.SetValue(newUint8Value(target))
+}
+
+// Uint8List accumulates uint8 values into a slice.
+func (p *parserMixin) Uint8List() (target *[]uint8) {
+ target = new([]uint8)
+ p.Uint8ListVar(target)
+ return
+}
+
+func (p *parserMixin) Uint8ListVar(target *[]uint8) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newUint8Value(v.(*uint8))
+ }))
+}
+
+// -- uint16 Value
+type uint16Value struct{ v *uint16 }
+
+func newUint16Value(p *uint16) *uint16Value {
+ return &uint16Value{p}
+}
+
+func (f *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ if err == nil {
+ *f.v = (uint16)(v)
+ }
+ return err
+}
+
+func (f *uint16Value) Get() interface{} { return (uint16)(*f.v) }
+
+func (f *uint16Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Uint16 parses the next command-line value as uint16.
+func (p *parserMixin) Uint16() (target *uint16) {
+ target = new(uint16)
+ p.Uint16Var(target)
+ return
+}
+
+func (p *parserMixin) Uint16Var(target *uint16) {
+ p.SetValue(newUint16Value(target))
+}
+
+// Uint16List accumulates uint16 values into a slice.
+func (p *parserMixin) Uint16List() (target *[]uint16) {
+ target = new([]uint16)
+ p.Uint16ListVar(target)
+ return
+}
+
+func (p *parserMixin) Uint16ListVar(target *[]uint16) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newUint16Value(v.(*uint16))
+ }))
+}
+
+// -- uint32 Value
+type uint32Value struct{ v *uint32 }
+
+func newUint32Value(p *uint32) *uint32Value {
+ return &uint32Value{p}
+}
+
+func (f *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ if err == nil {
+ *f.v = (uint32)(v)
+ }
+ return err
+}
+
+func (f *uint32Value) Get() interface{} { return (uint32)(*f.v) }
+
+func (f *uint32Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Uint32 parses the next command-line value as uint32.
+func (p *parserMixin) Uint32() (target *uint32) {
+ target = new(uint32)
+ p.Uint32Var(target)
+ return
+}
+
+func (p *parserMixin) Uint32Var(target *uint32) {
+ p.SetValue(newUint32Value(target))
+}
+
+// Uint32List accumulates uint32 values into a slice.
+func (p *parserMixin) Uint32List() (target *[]uint32) {
+ target = new([]uint32)
+ p.Uint32ListVar(target)
+ return
+}
+
+func (p *parserMixin) Uint32ListVar(target *[]uint32) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newUint32Value(v.(*uint32))
+ }))
+}
+
+// -- uint64 Value
+type uint64Value struct{ v *uint64 }
+
+func newUint64Value(p *uint64) *uint64Value {
+ return &uint64Value{p}
+}
+
+func (f *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ if err == nil {
+ *f.v = (uint64)(v)
+ }
+ return err
+}
+
+func (f *uint64Value) Get() interface{} { return (uint64)(*f.v) }
+
+func (f *uint64Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Uint64 parses the next command-line value as uint64.
+func (p *parserMixin) Uint64() (target *uint64) {
+ target = new(uint64)
+ p.Uint64Var(target)
+ return
+}
+
+func (p *parserMixin) Uint64Var(target *uint64) {
+ p.SetValue(newUint64Value(target))
+}
+
+// Uint64List accumulates uint64 values into a slice.
+func (p *parserMixin) Uint64List() (target *[]uint64) {
+ target = new([]uint64)
+ p.Uint64ListVar(target)
+ return
+}
+
+func (p *parserMixin) Uint64ListVar(target *[]uint64) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newUint64Value(v.(*uint64))
+ }))
+}
+
+// -- int Value
+type intValue struct{ v *int }
+
+func newIntValue(p *int) *intValue {
+ return &intValue{p}
+}
+
+func (f *intValue) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ if err == nil {
+ *f.v = (int)(v)
+ }
+ return err
+}
+
+func (f *intValue) Get() interface{} { return (int)(*f.v) }
+
+func (f *intValue) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Int parses the next command-line value as int.
+func (p *parserMixin) Int() (target *int) {
+ target = new(int)
+ p.IntVar(target)
+ return
+}
+
+func (p *parserMixin) IntVar(target *int) {
+ p.SetValue(newIntValue(target))
+}
+
+// Ints accumulates int values into a slice.
+func (p *parserMixin) Ints() (target *[]int) {
+ target = new([]int)
+ p.IntsVar(target)
+ return
+}
+
+func (p *parserMixin) IntsVar(target *[]int) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newIntValue(v.(*int))
+ }))
+}
+
+// -- int8 Value
+type int8Value struct{ v *int8 }
+
+func newInt8Value(p *int8) *int8Value {
+ return &int8Value{p}
+}
+
+func (f *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ if err == nil {
+ *f.v = (int8)(v)
+ }
+ return err
+}
+
+func (f *int8Value) Get() interface{} { return (int8)(*f.v) }
+
+func (f *int8Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Int8 parses the next command-line value as int8.
+func (p *parserMixin) Int8() (target *int8) {
+ target = new(int8)
+ p.Int8Var(target)
+ return
+}
+
+func (p *parserMixin) Int8Var(target *int8) {
+ p.SetValue(newInt8Value(target))
+}
+
+// Int8List accumulates int8 values into a slice.
+func (p *parserMixin) Int8List() (target *[]int8) {
+ target = new([]int8)
+ p.Int8ListVar(target)
+ return
+}
+
+func (p *parserMixin) Int8ListVar(target *[]int8) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newInt8Value(v.(*int8))
+ }))
+}
+
+// -- int16 Value
+type int16Value struct{ v *int16 }
+
+func newInt16Value(p *int16) *int16Value {
+ return &int16Value{p}
+}
+
+func (f *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ if err == nil {
+ *f.v = (int16)(v)
+ }
+ return err
+}
+
+func (f *int16Value) Get() interface{} { return (int16)(*f.v) }
+
+func (f *int16Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Int16 parses the next command-line value as int16.
+func (p *parserMixin) Int16() (target *int16) {
+ target = new(int16)
+ p.Int16Var(target)
+ return
+}
+
+func (p *parserMixin) Int16Var(target *int16) {
+ p.SetValue(newInt16Value(target))
+}
+
+// Int16List accumulates int16 values into a slice.
+func (p *parserMixin) Int16List() (target *[]int16) {
+ target = new([]int16)
+ p.Int16ListVar(target)
+ return
+}
+
+func (p *parserMixin) Int16ListVar(target *[]int16) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newInt16Value(v.(*int16))
+ }))
+}
+
+// -- int32 Value
+type int32Value struct{ v *int32 }
+
+func newInt32Value(p *int32) *int32Value {
+ return &int32Value{p}
+}
+
+func (f *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ if err == nil {
+ *f.v = (int32)(v)
+ }
+ return err
+}
+
+func (f *int32Value) Get() interface{} { return (int32)(*f.v) }
+
+func (f *int32Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Int32 parses the next command-line value as int32.
+func (p *parserMixin) Int32() (target *int32) {
+ target = new(int32)
+ p.Int32Var(target)
+ return
+}
+
+func (p *parserMixin) Int32Var(target *int32) {
+ p.SetValue(newInt32Value(target))
+}
+
+// Int32List accumulates int32 values into a slice.
+func (p *parserMixin) Int32List() (target *[]int32) {
+ target = new([]int32)
+ p.Int32ListVar(target)
+ return
+}
+
+func (p *parserMixin) Int32ListVar(target *[]int32) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newInt32Value(v.(*int32))
+ }))
+}
+
+// -- int64 Value
+type int64Value struct{ v *int64 }
+
+func newInt64Value(p *int64) *int64Value {
+ return &int64Value{p}
+}
+
+func (f *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ if err == nil {
+ *f.v = (int64)(v)
+ }
+ return err
+}
+
+func (f *int64Value) Get() interface{} { return (int64)(*f.v) }
+
+func (f *int64Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Int64 parses the next command-line value as int64.
+func (p *parserMixin) Int64() (target *int64) {
+ target = new(int64)
+ p.Int64Var(target)
+ return
+}
+
+func (p *parserMixin) Int64Var(target *int64) {
+ p.SetValue(newInt64Value(target))
+}
+
+// Int64List accumulates int64 values into a slice.
+func (p *parserMixin) Int64List() (target *[]int64) {
+ target = new([]int64)
+ p.Int64ListVar(target)
+ return
+}
+
+func (p *parserMixin) Int64ListVar(target *[]int64) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newInt64Value(v.(*int64))
+ }))
+}
+
+// -- float64 Value
+type float64Value struct{ v *float64 }
+
+func newFloat64Value(p *float64) *float64Value {
+ return &float64Value{p}
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ if err == nil {
+ *f.v = (float64)(v)
+ }
+ return err
+}
+
+func (f *float64Value) Get() interface{} { return (float64)(*f.v) }
+
+func (f *float64Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Float64 parses the next command-line value as float64.
+func (p *parserMixin) Float64() (target *float64) {
+ target = new(float64)
+ p.Float64Var(target)
+ return
+}
+
+func (p *parserMixin) Float64Var(target *float64) {
+ p.SetValue(newFloat64Value(target))
+}
+
+// Float64List accumulates float64 values into a slice.
+func (p *parserMixin) Float64List() (target *[]float64) {
+ target = new([]float64)
+ p.Float64ListVar(target)
+ return
+}
+
+func (p *parserMixin) Float64ListVar(target *[]float64) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newFloat64Value(v.(*float64))
+ }))
+}
+
+// -- float32 Value
+type float32Value struct{ v *float32 }
+
+func newFloat32Value(p *float32) *float32Value {
+ return &float32Value{p}
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ if err == nil {
+ *f.v = (float32)(v)
+ }
+ return err
+}
+
+func (f *float32Value) Get() interface{} { return (float32)(*f.v) }
+
+func (f *float32Value) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Float32 parses the next command-line value as float32.
+func (p *parserMixin) Float32() (target *float32) {
+ target = new(float32)
+ p.Float32Var(target)
+ return
+}
+
+func (p *parserMixin) Float32Var(target *float32) {
+ p.SetValue(newFloat32Value(target))
+}
+
+// Float32List accumulates float32 values into a slice.
+func (p *parserMixin) Float32List() (target *[]float32) {
+ target = new([]float32)
+ p.Float32ListVar(target)
+ return
+}
+
+func (p *parserMixin) Float32ListVar(target *[]float32) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newFloat32Value(v.(*float32))
+ }))
+}
+
+// DurationList accumulates time.Duration values into a slice.
+func (p *parserMixin) DurationList() (target *[]time.Duration) {
+ target = new([]time.Duration)
+ p.DurationListVar(target)
+ return
+}
+
+func (p *parserMixin) DurationListVar(target *[]time.Duration) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newDurationValue(v.(*time.Duration))
+ }))
+}
+
+// IPList accumulates net.IP values into a slice.
+func (p *parserMixin) IPList() (target *[]net.IP) {
+ target = new([]net.IP)
+ p.IPListVar(target)
+ return
+}
+
+func (p *parserMixin) IPListVar(target *[]net.IP) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newIPValue(v.(*net.IP))
+ }))
+}
+
+// TCPList accumulates *net.TCPAddr values into a slice.
+func (p *parserMixin) TCPList() (target *[]*net.TCPAddr) {
+ target = new([]*net.TCPAddr)
+ p.TCPListVar(target)
+ return
+}
+
+func (p *parserMixin) TCPListVar(target *[]*net.TCPAddr) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newTCPAddrValue(v.(**net.TCPAddr))
+ }))
+}
+
+// ExistingFiles accumulates string values into a slice.
+func (p *parserMixin) ExistingFiles() (target *[]string) {
+ target = new([]string)
+ p.ExistingFilesVar(target)
+ return
+}
+
+func (p *parserMixin) ExistingFilesVar(target *[]string) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newExistingFileValue(v.(*string))
+ }))
+}
+
+// ExistingDirs accumulates string values into a slice.
+func (p *parserMixin) ExistingDirs() (target *[]string) {
+ target = new([]string)
+ p.ExistingDirsVar(target)
+ return
+}
+
+func (p *parserMixin) ExistingDirsVar(target *[]string) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newExistingDirValue(v.(*string))
+ }))
+}
+
+// ExistingFilesOrDirs accumulates string values into a slice.
+func (p *parserMixin) ExistingFilesOrDirs() (target *[]string) {
+ target = new([]string)
+ p.ExistingFilesOrDirsVar(target)
+ return
+}
+
+func (p *parserMixin) ExistingFilesOrDirsVar(target *[]string) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newExistingFileOrDirValue(v.(*string))
+ }))
+}
+
+// -- *regexp.Regexp Value
+type regexpValue struct{ v **regexp.Regexp }
+
+func newRegexpValue(p **regexp.Regexp) *regexpValue {
+	return &regexpValue{p}
+}
+
+func (f *regexpValue) Set(s string) error {
+ v, err := regexp.Compile(s)
+ if err == nil {
+ *f.v = (*regexp.Regexp)(v)
+ }
+ return err
+}
+
+func (f *regexpValue) Get() interface{} { return (*regexp.Regexp)(*f.v) }
+
+func (f *regexpValue) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Regexp parses the next command-line value as *regexp.Regexp.
+func (p *parserMixin) Regexp() (target **regexp.Regexp) {
+ target = new(*regexp.Regexp)
+ p.RegexpVar(target)
+ return
+}
+
+func (p *parserMixin) RegexpVar(target **regexp.Regexp) {
+ p.SetValue(newRegexpValue(target))
+}
+
+// RegexpList accumulates *regexp.Regexp values into a slice.
+func (p *parserMixin) RegexpList() (target *[]*regexp.Regexp) {
+ target = new([]*regexp.Regexp)
+ p.RegexpListVar(target)
+ return
+}
+
+func (p *parserMixin) RegexpListVar(target *[]*regexp.Regexp) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newRegexpValue(v.(**regexp.Regexp))
+ }))
+}
+
+// -- net.IP Value
+type resolvedIPValue struct{ v *net.IP }
+
+func newResolvedIPValue(p *net.IP) *resolvedIPValue {
+ return &resolvedIPValue{p}
+}
+
+func (f *resolvedIPValue) Set(s string) error {
+ v, err := resolveHost(s)
+ if err == nil {
+ *f.v = (net.IP)(v)
+ }
+ return err
+}
+
+func (f *resolvedIPValue) Get() interface{} { return (net.IP)(*f.v) }
+
+func (f *resolvedIPValue) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Resolve a hostname or IP to an IP.
+func (p *parserMixin) ResolvedIP() (target *net.IP) {
+ target = new(net.IP)
+ p.ResolvedIPVar(target)
+ return
+}
+
+func (p *parserMixin) ResolvedIPVar(target *net.IP) {
+ p.SetValue(newResolvedIPValue(target))
+}
+
+// ResolvedIPList accumulates net.IP values into a slice.
+func (p *parserMixin) ResolvedIPList() (target *[]net.IP) {
+ target = new([]net.IP)
+ p.ResolvedIPListVar(target)
+ return
+}
+
+func (p *parserMixin) ResolvedIPListVar(target *[]net.IP) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newResolvedIPValue(v.(*net.IP))
+ }))
+}
+
+// -- []byte Value
+type hexBytesValue struct{ v *[]byte }
+
+func newHexBytesValue(p *[]byte) *hexBytesValue {
+ return &hexBytesValue{p}
+}
+
+func (f *hexBytesValue) Set(s string) error {
+ v, err := hex.DecodeString(s)
+ if err == nil {
+ *f.v = ([]byte)(v)
+ }
+ return err
+}
+
+func (f *hexBytesValue) Get() interface{} { return ([]byte)(*f.v) }
+
+func (f *hexBytesValue) String() string { return fmt.Sprintf("%v", *f.v) }
+
+// Bytes as a hex string.
+func (p *parserMixin) HexBytes() (target *[]byte) {
+ target = new([]byte)
+ p.HexBytesVar(target)
+ return
+}
+
+func (p *parserMixin) HexBytesVar(target *[]byte) {
+ p.SetValue(newHexBytesValue(target))
+}
+
+// HexBytesList accumulates []byte values into a slice.
+func (p *parserMixin) HexBytesList() (target *[][]byte) {
+ target = new([][]byte)
+ p.HexBytesListVar(target)
+ return
+}
+
+func (p *parserMixin) HexBytesListVar(target *[][]byte) {
+ p.SetValue(newAccumulator(target, func(v interface{}) Value {
+ return newHexBytesValue(v.(*[]byte))
+ }))
+}
diff --git a/vendor/github.com/alecthomas/units/renovate.json5 b/vendor/github.com/alecthomas/units/renovate.json5
index 897864b85..6bb4acde9 100644
--- a/vendor/github.com/alecthomas/units/renovate.json5
+++ b/vendor/github.com/alecthomas/units/renovate.json5
@@ -8,4 +8,8 @@
"group:allNonMajor",
"schedule:earlyMondays", // Run once a week.
],
+ postUpdateOptions: [
+ "gomodTidy",
+ "gomodUpdateImportPaths"
+ ]
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.gitignore b/vendor/github.com/aws/aws-sdk-go-v2/.gitignore
deleted file mode 100644
index e736820b3..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/.gitignore
+++ /dev/null
@@ -1,12 +0,0 @@
-dist
-/doc
-/doc-staging
-.yardoc
-Gemfile.lock
-/internal/awstesting/integration/smoke/**/importmarker__.go
-/internal/awstesting/integration/smoke/_test/
-/vendor
-/private/model/cli/gen-api/gen-api
-.gradle/
-build/
-.idea/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml b/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml
deleted file mode 100644
index 8792d0ca6..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/.golangci.toml
+++ /dev/null
@@ -1,27 +0,0 @@
-[run]
-concurrency = 4
-timeout = "1m"
-issues-exit-code = 0
-modules-download-mode = "readonly"
-allow-parallel-runners = true
-skip-dirs = ["internal/repotools"]
-skip-dirs-use-default = true
-skip-files = ["service/transcribestreaming/eventstream_test.go"]
-[output]
-format = "github-actions"
-
-[linters-settings.cyclop]
-skip-tests = false
-
-[linters-settings.errcheck]
-check-blank = true
-
-[linters]
-disable-all = true
-enable = ["errcheck"]
-fast = false
-
-[issues]
-exclude-use-default = false
-
-# Refer config definitions at https://golangci-lint.run/usage/configuration/#config-file
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml b/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml
deleted file mode 100644
index 4b498a7a2..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/.travis.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-language: go
-sudo: true
-dist: bionic
-
-branches:
- only:
- - main
-
-os:
- - linux
- - osx
- # Travis doesn't work with windows and Go tip
- #- windows
-
-go:
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-before_install:
- - if [ "$TRAVIS_OS_NAME" = "windows" ]; then choco install make; fi
- - (cd /tmp/; go get golang.org/x/lint/golint)
-
-env:
- - EACHMODULE_CONCURRENCY=4
-
-script:
- - make ci-test-no-generate;
-
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md
deleted file mode 100644
index 78de90a37..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/CHANGELOG.md
+++ /dev/null
@@ -1,5715 +0,0 @@
-# Release (2022-09-20)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.16.16
- * **Documentation**: added clafirfication on the Credential object to show usage of loadDefaultConfig to load credentials
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.58.0](service/ec2/CHANGELOG.md#v1580-2022-09-20)
- * **Feature**: This release adds support for blocked paths to Amazon VPC Reachability Analyzer.
-
-# Release (2022-09-19)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.18.0](service/cloudtrail/CHANGELOG.md#v1180-2022-09-19)
- * **Feature**: This release includes support for importing existing trails into CloudTrail Lake.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.57.0](service/ec2/CHANGELOG.md#v1570-2022-09-19)
- * **Feature**: This release adds CapacityAllocations field to DescribeCapacityReservations
-* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.17.0](service/mediaconnect/CHANGELOG.md#v1170-2022-09-19)
- * **Feature**: This change allows the customer to use the SRT Caller protocol as part of their flows
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.26.0](service/rds/CHANGELOG.md#v1260-2022-09-19)
- * **Feature**: This release adds support for Amazon RDS Proxy with SQL Server compatibility.
-
-# Release (2022-09-16)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.13.0](service/codestarnotifications/CHANGELOG.md#v1130-2022-09-16)
- * **Feature**: This release adds tag based access control for the UntagResource API.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.21](service/ecs/CHANGELOG.md#v11821-2022-09-16)
- * **Documentation**: This release supports new task definition sizes.
-
-# Release (2022-09-15)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.17.0](service/dynamodb/CHANGELOG.md#v1170-2022-09-15)
- * **Feature**: Increased DynamoDB transaction limit from 25 to 100.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.56.0](service/ec2/CHANGELOG.md#v1560-2022-09-15)
- * **Feature**: This feature allows customers to create tags for vpc-endpoint-connections and vpc-endpoint-service-permissions.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.43.0](service/sagemaker/CHANGELOG.md#v1430-2022-09-15)
- * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying Hyperband strategy for tuning jobs, which uses a multi-fidelity based tuning strategy to stop underperforming hyperparameter configurations early.
-
-# Release (2022-09-14)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/feature/rds/auth`: [v1.2.0](feature/rds/auth/CHANGELOG.md#v120-2022-09-14)
- * **Feature**: Updated `BuildAuthToken` to validate the provided endpoint contains a port.
-* `github.com/aws/aws-sdk-go-v2/internal/v4a`: [v1.0.13](internal/v4a/CHANGELOG.md#v1013-2022-09-14)
- * **Bug Fix**: Fixes an issues where an error from an underlying SigV4 credential provider would not be surfaced from the SigV4a credential provider. Contribution by [sakthipriyan-aqfer](https://github.com/sakthipriyan-aqfer).
-* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.18.0](service/acmpca/CHANGELOG.md#v1180-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.7.0](service/amplifyuibuilder/CHANGELOG.md#v170-2022-09-14)
- * **Feature**: Amplify Studio UIBuilder is introducing forms functionality. Forms can be configured from Data Store models, JSON, or from scratch. These forms can then be generated in your project and used like any other React components.
-* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.14.0](service/appconfig/CHANGELOG.md#v1140-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.17.0](service/appflow/CHANGELOG.md#v1170-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.16.0](service/appmesh/CHANGELOG.md#v1160-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.17.0](service/cloudtrail/CHANGELOG.md#v1170-2022-09-14)
- * **Feature**: This release adds CloudTrail getChannel and listChannels APIs to allow customer to view the ServiceLinkedChannel configurations.
-* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.12.0](service/codestar/CHANGELOG.md#v1120-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.12.0](service/codestarnotifications/CHANGELOG.md#v1120-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.14.0](service/cognitoidentity/CHANGELOG.md#v1140-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.20.0](service/cognitoidentityprovider/CHANGELOG.md#v1200-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.26.0](service/configservice/CHANGELOG.md#v1260-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.30.0](service/connect/CHANGELOG.md#v1300-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.12.0](service/connectparticipant/CHANGELOG.md#v1120-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.20.0](service/costexplorer/CHANGELOG.md#v1200-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.19.0](service/customerprofiles/CHANGELOG.md#v1190-2022-09-14)
- * **Feature**: Added isUnstructured in response for Customer Profiles Integration APIs
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.16.0](service/dataexchange/CHANGELOG.md#v1160-2022-09-14)
- * **Feature**: Documentation updates for AWS Data Exchange.
-* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.8.0](service/drs/CHANGELOG.md#v180-2022-09-14)
- * **Feature**: Fixed the data type of lagDuration that is returned in Describe Source Server API
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.55.0](service/ec2/CHANGELOG.md#v1550-2022-09-14)
- * **Feature**: Documentation updates for Amazon EC2.
- * **Feature**: This release adds support to send VPC Flow Logs to kinesis-data-firehose as new destination type
- * **Feature**: This update introduces API operations to manage and create local gateway route tables, CoIP pools, and VIF group associations.
- * **Feature**: Two new features for local gateway route tables: support for static routes targeting Elastic Network Interfaces and direct VPC routing.
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.22.0](service/eks/CHANGELOG.md#v1220-2022-09-14)
- * **Feature**: Adding support for local Amazon EKS clusters on Outposts
- * **Feature**: Adds support for EKS Addons ResolveConflicts "preserve" flag. Also adds new update failed status for EKS Addons.
-* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.14.0](service/emrcontainers/CHANGELOG.md#v1140-2022-09-14)
- * **Feature**: EMR on EKS now allows running Spark SQL using the newly introduced Spark SQL Job Driver in the Start Job Run API
-* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.2.0](service/emrserverless/CHANGELOG.md#v120-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.9.0](service/evidently/CHANGELOG.md#v190-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
- * **Feature**: This release adds support for the client-side evaluation - powered by AWS AppConfig feature.
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.13.0](service/finspacedata/CHANGELOG.md#v1130-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.13.0](service/fis/CHANGELOG.md#v1130-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.12](service/fsx/CHANGELOG.md#v12412-2022-09-14)
- * **Documentation**: Documentation update for Amazon FSx.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.31.0](service/glue/CHANGELOG.md#v1310-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.18.0](service/greengrassv2/CHANGELOG.md#v1180-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.3](service/identitystore/CHANGELOG.md#v1153-2022-09-14)
- * **Documentation**: Documentation updates for the Identity Store CLI Reference.
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.20.0](service/imagebuilder/CHANGELOG.md#v1200-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.8.0](service/inspector2/CHANGELOG.md#v180-2022-09-14)
- * **Feature**: This release adds new fields like fixAvailable, fixedInVersion and remediation to the finding model. The requirement to have vulnerablePackages in the finding model has also been removed. The documentation has been updated to reflect these changes.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.29.0](service/iot/CHANGELOG.md#v1290-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.13.0](service/iotanalytics/CHANGELOG.md#v1130-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.14.0](service/iotsecuretunneling/CHANGELOG.md#v1140-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.25.0](service/iotsitewise/CHANGELOG.md#v1250-2022-09-14)
- * **Feature**: Allow specifying units in Asset Properties
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.34.0](service/kendra/CHANGELOG.md#v1340-2022-09-14)
- * **Feature**: This release enables our customer to choose the option of Sharepoint 2019 for the on-premise Sharepoint connector.
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.24.0](service/lexmodelsv2/CHANGELOG.md#v1240-2022-09-14)
- * **Feature**: This release is for supporting Composite Slot Type feature in AWS Lex V2. Composite Slot Type will help developer to logically group coherent slots and maintain their inter-relationships in runtime conversation.
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.15.0](service/lexruntimev2/CHANGELOG.md#v1150-2022-09-14)
- * **Feature**: This release is for supporting Composite Slot Type feature in AWS Lex V2. Composite Slot Type will help developer to logically group coherent slots and maintain their inter-relationships in runtime conversation.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.18.0](service/lookoutmetrics/CHANGELOG.md#v1180-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
- * **Feature**: Release dimension value filtering feature to allow customers to define dimension filters for including only a subset of their dataset to be used by LookoutMetrics.
-* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.1.0](service/m2/CHANGELOG.md#v110-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.23.0](service/medialive/CHANGELOG.md#v1230-2022-09-14)
- * **Feature**: This change exposes API settings which allow Dolby Atmos and Dolby Vision to be used when running a channel using Elemental Media Live
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.19.0](service/networkfirewall/CHANGELOG.md#v1190-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.15.0](service/pi/CHANGELOG.md#v1150-2022-09-14)
- * **Feature**: Increases the maximum values of two RDS Performance Insights APIs. The maximum value of the Limit parameter of DimensionGroup is 25. The MaxResult maximum is now 25 for the following APIs: DescribeDimensionKeys, GetResourceMetrics, ListAvailableResourceDimensions, and ListAvailableResourceMetrics.
-* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.17.0](service/pricing/CHANGELOG.md#v1170-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.25.0](service/quicksight/CHANGELOG.md#v1250-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.9](service/redshift/CHANGELOG.md#v1269-2022-09-14)
- * **Documentation**: This release updates documentation for AQUA features and other description updates.
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.22.0](service/route53/CHANGELOG.md#v1220-2022-09-14)
- * **Feature**: Amazon Route 53 now supports the Middle East (UAE) Region (me-central-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region.
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.10.0](service/route53recoverycluster/CHANGELOG.md#v1100-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.22.0](service/s3control/CHANGELOG.md#v1220-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.42.0](service/sagemaker/CHANGELOG.md#v1420-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
- * **Feature**: SageMaker Hosting now allows customization on ML instance storage volume size, model data download timeout and inference container startup ping health check timeout for each ProductionVariant in CreateEndpointConfig API.
- * **Feature**: This release adds HyperParameterTuningJob type in Search API.
- * **Feature**: This release adds Mode to AutoMLJobConfig.
-* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.14.0](service/sagemakera2iruntime/CHANGELOG.md#v1140-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.16.0](service/secretsmanager/CHANGELOG.md#v1160-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.14.0](service/servicecatalogappregistry/CHANGELOG.md#v1140-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.14.0](service/sfn/CHANGELOG.md#v1140-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.18.0](service/sns/CHANGELOG.md#v1180-2022-09-14)
- * **Feature**: Amazon SNS introduces the Data Protection Policy APIs, which enable customers to attach a data protection policy to an SNS topic. This allows topic owners to enable the new message data protection feature to audit and block sensitive data that is exchanged through their topics.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.28.0](service/ssm/CHANGELOG.md#v1280-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
- * **Feature**: This release adds support for Systems Manager State Manager Association tagging.
-* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.14.0](service/timestreamwrite/CHANGELOG.md#v1140-2022-09-14)
- * **Feature**: Fixed a bug in the API client generation which caused some operation parameters to be incorrectly generated as value types instead of pointer types. The service API always required these affected parameters to be nilable. This fixes the SDK client to match the expectations of the the service API.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.22.0](service/transfer/CHANGELOG.md#v1220-2022-09-14)
- * **Feature**: This release introduces the ability to have multiple server host keys for any of your Transfer Family servers that use the SFTP protocol.
-
-# Release (2022-09-02.2)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.2](service/identitystore/CHANGELOG.md#v1152-2022-09-022)
- * **Bug Fix**: Reverts a change to the identitystore module so that MaxResults members of ListGroupMemberShips, ListGroupMembershipsForMembers, ListGroups, and ListUsers are correctly generated as pointer types instead of value types
-
-# Release (2022-09-02)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.19.0](service/cognitoidentityprovider/CHANGELOG.md#v1190-2022-09-02)
- * **Feature**: This release adds a new "AuthSessionValidity" field to the UserPoolClient in Cognito. Application admins can configure this value for their users' authentication duration, which is currently fixed at 3 minutes, up to 15 minutes. Setting this field will also apply to the SMS MFA authentication flow.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.29.0](service/connect/CHANGELOG.md#v1290-2022-09-02)
- * **Feature**: This release adds search APIs for Routing Profiles and Queues, which can be used to search for those resources within a Connect Instance.
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.19.0](service/mediapackage/CHANGELOG.md#v1190-2022-09-02)
- * **Feature**: Added support for AES_CTR encryption to CMAF origin endpoints
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.41.0](service/sagemaker/CHANGELOG.md#v1410-2022-09-02)
- * **Feature**: This release enables administrators to attribute user activity and API calls from Studio notebooks, Data Wrangler and Canvas to specific users even when users share the same execution IAM role. ExecutionRoleIdentityConfig at Sagemaker domain level enables this feature.
-
-# Release (2022-09-01)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.11](service/codegurureviewer/CHANGELOG.md#v11611-2022-09-01)
- * **Documentation**: Documentation updates to fix formatting issues in CLI and SDK documentation.
-* `github.com/aws/aws-sdk-go-v2/service/controltower`: [v1.0.0](service/controltower/CHANGELOG.md#v100-2022-09-01)
- * **Release**: New AWS service client module
- * **Feature**: This release contains the first SDK for AWS Control Tower. It introduces a new set of APIs: EnableControl, DisableControl, GetControlOperation, and ListEnabledControls.
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.21.10](service/route53/CHANGELOG.md#v12110-2022-09-01)
- * **Documentation**: Documentation updates for Amazon Route 53.
-
-# Release (2022-08-31)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.20.2](service/cloudfront/CHANGELOG.md#v1202-2022-08-31)
- * **Documentation**: Update API documentation for CloudFront origin access control (OAC)
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.15.0](service/identitystore/CHANGELOG.md#v1150-2022-08-31)
- * **Feature**: Expand IdentityStore API to support Create, Read, Update, Delete and Get operations for User, Group and GroupMembership resources.
-* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.13.0](service/iotthingsgraph/CHANGELOG.md#v1130-2022-08-31)
- * **Feature**: This release deprecates all APIs of the ThingsGraph service
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.18.0](service/ivs/CHANGELOG.md#v1180-2022-08-31)
- * **Feature**: IVS Merge Fragmented Streams. This release adds support for recordingReconnectWindow field in IVS recordingConfigurations. For more information see https://docs.aws.amazon.com/ivs/latest/APIReference/Welcome.html
-* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.12](service/rdsdata/CHANGELOG.md#v11212-2022-08-31)
- * **Documentation**: Documentation updates for RDS Data API
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.40.0](service/sagemaker/CHANGELOG.md#v1400-2022-08-31)
- * **Feature**: SageMaker Inference Recommender now accepts Inference Recommender fields: Domain, Task, Framework, SamplePayloadUrl, SupportedContentTypes, SupportedInstanceTypes, directly in our CreateInferenceRecommendationsJob API through ContainerConfig
-
-# Release (2022-08-30)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.17.0](service/greengrassv2/CHANGELOG.md#v1170-2022-08-30)
- * **Feature**: Adds topologyFilter to ListInstalledComponentsRequest which allows filtration of components by ROOT or ALL (including root and dependency components). Adds lastStatusChangeTimestamp to ListInstalledComponents response to show the last time a component changed state on a device.
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.15](service/identitystore/CHANGELOG.md#v11415-2022-08-30)
- * **Documentation**: Documentation updates for the Identity Store CLI Reference.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.15.0](service/lookoutequipment/CHANGELOG.md#v1150-2022-08-30)
- * **Feature**: This release adds new apis for providing labels.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.23.0](service/macie2/CHANGELOG.md#v1230-2022-08-30)
- * **Feature**: This release of the Amazon Macie API adds support for using allow lists to define specific text and text patterns to ignore when inspecting data sources for sensitive data.
-* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.19](service/sso/CHANGELOG.md#v11119-2022-08-30)
- * **Documentation**: Documentation updates for the AWS IAM Identity Center Portal CLI Reference.
-* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.7](service/ssoadmin/CHANGELOG.md#v1157-2022-08-30)
- * **Documentation**: Documentation updates for the AWS IAM Identity Center CLI Reference.
-
-# Release (2022-08-29)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.9](service/fsx/CHANGELOG.md#v1249-2022-08-29)
- * **Documentation**: Documentation updates for Amazon FSx for NetApp ONTAP.
-* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.11.0](service/voiceid/CHANGELOG.md#v1110-2022-08-29)
- * **Feature**: Amazon Connect Voice ID now detects voice spoofing. When a prospective fraudster tries to spoof caller audio using audio playback or synthesized speech, Voice ID will return a risk score and outcome to indicate the how likely it is that the voice is spoofed.
-
-# Release (2022-08-26)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.18.0](service/mediapackage/CHANGELOG.md#v1180-2022-08-26)
- * **Feature**: This release adds Ads AdTriggers and AdsOnDeliveryRestrictions to describe calls for CMAF endpoints on MediaPackage.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.25.1](service/rds/CHANGELOG.md#v1251-2022-08-26)
- * **Documentation**: Removes support for RDS Custom from DBInstanceClass in ModifyDBInstance
-
-# Release (2022-08-25)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.13](service/elasticloadbalancingv2/CHANGELOG.md#v11813-2022-08-25)
- * **Documentation**: Documentation updates for ELBv2. Gateway Load Balancer now supports Configurable Flow Stickiness, enabling you to configure the hashing used to maintain stickiness of flows to a specific target appliance.
-* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.15.0](service/gamelift/CHANGELOG.md#v1150-2022-08-25)
- * **Feature**: This release adds support for eight EC2 local zones as fleet locations; Atlanta, Chicago, Dallas, Denver, Houston, Kansas City (us-east-1-mci-1a), Los Angeles, and Phoenix. It also adds support for C5d, C6a, C6i, and R5d EC2 instance families.
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.22.0](service/iotwireless/CHANGELOG.md#v1220-2022-08-25)
- * **Feature**: This release includes a new feature for the customers to enable the LoRa gateways to send out beacons for Class B devices and an option to select one or more gateways for Class C devices when sending the LoRaWAN downlink messages.
-* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.13](service/ivschat/CHANGELOG.md#v1013-2022-08-25)
- * **Documentation**: Documentation change for IVS Chat API Reference. Doc-only update to add a paragraph on ARNs to the Welcome section.
-* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.8.0](service/panorama/CHANGELOG.md#v180-2022-08-25)
- * **Feature**: Support sorting and filtering in ListDevices API, and add more fields to device listings and single device detail
-* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.13.0](service/ssooidc/CHANGELOG.md#v1130-2022-08-25)
- * **Feature**: Updated required request parameters on IAM Identity Center's OIDC CreateToken action.
-
-# Release (2022-08-24)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.20.0](service/cloudfront/CHANGELOG.md#v1200-2022-08-24)
- * **Feature**: Adds support for CloudFront origin access control (OAC), making it possible to restrict public access to S3 bucket origins in all AWS Regions, those with SSE-KMS, and more.
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.25.0](service/configservice/CHANGELOG.md#v1250-2022-08-24)
- * **Feature**: AWS Config now supports ConformancePackTemplate documents in SSM Docs for the deployment and update of conformance packs.
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.14](service/iam/CHANGELOG.md#v11814-2022-08-24)
- * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM).
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.17.1](service/ivs/CHANGELOG.md#v1171-2022-08-24)
- * **Documentation**: Documentation Change for IVS API Reference - Doc-only update to type field description for CreateChannel and UpdateChannel actions and for Channel data type. Also added Amazon Resource Names (ARNs) paragraph to Welcome section.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.24.0](service/quicksight/CHANGELOG.md#v1240-2022-08-24)
- * **Feature**: Added a new optional property DashboardVisual under ExperienceConfiguration parameter of GenerateEmbedUrlForAnonymousUser and GenerateEmbedUrlForRegisteredUser API operations. This supports embedding of specific visuals in QuickSight dashboards.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.21.5](service/transfer/CHANGELOG.md#v1215-2022-08-24)
- * **Documentation**: Documentation updates for AWS Transfer Family
-
-# Release (2022-08-23)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.25.0](service/rds/CHANGELOG.md#v1250-2022-08-23)
- * **Feature**: RDS for Oracle supports Oracle Data Guard switchover and read replica backups.
-* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.5](service/ssoadmin/CHANGELOG.md#v1155-2022-08-23)
- * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
-
-# Release (2022-08-22)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.19.5](service/docdb/CHANGELOG.md#v1195-2022-08-22)
- * **Documentation**: Update document for volume clone
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.54.0](service/ec2/CHANGELOG.md#v1540-2022-08-22)
- * **Feature**: R6a instances are powered by 3rd generation AMD EPYC (Milan) processors delivering all-core turbo frequency of 3.6 GHz. C6id, M6id, and R6id instances are powered by 3rd generation Intel Xeon Scalable processor (Ice Lake) delivering all-core turbo frequency of 3.5 GHz.
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.23.0](service/forecast/CHANGELOG.md#v1230-2022-08-22)
- * **Feature**: releasing What-If Analysis APIs and update ARN regex pattern to be more strict in accordance with security recommendation
-* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.12.0](service/forecastquery/CHANGELOG.md#v1120-2022-08-22)
- * **Feature**: releasing What-If Analysis APIs
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.24.0](service/iotsitewise/CHANGELOG.md#v1240-2022-08-22)
- * **Feature**: Enable non-unique asset names under different hierarchies
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.23.0](service/lexmodelsv2/CHANGELOG.md#v1230-2022-08-22)
- * **Feature**: This release introduces a new feature to stop a running BotRecommendation Job for Automated Chatbot Designer.
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.23.0](service/securityhub/CHANGELOG.md#v1230-2022-08-22)
- * **Feature**: Added new resource details objects to ASFF, including resources for AwsBackupBackupVault, AwsBackupBackupPlan and AwsBackupRecoveryPoint. Added FixAvailable, FixedInVersion and Remediation to Vulnerability.
-* `github.com/aws/aws-sdk-go-v2/service/supportapp`: [v1.0.0](service/supportapp/CHANGELOG.md#v100-2022-08-22)
- * **Release**: New AWS service client module
- * **Feature**: This is the initial SDK release for the AWS Support App in Slack.
-
-# Release (2022-08-19)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.28.0](service/connect/CHANGELOG.md#v1280-2022-08-19)
- * **Feature**: This release adds SearchSecurityProfiles API which can be used to search for Security Profile resources within a Connect Instance.
-* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.12](service/ivschat/CHANGELOG.md#v1012-2022-08-19)
- * **Documentation**: Documentation Change for IVS Chat API Reference - Doc-only update to change text/description for tags field.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.33.0](service/kendra/CHANGELOG.md#v1330-2022-08-19)
- * **Feature**: This release adds support for a new authentication type - Personal Access Token (PAT) for confluence server.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.17.0](service/lookoutmetrics/CHANGELOG.md#v1170-2022-08-19)
- * **Feature**: This release is to make GetDataQualityMetrics API publicly available.
-
-# Release (2022-08-18)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.1.0](service/chimesdkmediapipelines/CHANGELOG.md#v110-2022-08-18)
- * **Feature**: The Amazon Chime SDK now supports live streaming of real-time video from the Amazon Chime SDK sessions to streaming platforms such as Amazon IVS and Amazon Elemental MediaLive. We have also added support for concatenation to create a single media capture file.
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.21.0](service/cloudwatch/CHANGELOG.md#v1210-2022-08-18)
- * **Feature**: Add support for managed Contributor Insights Rules
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.18.4](service/cognitoidentityprovider/CHANGELOG.md#v1184-2022-08-18)
- * **Documentation**: This change is being made simply to fix the public documentation based on the models. We have included the PasswordChange and ResendCode events, along with the Pass, Fail and InProgress status. We have removed the Success and Failure status which are never returned by our APIs.
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.16.0](service/dynamodb/CHANGELOG.md#v1160-2022-08-18)
- * **Feature**: This release adds support for importing data from S3 into a new DynamoDB table
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.53.0](service/ec2/CHANGELOG.md#v1530-2022-08-18)
- * **Feature**: This release adds support for VPN log options , a new feature allowing S2S VPN connections to send IKE activity logs to CloudWatch Logs
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.15.0](service/networkmanager/CHANGELOG.md#v1150-2022-08-18)
- * **Feature**: Add TransitGatewayPeeringAttachmentId property to TransitGatewayPeering Model
-
-# Release (2022-08-17)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.15.0](service/appmesh/CHANGELOG.md#v1150-2022-08-17)
- * **Feature**: AWS App Mesh release to support Multiple Listener and Access Log Format feature
-* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.1.0](service/connectcampaigns/CHANGELOG.md#v110-2022-08-17)
- * **Feature**: Updated exceptions for Amazon Connect Outbound Campaign api's.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.32.0](service/kendra/CHANGELOG.md#v1320-2022-08-17)
- * **Feature**: This release adds Zendesk connector (which allows you to specify Zendesk SAAS platform as data source), Proxy Support for Sharepoint and Confluence Server (which allows you to specify the proxy configuration if proxy is required to connect to your Sharepoint/Confluence Server as data source).
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.17.0](service/lakeformation/CHANGELOG.md#v1170-2022-08-17)
- * **Feature**: This release adds a new API support "AssumeDecoratedRoleWithSAML" and also release updates the corresponding documentation.
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.24.0](service/lambda/CHANGELOG.md#v1240-2022-08-17)
- * **Feature**: Added support for customization of Consumer Group ID for MSK and Kafka Event Source Mappings.
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.22.0](service/lexmodelsv2/CHANGELOG.md#v1220-2022-08-17)
- * **Feature**: This release introduces support for enhanced conversation design with the ability to define custom conversation flows with conditional branching and new bot responses.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.24.0](service/rds/CHANGELOG.md#v1240-2022-08-17)
- * **Feature**: Adds support for Internet Protocol Version 6 (IPv6) for RDS Aurora database clusters.
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.18](service/secretsmanager/CHANGELOG.md#v11518-2022-08-17)
- * **Documentation**: Documentation updates for Secrets Manager.
-
-# Release (2022-08-16)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.20.0](service/rekognition/CHANGELOG.md#v1200-2022-08-16)
- * **Feature**: This release adds APIs which support copying an Amazon Rekognition Custom Labels model and managing project policies across AWS account.
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.12](service/servicecatalog/CHANGELOG.md#v11412-2022-08-16)
- * **Documentation**: Documentation updates for Service Catalog
-
-# Release (2022-08-15)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.19.0](service/cloudfront/CHANGELOG.md#v1190-2022-08-15)
- * **Feature**: Adds Http 3 support to distributions
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.13](service/identitystore/CHANGELOG.md#v11413-2022-08-15)
- * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
-* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.17](service/sso/CHANGELOG.md#v11117-2022-08-15)
- * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.9.0](service/wisdom/CHANGELOG.md#v190-2022-08-15)
- * **Feature**: This release introduces a new API PutFeedback that allows submitting feedback to Wisdom on content relevance.
-
-# Release (2022-08-14)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.17.0](config/CHANGELOG.md#v1170-2022-08-14)
- * **Feature**: Add alternative mechanism for determning the users `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.15.0](service/amp/CHANGELOG.md#v1150-2022-08-14)
- * **Feature**: This release adds log APIs that allow customers to manage logging for their Amazon Managed Service for Prometheus workspaces.
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.11.0](service/chimesdkmessaging/CHANGELOG.md#v1110-2022-08-14)
- * **Feature**: The Amazon Chime SDK now supports channels with up to one million participants with elastic channels.
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.17.0](service/ivs/CHANGELOG.md#v1170-2022-08-14)
- * **Feature**: Updates various list api MaxResults ranges
-* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.12.0](service/personalizeruntime/CHANGELOG.md#v1120-2022-08-14)
- * **Feature**: This release provides support for promotions in AWS Personalize runtime.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.6](service/rds/CHANGELOG.md#v1236-2022-08-14)
- * **Documentation**: Adds support for RDS Custom to DBInstanceClass in ModifyDBInstance
-
-# Release (2022-08-11)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/backupstorage`: [v1.0.0](service/backupstorage/CHANGELOG.md#v100-2022-08-11)
- * **Release**: New AWS service client module
- * **Feature**: This is the first public release of AWS Backup Storage. We are exposing some previously-internal APIs for use by external services. These APIs are not meant to be used directly by customers.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.30.0](service/glue/CHANGELOG.md#v1300-2022-08-11)
- * **Feature**: Add support for Python 3.9 AWS Glue Python Shell jobs
-* `github.com/aws/aws-sdk-go-v2/service/privatenetworks`: [v1.0.0](service/privatenetworks/CHANGELOG.md#v100-2022-08-11)
- * **Release**: New AWS service client module
- * **Feature**: This is the initial SDK release for AWS Private 5G. AWS Private 5G is a managed service that makes it easy to deploy, operate, and scale your own private mobile network at your on-premises location.
-
-# Release (2022-08-10)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.16.0](config/CHANGELOG.md#v1160-2022-08-10)
- * **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
-* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.12.0](service/dlm/CHANGELOG.md#v1120-2022-08-10)
- * **Feature**: This release adds support for excluding specific data (non-boot) volumes from multi-volume snapshot sets created by snapshot lifecycle policies
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.52.0](service/ec2/CHANGELOG.md#v1520-2022-08-10)
- * **Feature**: This release adds support for excluding specific data (non-root) volumes from multi-volume snapshot sets created from instances.
-
-# Release (2022-08-09)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.20.0](service/cloudwatch/CHANGELOG.md#v1200-2022-08-09)
- * **Feature**: Various quota increases related to dimensions and custom metrics
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.18.0](service/location/CHANGELOG.md#v1180-2022-08-09)
- * **Feature**: Amazon Location Service now allows circular geofences in BatchPutGeofence, PutGeofence, and GetGeofence APIs.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.39.0](service/sagemaker/CHANGELOG.md#v1390-2022-08-09)
- * **Feature**: Amazon SageMaker Automatic Model Tuning now supports specifying multiple alternate EC2 instance types to make tuning jobs more robust when the preferred instance type is not available due to insufficient capacity.
-* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.13.0](service/sagemakera2iruntime/CHANGELOG.md#v1130-2022-08-09)
- * **Feature**: Fix bug with parsing ISO-8601 CreationTime in Java SDK in DescribeHumanLoop
-
-# Release (2022-08-08)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.16.9
- * **Bug Fix**: aws/signer/v4: Fixes a panic in SDK's handling of endpoint URLs with ports by correcting how URL path is parsed from opaque URLs. Fixes [#1294](https://github.com/aws/aws-sdk-go-v2/issues/1294).
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.29.0](service/glue/CHANGELOG.md#v1290-2022-08-08)
- * **Feature**: Add an option to run non-urgent or non-time sensitive Glue Jobs on spare capacity
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.14.10](service/identitystore/CHANGELOG.md#v11410-2022-08-08)
- * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.21.0](service/iotwireless/CHANGELOG.md#v1210-2022-08-08)
- * **Feature**: AWS IoT Wireless release support for sidewalk data reliability.
-* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.17.0](service/pinpoint/CHANGELOG.md#v1170-2022-08-08)
- * **Feature**: Adds support for Advance Quiet Time in Journeys. Adds RefreshOnSegmentUpdate and WaitForQuietTime to JourneyResponse.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.23.2](service/quicksight/CHANGELOG.md#v1232-2022-08-08)
- * **Documentation**: A series of documentation updates to the QuickSight API reference.
-* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.11.14](service/sso/CHANGELOG.md#v11114-2022-08-08)
- * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
-* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.2](service/ssoadmin/CHANGELOG.md#v1152-2022-08-08)
- * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
-* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.12.12](service/ssooidc/CHANGELOG.md#v11212-2022-08-08)
- * **Documentation**: Documentation updates to reflect service rename - AWS IAM Identity Center (successor to AWS Single Sign-On)
-
-# Release (2022-08-04)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.13.0](service/chimesdkmeetings/CHANGELOG.md#v1130-2022-08-04)
- * **Feature**: Adds support for Tags on Amazon Chime SDK WebRTC sessions
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.24.0](service/configservice/CHANGELOG.md#v1240-2022-08-04)
- * **Feature**: Add resourceType enums for Athena, GlobalAccelerator, Detective and EC2 types
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.3](service/databasemigrationservice/CHANGELOG.md#v1213-2022-08-04)
- * **Documentation**: Documentation updates for Database Migration Service (DMS).
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.28.0](service/iot/CHANGELOG.md#v1280-2022-08-04)
- * **Feature**: The release is to support attach a provisioning template to CACert for JITP function, Customer now doesn't have to hardcode a roleArn and templateBody during register a CACert to enable JITP.
-
-# Release (2022-08-03)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.18.0](service/cognitoidentityprovider/CHANGELOG.md#v1180-2022-08-03)
- * **Feature**: Add a new exception type, ForbiddenException, that is returned when request is not allowed
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.22.0](service/wafv2/CHANGELOG.md#v1220-2022-08-03)
- * **Feature**: You can now associate an AWS WAF web ACL with an Amazon Cognito user pool.
-
-# Release (2022-08-02)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/licensemanagerusersubscriptions`: [v1.0.0](service/licensemanagerusersubscriptions/CHANGELOG.md#v100-2022-08-02)
- * **Release**: New AWS service client module
- * **Feature**: This release supports user based subscription for Microsoft Visual Studio Professional and Enterprise on EC2.
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.21.0](service/personalize/CHANGELOG.md#v1210-2022-08-02)
- * **Feature**: This release adds support for incremental bulk ingestion for the Personalize CreateDatasetImportJob API.
-
-# Release (2022-08-01)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.23.1](service/configservice/CHANGELOG.md#v1231-2022-08-01)
- * **Documentation**: Documentation update for PutConfigRule and PutOrganizationConfigRule
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.22.0](service/workspaces/CHANGELOG.md#v1220-2022-08-01)
- * **Feature**: This release introduces ModifySamlProperties, a new API that allows control of SAML properties associated with a WorkSpaces directory. The DescribeWorkspaceDirectories API will now additionally return SAML properties in its responses.
-
-# Release (2022-07-29)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.51.0](service/ec2/CHANGELOG.md#v1510-2022-07-29)
- * **Feature**: Documentation updates for Amazon EC2.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.4](service/fsx/CHANGELOG.md#v1244-2022-07-29)
- * **Documentation**: Documentation updates for Amazon FSx
-* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.17.0](service/shield/CHANGELOG.md#v1170-2022-07-29)
- * **Feature**: AWS Shield Advanced now supports filtering for ListProtections and ListProtectionGroups.
-
-# Release (2022-07-28)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.50.1](service/ec2/CHANGELOG.md#v1501-2022-07-28)
- * **Documentation**: Documentation updates for VM Import/Export.
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.16.0](service/elasticsearchservice/CHANGELOG.md#v1160-2022-07-28)
- * **Feature**: This release adds support for gp3 EBS (Elastic Block Store) storage.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.14.0](service/lookoutvision/CHANGELOG.md#v1140-2022-07-28)
- * **Feature**: This release introduces support for image segmentation models and updates CPU accelerator options for models hosted on edge devices.
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.10.0](service/opensearch/CHANGELOG.md#v1100-2022-07-28)
- * **Feature**: This release adds support for gp3 EBS (Elastic Block Store) storage.
-
-# Release (2022-07-27)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.20.0](service/auditmanager/CHANGELOG.md#v1200-2022-07-27)
- * **Feature**: This release adds an exceeded quota exception to several APIs. We added a ServiceQuotaExceededException for the following operations: CreateAssessment, CreateControl, CreateAssessmentFramework, and UpdateAssessmentStatus.
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.21.0](service/chime/CHANGELOG.md#v1210-2022-07-27)
- * **Feature**: Chime VoiceConnector will now support ValidateE911Address which will allow customers to prevalidate their addresses included in their SIP invites for emergency calling
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.23.0](service/configservice/CHANGELOG.md#v1230-2022-07-27)
- * **Feature**: This release adds ListConformancePackComplianceScores API to support the new compliance score feature, which provides a percentage of the number of compliant rule-resource combinations in a conformance pack compared to the number of total possible rule-resource combinations in the conformance pack.
-* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.14.0](service/globalaccelerator/CHANGELOG.md#v1140-2022-07-27)
- * **Feature**: Global Accelerator now supports dual-stack accelerators, enabling support for IPv4 and IPv6 traffic.
-* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.13.0](service/marketplacecatalog/CHANGELOG.md#v1130-2022-07-27)
- * **Feature**: The SDK for the StartChangeSet API will now automatically set and use an idempotency token in the ClientRequestToken request parameter if the customer does not provide it.
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.17.0](service/polly/CHANGELOG.md#v1170-2022-07-27)
- * **Feature**: Amazon Polly adds new English and Hindi voice - Kajal. Kajal is available as Neural voice only.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.27.5](service/ssm/CHANGELOG.md#v1275-2022-07-27)
- * **Documentation**: Adding doc updates for OpsCenter support in Service Setting actions.
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.21.0](service/workspaces/CHANGELOG.md#v1210-2022-07-27)
- * **Feature**: Added CreateWorkspaceImage API to create a new WorkSpace image from an existing WorkSpace.
-
-# Release (2022-07-26)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.15.0](service/appsync/CHANGELOG.md#v1150-2022-07-26)
- * **Feature**: Adds support for a new API to evaluate mapping templates with mock data, allowing you to remotely unit test your AppSync resolvers and functions.
-* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.16.0](service/detective/CHANGELOG.md#v1160-2022-07-26)
- * **Feature**: Added the ability to get data source package information for the behavior graph. Graph administrators can now start (or stop) optional datasources on the behavior graph.
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.15.0](service/guardduty/CHANGELOG.md#v1150-2022-07-26)
- * **Feature**: Amazon GuardDuty introduces a new Malware Protection feature that triggers malware scan on selected EC2 instance resources, after the service detects a potentially malicious activity.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.13.0](service/lookoutvision/CHANGELOG.md#v1130-2022-07-26)
- * **Feature**: This release introduces support for the automatic scaling of inference units used by Amazon Lookout for Vision models.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.22.0](service/macie2/CHANGELOG.md#v1220-2022-07-26)
- * **Feature**: This release adds support for retrieving (revealing) sample occurrences of sensitive data that Amazon Macie detects and reports in findings.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.1](service/rds/CHANGELOG.md#v1231-2022-07-26)
- * **Documentation**: Adds support for using RDS Proxies with RDS for MariaDB databases.
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.19.0](service/rekognition/CHANGELOG.md#v1190-2022-07-26)
- * **Feature**: This release introduces support for the automatic scaling of inference units used by Amazon Rekognition Custom Labels models.
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.22.3](service/securityhub/CHANGELOG.md#v1223-2022-07-26)
- * **Documentation**: Documentation updates for AWS Security Hub
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.21.0](service/transfer/CHANGELOG.md#v1210-2022-07-26)
- * **Feature**: AWS Transfer Family now supports Applicability Statement 2 (AS2), a network protocol used for the secure and reliable transfer of critical Business-to-Business (B2B) data over the public internet using HTTP/HTTPS as the transport mechanism.
-
-# Release (2022-07-25)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.6](service/autoscaling/CHANGELOG.md#v1236-2022-07-25)
- * **Documentation**: Documentation update for Amazon EC2 Auto Scaling.
-
-# Release (2022-07-22)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.7.0](service/account/CHANGELOG.md#v170-2022-07-22)
- * **Feature**: This release enables customers to manage the primary contact information for their AWS accounts. For more information, see https://docs.aws.amazon.com/accounts/latest/reference/API_Operations.html
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.50.0](service/ec2/CHANGELOG.md#v1500-2022-07-22)
- * **Feature**: Added support for EC2 M1 Mac instances. For more information, please visit aws.amazon.com/mac.
-* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.15.0](service/iotdeviceadvisor/CHANGELOG.md#v1150-2022-07-22)
- * **Feature**: Added new service feature (Early access only) - Long Duration Test, where customers can test the IoT device to observe how it behaves when the device is in operation for longer period.
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.22.0](service/medialive/CHANGELOG.md#v1220-2022-07-22)
- * **Feature**: Link devices now support remote rebooting. Link devices now support maintenance windows. Maintenance windows allow a Link device to install software updates without stopping the MediaLive channel. The channel will experience a brief loss of input from the device while updates are installed.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.23.0](service/rds/CHANGELOG.md#v1230-2022-07-22)
- * **Feature**: This release adds the "ModifyActivityStream" API with support for audit policy state locking and unlocking.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.21.0](service/transcribe/CHANGELOG.md#v1210-2022-07-22)
- * **Feature**: Remove unsupported language codes for StartTranscriptionJob and update VocabularyFileUri for UpdateMedicalVocabulary
-
-# Release (2022-07-21)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.18.0](service/athena/CHANGELOG.md#v1180-2022-07-21)
- * **Feature**: This feature allows customers to retrieve runtime statistics for completed queries
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.19.0](service/cloudwatch/CHANGELOG.md#v1190-2022-07-21)
- * **Feature**: Adding support for the suppression of Composite Alarm actions
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.1](service/databasemigrationservice/CHANGELOG.md#v1211-2022-07-21)
- * **Documentation**: Documentation updates for Database Migration Service (DMS).
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.19.0](service/docdb/CHANGELOG.md#v1190-2022-07-21)
- * **Feature**: Enable copy-on-write restore type
-* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.14.0](service/ec2instanceconnect/CHANGELOG.md#v1140-2022-07-21)
- * **Feature**: This release includes a new exception type "EC2InstanceUnavailableException" for SendSSHPublicKey and SendSerialConsoleSSHPublicKey APIs.
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.20.0](service/frauddetector/CHANGELOG.md#v1200-2022-07-21)
- * **Feature**: The release introduces Account Takeover Insights (ATI) model. The ATI model detects fraud relating to account takeover. This release also adds support for new variable types: ARE_CREDENTIALS_VALID and SESSION_ID and adds new structures to Model Version APIs.
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.23.0](service/iotsitewise/CHANGELOG.md#v1230-2022-07-21)
- * **Feature**: Added asynchronous API to ingest bulk historical and current data into IoT SiteWise.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.31.0](service/kendra/CHANGELOG.md#v1310-2022-07-21)
- * **Feature**: Amazon Kendra now provides Oauth2 support for SharePoint Online. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-sharepoint.html
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.18.0](service/networkfirewall/CHANGELOG.md#v1180-2022-07-21)
- * **Feature**: Network Firewall now supports referencing dynamic IP sets from stateful rule groups, for IP sets stored in Amazon VPC prefix lists.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.22.1](service/rds/CHANGELOG.md#v1221-2022-07-21)
- * **Documentation**: Adds support for creating an RDS Proxy for an RDS for MariaDB database.
-
-# Release (2022-07-20)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.11](service/acmpca/CHANGELOG.md#v11711-2022-07-20)
- * **Documentation**: AWS Certificate Manager (ACM) Private Certificate Authority (PCA) documentation updates
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.27.0](service/iot/CHANGELOG.md#v1270-2022-07-20)
- * **Feature**: GA release the ability to enable/disable IoT Fleet Indexing for Device Defender and Named Shadow information, and search them through IoT Fleet Indexing APIs. This includes Named Shadow Selection as a part of the UpdateIndexingConfiguration API.
-
-# Release (2022-07-19)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.18.0](service/devopsguru/CHANGELOG.md#v1180-2022-07-19)
- * **Feature**: Added new APIs for log anomaly detection feature.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.28.1](service/glue/CHANGELOG.md#v1281-2022-07-19)
- * **Documentation**: Documentation updates for AWS Glue Job Timeout and Autoscaling
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.38.0](service/sagemaker/CHANGELOG.md#v1380-2022-07-19)
- * **Feature**: Fixed an issue with cross account QueryLineage
-* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.12.0](service/sagemakeredge/CHANGELOG.md#v1120-2022-07-19)
- * **Feature**: Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices.
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.20.0](service/workspaces/CHANGELOG.md#v1200-2022-07-19)
- * **Feature**: Increased the character limit of the login message from 850 to 2000 characters.
-
-# Release (2022-07-18)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.14.0](service/applicationdiscoveryservice/CHANGELOG.md#v1140-2022-07-18)
- * **Feature**: Add AWS Agentless Collector details to the GetDiscoverySummary API response
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.49.1](service/ec2/CHANGELOG.md#v1491-2022-07-18)
- * **Documentation**: Documentation updates for Amazon EC2.
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.22.0](service/elasticache/CHANGELOG.md#v1220-2022-07-18)
- * **Feature**: Adding AutoMinorVersionUpgrade in the DescribeReplicationGroups API
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.18.0](service/kms/CHANGELOG.md#v1180-2022-07-18)
- * **Feature**: Added support for the SM2 KeySpec in China Partition Regions
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.17.0](service/mediapackage/CHANGELOG.md#v1170-2022-07-18)
- * **Feature**: This release adds "IncludeIframeOnlyStream" for Dash endpoints and increases the number of supported video and audio encryption presets for Speke v2
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.37.0](service/sagemaker/CHANGELOG.md#v1370-2022-07-18)
- * **Feature**: Amazon SageMaker Edge Manager provides lightweight model deployment feature to deploy machine learning models on requested devices.
-* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.15.0](service/ssoadmin/CHANGELOG.md#v1150-2022-07-18)
- * **Feature**: AWS SSO now supports attaching customer managed policies and a permissions boundary to your permission sets. This release adds new API operations to manage and view the customer managed policies and the permissions boundary for a given permission set.
-
-# Release (2022-07-15)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.18.3](service/datasync/CHANGELOG.md#v1183-2022-07-15)
- * **Documentation**: Documentation updates for AWS DataSync regarding configuring Amazon FSx for ONTAP location security groups and SMB user permissions.
-* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.7.0](service/drs/CHANGELOG.md#v170-2022-07-15)
- * **Feature**: Changed existing APIs to allow choosing a dynamic volume type for replicating volumes, to reduce costs for customers.
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.8.0](service/evidently/CHANGELOG.md#v180-2022-07-15)
- * **Feature**: This release adds support for the new segmentation feature.
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.21.0](service/wafv2/CHANGELOG.md#v1210-2022-07-15)
- * **Feature**: This SDK release provide customers ability to add sensitivity level for WAF SQLI Match Statements.
-
-# Release (2022-07-14)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.17.0](service/athena/CHANGELOG.md#v1170-2022-07-14)
- * **Feature**: This release updates data types that contain either QueryExecutionId, NamedQueryId or ExpectedBucketOwner. Ids must be between 1 and 128 characters and contain only non-whitespace characters. ExpectedBucketOwner must be 12-digit string.
-* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.13.0](service/codeartifact/CHANGELOG.md#v1130-2022-07-14)
- * **Feature**: This release introduces Package Origin Controls, a mechanism used to counteract Dependency Confusion attacks. Adds two new APIs, PutPackageOriginConfiguration and DescribePackage, and updates the ListPackage, DescribePackageVersion and ListPackageVersion APIs in support of the feature.
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.22.0](service/configservice/CHANGELOG.md#v1220-2022-07-14)
- * **Feature**: Update ResourceType enum with values for Route53Resolver, Batch, DMS, Workspaces, Stepfunctions, SageMaker, ElasticLoadBalancingV2, MSK types
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.49.0](service/ec2/CHANGELOG.md#v1490-2022-07-14)
- * **Feature**: This release adds flow logs for Transit Gateway to allow customers to gain deeper visibility and insights into network traffic through their Transit Gateways.
-* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.18.0](service/fms/CHANGELOG.md#v1180-2022-07-14)
- * **Feature**: Adds support for strict ordering in stateful rule groups in Network Firewall policies.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.28.0](service/glue/CHANGELOG.md#v1280-2022-07-14)
- * **Feature**: This release adds an additional worker type for Glue Streaming jobs.
-* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.7.0](service/inspector2/CHANGELOG.md#v170-2022-07-14)
- * **Feature**: This release adds support for Inspector V2 scan configurations through the get and update configuration APIs. Currently this allows configuring ECR automated re-scan duration to lifetime or 180 days or 30 days.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.30.0](service/kendra/CHANGELOG.md#v1300-2022-07-14)
- * **Feature**: This release adds AccessControlConfigurations which allow you to redefine your document level access control without the need for content re-indexing.
-* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.13.0](service/nimble/CHANGELOG.md#v1130-2022-07-14)
- * **Feature**: Amazon Nimble Studio adds support for IAM-based access to AWS resources for Nimble Studio components and custom studio components. Studio Component scripts use these roles on Nimble Studio workstation to mount filesystems, access S3 buckets, or other configured resources in the Studio's AWS account
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.22.0](service/outposts/CHANGELOG.md#v1220-2022-07-14)
- * **Feature**: This release adds the ShipmentInformation and AssetInformationList fields to the GetOrder API response.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.36.0](service/sagemaker/CHANGELOG.md#v1360-2022-07-14)
- * **Feature**: This release adds support for G5, P4d, and C6i instance types in Amazon SageMaker Inference and increases the number of hyperparameters that can be searched from 20 to 30 in Amazon SageMaker Automatic Model Tuning
-
-# Release (2022-07-13)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.13.0](service/appconfig/CHANGELOG.md#v1130-2022-07-13)
- * **Feature**: Adding Create, Get, Update, Delete, and List APIs for new two new resources: Extensions and ExtensionAssociations.
-
-# Release (2022-07-12)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.14.0](service/networkmanager/CHANGELOG.md#v1140-2022-07-12)
- * **Feature**: This release adds general availability API support for AWS Cloud WAN.
-
-# Release (2022-07-11)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.48.0](service/ec2/CHANGELOG.md#v1480-2022-07-11)
- * **Feature**: Build, manage, and monitor a unified global network that connects resources running across your cloud and on-premises environments using the AWS Cloud WAN APIs.
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.26.0](service/redshift/CHANGELOG.md#v1260-2022-07-11)
- * **Feature**: This release adds a new --snapshot-arn field for describe-cluster-snapshots, describe-node-configuration-options, restore-from-cluster-snapshot, authorize-snapshot-acsess, and revoke-snapshot-acsess APIs. It allows customers to give a Redshift snapshot ARN or a Redshift Serverless ARN as input.
-* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.2.2](service/redshiftserverless/CHANGELOG.md#v122-2022-07-11)
- * **Documentation**: Removed prerelease language for GA launch.
-
-# Release (2022-07-08)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.17.0](service/backup/CHANGELOG.md#v1170-2022-07-08)
- * **Feature**: This release adds support for authentication using IAM user identity instead of passed IAM role, identified by excluding the IamRoleArn field in the StartRestoreJob API. This feature applies to only resource clients with a destructive restore nature (e.g. SAP HANA).
-
-# Release (2022-07-07)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.12.0](service/chimesdkmeetings/CHANGELOG.md#v1120-2022-07-07)
- * **Feature**: Adds support for AppKeys and TenantIds in Amazon Chime SDK WebRTC sessions
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.21.0](service/databasemigrationservice/CHANGELOG.md#v1210-2022-07-07)
- * **Feature**: New api to migrate event subscriptions to event bridge rules
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.26.0](service/iot/CHANGELOG.md#v1260-2022-07-07)
- * **Feature**: This release adds support to register a CA certificate without having to provide a verification certificate. This also allows multiple AWS accounts to register the same CA in the same region.
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.20.0](service/iotwireless/CHANGELOG.md#v1200-2022-07-07)
- * **Feature**: Adds 5 APIs: PutPositionConfiguration, GetPositionConfiguration, ListPositionConfigurations, UpdatePosition, GetPosition for the new Positioning Service feature which enables customers to configure solvers to calculate position of LoRaWAN devices, or specify position of LoRaWAN devices & gateways.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.35.0](service/sagemaker/CHANGELOG.md#v1350-2022-07-07)
- * **Feature**: Heterogeneous clusters: the ability to launch training jobs with multiple instance types. This enables running component of the training job on the instance type that is most suitable for it. e.g. doing data processing and augmentation on CPU instances and neural network training on GPU instances
-
-# Release (2022-07-06)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.22.0](service/cloudformation/CHANGELOG.md#v1220-2022-07-06)
- * **Feature**: My AWS Service (placeholder) - Add a new feature Account-level Targeting for StackSet operation
-* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.16.0](service/synthetics/CHANGELOG.md#v1160-2022-07-06)
- * **Feature**: This release introduces Group feature, which enables users to group cross-region canaries.
-
-# Release (2022-07-05)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.5](service/configservice/CHANGELOG.md#v1215-2022-07-05)
- * **Documentation**: Updating documentation service limits
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.21.0](service/lexmodelsv2/CHANGELOG.md#v1210-2022-07-05)
- * **Feature**: This release introduces additional optional parameters "messageSelectionStrategy" to PromptSpecification, which enables the users to configure the bot to play messages in orderly manner.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.23.0](service/quicksight/CHANGELOG.md#v1230-2022-07-05)
- * **Feature**: This release allows customers to programmatically create QuickSight accounts with Enterprise and Enterprise + Q editions. It also releases allowlisting domains for embedding QuickSight dashboards at runtime through the embedding APIs.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.22.0](service/rds/CHANGELOG.md#v1220-2022-07-05)
- * **Feature**: Adds waiters support for DBCluster.
-* `github.com/aws/aws-sdk-go-v2/service/rolesanywhere`: [v1.0.0](service/rolesanywhere/CHANGELOG.md#v100-2022-07-05)
- * **Release**: New AWS service client module
- * **Feature**: IAM Roles Anywhere allows your workloads such as servers, containers, and applications to obtain temporary AWS credentials and use the same IAM roles and policies that you have configured for your AWS workloads to access AWS resources.
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.19.0](service/sqs/CHANGELOG.md#v1190-2022-07-05)
- * **Feature**: Adds support for the SQS client to automatically validate message checksums for SendMessage, SendMessageBatch, and ReceiveMessage. A DisableMessageChecksumValidation parameter has been added to the Options struct for SQS package. Setting this to true will disable the checksum validation. This can be set when creating a client, or per operation call.
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.15.0](service/ssmincidents/CHANGELOG.md#v1150-2022-07-05)
- * **Feature**: Adds support for tagging incident-record on creation by providing incident tags in the template within a response-plan.
-
-# Release (2022-07-01)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.20.0](service/databasemigrationservice/CHANGELOG.md#v1200-2022-07-01)
- * **Feature**: Added new features for AWS DMS version 3.4.7 that includes new endpoint settings for S3, OpenSearch, Postgres, SQLServer and Oracle.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.5](service/rds/CHANGELOG.md#v1215-2022-07-01)
- * **Documentation**: Adds support for additional retention periods to Performance Insights.
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.27.0](service/s3/CHANGELOG.md#v1270-2022-07-01)
- * **Feature**: Add presign support for HeadBucket, DeleteObject, and DeleteBucket. Fixes [#1076](https://github.com/aws/aws-sdk-go-v2/issues/1076).
-
-# Release (2022-06-30)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.16.0](service/athena/CHANGELOG.md#v1160-2022-06-30)
- * **Feature**: This feature introduces the API support for Athena's parameterized query and BatchGetPreparedStatement API.
-* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.18.0](service/customerprofiles/CHANGELOG.md#v1180-2022-06-30)
- * **Feature**: This release adds the optional MinAllowedConfidenceScoreForMerging parameter to the CreateDomain, UpdateDomain, and GetAutoMergingPreview APIs in Customer Profiles. This parameter is used as a threshold to influence the profile auto-merging step of the Identity Resolution process.
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.20.0](service/emr/CHANGELOG.md#v1200-2022-06-30)
- * **Feature**: This release adds support for the ExecutionRoleArn parameter in the AddJobFlowSteps and DescribeStep APIs. Customers can use ExecutionRoleArn to specify the IAM role used for each job they submit using the AddJobFlowSteps API.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.27.0](service/glue/CHANGELOG.md#v1270-2022-06-30)
- * **Feature**: This release adds tag as an input of CreateDatabase
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.29.0](service/kendra/CHANGELOG.md#v1290-2022-06-30)
- * **Feature**: Amazon Kendra now provides a data source connector for alfresco
-* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.13.0](service/mwaa/CHANGELOG.md#v1130-2022-06-30)
- * **Feature**: Documentation updates for Amazon Managed Workflows for Apache Airflow.
-* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.16.0](service/pricing/CHANGELOG.md#v1160-2022-06-30)
- * **Feature**: Documentation update for GetProducts Response.
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.16.0](service/wellarchitected/CHANGELOG.md#v1160-2022-06-30)
- * **Feature**: Added support for UpdateGlobalSettings API. Added status filter to ListWorkloadShares and ListLensShares.
-* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.16.0](service/workmail/CHANGELOG.md#v1160-2022-06-30)
- * **Feature**: This release adds support for managing user availability configurations in Amazon WorkMail.
-
-# Release (2022-06-29)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.16.6
- * **Bug Fix**: Fix aws/signer/v4 to not double sign Content-Length header. Fixes [#1728](https://github.com/aws/aws-sdk-go-v2/issues/1728). Thanks to @matelang for creating the issue and PR.
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.17.0](service/appstream/CHANGELOG.md#v1170-2022-06-29)
- * **Feature**: Includes support for StreamingExperienceSettings in CreateStack and UpdateStack APIs
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.18.7](service/elasticloadbalancingv2/CHANGELOG.md#v1187-2022-06-29)
- * **Documentation**: This release adds two attributes for ALB. One, helps to preserve the host header and the other helps to modify, preserve, or remove the X-Forwarded-For header in the HTTP request.
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.19.0](service/emr/CHANGELOG.md#v1190-2022-06-29)
- * **Feature**: This release introduces additional optional parameter "Throughput" to VolumeSpecification to enable user to configure throughput for gp3 ebs volumes.
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.21.0](service/medialive/CHANGELOG.md#v1210-2022-06-29)
- * **Feature**: This release adds support for automatic renewal of MediaLive reservations at the end of each reservation term. Automatic renewal is optional. This release also adds support for labelling accessibility-focused audio and caption tracks in HLS outputs.
-* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.2.0](service/redshiftserverless/CHANGELOG.md#v120-2022-06-29)
- * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.34.0](service/sagemaker/CHANGELOG.md#v1340-2022-06-29)
- * **Feature**: This release adds: UpdateFeatureGroup, UpdateFeatureMetadata, DescribeFeatureMetadata APIs; FeatureMetadata type in Search API; LastModifiedTime, LastUpdateStatus, OnlineStoreTotalSizeBytes in DescribeFeatureGroup API.
-* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.14.0](service/translate/CHANGELOG.md#v1140-2022-06-29)
- * **Feature**: Added ListLanguages API which can be used to list the languages supported by Translate.
-
-# Release (2022-06-28)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.18.0](service/datasync/CHANGELOG.md#v1180-2022-06-28)
- * **Feature**: AWS DataSync now supports Amazon FSx for NetApp ONTAP locations.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.47.0](service/ec2/CHANGELOG.md#v1470-2022-06-28)
- * **Feature**: This release adds a new spread placement group to EC2 Placement Groups: host level spread, which spread instances between physical hosts, available to Outpost customers only. CreatePlacementGroup and DescribePlacementGroups APIs were updated with a new parameter: SpreadLevel to support this feature.
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.12.0](service/finspacedata/CHANGELOG.md#v1120-2022-06-28)
- * **Feature**: Release new API GetExternalDataViewAccessDetails
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.16.0](service/polly/CHANGELOG.md#v1160-2022-06-28)
- * **Feature**: Add 4 new neural voices - Pedro (es-US), Liam (fr-CA), Daniel (de-DE) and Arthur (en-GB).
-
-# Release (2022-06-24.2)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.13.7](service/emrcontainers/CHANGELOG.md#v1137-2022-06-242)
- * **Bug Fix**: Fixes bug with incorrect modeled timestamp format
-
-# Release (2022-06-23)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.14.0](service/lookoutequipment/CHANGELOG.md#v1140-2022-06-23)
- * **Feature**: This release adds visualizations to the scheduled inference results. Users will be able to see interference results, including diagnostic results from their running inference schedulers.
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.25.1](service/mediaconvert/CHANGELOG.md#v1251-2022-06-23)
- * **Documentation**: AWS Elemental MediaConvert SDK has released support for automatic DolbyVision metadata generation when converting HDR10 to DolbyVision.
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.15.0](service/mgn/CHANGELOG.md#v1150-2022-06-23)
- * **Feature**: New and modified APIs for the Post-Migration Framework
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.6.0](service/migrationhubrefactorspaces/CHANGELOG.md#v160-2022-06-23)
- * **Feature**: This release adds the new API UpdateRoute that allows route to be updated to ACTIVE/INACTIVE state. In addition, CreateRoute API will now allow users to create route in ACTIVE/INACTIVE state.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.33.0](service/sagemaker/CHANGELOG.md#v1330-2022-06-23)
- * **Feature**: SageMaker Ground Truth now supports Virtual Private Cloud. Customers can launch labeling jobs and access to their private workforce in VPC mode.
-
-# Release (2022-06-22)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.8](service/apigateway/CHANGELOG.md#v1158-2022-06-22)
- * **Documentation**: Documentation updates for Amazon API Gateway
-* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.15.0](service/pricing/CHANGELOG.md#v1150-2022-06-22)
- * **Feature**: This release introduces 1 update to the GetProducts API. The serviceCode attribute is now required when you use the GetProductsRequest.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.20.0](service/transfer/CHANGELOG.md#v1200-2022-06-22)
- * **Feature**: Until today, the service supported only RSA host keys and user keys. Now with this launch, Transfer Family has expanded the support for ECDSA and ED25519 host keys and user keys, enabling customers to support a broader set of clients by choosing RSA, ECDSA, and ED25519 host and user keys.
-
-# Release (2022-06-21)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.46.0](service/ec2/CHANGELOG.md#v1460-2022-06-21)
- * **Feature**: This release adds support for Private IP VPNs, a new feature allowing S2S VPN connections to use private ip addresses as the tunnel outside ip address over Direct Connect as transport.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.9](service/ecs/CHANGELOG.md#v1189-2022-06-21)
- * **Documentation**: Amazon ECS UpdateService now supports the following parameters: PlacementStrategies, PlacementConstraints and CapacityProviderStrategy.
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.15.0](service/wellarchitected/CHANGELOG.md#v1150-2022-06-21)
- * **Feature**: Adds support for lens tagging, Adds support for multiple helpful-resource urls and multiple improvement-plan urls.
-
-# Release (2022-06-20)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.14.0](service/directoryservice/CHANGELOG.md#v1140-2022-06-20)
- * **Feature**: This release adds support for describing and updating AWS Managed Microsoft AD settings
-* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.17.7](service/kafka/CHANGELOG.md#v1177-2022-06-20)
- * **Documentation**: Documentation updates to use Az Id during cluster creation.
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.21.0](service/outposts/CHANGELOG.md#v1210-2022-06-20)
- * **Feature**: This release adds the AssetLocation structure to the ListAssets response. AssetLocation includes the RackElevation for an Asset.
-
-# Release (2022-06-17)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.27.0](service/connect/CHANGELOG.md#v1270-2022-06-17)
- * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes. You can use it to programmatically enable/disable High volume outbound communications using attribute type HIGH_VOLUME_OUTBOUND on the specified Amazon Connect instance.
-* `github.com/aws/aws-sdk-go-v2/service/connectcampaigns`: [v1.0.0](service/connectcampaigns/CHANGELOG.md#v100-2022-06-17)
- * **Release**: New AWS service client module
- * **Feature**: Added Amazon Connect high volume outbound communications SDK.
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.15.7](service/dynamodb/CHANGELOG.md#v1157-2022-06-17)
- * **Documentation**: Doc only update for DynamoDB service
-* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.13.7](service/dynamodbstreams/CHANGELOG.md#v1137-2022-06-17)
- * **Documentation**: Doc only update for DynamoDB service
-
-# Release (2022-06-16)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.16.0](service/redshiftdata/CHANGELOG.md#v1160-2022-06-16)
- * **Feature**: This release adds a new --workgroup-name field to operations that connect to an endpoint. Customers can now execute queries against their serverless workgroups.
-* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.1.0](service/redshiftserverless/CHANGELOG.md#v110-2022-06-16)
- * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more.
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.11](service/secretsmanager/CHANGELOG.md#v11511-2022-06-16)
- * **Documentation**: Documentation updates for Secrets Manager
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.22.0](service/securityhub/CHANGELOG.md#v1220-2022-06-16)
- * **Feature**: Added Threats field for security findings. Added new resource details for ECS Container, ECS Task, RDS SecurityGroup, Kinesis Stream, EC2 TransitGateway, EFS AccessPoint, CloudFormation Stack, CloudWatch Alarm, VPC Peering Connection and WAF Rules
-
-# Release (2022-06-15)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.11.0](service/finspacedata/CHANGELOG.md#v1110-2022-06-15)
- * **Feature**: This release adds a new set of APIs, GetPermissionGroup, DisassociateUserFromPermissionGroup, AssociateUserToPermissionGroup, ListPermissionGroupsByUser, ListUsersByPermissionGroup.
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.14.0](service/guardduty/CHANGELOG.md#v1140-2022-06-15)
- * **Feature**: Adds finding fields available from GuardDuty Console. Adds FreeTrial related operations. Deprecates the use of various APIs related to Master Accounts and Replace them with Administrator Accounts.
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.13.0](service/servicecatalogappregistry/CHANGELOG.md#v1130-2022-06-15)
- * **Feature**: This release adds a new API ListAttributeGroupsForApplication that returns associated attribute groups of an application. In addition, the UpdateApplication and UpdateAttributeGroup APIs will not allow users to update the 'Name' attribute.
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.19.0](service/workspaces/CHANGELOG.md#v1190-2022-06-15)
- * **Feature**: Added new field "reason" to OperationNotSupportedException. Receiving this exception in the DeregisterWorkspaceDirectory API will now return a reason giving more context on the failure.
-
-# Release (2022-06-14)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.13.0](service/budgets/CHANGELOG.md#v1130-2022-06-14)
- * **Feature**: Add a budgets ThrottlingException. Update the CostFilters value pattern.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.16.0](service/lookoutmetrics/CHANGELOG.md#v1160-2022-06-14)
- * **Feature**: Adding filters to Alert and adding new UpdateAlert API.
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.25.0](service/mediaconvert/CHANGELOG.md#v1250-2022-06-14)
- * **Feature**: AWS Elemental MediaConvert SDK has added support for rules that constrain Automatic-ABR rendition selection when generating ABR package ladders.
-
-# Release (2022-06-13)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.20.0](service/outposts/CHANGELOG.md#v1200-2022-06-13)
- * **Feature**: This release adds API operations AWS uses to install Outpost servers.
-
-# Release (2022-06-10)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.19.7](service/frauddetector/CHANGELOG.md#v1197-2022-06-10)
- * **Documentation**: Documentation updates for Amazon Fraud Detector (AWSHawksNest)
-
-# Release (2022-06-09)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.11.0](service/chimesdkmeetings/CHANGELOG.md#v1110-2022-06-09)
- * **Feature**: Adds support for live transcription in AWS GovCloud (US) Regions.
-
-# Release (2022-06-08)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.19.0](service/databasemigrationservice/CHANGELOG.md#v1190-2022-06-08)
- * **Feature**: This release adds DMS Fleet Advisor APIs and exposes functionality for DMS Fleet Advisor. It adds functionality to create and modify fleet advisor instances, and to collect and analyze information about the local data infrastructure.
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.18.7](service/iam/CHANGELOG.md#v1187-2022-06-08)
- * **Documentation**: Documentation updates for AWS Identity and Access Management (IAM).
-* `github.com/aws/aws-sdk-go-v2/service/m2`: [v1.0.0](service/m2/CHANGELOG.md#v100-2022-06-08)
- * **Release**: New AWS service client module
- * **Feature**: AWS Mainframe Modernization service is a managed mainframe service and set of tools for planning, migrating, modernizing, and running mainframe workloads on AWS
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.17.0](service/neptune/CHANGELOG.md#v1170-2022-06-08)
- * **Feature**: This release adds support for Neptune to be configured as a global database, with a primary DB cluster in one region, and up to five secondary DB clusters in other regions.
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.25.0](service/redshift/CHANGELOG.md#v1250-2022-06-08)
- * **Feature**: Adds new API GetClusterCredentialsWithIAM to return temporary credentials.
-* `github.com/aws/aws-sdk-go-v2/service/redshiftserverless`: [v1.0.0](service/redshiftserverless/CHANGELOG.md#v100-2022-06-08)
- * **Release**: New AWS service client module
- * **Feature**: Add new API operations for Amazon Redshift Serverless, a new way of using Amazon Redshift without needing to manually manage provisioned clusters. The new operations let you interact with Redshift Serverless resources, such as create snapshots, list VPC endpoints, delete resource policies, and more.
-
-# Release (2022-06-07)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.19.0](service/auditmanager/CHANGELOG.md#v1190-2022-06-07)
- * **Feature**: This release introduces 2 updates to the Audit Manager API. The roleType and roleArn attributes are now required when you use the CreateAssessment or UpdateAssessment operation. We also added a throttling exception to the RegisterAccount API operation.
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.19.0](service/costexplorer/CHANGELOG.md#v1190-2022-06-07)
- * **Feature**: Added two new APIs to support cost allocation tags operations: ListCostAllocationTags, UpdateCostAllocationTagsStatus.
-
-# Release (2022-06-06)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.10.0](service/chimesdkmessaging/CHANGELOG.md#v1100-2022-06-06)
- * **Feature**: This release adds support for searching channels by members via the SearchChannels API, removes required restrictions for Name and Mode in UpdateChannel API and enhances CreateChannel API by exposing member and moderator list as well as channel id as optional parameters.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.26.0](service/connect/CHANGELOG.md#v1260-2022-06-06)
- * **Feature**: This release adds a new API, GetCurrentUserData, which returns real-time details about users' current activity.
-
-# Release (2022-06-02)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.16.0](service/applicationinsights/CHANGELOG.md#v1160-2022-06-02)
- * **Feature**: Provide Account Level onboarding support through CFN/CLI
-* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.12.6](service/codeartifact/CHANGELOG.md#v1126-2022-06-02)
- * **Documentation**: Documentation updates for CodeArtifact
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.25.0](service/connect/CHANGELOG.md#v1250-2022-06-02)
- * **Feature**: This release adds the following features: 1) New APIs to manage (create, list, update) task template resources, 2) Updates to startTaskContact API to support task templates, and 3) new TransferContact API to programmatically transfer in-progress tasks via a contact flow.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.28.0](service/kendra/CHANGELOG.md#v1280-2022-06-02)
- * **Feature**: Amazon Kendra now provides a data source connector for GitHub. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-github.html
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.14.0](service/proton/CHANGELOG.md#v1140-2022-06-02)
- * **Feature**: Add new "Components" API to enable users to Create, Delete and Update AWS Proton components.
-* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.10.0](service/voiceid/CHANGELOG.md#v1100-2022-06-02)
- * **Feature**: Added a new attribute ServerSideEncryptionUpdateDetails to Domain and DomainSummary.
-
-# Release (2022-06-01)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.6.0](service/backupgateway/CHANGELOG.md#v160-2022-06-01)
- * **Feature**: Adds GetGateway and UpdateGatewaySoftwareNow API and adds hypervisor name to UpdateHypervisor API
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.10.0](service/chimesdkmeetings/CHANGELOG.md#v1100-2022-06-01)
- * **Feature**: Adds support for centrally controlling each participant's ability to send and receive audio, video and screen share within a WebRTC session. Attendee capabilities can be specified when the attendee is created and updated during the session with the new BatchUpdateAttendeeCapabilitiesExcept API.
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.22.0](service/forecast/CHANGELOG.md#v1220-2022-06-01)
- * **Feature**: Added Format field to Import and Export APIs in Amazon Forecast. Added TimeSeriesSelector to Create Forecast API.
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.21.0](service/route53/CHANGELOG.md#v1210-2022-06-01)
- * **Feature**: Add new APIs to support Route 53 IP Based Routing
-
-# Release (2022-05-31)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.17.0](service/cognitoidentityprovider/CHANGELOG.md#v1170-2022-05-31)
- * **Feature**: Amazon Cognito now supports IP Address propagation for all unauthenticated APIs (e.g. SignUp, ForgotPassword).
-* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.6.0](service/drs/CHANGELOG.md#v160-2022-05-31)
- * **Feature**: Changed existing APIs and added new APIs to accommodate using multiple AWS accounts with AWS Elastic Disaster Recovery.
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.22.0](service/iotsitewise/CHANGELOG.md#v1220-2022-05-31)
- * **Feature**: This release adds the following new optional field to the IoT SiteWise asset resource: assetDescription.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.15.0](service/lookoutmetrics/CHANGELOG.md#v1150-2022-05-31)
- * **Feature**: Adding backtest mode to detectors using the Cloudwatch data source.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.20.0](service/transcribe/CHANGELOG.md#v1200-2022-05-31)
- * **Feature**: Amazon Transcribe now supports automatic language identification for multi-lingual audio in batch mode.
-
-# Release (2022-05-27)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.16.0](service/appflow/CHANGELOG.md#v1160-2022-05-27)
- * **Feature**: Adding the following features/changes: Parquet output that preserves typing from the source connector, Failed executions threshold before deactivation for scheduled flows, increasing max size of access and refresh token from 2048 to 4096
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.17.0](service/datasync/CHANGELOG.md#v1170-2022-05-27)
- * **Feature**: AWS DataSync now supports TLS encryption in transit, file system policies and access points for EFS locations.
-* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.1.0](service/emrserverless/CHANGELOG.md#v110-2022-05-27)
- * **Feature**: This release adds support for Amazon EMR Serverless, a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.32.0](service/sagemaker/CHANGELOG.md#v1320-2022-05-27)
- * **Feature**: Amazon SageMaker Notebook Instances now allows configuration of Instance Metadata Service version and Amazon SageMaker Studio now supports G5 instance types.
-
-# Release (2022-05-26)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.45.0](service/ec2/CHANGELOG.md#v1450-2022-05-26)
- * **Feature**: C7g instances, powered by the latest generation AWS Graviton3 processors, provide the best price performance in Amazon EC2 for compute-intensive workloads.
-* `github.com/aws/aws-sdk-go-v2/service/emrserverless`: [v1.0.0](service/emrserverless/CHANGELOG.md#v100-2022-05-26)
- * **Release**: New AWS service client module
- * **Feature**: This release adds support for Amazon EMR Serverless, a serverless runtime environment that simplifies running analytics applications using the latest open source frameworks such as Apache Spark and Apache Hive.
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.21.0](service/forecast/CHANGELOG.md#v1210-2022-05-26)
- * **Feature**: Introduced a new field in Auto Predictor as Time Alignment Boundary. It helps in aligning the timestamps generated during Forecast exports
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.22.0](service/lightsail/CHANGELOG.md#v1220-2022-05-26)
- * **Feature**: Amazon Lightsail now supports the ability to configure a Lightsail Container Service to pull images from Amazon ECR private repositories in your account.
-
-# Release (2022-05-25)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.15.6](service/apigateway/CHANGELOG.md#v1156-2022-05-25)
- * **Documentation**: Documentation updates for Amazon API Gateway
-* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.3](service/apprunner/CHANGELOG.md#v1123-2022-05-25)
- * **Documentation**: Documentation-only update added for CodeConfiguration.
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.21.0](service/cloudformation/CHANGELOG.md#v1210-2022-05-25)
- * **Feature**: Add a new parameter statusReason to DescribeStackSetOperation output for additional details
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.24.0](service/fsx/CHANGELOG.md#v1240-2022-05-25)
- * **Feature**: This release adds root squash support to FSx for Lustre to restrict root level access from clients by mapping root users to a less-privileged user/group with limited permissions.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.14.0](service/lookoutmetrics/CHANGELOG.md#v1140-2022-05-25)
- * **Feature**: Adding AthenaSourceConfig for MetricSet APIs to support Athena as a data source.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.31.0](service/sagemaker/CHANGELOG.md#v1310-2022-05-25)
- * **Feature**: Amazon SageMaker Autopilot adds support for manually selecting features from the input dataset using the CreateAutoMLJob API.
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.9](service/secretsmanager/CHANGELOG.md#v1159-2022-05-25)
- * **Documentation**: Documentation updates for Secrets Manager
-* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.9.0](service/voiceid/CHANGELOG.md#v190-2022-05-25)
- * **Feature**: VoiceID will now automatically expire Speakers if they haven't been accessed for Enrollment, Re-enrollment or Successful Auth for three years. The Speaker APIs now return a "LastAccessedAt" time for Speakers, and the EvaluateSession API returns "SPEAKER_EXPIRED" Auth Decision for EXPIRED Speakers.
-
-# Release (2022-05-24)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.16.0](service/cognitoidentityprovider/CHANGELOG.md#v1160-2022-05-24)
- * **Feature**: Amazon Cognito now supports requiring attribute verification (ex. email and phone number) before update.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.44.0](service/ec2/CHANGELOG.md#v1440-2022-05-24)
- * **Feature**: Stop Protection feature enables customers to protect their instances from accidental stop actions.
-* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.4](service/ivschat/CHANGELOG.md#v104-2022-05-24)
- * **Documentation**: Doc-only update. For MessageReviewHandler structure, added timeout period in the description of the fallbackResult field
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.24.0](service/mediaconvert/CHANGELOG.md#v1240-2022-05-24)
- * **Feature**: AWS Elemental MediaConvert SDK has added support for rules that constrain Automatic-ABR rendition selection when generating ABR package ladders.
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.13.0](service/networkmanager/CHANGELOG.md#v1130-2022-05-24)
- * **Feature**: This release adds Multi Account API support for a TGW Global Network, to enable and disable AWSServiceAccess with AwsOrganizations for Network Manager service and dependency CloudFormation StackSets service.
-
-# Release (2022-05-23)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.21.0](service/elasticache/CHANGELOG.md#v1210-2022-05-23)
- * **Feature**: Added support for encryption in transit for Memcached clusters. Customers can now launch Memcached cluster with encryption in transit enabled when using Memcached version 1.6.12 or later.
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.20.0](service/forecast/CHANGELOG.md#v1200-2022-05-23)
- * **Feature**: New APIs for Monitor that help you understand how your predictors perform over time.
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.20.0](service/personalize/CHANGELOG.md#v1200-2022-05-23)
- * **Feature**: Adding modelMetrics as part of DescribeRecommender API response for Personalize.
-
-# Release (2022-05-20)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.15.7](service/cloudwatchlogs/CHANGELOG.md#v1157-2022-05-20)
- * **Documentation**: Doc-only update to publish the new valid values for log retention
-* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.18.0](service/comprehend/CHANGELOG.md#v1180-2022-05-20)
- * **Feature**: Comprehend releases 14 new entity types for DetectPiiEntities and ContainsPiiEntities APIs.
-
-# Release (2022-05-19)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.1.0](service/gamesparks/CHANGELOG.md#v110-2022-05-19)
- * **Feature**: This release adds an optional DeploymentResult field in the responses of GetStageDeploymentIntegrationTests and ListStageDeploymentIntegrationTests APIs.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.13.0](service/lookoutmetrics/CHANGELOG.md#v1130-2022-05-19)
- * **Feature**: In this release we added SnsFormat to SNSConfiguration to support human readable alert.
-
-# Release (2022-05-18)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.14.0](service/appmesh/CHANGELOG.md#v1140-2022-05-18)
- * **Feature**: This release updates the existing Create and Update APIs for meshes and virtual nodes by adding a new IP preference field. This new IP preference field can be used to control the IP versions being used with the mesh and allows for IPv6 support within App Mesh.
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.3](service/batch/CHANGELOG.md#v1183-2022-05-18)
- * **Documentation**: Documentation updates for AWS Batch.
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.16.0](service/greengrassv2/CHANGELOG.md#v1160-2022-05-18)
- * **Feature**: This release adds the new DeleteDeployment API operation that you can use to delete deployment resources. This release also adds support for discontinued AWS-provided components, so AWS can communicate when a component has any issues that you should consider before you deploy it.
-* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.12.0](service/ioteventsdata/CHANGELOG.md#v1120-2022-05-18)
- * **Feature**: Introducing new API for deleting detectors: BatchDeleteDetector.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.22.0](service/quicksight/CHANGELOG.md#v1220-2022-05-18)
- * **Feature**: API UpdatePublicSharingSettings enables IAM admins to enable/disable account level setting for public access of dashboards. When enabled, owners/co-owners for dashboards can enable public access on their dashboards. These dashboards can only be accessed through share link or embedding.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.19.0](service/transfer/CHANGELOG.md#v1190-2022-05-18)
- * **Feature**: AWS Transfer Family now supports SetStat server configuration option, which provides the ability to ignore SetStat command issued by file transfer clients, enabling customers to upload files without any errors.
-
-# Release (2022-05-17)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.3.12](internal/ini/CHANGELOG.md#v1312-2022-05-17)
- * **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.25.0](service/glue/CHANGELOG.md#v1250-2022-05-17)
- * **Feature**: This release adds a new optional parameter called codeGenNodeConfiguration to CRUD job APIs that allows users to manage visual jobs via APIs. The updated CreateJob and UpdateJob will create jobs that can be viewed in Glue Studio as a visual graph. GetJob can be used to get codeGenNodeConfiguration.
-* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.13.1](service/iotsecuretunneling/CHANGELOG.md#v1131-2022-05-17)
- * **Bug Fix**: Fixes iotsecuretunneling and mobile API clients to use the correct name for signing requests, Fixes [#1686](https://github.com/aws/aws-sdk-go-v2/issues/1686).
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.2](service/kms/CHANGELOG.md#v1172-2022-05-17)
- * **Documentation**: Add HMAC best practice tip, annual rotation of AWS managed keys.
-* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.11.5](service/mobile/CHANGELOG.md#v1115-2022-05-17)
- * **Bug Fix**: Fixes iotsecuretunneling and mobile API clients to use the correct name for signing requests, Fixes [#1686](https://github.com/aws/aws-sdk-go-v2/issues/1686).
-
-# Release (2022-05-16)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.13.0](service/applicationdiscoveryservice/CHANGELOG.md#v1130-2022-05-16)
- * **Feature**: Add Migration Evaluator Collector details to the GetDiscoverySummary API response
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.18.0](service/cloudfront/CHANGELOG.md#v1180-2022-05-16)
- * **Feature**: Introduced a new error (TooLongCSPInResponseHeadersPolicy) that is returned when the value of the Content-Security-Policy header in a response headers policy exceeds the maximum allowed length.
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.18.1](service/rekognition/CHANGELOG.md#v1181-2022-05-16)
- * **Documentation**: Documentation updates for Amazon Rekognition.
-* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.6.0](service/resiliencehub/CHANGELOG.md#v160-2022-05-16)
- * **Feature**: In this release, we are introducing support for Amazon Elastic Container Service, Amazon Route 53, AWS Elastic Disaster Recovery, AWS Backup in addition to the existing supported Services. This release also supports Terraform file input from S3 and scheduling daily assessments
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.2](service/servicecatalog/CHANGELOG.md#v1142-2022-05-16)
- * **Documentation**: Updated the descriptions for the ListAcceptedPortfolioShares API description and the PortfolioShareType parameters.
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.16.5](service/sts/CHANGELOG.md#v1165-2022-05-16)
- * **Documentation**: Documentation updates for AWS Security Token Service.
-* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.6.0](service/workspacesweb/CHANGELOG.md#v160-2022-05-16)
- * **Feature**: Amazon WorkSpaces Web now supports Administrator timeout control
-
-# Release (2022-05-13)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.9.0](service/grafana/CHANGELOG.md#v190-2022-05-13)
- * **Feature**: This release adds APIs for creating and deleting API keys in an Amazon Managed Grafana workspace.
-
-# Release (2022-05-12)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.43.0](service/ec2/CHANGELOG.md#v1430-2022-05-12)
- * **Feature**: This release introduces a target type Gateway Load Balancer Endpoint for mirrored traffic. Customers can now specify GatewayLoadBalancerEndpoint option during the creation of a traffic mirror target.
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.10.5](service/finspacedata/CHANGELOG.md#v1105-2022-05-12)
- * **Documentation**: We've now deprecated CreateSnapshot permission for creating a data view, instead use CreateDataView permission.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.25.1](service/iot/CHANGELOG.md#v1251-2022-05-12)
- * **Documentation**: Documentation update for China region ListMetricValues for IoT
-* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.2](service/ivschat/CHANGELOG.md#v102-2022-05-12)
- * **Documentation**: Documentation-only updates for IVS Chat API Reference.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.27.0](service/kendra/CHANGELOG.md#v1270-2022-05-12)
- * **Feature**: Amazon Kendra now provides a data source connector for Jira. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-jira.html
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.23.0](service/lambda/CHANGELOG.md#v1230-2022-05-12)
- * **Feature**: Lambda releases NodeJs 16 managed runtime to be available in all commercial regions.
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.21.0](service/lightsail/CHANGELOG.md#v1210-2022-05-12)
- * **Feature**: This release adds support to include inactive database bundles in the response of the GetRelationalDatabaseBundles request.
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.19.1](service/outposts/CHANGELOG.md#v1191-2022-05-12)
- * **Documentation**: Documentation updates for AWS Outposts.
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.14.0](service/ssmincidents/CHANGELOG.md#v1140-2022-05-12)
- * **Feature**: Adding support for dynamic SSM Runbook parameter values. Updating validation pattern for engagements. Adding ConflictException to UpdateReplicationSet API contract.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.6](service/transfer/CHANGELOG.md#v1186-2022-05-12)
- * **Documentation**: AWS Transfer Family now accepts ECDSA keys for server host keys
-
-# Release (2022-05-11)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.42.0](service/ec2/CHANGELOG.md#v1420-2022-05-11)
- * **Feature**: This release updates AWS PrivateLink APIs to support IPv6 for PrivateLink Services and Endpoints of type 'Interface'.
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.7](service/secretsmanager/CHANGELOG.md#v1157-2022-05-11)
- * **Documentation**: Doc only update for Secrets Manager that fixes several customer-reported issues.
-
-# Release (2022-05-10)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.17.5](service/computeoptimizer/CHANGELOG.md#v1175-2022-05-10)
- * **Documentation**: Documentation updates for Compute Optimizer
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.41.0](service/ec2/CHANGELOG.md#v1410-2022-05-10)
- * **Feature**: Added support for using NitroTPM and UEFI Secure Boot on EC2 instances.
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.21.0](service/eks/CHANGELOG.md#v1210-2022-05-10)
- * **Feature**: Adds BOTTLEROCKET_ARM_64_NVIDIA and BOTTLEROCKET_x86_64_NVIDIA AMI types to EKS managed nodegroups
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.18.0](service/emr/CHANGELOG.md#v1180-2022-05-10)
- * **Feature**: This release updates the Amazon EMR ModifyInstanceGroups API to support "MERGE" type cluster reconfiguration. Also, added the ability to specify a particular Amazon Linux release for all nodes in a cluster launch request.
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.5.5](service/migrationhubrefactorspaces/CHANGELOG.md#v155-2022-05-10)
- * **Documentation**: AWS Migration Hub Refactor Spaces documentation only update to fix a formatting issue.
-
-# Release (2022-05-09)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.15.5](config/CHANGELOG.md#v1155-2022-05-09)
- * **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
-* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.10.0](service/cloudcontrol/CHANGELOG.md#v1100-2022-05-09)
- * **Feature**: SDK release for Cloud Control API to include paginators for Python SDK.
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.7.0](service/evidently/CHANGELOG.md#v170-2022-05-09)
- * **Feature**: Add detail message inside GetExperimentResults API response to indicate experiment result availability
-* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.13.5](service/ssmcontacts/CHANGELOG.md#v1135-2022-05-09)
- * **Documentation**: Fixed an error in the DescribeEngagement example for AWS Incident Manager.
-
-# Release (2022-05-06)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.40.0](service/ec2/CHANGELOG.md#v1400-2022-05-06)
- * **Feature**: Add new state values for IPAMs, IPAM Scopes, and IPAM Pools.
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.17.0](service/location/CHANGELOG.md#v1170-2022-05-06)
- * **Feature**: Amazon Location Service now includes a MaxResults parameter for ListGeofences requests.
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.16.0](service/mediapackage/CHANGELOG.md#v1160-2022-05-06)
- * **Feature**: This release adds Dvb Dash 2014 as an available profile option for Dash Origin Endpoints.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.1](service/rds/CHANGELOG.md#v1211-2022-05-06)
- * **Documentation**: Various documentation improvements.
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.24.0](service/redshift/CHANGELOG.md#v1240-2022-05-06)
- * **Feature**: Introduces new field 'LoadSampleData' in CreateCluster operation. Customers can now specify 'LoadSampleData' option during creation of a cluster, which results in loading of sample data in the cluster that is created.
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.1](service/securityhub/CHANGELOG.md#v1211-2022-05-06)
- * **Documentation**: Documentation updates for Security Hub API reference
-
-# Release (2022-05-05)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.16.0](service/datasync/CHANGELOG.md#v1160-2022-05-05)
- * **Feature**: AWS DataSync now supports a new ObjectTags Task API option that can be used to control whether Object Tags are transferred.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.39.0](service/ec2/CHANGELOG.md#v1390-2022-05-05)
- * **Feature**: Amazon EC2 I4i instances are powered by 3rd generation Intel Xeon Scalable processors and feature up to 30 TB of local AWS Nitro SSD storage
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.25.0](service/iot/CHANGELOG.md#v1250-2022-05-05)
- * **Feature**: AWS IoT Jobs now allows you to create up to 100,000 active continuous and snapshot jobs by using concurrency control.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.26.0](service/kendra/CHANGELOG.md#v1260-2022-05-05)
- * **Feature**: AWS Kendra now supports hierarchical facets for a query. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/filtering.html
-
-# Release (2022-05-04)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.16.0](service/backup/CHANGELOG.md#v1160-2022-05-04)
- * **Feature**: Adds support to 2 new filters about job complete time for 3 list jobs APIs in AWS Backup
-* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.13.0](service/iotsecuretunneling/CHANGELOG.md#v1130-2022-05-04)
- * **Feature**: This release introduces a new API RotateTunnelAccessToken that allow revoking the existing tokens and generate new tokens
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.20.1](service/lightsail/CHANGELOG.md#v1201-2022-05-04)
- * **Documentation**: Documentation updates for Lightsail
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.27.0](service/ssm/CHANGELOG.md#v1270-2022-05-04)
- * **Feature**: This release adds the TargetMaps parameter in SSM State Manager API.
-
-# Release (2022-05-03)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.38.0](service/ec2/CHANGELOG.md#v1380-2022-05-03)
- * **Feature**: Adds support for allocating Dedicated Hosts on AWS Outposts. The AllocateHosts API now accepts an OutpostArn request parameter, and the DescribeHosts API now includes an OutpostArn response parameter.
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.12.0](service/kinesisvideo/CHANGELOG.md#v1120-2022-05-03)
- * **Feature**: Add support for multiple image feature related APIs for configuring image generation and notification of a video stream. Add "GET_IMAGES" to the list of supported API names for the GetDataEndpoint API.
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.13.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1130-2022-05-03)
- * **Feature**: Add support for GetImages API for retrieving images from a video stream
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.8](service/s3/CHANGELOG.md#v1268-2022-05-03)
- * **Documentation**: Documentation only update for doc bug fixes for the S3 API docs.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.30.0](service/sagemaker/CHANGELOG.md#v1300-2022-05-03)
- * **Feature**: SageMaker Autopilot adds new metrics for all candidate models generated by Autopilot experiments; RStudio on SageMaker now allows users to bring your own development environment in a custom image.
-
-# Release (2022-05-02)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.16.0](service/organizations/CHANGELOG.md#v1160-2022-05-02)
- * **Feature**: This release adds the INVALID_PAYMENT_INSTRUMENT as a fail reason and an error message.
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.19.0](service/outposts/CHANGELOG.md#v1190-2022-05-02)
- * **Feature**: This release adds a new API called ListAssets to the Outposts SDK, which lists the hardware assets in an Outpost.
-* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.15.0](service/synthetics/CHANGELOG.md#v1150-2022-05-02)
- * **Feature**: CloudWatch Synthetics has introduced a new feature to provide customers with an option to delete the underlying resources that Synthetics canary creates when the user chooses to delete the canary.
-
-# Release (2022-04-29)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.16.0](service/codegurureviewer/CHANGELOG.md#v1160-2022-04-29)
- * **Feature**: Amazon CodeGuru Reviewer now supports suppressing recommendations from being generated on specific files and directories.
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.23.0](service/mediaconvert/CHANGELOG.md#v1230-2022-04-29)
- * **Feature**: AWS Elemental MediaConvert SDK now supports creation of Dolby Vision profile 8.1, the ability to generate black frames of video, and introduces audio-only DASH and CMAF support.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.21.0](service/rds/CHANGELOG.md#v1210-2022-04-29)
- * **Feature**: Feature - Adds support for Internet Protocol Version 6 (IPv6) on RDS database instances.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.26.0](service/ssm/CHANGELOG.md#v1260-2022-04-29)
- * **Feature**: Update the StartChangeRequestExecution, adding TargetMaps to the Runbook parameter
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.20.0](service/wafv2/CHANGELOG.md#v1200-2022-04-29)
- * **Feature**: You can now inspect all request headers and all cookies. You can now specify how to handle oversize body contents in your rules that inspect the body.
-
-# Release (2022-04-28)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.5](service/auditmanager/CHANGELOG.md#v1185-2022-04-28)
- * **Documentation**: This release adds documentation updates for Audit Manager. We provided examples of how to use the Custom_ prefix for the keywordValue attribute. We also provided more details about the DeleteAssessmentReport operation.
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.16.0](service/braket/CHANGELOG.md#v1160-2022-04-28)
- * **Feature**: This release enables Braket Hybrid Jobs with Embedded Simulators to have multiple instances.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.24.0](service/connect/CHANGELOG.md#v1240-2022-04-28)
- * **Feature**: This release introduces an API for changing the current agent status of a user in Connect.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.37.0](service/ec2/CHANGELOG.md#v1370-2022-04-28)
- * **Feature**: This release adds support to query the public key and creation date of EC2 Key Pairs. Additionally, the format (pem or ppk) of a key pair can be specified when creating a new key pair.
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.13.5](service/guardduty/CHANGELOG.md#v1135-2022-04-28)
- * **Documentation**: Documentation update for API description.
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.17.0](service/networkfirewall/CHANGELOG.md#v1170-2022-04-28)
- * **Feature**: AWS Network Firewall adds support for stateful threat signature AWS managed rule groups.
-
-# Release (2022-04-27)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.11.5](service/amplify/CHANGELOG.md#v1115-2022-04-27)
- * **Documentation**: Documentation only update to support the Amplify GitHub App feature launch
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines`: [v1.0.0](service/chimesdkmediapipelines/CHANGELOG.md#v100-2022-04-27)
- * **Release**: New AWS service client module
- * **Feature**: For Amazon Chime SDK meetings, the Amazon Chime Media Pipelines SDK allows builders to capture audio, video, and content share streams. You can also capture meeting events, live transcripts, and data messages. The pipelines save the artifacts to an Amazon S3 bucket that you designate.
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.16.0](service/cloudtrail/CHANGELOG.md#v1160-2022-04-27)
- * **Feature**: Increases the retention period maximum to 2557 days. Deprecates unused fields of the ListEventDataStores API response. Updates documentation.
-* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.1.5](service/internal/checksum/CHANGELOG.md#v115-2022-04-27)
- * **Bug Fix**: Fixes a bug that could cause the SigV4 payload hash to be incorrectly encoded, leading to signing errors.
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.19.0](service/iotwireless/CHANGELOG.md#v1190-2022-04-27)
- * **Feature**: Add list support for event configurations, allow to get and update event configurations by resource type, support LoRaWAN events; Make NetworkAnalyzerConfiguration as a resource, add List, Create, Delete API support; Add FCntStart attribute support for ABP WirelessDevice.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.13.0](service/lookoutequipment/CHANGELOG.md#v1130-2022-04-27)
- * **Feature**: This release adds the following new features: 1) Introduces an option for automatic schema creation 2) Now allows for Ingestion of data containing most common errors and allows automatic data cleaning 3) Introduces new API ListSensorStatistics that gives further information about the ingested data
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.18.0](service/rekognition/CHANGELOG.md#v1180-2022-04-27)
- * **Feature**: This release adds support to configure stream-processor resources for label detections on streaming-videos. UpateStreamProcessor API is also launched with this release, which could be used to update an existing stream-processor.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.29.0](service/sagemaker/CHANGELOG.md#v1290-2022-04-27)
- * **Feature**: Amazon SageMaker Autopilot adds support for custom validation dataset and validation ratio through the CreateAutoMLJob and DescribeAutoMLJob APIs.
-
-# Release (2022-04-26)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.17.0](service/cloudfront/CHANGELOG.md#v1170-2022-04-26)
- * **Feature**: CloudFront now supports the Server-Timing header in HTTP responses sent from CloudFront. You can use this header to view metrics that help you gain insights about the behavior and performance of CloudFront. To use this header, enable it in a response headers policy.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.2](service/glue/CHANGELOG.md#v1242-2022-04-26)
- * **Documentation**: This release adds documentation for the APIs to create, read, delete, list, and batch read of AWS Glue custom patterns, and for Lake Formation configuration settings in the AWS Glue crawler.
-* `github.com/aws/aws-sdk-go-v2/service/ivschat`: [v1.0.0](service/ivschat/CHANGELOG.md#v100-2022-04-26)
- * **Release**: New AWS service client module
- * **Feature**: Adds new APIs for IVS Chat, a feature for building interactive chat experiences alongside an IVS broadcast.
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.20.0](service/lightsail/CHANGELOG.md#v1200-2022-04-26)
- * **Feature**: This release adds support for Lightsail load balancer HTTP to HTTPS redirect and TLS policy configuration.
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.16.0](service/networkfirewall/CHANGELOG.md#v1160-2022-04-26)
- * **Feature**: AWS Network Firewall now enables customers to use a customer managed AWS KMS key for the encryption of their firewall resources.
-* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.14.5](service/pricing/CHANGELOG.md#v1145-2022-04-26)
- * **Documentation**: Documentation updates for Price List API
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.28.0](service/sagemaker/CHANGELOG.md#v1280-2022-04-26)
- * **Feature**: SageMaker Inference Recommender now accepts customer KMS key ID for encryption of endpoints and compilation outputs created during inference recommendation.
-
-# Release (2022-04-25)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.16.3
- * **Dependency Update**: Update SDK's internal copy of golang.org/x/sync/singleflight to address issue with test failing due to timing issues
-* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.12.0](credentials/CHANGELOG.md#v1120-2022-04-25)
- * **Feature**: Adds Duration and Policy options that can be used when creating stscreds.WebIdentityRoleProvider credentials provider.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.23.0](service/connect/CHANGELOG.md#v1230-2022-04-25)
- * **Feature**: This release adds SearchUsers API which can be used to search for users with a Connect Instance
-* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.4](service/gamelift/CHANGELOG.md#v1144-2022-04-25)
- * **Documentation**: Documentation updates for Amazon GameLift.
-* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.13.0](service/mq/CHANGELOG.md#v1130-2022-04-25)
- * **Feature**: This release adds the CRITICAL_ACTION_REQUIRED broker state and the ActionRequired API property. CRITICAL_ACTION_REQUIRED informs you when your broker is degraded. ActionRequired provides you with a code which you can use to find instructions in the Developer Guide on how to resolve the issue.
-* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.12.0](service/rdsdata/CHANGELOG.md#v1120-2022-04-25)
- * **Feature**: Support to receive SQL query results in the form of a simplified JSON string. This enables developers using the new JSON string format to more easily convert it to an object using popular JSON string parsing libraries.
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.21.0](service/securityhub/CHANGELOG.md#v1210-2022-04-25)
- * **Feature**: Security Hub now lets you opt-out of auto-enabling the defaults standards (CIS and FSBP) in accounts that are auto-enabled with Security Hub via Security Hub's integration with AWS Organizations.
-
-# Release (2022-04-22)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.9.0](service/chimesdkmeetings/CHANGELOG.md#v190-2022-04-22)
- * **Feature**: Include additional exceptions types.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.36.0](service/ec2/CHANGELOG.md#v1360-2022-04-22)
- * **Feature**: Adds support for waiters that automatically poll for a deleted NAT Gateway until it reaches the deleted state.
-
-# Release (2022-04-21)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.5](service/elasticache/CHANGELOG.md#v1205-2022-04-21)
- * **Documentation**: Doc only update for ElastiCache
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.24.0](service/glue/CHANGELOG.md#v1240-2022-04-21)
- * **Feature**: This release adds APIs to create, read, delete, list, and batch read of Glue custom entity types
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.21.0](service/iotsitewise/CHANGELOG.md#v1210-2022-04-21)
- * **Feature**: This release adds 3 new batch data query APIs : BatchGetAssetPropertyValue, BatchGetAssetPropertyValueHistory and BatchGetAssetPropertyAggregates
-* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.7.0](service/iottwinmaker/CHANGELOG.md#v170-2022-04-21)
- * **Feature**: General availability (GA) for AWS IoT TwinMaker. For more information, see https://docs.aws.amazon.com/iot-twinmaker/latest/apireference/Welcome.html
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.12.0](service/lookoutmetrics/CHANGELOG.md#v1120-2022-04-21)
- * **Feature**: Added DetectMetricSetConfig API for detecting configuration required for creating metric set from provided S3 data source.
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.17.0](service/mediatailor/CHANGELOG.md#v1170-2022-04-21)
- * **Feature**: This release introduces tiered channels and adds support for live sources. Customers using a STANDARD channel can now create programs using live sources.
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.5](service/secretsmanager/CHANGELOG.md#v1155-2022-04-21)
- * **Documentation**: Documentation updates for Secrets Manager
-* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.17.0](service/storagegateway/CHANGELOG.md#v1170-2022-04-21)
- * **Feature**: This release adds support for minimum of 5 character length virtual tape barcodes.
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.8.0](service/wisdom/CHANGELOG.md#v180-2022-04-21)
- * **Feature**: This release updates the GetRecommendations API to include a trigger event list for classifying and grouping recommendations.
-
-# Release (2022-04-20)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.22.0](service/connect/CHANGELOG.md#v1220-2022-04-20)
- * **Feature**: This release adds APIs to search, claim, release, list, update, and describe phone numbers. You can also use them to associate and disassociate contact flows to phone numbers.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.21.0](service/macie2/CHANGELOG.md#v1210-2022-04-20)
- * **Feature**: Sensitive data findings in Amazon Macie now indicate how Macie found the sensitive data that produced a finding (originType).
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.14.0](service/mgn/CHANGELOG.md#v1140-2022-04-20)
- * **Feature**: Removed required annotation from input fields in Describe operations requests. Added quotaValue to ServiceQuotaExceededException
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.20.0](service/rds/CHANGELOG.md#v1200-2022-04-20)
- * **Feature**: Added a new cluster-level attribute to set the capacity range for Aurora Serverless v2 instances.
-
-# Release (2022-04-19)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.23.0](service/autoscaling/CHANGELOG.md#v1230-2022-04-19)
- * **Feature**: EC2 Auto Scaling now adds default instance warm-up times for all scaling activities, health check replacements, and other replacement events in the Auto Scaling instance lifecycle.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.25.0](service/kendra/CHANGELOG.md#v1250-2022-04-19)
- * **Feature**: Amazon Kendra now provides a data source connector for Quip. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-quip.html
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.17.0](service/kms/CHANGELOG.md#v1170-2022-04-19)
- * **Feature**: Adds support for KMS keys and APIs that generate and verify HMAC codes
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.19.0](service/personalize/CHANGELOG.md#v1190-2022-04-19)
- * **Feature**: Adding StartRecommender and StopRecommender APIs for Personalize.
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.15.0](service/polly/CHANGELOG.md#v1150-2022-04-19)
- * **Feature**: Amazon Polly adds new Austrian German voice - Hannah. Hannah is available as Neural voice only.
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.23.0](service/redshift/CHANGELOG.md#v1230-2022-04-19)
- * **Feature**: Introduces new fields for LogDestinationType and LogExports on EnableLogging requests and Enable/Disable/DescribeLogging responses. Customers can now select CloudWatch Logs as a destination for their Audit Logs.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.25.0](service/ssm/CHANGELOG.md#v1250-2022-04-19)
- * **Feature**: Added offset support for specifying the number of days to wait after the date and time specified by a CRON expression when creating SSM association.
-* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.15.0](service/textract/CHANGELOG.md#v1150-2022-04-19)
- * **Feature**: This release adds support for specifying and extracting information from documents using the Queries feature within Analyze Document API
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.4](service/transfer/CHANGELOG.md#v1184-2022-04-19)
- * **Documentation**: This release contains corrected HomeDirectoryMappings examples for several API functions: CreateAccess, UpdateAccess, CreateUser, and UpdateUser.
-* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.12.0](service/worklink/CHANGELOG.md#v1120-2022-04-19)
- * **Feature**: Amazon WorkLink is no longer supported. This will be removed in a future version of the SDK.
-
-# Release (2022-04-15)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.9.0](feature/dynamodb/attributevalue/CHANGELOG.md#v190-2022-04-15)
- * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution.
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.9.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v190-2022-04-15)
- * **Feature**: Support has been added for specifying a custom time format when encoding and decoding DynamoDB AttributeValues. Use `EncoderOptions.EncodeTime` to specify a custom time encoding function, and use `DecoderOptions.DecodeTime` for specifying how to handle the corresponding AttributeValues using the format. Thank you [Pablo Lopez](https://github.com/plopezlpz) for this contribution.
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.15.0](service/athena/CHANGELOG.md#v1150-2022-04-15)
- * **Feature**: This release adds subfields, ErrorMessage, Retryable, to the AthenaError response object in the GetQueryExecution API when a query fails.
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.19.0](service/lightsail/CHANGELOG.md#v1190-2022-04-15)
- * **Feature**: This release adds support to describe the synchronization status of the account-level block public access feature for your Amazon Lightsail buckets.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.19.0](service/rds/CHANGELOG.md#v1190-2022-04-15)
- * **Feature**: Removes Amazon RDS on VMware with the deletion of APIs related to Custom Availability Zones and Media installation
-
-# Release (2022-04-14)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.15.0](service/appflow/CHANGELOG.md#v1150-2022-04-14)
- * **Feature**: Enables users to pass custom token URL parameters for Oauth2 authentication during create connector profile
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.16.0](service/appstream/CHANGELOG.md#v1160-2022-04-14)
- * **Feature**: Includes updates for create and update fleet APIs to manage the session scripts locations for Elastic fleets.
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.18.0](service/batch/CHANGELOG.md#v1180-2022-04-14)
- * **Feature**: Enables configuration updates for compute environments with BEST_FIT_PROGRESSIVE and SPOT_CAPACITY_OPTIMIZED allocation strategies.
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.1](service/cloudwatch/CHANGELOG.md#v1181-2022-04-14)
- * **Documentation**: Updates documentation for additional statistics in CloudWatch Metric Streams.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.1](service/ec2/CHANGELOG.md#v1351-2022-04-14)
- * **Documentation**: Documentation updates for Amazon EC2.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.23.0](service/glue/CHANGELOG.md#v1230-2022-04-14)
- * **Feature**: Auto Scaling for Glue version 3.0 and later jobs to dynamically scale compute resources. This SDK change provides customers with the auto-scaled DPU usage
-
-# Release (2022-04-13)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.18.0](service/cloudwatch/CHANGELOG.md#v1180-2022-04-13)
- * **Feature**: Adds support for additional statistics in CloudWatch Metric Streams.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.23.0](service/fsx/CHANGELOG.md#v1230-2022-04-13)
- * **Feature**: This release adds support for deploying FSx for ONTAP file systems in a single Availability Zone.
-
-# Release (2022-04-12)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.17.0](service/devopsguru/CHANGELOG.md#v1170-2022-04-12)
- * **Feature**: This release adds the new DeleteInsight API, which deletes the insight along with the associated anomalies, events and recommendations.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.35.0](service/ec2/CHANGELOG.md#v1350-2022-04-12)
- * **Feature**: X2idn and X2iedn instances are powered by 3rd generation Intel Xeon Scalable processors with an all-core turbo frequency up to 3.5 GHzAmazon EC2. C6a instances are powered by 3rd generation AMD EPYC processors.
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.17.0](service/efs/CHANGELOG.md#v1170-2022-04-12)
- * **Feature**: Amazon EFS adds support for a ThrottlingException when using the CreateAccessPoint API if the account is nearing the AccessPoint limit(120).
-* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.6.0](service/iottwinmaker/CHANGELOG.md#v160-2022-04-12)
- * **Feature**: This release adds the following new features: 1) ListEntities API now supports search using ExternalId. 2) BatchPutPropertyValue and GetPropertyValueHistory API now allows users to represent time in sub-second level precisions.
-* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.15.4](service/kinesis/CHANGELOG.md#v1154-2022-04-12)
- * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.14.4](service/lexruntimev2/CHANGELOG.md#v1144-2022-04-12)
- * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.26.5](service/s3/CHANGELOG.md#v1265-2022-04-12)
- * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
-* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.4](service/transcribestreaming/CHANGELOG.md#v164-2022-04-12)
- * **Bug Fix**: Fixes an issue that caused the unexported constructor function names for EventStream types to be swapped for the event reader and writer respectively.
-
-# Release (2022-04-11)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.6.0](service/amplifyuibuilder/CHANGELOG.md#v160-2022-04-11)
- * **Feature**: In this release, we have added the ability to bind events to component level actions.
-* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.12.0](service/apprunner/CHANGELOG.md#v1120-2022-04-11)
- * **Feature**: This release adds tracing for App Runner services with X-Ray using AWS Distro for OpenTelemetry. New APIs: CreateObservabilityConfiguration, DescribeObservabilityConfiguration, ListObservabilityConfigurations, and DeleteObservabilityConfiguration. Updated APIs: CreateService and UpdateService.
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.18.0](service/workspaces/CHANGELOG.md#v1180-2022-04-11)
- * **Feature**: Added API support that allows customers to create GPU-enabled WorkSpaces using EC2 G4dn instances.
-
-# Release (2022-04-08)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.22.0](service/mediaconvert/CHANGELOG.md#v1220-2022-04-08)
- * **Feature**: AWS Elemental MediaConvert SDK has added support for the pass-through of WebVTT styling to WebVTT outputs, pass-through of KLV metadata to supported formats, and improved filter support for processing 444/RGB content.
-* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.17.0](service/mediapackagevod/CHANGELOG.md#v1170-2022-04-08)
- * **Feature**: This release adds ScteMarkersSource as an available field for Dash Packaging Configurations. When set to MANIFEST, MediaPackage will source the SCTE-35 markers from the manifest. When set to SEGMENTS, MediaPackage will source the SCTE-35 markers from the segments.
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.19.0](service/wafv2/CHANGELOG.md#v1190-2022-04-08)
- * **Feature**: Add a new CurrentDefaultVersion field to ListAvailableManagedRuleGroupVersions API response; add a new VersioningSupported boolean to each ManagedRuleGroup returned from ListAvailableManagedRuleGroups API response.
-
-# Release (2022-04-07)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/internal/v4a`: [v1.0.0](internal/v4a/CHANGELOG.md#v100-2022-04-07)
- * **Release**: New internal v4a signing module location.
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.18.0](service/docdb/CHANGELOG.md#v1180-2022-04-07)
- * **Feature**: Added support to enable/disable performance insights when creating or modifying db instances
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.16.0](service/eventbridge/CHANGELOG.md#v1160-2022-04-07)
- * **Feature**: Adds new EventBridge Endpoint resources for disaster recovery, multi-region failover, and cross-region replication capabilities to help you build resilient event-driven applications.
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.18.0](service/personalize/CHANGELOG.md#v1180-2022-04-07)
- * **Feature**: This release provides tagging support in AWS Personalize.
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.14.4](service/pi/CHANGELOG.md#v1144-2022-04-07)
- * **Documentation**: Adds support for DocumentDB to the Performance Insights API.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.27.0](service/sagemaker/CHANGELOG.md#v1270-2022-04-07)
- * **Feature**: Amazon Sagemaker Notebook Instances now supports G5 instance types
-
-# Release (2022-04-06)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.21.0](service/configservice/CHANGELOG.md#v1210-2022-04-06)
- * **Feature**: Add resourceType enums for AWS::EMR::SecurityConfiguration and AWS::SageMaker::CodeRepository
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.24.0](service/kendra/CHANGELOG.md#v1240-2022-04-06)
- * **Feature**: Amazon Kendra now provides a data source connector for Box. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-box.html
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.22.0](service/lambda/CHANGELOG.md#v1220-2022-04-06)
- * **Feature**: This release adds new APIs for creating and managing Lambda Function URLs and adds a new FunctionUrlAuthType parameter to the AddPermission API. Customers can use Function URLs to create built-in HTTPS endpoints on their functions.
-* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.7.0](service/panorama/CHANGELOG.md#v170-2022-04-06)
- * **Feature**: Added Brand field to device listings.
-
-# Release (2022-04-05)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.15.0](service/datasync/CHANGELOG.md#v1150-2022-04-05)
- * **Feature**: AWS DataSync now supports Amazon FSx for OpenZFS locations.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.22.0](service/fsx/CHANGELOG.md#v1220-2022-04-05)
- * **Feature**: Provide customers more visibility into file system status by adding new "Misconfigured Unavailable" status for Amazon FSx for Windows File Server.
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.21.4](service/s3control/CHANGELOG.md#v1214-2022-04-05)
- * **Documentation**: Documentation-only update for doc bug fixes for the S3 Control API docs.
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.20.0](service/securityhub/CHANGELOG.md#v1200-2022-04-05)
- * **Feature**: Added additional ASFF details for RdsSecurityGroup AutoScalingGroup, ElbLoadBalancer, CodeBuildProject and RedshiftCluster.
-
-# Release (2022-04-04)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.24.0](service/iot/CHANGELOG.md#v1240-2022-04-04)
- * **Feature**: AWS IoT - AWS IoT Device Defender adds support to list metric datapoints collected for IoT devices through the ListMetricValues API
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.13.0](service/proton/CHANGELOG.md#v1130-2022-04-04)
- * **Feature**: SDK release to support tagging for AWS Proton Repository resource
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.14.0](service/servicecatalog/CHANGELOG.md#v1140-2022-04-04)
- * **Feature**: This release adds ProvisioningArtifictOutputKeys to DescribeProvisioningParameters to reference the outputs of a Provisioned Product and deprecates ProvisioningArtifactOutputs.
-* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.12.4](service/sms/CHANGELOG.md#v1124-2022-04-04)
- * **Documentation**: Revised product update notice for SMS console deprecation.
-
-# Release (2022-04-01)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.21.0](service/connect/CHANGELOG.md#v1210-2022-04-01)
- * **Feature**: This release updates these APIs: UpdateInstanceAttribute, DescribeInstanceAttribute and ListInstanceAttributes. You can use it to programmatically enable/disable multi-party conferencing using attribute type MULTI_PARTY_CONFERENCING on the specified Amazon Connect instance.
-
-# Release (2022-03-31)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.8.4](feature/dynamodb/attributevalue/CHANGELOG.md#v184-2022-03-31)
- * **Documentation**: Fixes documentation typos in Number type's helper methods
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.8.4](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v184-2022-03-31)
- * **Documentation**: Fixes documentation typos in Number type's helper methods
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.3](service/auditmanager/CHANGELOG.md#v1183-2022-03-31)
- * **Documentation**: This release adds documentation updates for Audit Manager. The updates provide data deletion guidance when a customer deregisters Audit Manager or deregisters a delegated administrator.
-* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.9.0](service/cloudcontrol/CHANGELOG.md#v190-2022-03-31)
- * **Feature**: SDK release for Cloud Control API in Amazon Web Services China (Beijing) Region, operated by Sinnet, and Amazon Web Services China (Ningxia) Region, operated by NWCD
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.20.0](service/databrew/CHANGELOG.md#v1200-2022-03-31)
- * **Feature**: This AWS Glue Databrew release adds feature to support ORC as an input format.
-* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.8.0](service/grafana/CHANGELOG.md#v180-2022-03-31)
- * **Feature**: This release adds tagging support to the Managed Grafana service. New APIs: TagResource, UntagResource and ListTagsForResource. Updates: add optional field tags to support tagging while calling CreateWorkspace.
-* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoicev2`: [v1.0.0](service/pinpointsmsvoicev2/CHANGELOG.md#v100-2022-03-31)
- * **Release**: New AWS service client module
- * **Feature**: Amazon Pinpoint now offers a version 2.0 suite of SMS and voice APIs, providing increased control over sending and configuration. This release is a new SDK for sending SMS and voice messages called PinpointSMSVoiceV2.
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.9.0](service/route53recoverycluster/CHANGELOG.md#v190-2022-03-31)
- * **Feature**: This release adds a new API "ListRoutingControls" to list routing control states using the highly reliable Route 53 ARC data plane endpoints.
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.17.0](service/workspaces/CHANGELOG.md#v1170-2022-03-31)
- * **Feature**: Added APIs that allow you to customize the logo, login message, and help links in the WorkSpaces client login page. To learn more, visit https://docs.aws.amazon.com/workspaces/latest/adminguide/customize-branding.html
-
-# Release (2022-03-30)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.34.0](service/ec2/CHANGELOG.md#v1340-2022-03-30)
- * **Feature**: This release simplifies the auto-recovery configuration process enabling customers to set the recovery behavior to disabled or default
-* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.17.0](service/fms/CHANGELOG.md#v1170-2022-03-30)
- * **Feature**: AWS Firewall Manager now supports the configuration of third-party policies that can use either the centralized or distributed deployment models.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.21.0](service/fsx/CHANGELOG.md#v1210-2022-03-30)
- * **Feature**: This release adds support for modifying throughput capacity for FSx for ONTAP file systems.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.23.3](service/iot/CHANGELOG.md#v1233-2022-03-30)
- * **Documentation**: Doc only update for IoT that fixes customer-reported issues.
-* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.12.0](service/iotdataplane/CHANGELOG.md#v1120-2022-03-30)
- * **Feature**: Update the default AWS IoT Core Data Plane endpoint from VeriSign signed to ATS signed. If you have firewalls with strict egress rules, configure the rules to grant you access to data-ats.iot.[region].amazonaws.com or data-ats.iot.[region].amazonaws.com.cn.
-
-# Release (2022-03-29)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.15.0](service/organizations/CHANGELOG.md#v1150-2022-03-29)
- * **Feature**: This release provides the new CloseAccount API that enables principals in the management account to close any member account within an organization.
-
-# Release (2022-03-28)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.3](service/acmpca/CHANGELOG.md#v1173-2022-03-28)
- * **Documentation**: Updating service name entities
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.20.0](service/medialive/CHANGELOG.md#v1200-2022-03-28)
- * **Feature**: This release adds support for selecting a maintenance window.
-
-# Release (2022-03-25)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.17.0](service/batch/CHANGELOG.md#v1170-2022-03-25)
- * **Feature**: Bug Fix: Fixed a bug where shapes were marked as unboxed and were not serialized and sent over the wire, causing an API error from the service.
- * This is a breaking change, and has been accepted due to the API operation not being usable due to the members modeled as unboxed (aka value) types. The update changes the members to boxed (aka pointer) types so that the zero value of the members can be handled correctly by the SDK and service. Your application will fail to compile with the updated module. To workaround this you'll need to update your application to use pointer types for the members impacted.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.33.0](service/ec2/CHANGELOG.md#v1330-2022-03-25)
- * **Feature**: This release adds support for Amazon VPC Reachability Analyzer to analyze path through a Transit Gateway.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.24.0](service/ssm/CHANGELOG.md#v1240-2022-03-25)
- * **Feature**: This Patch Manager release supports creating, updating, and deleting Patch Baselines for Rocky Linux OS.
-
-# Release (2022-03-24)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.20.0](service/configservice/CHANGELOG.md#v1200-2022-03-24)
- * **Feature**: Added new APIs GetCustomRulePolicy and GetOrganizationCustomRulePolicy, and updated existing APIs PutConfigRule, DescribeConfigRule, DescribeConfigRuleEvaluationStatus, PutOrganizationConfigRule, DescribeConfigRule to support a new feature for building AWS Config rules with AWS CloudFormation Guard
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.21.0](service/lambda/CHANGELOG.md#v1210-2022-03-24)
- * **Feature**: Adds support for increased ephemeral storage (/tmp) up to 10GB for Lambda functions. Customers can now provision up to 10 GB of ephemeral storage per function instance, a 20x increase over the previous limit of 512 MB.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.19.0](service/transcribe/CHANGELOG.md#v1190-2022-03-24)
- * **Feature**: This release adds an additional parameter for subtitling with Amazon Transcribe batch jobs: outputStartIndex.
-
-# Release (2022-03-23)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.16.0
- * **Feature**: Update CredentialsCache to make use of two new optional CredentialsProvider interfaces to give the cache, per provider, behavior how the cache handles credentials that fail to refresh, and adjusting expires time. See [aws.CredentialsCache](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#CredentialsCache) for more details.
- * **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
-* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.11.0](credentials/CHANGELOG.md#v1110-2022-03-23)
- * **Feature**: Update `ec2rolecreds` package's `Provider` to implement support for CredentialsCache new optional caching strategy interfaces, HandleFailRefreshCredentialsCacheStrategy and AdjustExpiresByCredentialsCacheStrategy.
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.18.0](service/auditmanager/CHANGELOG.md#v1180-2022-03-23)
- * **Feature**: This release updates 1 API parameter, the SnsArn attribute. The character length and regex pattern for the SnsArn attribute have been updated, which enables you to deselect an SNS topic when using the UpdateSettings operation.
-* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.15.0](service/ebs/CHANGELOG.md#v1150-2022-03-23)
- * **Feature**: Increased the maximum supported value for the Timeout parameter of the StartSnapshot API from 60 minutes to 4320 minutes. Changed the HTTP error code for ConflictException from 503 to 409.
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.2](service/elasticache/CHANGELOG.md#v1202-2022-03-23)
- * **Documentation**: Doc only update for ElastiCache
-* `github.com/aws/aws-sdk-go-v2/service/gamesparks`: [v1.0.0](service/gamesparks/CHANGELOG.md#v100-2022-03-23)
- * **Release**: New AWS service client module
- * **Feature**: Released the preview of Amazon GameSparks, a fully managed AWS service that provides a multi-service backend for game developers.
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.22.0](service/redshift/CHANGELOG.md#v1220-2022-03-23)
- * **Feature**: This release adds a new [--encrypted | --no-encrypted] field in restore-from-cluster-snapshot API. Customers can now restore an unencrypted snapshot to a cluster encrypted with AWS Managed Key or their own KMS key.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.23.0](service/ssm/CHANGELOG.md#v1230-2022-03-23)
- * **Feature**: Update AddTagsToResource, ListTagsForResource, and RemoveTagsFromResource APIs to reflect the support for tagging Automation resources. Includes other minor documentation updates.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.1](service/transfer/CHANGELOG.md#v1181-2022-03-23)
- * **Documentation**: Documentation updates for AWS Transfer Family to describe how to remove an associated workflow from a server.
-
-# Release (2022-03-22)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.18.0](service/costexplorer/CHANGELOG.md#v1180-2022-03-22)
- * **Feature**: Added three new APIs to support tagging and resource-level authorization on Cost Explorer resources: TagResource, UntagResource, ListTagsForResource. Added optional parameters to CreateCostCategoryDefinition, CreateAnomalySubscription and CreateAnomalyMonitor APIs to support Tag On Create.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.2](service/ecs/CHANGELOG.md#v1182-2022-03-22)
- * **Documentation**: Documentation only update to address tickets
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.16.0](service/lakeformation/CHANGELOG.md#v1160-2022-03-22)
- * **Feature**: The release fixes the incorrect permissions called out in the documentation - DESCRIBE_TAG, ASSOCIATE_TAG, DELETE_TAG, ALTER_TAG. This trebuchet release fixes the corresponding SDK and documentation.
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.16.0](service/location/CHANGELOG.md#v1160-2022-03-22)
- * **Feature**: Amazon Location Service now includes a MaxResults parameter for GetDevicePositionHistory requests.
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.14.0](service/polly/CHANGELOG.md#v1140-2022-03-22)
- * **Feature**: Amazon Polly adds new Catalan voice - Arlet. Arlet is available as Neural voice only.
-
-# Release (2022-03-21)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.8.0](service/chimesdkmeetings/CHANGELOG.md#v180-2022-03-21)
- * **Feature**: Add support for media replication to link multiple WebRTC media sessions together to reach larger and global audiences. Participants connected to a replica session can be granted access to join the primary session and can switch sessions with their existing WebRTC connection
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.17.0](service/ecr/CHANGELOG.md#v1170-2022-03-21)
- * **Feature**: This release includes a fix in the DescribeImageScanFindings paginated output.
-* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.16.0](service/mediaconnect/CHANGELOG.md#v1160-2022-03-21)
- * **Feature**: This release adds support for selecting a maintenance window.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.21.0](service/quicksight/CHANGELOG.md#v1210-2022-03-21)
- * **Feature**: AWS QuickSight Service Features - Expand public API support for group management.
-* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.16.1](service/ram/CHANGELOG.md#v1161-2022-03-21)
- * **Documentation**: Document improvements to the RAM API operations and parameter descriptions.
-
-# Release (2022-03-18)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.22.0](service/glue/CHANGELOG.md#v1220-2022-03-18)
- * **Feature**: Added 9 new APIs for AWS Glue Interactive Sessions: ListSessions, StopSession, CreateSession, GetSession, DeleteSession, RunStatement, GetStatement, ListStatements, CancelStatement
-
-# Release (2022-03-16)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.17.0](service/acmpca/CHANGELOG.md#v1170-2022-03-16)
- * **Feature**: AWS Certificate Manager (ACM) Private Certificate Authority (CA) now supports customizable certificate subject names and extensions.
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.13.0](service/amplifybackend/CHANGELOG.md#v1130-2022-03-16)
- * **Feature**: Adding the ability to customize Cognito verification messages for email and SMS in CreateBackendAuth and UpdateBackendAuth. Adding deprecation documentation for ForgotPassword in CreateBackendAuth and UpdateBackendAuth
-* `github.com/aws/aws-sdk-go-v2/service/billingconductor`: [v1.0.0](service/billingconductor/CHANGELOG.md#v100-2022-03-16)
- * **Release**: New AWS service client module
- * **Feature**: This is the initial SDK release for AWS Billing Conductor. The AWS Billing Conductor is a customizable billing service, allowing you to customize your billing data to match your desired business structure.
-* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.13.0](service/s3outposts/CHANGELOG.md#v1130-2022-03-16)
- * **Feature**: S3 on Outposts is releasing a new API, ListSharedEndpoints, that lists all endpoints associated with S3 on Outpost, that has been shared by Resource Access Manager (RAM).
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.13.0](service/ssmincidents/CHANGELOG.md#v1130-2022-03-16)
- * **Feature**: Removed incorrect validation pattern for IncidentRecordSource.invokedBy
-
-# Release (2022-03-15)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.15.0](service/cognitoidentityprovider/CHANGELOG.md#v1150-2022-03-15)
- * **Feature**: Updated EmailConfigurationType and SmsConfigurationType to reflect that you can now choose Amazon SES and Amazon SNS resources in the same Region.
-* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.15.0](service/dataexchange/CHANGELOG.md#v1150-2022-03-15)
- * **Feature**: This feature enables data providers to use the RevokeRevision operation to revoke subscriber access to a given revision. Subscribers are unable to interact with assets within a revoked revision.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.32.0](service/ec2/CHANGELOG.md#v1320-2022-03-15)
- * **Feature**: Adds the Cascade parameter to the DeleteIpam API. Customers can use this parameter to automatically delete their IPAM, including non-default scopes, pools, cidrs, and allocations. There mustn't be any pools provisioned in the default public scope to use this parameter.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.1](service/ecs/CHANGELOG.md#v1181-2022-03-15)
- * **Documentation**: Documentation only update to address tickets
-* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.0.2](service/keyspaces/CHANGELOG.md#v102-2022-03-15)
- * **Documentation**: Fixing formatting issues in CLI and SDK documentation
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.15.1](service/location/CHANGELOG.md#v1151-2022-03-15)
- * **Documentation**: New HERE style "VectorHereExplore" and "VectorHereExploreTruck".
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.18.1](service/rds/CHANGELOG.md#v1181-2022-03-15)
- * **Documentation**: Various documentation improvements
-* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.17.0](service/robomaker/CHANGELOG.md#v1170-2022-03-15)
- * **Feature**: This release deprecates ROS, Ubuntu and Gazebo from RoboMaker Simulation Service Software Suites in favor of user-supplied containers and Relaxed Software Suites.
-
-# Release (2022-03-14)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.19.0](service/configservice/CHANGELOG.md#v1190-2022-03-14)
- * **Feature**: Add resourceType enums for AWS::ECR::PublicRepository and AWS::EC2::LaunchTemplate
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.1](service/elasticache/CHANGELOG.md#v1201-2022-03-14)
- * **Documentation**: Doc only update for ElastiCache
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.23.0](service/kendra/CHANGELOG.md#v1230-2022-03-14)
- * **Feature**: Amazon Kendra now provides a data source connector for Slack. For more information, see https://docs.aws.amazon.com/kendra/latest/dg/data-source-slack.html
-* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.14.0](service/timestreamquery/CHANGELOG.md#v1140-2022-03-14)
- * **Feature**: Amazon Timestream Scheduled Queries now support Timestamp datatype in a multi-measure record.
-
-# Release (2022-03-11)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.20.0](service/chime/CHANGELOG.md#v1200-2022-03-11)
- * **Feature**: Chime VoiceConnector Logging APIs will now support MediaMetricLogs. Also CreateMeetingDialOut now returns AccessDeniedException.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.20.0](service/connect/CHANGELOG.md#v1200-2022-03-11)
- * **Feature**: This release adds support for enabling Rich Messaging when starting a new chat session via the StartChatContact API. Rich Messaging enables the following formatting options: bold, italics, hyperlinks, bulleted lists, and numbered lists.
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.20.0](service/lambda/CHANGELOG.md#v1200-2022-03-11)
- * **Feature**: Adds PrincipalOrgID support to AddPermission API. Customers can use it to manage permissions to lambda functions at AWS Organizations level.
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.18.0](service/outposts/CHANGELOG.md#v1180-2022-03-11)
- * **Feature**: This release adds address filters for listSites
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.15.1](service/secretsmanager/CHANGELOG.md#v1151-2022-03-11)
- * **Documentation**: Documentation updates for Secrets Manager.
-* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.6.0](service/transcribestreaming/CHANGELOG.md#v160-2022-03-11)
- * **Feature**: Amazon Transcribe StartTranscription API now supports additional parameters for Language Identification feature: customVocabularies and customFilterVocabularies
-
-# Release (2022-03-10)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.20.0](service/lexmodelsv2/CHANGELOG.md#v1200-2022-03-10)
- * **Feature**: This release makes slotTypeId an optional parameter in CreateSlot and UpdateSlot APIs in Amazon Lex V2 for model building. Customers can create and update slots without specifying a slot type id.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.18.0](service/transcribe/CHANGELOG.md#v1180-2022-03-10)
- * **Feature**: Documentation fix for API `StartMedicalTranscriptionJobRequest`, now showing min sample rate as 16khz
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.18.0](service/transfer/CHANGELOG.md#v1180-2022-03-10)
- * **Feature**: Adding more descriptive error types for managed workflows
-
-# Release (2022-03-09)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.17.0](service/comprehend/CHANGELOG.md#v1170-2022-03-09)
- * **Feature**: Amazon Comprehend now supports extracting the sentiment associated with entities such as brands, products and services from text documents.
-
-# Release (2022-03-08.3)
-
-* No change notes available for this release.
-
-# Release (2022-03-08.2)
-
-* No change notes available for this release.
-
-# Release (2022-03-08)
-
-## General Highlights
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.11.0](service/amplify/CHANGELOG.md#v1110-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.5.0](service/amplifyuibuilder/CHANGELOG.md#v150-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.14.0](service/appflow/CHANGELOG.md#v1140-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.11.0](service/apprunner/CHANGELOG.md#v1110-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.14.0](service/athena/CHANGELOG.md#v1140-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.15.0](service/braket/CHANGELOG.md#v1150-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.7.0](service/chimesdkmeetings/CHANGELOG.md#v170-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.15.0](service/cloudtrail/CHANGELOG.md#v1150-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.19.0](service/connect/CHANGELOG.md#v1190-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.16.0](service/devopsguru/CHANGELOG.md#v1160-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.31.0](service/ec2/CHANGELOG.md#v1310-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.16.0](service/ecr/CHANGELOG.md#v1160-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.18.0](service/ecs/CHANGELOG.md#v1180-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.20.0](service/elasticache/CHANGELOG.md#v1200-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.10.0](service/finspacedata/CHANGELOG.md#v1100-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.12.0](service/fis/CHANGELOG.md#v1120-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.20.0](service/fsx/CHANGELOG.md#v1200-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.14.0](service/gamelift/CHANGELOG.md#v1140-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.15.0](service/greengrassv2/CHANGELOG.md#v1150-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.1.0](service/internal/checksum/CHANGELOG.md#v110-2022-03-08)
- * **Feature**: Updates the SDK's checksum validation logic to require opt-in to output response payload validation. The SDK was always performing output response payload checksum validation, not respecting the output validation model option. Fixes [#1606](https://github.com/aws/aws-sdk-go-v2/issues/1606)
-* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.8.0](service/kafkaconnect/CHANGELOG.md#v180-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.22.0](service/kendra/CHANGELOG.md#v1220-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/keyspaces`: [v1.0.0](service/keyspaces/CHANGELOG.md#v100-2022-03-08)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.14.0](service/macie/CHANGELOG.md#v1140-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.15.0](service/mediapackage/CHANGELOG.md#v1150-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.13.0](service/mgn/CHANGELOG.md#v1130-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.5.0](service/migrationhubrefactorspaces/CHANGELOG.md#v150-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.12.0](service/mq/CHANGELOG.md#v1120-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.6.0](service/panorama/CHANGELOG.md#v160-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.18.0](service/rds/CHANGELOG.md#v1180-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.8.0](service/route53recoverycluster/CHANGELOG.md#v180-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.12.0](service/servicecatalogappregistry/CHANGELOG.md#v1120-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.18.0](service/sqs/CHANGELOG.md#v1180-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.16.0](service/sts/CHANGELOG.md#v1160-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.14.0](service/synthetics/CHANGELOG.md#v1140-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.13.0](service/timestreamquery/CHANGELOG.md#v1130-2022-03-08)
- * **Documentation**: Updated service client model to latest release.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.17.0](service/transfer/CHANGELOG.md#v1170-2022-03-08)
- * **Feature**: Updated service client model to latest release.
-
-# Release (2022-02-24.2)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.21.0](service/autoscaling/CHANGELOG.md#v1210-2022-02-242)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.18.0](service/databrew/CHANGELOG.md#v1180-2022-02-242)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.15.0](service/fms/CHANGELOG.md#v1150-2022-02-242)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.17.0](service/lightsail/CHANGELOG.md#v1170-2022-02-242)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.19.0](service/route53/CHANGELOG.md#v1190-2022-02-242)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.20.0](service/s3control/CHANGELOG.md#v1200-2022-02-242)
- * **Feature**: API client updated
-
-# Release (2022-02-24)
-
-## General Highlights
- * **Feature**: Adds RetryMaxAttempts and RetryMode to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttle responses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Bug Fix**: Fixes the AWS Sigv4 signer to trim header value's whitespace when computing the canonical headers block of the string to sign.
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.14.0
- * **Feature**: Add new AdaptiveMode retryer to aws/retry package. This new retryer uses dynamic token bucketing with client ratelimiting when throttle responses are received.
- * **Feature**: Adds new interface aws.RetryerV2, replacing aws.Retryer and deprecating the GetInitialToken method in favor of GetAttemptToken so Context can be provided. The SDK will use aws.RetryerV2 internally. Wrapping aws.Retryers as aws.RetryerV2 automatically.
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.14.0](config/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: Adds support for loading RetryMaxAttempts and RetryMode from the environment and shared configuration files. These parameters drive how the SDK's API client will initialize its default retryer, if a custom retryer has not been specified. See [config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) module and [aws.Config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config) for more information about and how to use these new options.
- * **Feature**: Adds support for the `ca_bundle` parameter in shared config and credentials files. The usage of the file is the same as environment variable, `AWS_CA_BUNDLE`, but sourced from shared config. Fixes [#1589](https://github.com/aws/aws-sdk-go-v2/issues/1589)
-* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.9.0](credentials/CHANGELOG.md#v190-2022-02-24)
- * **Feature**: Adds support for `SourceIdentity` to `stscreds.AssumeRoleProvider` [#1588](https://github.com/aws/aws-sdk-go-v2/pull/1588). Fixes [#1575](https://github.com/aws/aws-sdk-go-v2/issues/1575)
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.7.0](feature/dynamodb/attributevalue/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: Fixes [#645](https://github.com/aws/aws-sdk-go-v2/issues/645), [#411](https://github.com/aws/aws-sdk-go-v2/issues/411) by adding support for (un)marshaling AttributeValue maps to Go maps key types of string, number, bool, and types implementing encoding.Text(un)Marshaler interface
- * **Bug Fix**: Fixes [#1569](https://github.com/aws/aws-sdk-go-v2/issues/1569) inconsistent serialization of Go struct field names
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.4.0](feature/dynamodb/expression/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: Add support for expression names with dots via new NameBuilder function NameNoDotSplit, related to [aws/aws-sdk-go#2570](https://github.com/aws/aws-sdk-go/issues/2570)
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.7.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: Fixes [#645](https://github.com/aws/aws-sdk-go-v2/issues/645), [#411](https://github.com/aws/aws-sdk-go-v2/issues/411) by adding support for (un)marshaling AttributeValue maps to Go maps key types of string, number, bool, and types implementing encoding.Text(un)Marshaler interface
- * **Bug Fix**: Fixes [#1569](https://github.com/aws/aws-sdk-go-v2/issues/1569) inconsistent serialization of Go struct field names
-* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.14.0](service/accessanalyzer/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.5.0](service/account/CHANGELOG.md#v150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.13.0](service/acm/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.15.0](service/acmpca/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/alexaforbusiness`: [v1.13.0](service/alexaforbusiness/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.13.0](service/amp/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.10.0](service/amplify/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.11.0](service/amplifybackend/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.4.0](service/amplifyuibuilder/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.14.0](service/apigateway/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.9.0](service/apigatewaymanagementapi/CHANGELOG.md#v190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.11.0](service/apigatewayv2/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.11.0](service/appconfig/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.3.0](service/appconfigdata/CHANGELOG.md#v130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.13.0](service/appflow/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.12.0](service/appintegrations/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.14.0](service/applicationautoscaling/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.8.0](service/applicationcostprofiler/CHANGELOG.md#v180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationdiscoveryservice`: [v1.11.0](service/applicationdiscoveryservice/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.14.0](service/applicationinsights/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.12.0](service/appmesh/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.10.0](service/apprunner/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.14.0](service/appstream/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.13.0](service/appsync/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.13.0](service/athena/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.16.0](service/auditmanager/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.20.0](service/autoscaling/CHANGELOG.md#v1200-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.11.0](service/autoscalingplans/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.14.0](service/backup/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.4.0](service/backupgateway/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.15.0](service/batch/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.14.0](service/braket/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/budgets`: [v1.11.0](service/budgets/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.18.0](service/chime/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.8.0](service/chimesdkidentity/CHANGELOG.md#v180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.6.0](service/chimesdkmeetings/CHANGELOG.md#v160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.8.0](service/chimesdkmessaging/CHANGELOG.md#v180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.15.0](service/cloud9/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.7.0](service/cloudcontrol/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.11.0](service/clouddirectory/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.19.0](service/cloudformation/CHANGELOG.md#v1190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.15.0](service/cloudfront/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.11.0](service/cloudhsm/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.12.0](service/cloudhsmv2/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.12.0](service/cloudsearch/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudsearchdomain`: [v1.10.0](service/cloudsearchdomain/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.14.0](service/cloudtrail/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.16.0](service/cloudwatch/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.13.0](service/cloudwatchevents/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.14.0](service/cloudwatchlogs/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.11.0](service/codeartifact/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.18.0](service/codebuild/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.12.0](service/codecommit/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.13.0](service/codedeploy/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.11.0](service/codeguruprofiler/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.14.0](service/codegurureviewer/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codepipeline`: [v1.12.0](service/codepipeline/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codestar`: [v1.10.0](service/codestar/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codestarconnections`: [v1.12.0](service/codestarconnections/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.10.0](service/codestarnotifications/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.12.0](service/cognitoidentity/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.13.0](service/cognitoidentityprovider/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cognitosync`: [v1.10.0](service/cognitosync/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.15.0](service/comprehend/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.12.0](service/comprehendmedical/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.16.0](service/computeoptimizer/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.17.0](service/configservice/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.18.0](service/connect/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.11.0](service/connectcontactlens/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.10.0](service/connectparticipant/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/costandusagereportservice`: [v1.12.0](service/costandusagereportservice/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.16.0](service/costexplorer/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.16.0](service/customerprofiles/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.17.0](service/databasemigrationservice/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.17.0](service/databrew/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.13.0](service/dataexchange/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/datapipeline`: [v1.12.0](service/datapipeline/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.13.0](service/datasync/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.10.0](service/dax/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.14.0](service/detective/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.12.0](service/devicefarm/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.15.0](service/devopsguru/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.16.0](service/directconnect/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.12.0](service/directoryservice/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.10.0](service/dlm/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.16.0](service/docdb/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.4.0](service/drs/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.14.0](service/dynamodb/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.12.0](service/dynamodbstreams/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.13.0](service/ebs/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.30.0](service/ec2/CHANGELOG.md#v1300-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.12.0](service/ec2instanceconnect/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.15.0](service/ecr/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.12.0](service/ecrpublic/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.17.0](service/ecs/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.15.0](service/efs/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.19.0](service/eks/CHANGELOG.md#v1190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.19.0](service/elasticache/CHANGELOG.md#v1190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.13.0](service/elasticbeanstalk/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticinference`: [v1.10.0](service/elasticinference/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.13.0](service/elasticloadbalancing/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.17.0](service/elasticloadbalancingv2/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.14.0](service/elasticsearchservice/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elastictranscoder`: [v1.12.0](service/elastictranscoder/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.16.0](service/emr/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.12.0](service/emrcontainers/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.14.0](service/eventbridge/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.5.0](service/evidently/CHANGELOG.md#v150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.7.0](service/finspace/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.9.0](service/finspacedata/CHANGELOG.md#v190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.13.0](service/firehose/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.11.0](service/fis/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.14.0](service/fms/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.18.0](service/forecast/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.10.0](service/forecastquery/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
- * **Bug Fix**: Fixed an issue that resulted in the wrong service endpoints being constructed.
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.18.0](service/frauddetector/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.19.0](service/fsx/CHANGELOG.md#v1190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.13.0](service/gamelift/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.12.0](service/glacier/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/globalaccelerator`: [v1.12.0](service/globalaccelerator/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.20.0](service/glue/CHANGELOG.md#v1200-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.6.0](service/grafana/CHANGELOG.md#v160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.12.0](service/greengrass/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.14.0](service/greengrassv2/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.12.0](service/groundstation/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.12.0](service/guardduty/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.14.0](service/health/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.13.0](service/healthlake/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.11.0](service/honeycode/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.17.0](service/iam/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.13.0](service/identitystore/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.18.0](service/imagebuilder/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/inspector`: [v1.11.0](service/inspector/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.5.0](service/inspector2/CHANGELOG.md#v150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/internal/checksum`: [v1.0.0](service/internal/checksum/CHANGELOG.md#v100-2022-02-24)
- * **Release**: New module for computing checksums
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.22.0](service/iot/CHANGELOG.md#v1220-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.9.0](service/iot1clickdevicesservice/CHANGELOG.md#v190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iot1clickprojects`: [v1.10.0](service/iot1clickprojects/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.11.0](service/iotanalytics/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.10.0](service/iotdataplane/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.13.0](service/iotdeviceadvisor/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.13.0](service/iotevents/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.10.0](service/ioteventsdata/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.11.0](service/iotfleethub/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.10.0](service/iotjobsdataplane/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotsecuretunneling`: [v1.11.0](service/iotsecuretunneling/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.19.0](service/iotsitewise/CHANGELOG.md#v1190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotthingsgraph`: [v1.11.0](service/iotthingsgraph/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.4.0](service/iottwinmaker/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.17.0](service/iotwireless/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.15.0](service/ivs/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.16.0](service/kafka/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.7.0](service/kafkaconnect/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.21.0](service/kendra/CHANGELOG.md#v1210-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.14.0](service/kinesis/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.12.0](service/kinesisanalytics/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.13.0](service/kinesisanalyticsv2/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideo`: [v1.10.0](service/kinesisvideo/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideoarchivedmedia`: [v1.11.0](service/kinesisvideoarchivedmedia/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideomedia`: [v1.9.0](service/kinesisvideomedia/CHANGELOG.md#v190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisvideosignaling`: [v1.9.0](service/kinesisvideosignaling/CHANGELOG.md#v190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.15.0](service/kms/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.14.0](service/lakeformation/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.18.0](service/lambda/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.15.0](service/lexmodelbuildingservice/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.18.0](service/lexmodelsv2/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimeservice`: [v1.11.0](service/lexruntimeservice/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.13.0](service/lexruntimev2/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.14.0](service/licensemanager/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.16.0](service/lightsail/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.14.0](service/location/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.11.0](service/lookoutequipment/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.10.0](service/lookoutmetrics/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.11.0](service/lookoutvision/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/machinelearning`: [v1.13.0](service/machinelearning/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.13.0](service/macie/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.19.0](service/macie2/CHANGELOG.md#v1190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.11.0](service/managedblockchain/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/marketplacecatalog`: [v1.11.0](service/marketplacecatalog/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/marketplacecommerceanalytics`: [v1.10.0](service/marketplacecommerceanalytics/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/marketplaceentitlementservice`: [v1.10.0](service/marketplaceentitlementservice/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.12.0](service/marketplacemetering/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.14.0](service/mediaconnect/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.20.0](service/mediaconvert/CHANGELOG.md#v1200-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.18.0](service/medialive/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.14.0](service/mediapackage/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.15.0](service/mediapackagevod/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediastore`: [v1.11.0](service/mediastore/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediastoredata`: [v1.11.0](service/mediastoredata/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.15.0](service/mediatailor/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.8.0](service/memorydb/CHANGELOG.md#v180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.12.0](service/mgn/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/migrationhub`: [v1.11.0](service/migrationhub/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubconfig`: [v1.11.0](service/migrationhubconfig/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.4.0](service/migrationhubrefactorspaces/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.4.0](service/migrationhubstrategy/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mobile`: [v1.10.0](service/mobile/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.11.0](service/mq/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mturk`: [v1.12.0](service/mturk/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.11.0](service/mwaa/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.15.0](service/neptune/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.14.0](service/networkfirewall/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.11.0](service/networkmanager/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.11.0](service/nimble/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.8.0](service/opensearch/CHANGELOG.md#v180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/opsworks`: [v1.12.0](service/opsworks/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.13.0](service/opsworkscm/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/organizations`: [v1.13.0](service/organizations/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.16.0](service/outposts/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.5.0](service/panorama/CHANGELOG.md#v150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.16.0](service/personalize/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/personalizeevents`: [v1.10.0](service/personalizeevents/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.10.0](service/personalizeruntime/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.13.0](service/pi/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.15.0](service/pinpoint/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/pinpointemail`: [v1.10.0](service/pinpointemail/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.9.0](service/pinpointsmsvoice/CHANGELOG.md#v190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.12.0](service/polly/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.13.0](service/pricing/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.11.0](service/proton/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.13.0](service/qldb/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.12.0](service/qldbsession/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.19.0](service/quicksight/CHANGELOG.md#v1190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.15.0](service/ram/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.5.0](service/rbin/CHANGELOG.md#v150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.17.0](service/rds/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rdsdata`: [v1.10.0](service/rdsdata/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.20.0](service/redshift/CHANGELOG.md#v1200-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.14.0](service/redshiftdata/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.16.0](service/rekognition/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.4.0](service/resiliencehub/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.11.0](service/resourcegroups/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.12.0](service/resourcegroupstaggingapi/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.15.0](service/robomaker/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.18.0](service/route53/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.11.0](service/route53domains/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.7.0](service/route53recoverycluster/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.8.0](service/route53recoverycontrolconfig/CHANGELOG.md#v180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.7.0](service/route53recoveryreadiness/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.14.0](service/route53resolver/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.5.0](service/rum/CHANGELOG.md#v150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.25.0](service/s3/CHANGELOG.md#v1250-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.19.0](service/s3control/CHANGELOG.md#v1190-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.11.0](service/s3outposts/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.25.0](service/sagemaker/CHANGELOG.md#v1250-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.11.0](service/sagemakera2iruntime/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakeredge`: [v1.10.0](service/sagemakeredge/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.10.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.14.0](service/sagemakerruntime/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.10.0](service/savingsplans/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.13.0](service/schemas/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.14.0](service/secretsmanager/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.18.0](service/securityhub/CHANGELOG.md#v1180-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/serverlessapplicationrepository`: [v1.10.0](service/serverlessapplicationrepository/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.12.0](service/servicecatalog/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.11.0](service/servicecatalogappregistry/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.16.0](service/servicediscovery/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/servicequotas`: [v1.12.0](service/servicequotas/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.13.0](service/ses/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.12.0](service/sesv2/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.12.0](service/sfn/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.15.0](service/shield/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.12.0](service/signer/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.11.0](service/sms/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.14.0](service/snowball/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.7.0](service/snowdevicemanagement/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.16.0](service/sns/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.17.0](service/sqs/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.21.0](service/ssm/CHANGELOG.md#v1210-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.12.0](service/ssmcontacts/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.11.0](service/ssmincidents/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.10.0](service/sso/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.13.0](service/ssoadmin/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.11.0](service/ssooidc/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.15.0](service/storagegateway/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.15.0](service/sts/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.12.0](service/support/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.12.0](service/swf/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.13.0](service/synthetics/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.13.0](service/textract/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.12.0](service/timestreamquery/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.12.0](service/timestreamwrite/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.16.0](service/transcribe/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.4.0](service/transcribestreaming/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.16.0](service/transfer/CHANGELOG.md#v1160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.12.0](service/translate/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.7.0](service/voiceid/CHANGELOG.md#v170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.10.0](service/waf/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.11.0](service/wafregional/CHANGELOG.md#v1110-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.17.0](service/wafv2/CHANGELOG.md#v1170-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.13.0](service/wellarchitected/CHANGELOG.md#v1130-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.6.0](service/wisdom/CHANGELOG.md#v160-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workdocs`: [v1.10.0](service/workdocs/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/worklink`: [v1.10.0](service/worklink/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.14.0](service/workmail/CHANGELOG.md#v1140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workmailmessageflow`: [v1.10.0](service/workmailmessageflow/CHANGELOG.md#v1100-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.15.0](service/workspaces/CHANGELOG.md#v1150-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.4.0](service/workspacesweb/CHANGELOG.md#v140-2022-02-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.12.0](service/xray/CHANGELOG.md#v1120-2022-02-24)
- * **Feature**: API client updated
-
-# Release (2022-01-28)
-
-## General Highlights
-* **Bug Fix**: Fixes the SDK's handling of `duration_sections` in the shared credentials file or specified in multiple shared config and shared credentials files under the same profile. [#1568](https://github.com/aws/aws-sdk-go-v2/pull/1568). Thanks to [Amir Szekely](https://github.com/kichik) for help reproduce this bug.
-* **Bug Fix**: Updates SDK API client deserialization to pre-allocate byte slice and string response payloads, [#1565](https://github.com/aws/aws-sdk-go-v2/pull/1565). Thanks to [Tyson Mote](https://github.com/tysonmote) for submitting this PR.
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.13.1](config/CHANGELOG.md#v1131-2022-01-28)
- * **Bug Fix**: Fixes LoadDefaultConfig handling of errors returned by passed in functional options. Previously errors returned from the LoadOptions passed into LoadDefaultConfig were incorrectly ignored. [#1562](https://github.com/aws/aws-sdk-go-v2/pull/1562). Thanks to [Pinglei Guo](https://github.com/pingleig) for submitting this PR.
- * **Bug Fix**: Updates `config` module to use os.UserHomeDir instead of hard coded environment variable for OS. [#1563](https://github.com/aws/aws-sdk-go-v2/pull/1563)
-* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.13.0](service/applicationinsights/CHANGELOG.md#v1130-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.13.1](service/cloudtrail/CHANGELOG.md#v1131-2022-01-28)
- * **Documentation**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.13.1](service/codegurureviewer/CHANGELOG.md#v1131-2022-01-28)
- * **Documentation**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.16.0](service/configservice/CHANGELOG.md#v1160-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.17.0](service/connect/CHANGELOG.md#v1170-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.12.1](service/ebs/CHANGELOG.md#v1121-2022-01-28)
- * **Documentation**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.29.0](service/ec2/CHANGELOG.md#v1290-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ec2instanceconnect`: [v1.11.0](service/ec2instanceconnect/CHANGELOG.md#v1110-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.14.0](service/efs/CHANGELOG.md#v1140-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/fis`: [v1.10.0](service/fis/CHANGELOG.md#v1100-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.17.0](service/frauddetector/CHANGELOG.md#v1170-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.18.0](service/fsx/CHANGELOG.md#v1180-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.11.0](service/greengrass/CHANGELOG.md#v1110-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.13.0](service/greengrassv2/CHANGELOG.md#v1130-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.11.0](service/guardduty/CHANGELOG.md#v1110-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.10.0](service/honeycode/CHANGELOG.md#v1100-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.14.0](service/ivs/CHANGELOG.md#v1140-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.15.0](service/kafka/CHANGELOG.md#v1150-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.13.0](service/location/CHANGELOG.md#v1130-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.9.0](service/lookoutmetrics/CHANGELOG.md#v190-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.18.0](service/macie2/CHANGELOG.md#v1180-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.19.0](service/mediaconvert/CHANGELOG.md#v1190-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.14.0](service/mediatailor/CHANGELOG.md#v1140-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.14.0](service/ram/CHANGELOG.md#v1140-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.6.1](service/route53recoveryreadiness/CHANGELOG.md#v161-2022-01-28)
- * **Documentation**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.24.0](service/sagemaker/CHANGELOG.md#v1240-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.17.0](service/securityhub/CHANGELOG.md#v1170-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.14.0](service/storagegateway/CHANGELOG.md#v1140-2022-01-28)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.15.0](service/transcribe/CHANGELOG.md#v1150-2022-01-28)
- * **Feature**: Updated to latest API model.
-
-# Release (2022-01-14)
-
-## General Highlights
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.13.0
- * **Bug Fix**: Updates the Retry middleware to release the retry token, on subsequent attempts. This fixes #1413, and is based on PR #1424
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.6.0](feature/dynamodb/attributevalue/CHANGELOG.md#v160-2022-01-14)
- * **Feature**: Adds new MarshalWithOptions and UnmarshalWithOptions helpers allowing Encoding and Decoding options to be specified when serializing AttributeValues. Addresses issue: https://github.com/aws/aws-sdk-go-v2/issues/1494
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodbstreams/attributevalue`: [v1.6.0](feature/dynamodbstreams/attributevalue/CHANGELOG.md#v160-2022-01-14)
- * **Feature**: Adds new MarshalWithOptions and UnmarshalWithOptions helpers allowing Encoding and Decoding options to be specified when serializing AttributeValues. Addresses issue: https://github.com/aws/aws-sdk-go-v2/issues/1494
-* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.12.0](service/appsync/CHANGELOG.md#v1120-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.10.0](service/autoscalingplans/CHANGELOG.md#v1100-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.15.0](service/computeoptimizer/CHANGELOG.md#v1150-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.15.0](service/costexplorer/CHANGELOG.md#v1150-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.16.0](service/databasemigrationservice/CHANGELOG.md#v1160-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.16.0](service/databrew/CHANGELOG.md#v1160-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.28.0](service/ec2/CHANGELOG.md#v1280-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.18.0](service/elasticache/CHANGELOG.md#v1180-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.13.0](service/elasticsearchservice/CHANGELOG.md#v1130-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.8.0](service/finspacedata/CHANGELOG.md#v180-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.13.0](service/fms/CHANGELOG.md#v1130-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.19.0](service/glue/CHANGELOG.md#v1190-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/honeycode`: [v1.9.0](service/honeycode/CHANGELOG.md#v190-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.12.0](service/identitystore/CHANGELOG.md#v1120-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.9.0](service/ioteventsdata/CHANGELOG.md#v190-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.16.0](service/iotwireless/CHANGELOG.md#v1160-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.20.0](service/kendra/CHANGELOG.md#v1200-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.17.0](service/lexmodelsv2/CHANGELOG.md#v1170-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.12.0](service/lexruntimev2/CHANGELOG.md#v1120-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.8.0](service/lookoutmetrics/CHANGELOG.md#v180-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.17.0](service/medialive/CHANGELOG.md#v1170-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.13.0](service/mediatailor/CHANGELOG.md#v1130-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.10.0](service/mwaa/CHANGELOG.md#v1100-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.10.0](service/nimble/CHANGELOG.md#v1100-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.7.0](service/opensearch/CHANGELOG.md#v170-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.12.0](service/pi/CHANGELOG.md#v1120-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.14.0](service/pinpoint/CHANGELOG.md#v1140-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.16.0](service/rds/CHANGELOG.md#v1160-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.20.0](service/ssm/CHANGELOG.md#v1200-2022-01-14)
- * **Feature**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.9.0](service/sso/CHANGELOG.md#v190-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.14.0](service/transcribe/CHANGELOG.md#v1140-2022-01-14)
- * **Documentation**: Updated API models
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.14.0](service/workspaces/CHANGELOG.md#v1140-2022-01-14)
- * **Feature**: Updated API models
-
-# Release (2022-01-07)
-
-## General Highlights
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.12.0](config/CHANGELOG.md#v1120-2022-01-07)
- * **Feature**: Add load option for CredentialCache. Adds a new member to the LoadOptions struct, CredentialsCacheOptions. This member allows specifying a function that will be used to configure the CredentialsCache. The CredentialsCacheOptions will only be used if the configuration loader will wrap the underlying credential provider in the CredentialsCache.
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.12.0](service/appstream/CHANGELOG.md#v1120-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.12.0](service/cloudtrail/CHANGELOG.md#v1120-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.12.0](service/detective/CHANGELOG.md#v1120-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.27.0](service/ec2/CHANGELOG.md#v1270-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.15.0](service/ecs/CHANGELOG.md#v1150-2022-01-07)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.17.0](service/eks/CHANGELOG.md#v1170-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.18.0](service/glue/CHANGELOG.md#v1180-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.11.0](service/greengrassv2/CHANGELOG.md#v1110-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.20.0](service/iot/CHANGELOG.md#v1200-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.12.0](service/lakeformation/CHANGELOG.md#v1120-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.16.0](service/lambda/CHANGELOG.md#v1160-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.17.0](service/mediaconvert/CHANGELOG.md#v1170-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.17.0](service/quicksight/CHANGELOG.md#v1170-2022-01-07)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.15.0](service/rds/CHANGELOG.md#v1150-2022-01-07)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.14.0](service/rekognition/CHANGELOG.md#v1140-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.23.0](service/s3/CHANGELOG.md#v1230-2022-01-07)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.17.0](service/s3control/CHANGELOG.md#v1170-2022-01-07)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.9.0](service/s3outposts/CHANGELOG.md#v190-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.22.0](service/sagemaker/CHANGELOG.md#v1220-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.12.0](service/secretsmanager/CHANGELOG.md#v1120-2022-01-07)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.9.0](service/ssooidc/CHANGELOG.md#v190-2022-01-07)
- * **Feature**: API client updated
-
-# Release (2021-12-21)
-
-## General Highlights
-* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.11.0](service/accessanalyzer/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.10.0](service/acm/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.11.0](service/apigateway/CHANGELOG.md#v1110-2021-12-21)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.11.0](service/applicationautoscaling/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.10.0](service/appsync/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.17.0](service/autoscaling/CHANGELOG.md#v1170-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.3.0](service/chimesdkmeetings/CHANGELOG.md#v130-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.5.0](service/chimesdkmessaging/CHANGELOG.md#v150-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.4.0](service/cloudcontrol/CHANGELOG.md#v140-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.16.0](service/cloudformation/CHANGELOG.md#v1160-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.13.0](service/cloudwatch/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.10.0](service/cloudwatchevents/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.11.0](service/cloudwatchlogs/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: API client updated
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.10.0](service/codedeploy/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/comprehendmedical`: [v1.9.0](service/comprehendmedical/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.13.0](service/configservice/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.13.0](service/customerprofiles/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.14.0](service/databasemigrationservice/CHANGELOG.md#v1140-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.10.0](service/datasync/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.12.0](service/devopsguru/CHANGELOG.md#v1120-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.13.0](service/directconnect/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.13.0](service/docdb/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.11.0](service/dynamodb/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.9.0](service/dynamodbstreams/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.26.0](service/ec2/CHANGELOG.md#v1260-2021-12-21)
- * **Feature**: API client updated
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.12.0](service/ecr/CHANGELOG.md#v1120-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.14.0](service/ecs/CHANGELOG.md#v1140-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.16.0](service/elasticache/CHANGELOG.md#v1160-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.10.0](service/elasticloadbalancing/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.14.0](service/elasticloadbalancingv2/CHANGELOG.md#v1140-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.11.0](service/elasticsearchservice/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.13.0](service/emr/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.11.0](service/eventbridge/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.6.0](service/finspacedata/CHANGELOG.md#v160-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.15.0](service/forecast/CHANGELOG.md#v1150-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.9.0](service/glacier/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.9.0](service/groundstation/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.11.0](service/health/CHANGELOG.md#v1110-2021-12-21)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.15.0](service/imagebuilder/CHANGELOG.md#v1150-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.19.0](service/iot/CHANGELOG.md#v1190-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.11.0](service/kinesis/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.9.0](service/kinesisanalytics/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.10.0](service/kinesisanalyticsv2/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.12.0](service/kms/CHANGELOG.md#v1120-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.15.0](service/lambda/CHANGELOG.md#v1150-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.15.0](service/lexmodelsv2/CHANGELOG.md#v1150-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.10.0](service/location/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.6.0](service/lookoutmetrics/CHANGELOG.md#v160-2021-12-21)
- * **Feature**: API client updated
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/lookoutvision`: [v1.8.0](service/lookoutvision/CHANGELOG.md#v180-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/marketplacemetering`: [v1.9.0](service/marketplacemetering/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.11.0](service/mediaconnect/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.12.0](service/neptune/CHANGELOG.md#v1120-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.11.0](service/networkfirewall/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.8.0](service/nimble/CHANGELOG.md#v180-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.5.0](service/opensearch/CHANGELOG.md#v150-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.13.0](service/outposts/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.10.0](service/pi/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.10.0](service/qldb/CHANGELOG.md#v1100-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.14.0](service/rds/CHANGELOG.md#v1140-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.17.0](service/redshift/CHANGELOG.md#v1170-2021-12-21)
- * **Feature**: API client updated
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.8.0](service/resourcegroups/CHANGELOG.md#v180-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.9.0](service/resourcegroupstaggingapi/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.15.0](service/route53/CHANGELOG.md#v1150-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53domains`: [v1.8.0](service/route53domains/CHANGELOG.md#v180-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.5.0](service/route53recoverycontrolconfig/CHANGELOG.md#v150-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.22.0](service/s3/CHANGELOG.md#v1220-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.16.0](service/s3control/CHANGELOG.md#v1160-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.21.0](service/sagemaker/CHANGELOG.md#v1210-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/savingsplans`: [v1.7.3](service/savingsplans/CHANGELOG.md#v173-2021-12-21)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.11.0](service/secretsmanager/CHANGELOG.md#v1110-2021-12-21)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.14.0](service/securityhub/CHANGELOG.md#v1140-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.9.0](service/sfn/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/sms`: [v1.8.0](service/sms/CHANGELOG.md#v180-2021-12-21)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.13.0](service/sns/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.14.0](service/sqs/CHANGELOG.md#v1140-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.18.0](service/ssm/CHANGELOG.md#v1180-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.12.0](service/sts/CHANGELOG.md#v1120-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.9.0](service/support/CHANGELOG.md#v190-2021-12-21)
- * **Documentation**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.9.0](service/swf/CHANGELOG.md#v190-2021-12-21)
- * **Feature**: Updated to latest service endpoints
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.13.0](service/transfer/CHANGELOG.md#v1130-2021-12-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.11.0](service/workmail/CHANGELOG.md#v1110-2021-12-21)
- * **Feature**: API client updated
-
-# Release (2021-12-03)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.10.1](service/accessanalyzer/CHANGELOG.md#v1101-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.9.3](service/amp/CHANGELOG.md#v193-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/amplifyuibuilder`: [v1.0.0](service/amplifyuibuilder/CHANGELOG.md#v100-2021-12-03)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.8.3](service/appmesh/CHANGELOG.md#v183-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.10.2](service/braket/CHANGELOG.md#v1102-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.7.3](service/codeguruprofiler/CHANGELOG.md#v173-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.1.1](service/evidently/CHANGELOG.md#v111-2021-12-03)
- * **Bug Fix**: Fixed a bug that prevented the resolution of the correct endpoint for some API operations.
-* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.2.3](service/grafana/CHANGELOG.md#v123-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.9.2](service/location/CHANGELOG.md#v192-2021-12-03)
- * **Bug Fix**: Fixed a bug that prevented the resolution of the correct endpoint for some API operations.
- * **Bug Fix**: Fixed an issue that caused some operations to not be signed using sigv4, resulting in authentication failures.
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.7.0](service/networkmanager/CHANGELOG.md#v170-2021-12-03)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.7.3](service/nimble/CHANGELOG.md#v173-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.7.2](service/proton/CHANGELOG.md#v172-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.10.0](service/ram/CHANGELOG.md#v1100-2021-12-03)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.12.0](service/rekognition/CHANGELOG.md#v1120-2021-12-03)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.3.3](service/snowdevicemanagement/CHANGELOG.md#v133-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.2.3](service/wisdom/CHANGELOG.md#v123-2021-12-03)
- * **Bug Fix**: Fixed an issue that prevent auto-filling of an API's idempotency parameters when not explictly provided by the caller.
-
-# Release (2021-12-02)
-
-## General Highlights
-* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.11.0](config/CHANGELOG.md#v1110-2021-12-02)
- * **Feature**: Add support for specifying `EndpointResolverWithOptions` on `LoadOptions`, and associated `WithEndpointResolverWithOptions`.
-* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.10.0](service/accessanalyzer/CHANGELOG.md#v1100-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.9.0](service/applicationinsights/CHANGELOG.md#v190-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/backupgateway`: [v1.0.0](service/backupgateway/CHANGELOG.md#v100-2021-12-02)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/cloudhsm`: [v1.8.0](service/cloudhsm/CHANGELOG.md#v180-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.11.0](service/devopsguru/CHANGELOG.md#v1110-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.12.0](service/directconnect/CHANGELOG.md#v1120-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.10.0](service/dynamodb/CHANGELOG.md#v1100-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.25.0](service/ec2/CHANGELOG.md#v1250-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.1.0](service/evidently/CHANGELOG.md#v110-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.14.0](service/fsx/CHANGELOG.md#v1140-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.16.0](service/glue/CHANGELOG.md#v1160-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.1.0](service/inspector2/CHANGELOG.md#v110-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.18.0](service/iot/CHANGELOG.md#v1180-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iottwinmaker`: [v1.0.0](service/iottwinmaker/CHANGELOG.md#v100-2021-12-02)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.11.0](service/kafka/CHANGELOG.md#v1110-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.17.0](service/kendra/CHANGELOG.md#v1170-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.10.0](service/kinesis/CHANGELOG.md#v1100-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.10.0](service/lakeformation/CHANGELOG.md#v1100-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.14.0](service/lexmodelsv2/CHANGELOG.md#v1140-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.10.0](service/lexruntimev2/CHANGELOG.md#v1100-2021-12-02)
- * **Feature**: Support has been added for the `StartConversation` API.
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.12.0](service/outposts/CHANGELOG.md#v1120-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.1.0](service/rbin/CHANGELOG.md#v110-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.10.0](service/redshiftdata/CHANGELOG.md#v1100-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.1.0](service/rum/CHANGELOG.md#v110-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.21.0](service/s3/CHANGELOG.md#v1210-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.20.0](service/sagemaker/CHANGELOG.md#v1200-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.11.0](service/sagemakerruntime/CHANGELOG.md#v1110-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.11.0](service/shield/CHANGELOG.md#v1110-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.10.0](service/snowball/CHANGELOG.md#v1100-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.10.0](service/storagegateway/CHANGELOG.md#v1100-2021-12-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workspacesweb`: [v1.0.0](service/workspacesweb/CHANGELOG.md#v100-2021-12-02)
- * **Release**: New AWS service client module
-
-# Release (2021-11-30)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.16.0](service/autoscaling/CHANGELOG.md#v1160-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.10.0](service/backup/CHANGELOG.md#v1100-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.10.0](service/braket/CHANGELOG.md#v1100-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.2.0](service/chimesdkmeetings/CHANGELOG.md#v120-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.15.0](service/cloudformation/CHANGELOG.md#v1150-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.13.0](service/computeoptimizer/CHANGELOG.md#v1130-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.13.0](service/connect/CHANGELOG.md#v1130-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.12.0](service/customerprofiles/CHANGELOG.md#v1120-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.13.0](service/databasemigrationservice/CHANGELOG.md#v1130-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.9.0](service/dataexchange/CHANGELOG.md#v190-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.9.0](service/dynamodb/CHANGELOG.md#v190-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.24.0](service/ec2/CHANGELOG.md#v1240-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.11.0](service/ecr/CHANGELOG.md#v1110-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.13.0](service/ecs/CHANGELOG.md#v1130-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.15.0](service/eks/CHANGELOG.md#v1150-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.15.0](service/elasticache/CHANGELOG.md#v1150-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.13.0](service/elasticloadbalancingv2/CHANGELOG.md#v1130-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.10.0](service/elasticsearchservice/CHANGELOG.md#v1100-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/evidently`: [v1.0.0](service/evidently/CHANGELOG.md#v100-2021-11-30)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.5.0](service/finspacedata/CHANGELOG.md#v150-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.14.0](service/imagebuilder/CHANGELOG.md#v1140-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/inspector2`: [v1.0.0](service/inspector2/CHANGELOG.md#v100-2021-11-30)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery`: [v1.3.2](service/internal/endpoint-discovery/CHANGELOG.md#v132-2021-11-30)
- * **Bug Fix**: Fixed a race condition that caused concurrent calls relying on endpoint discovery to share the same `url.URL` reference in their operation's http.Request.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.17.0](service/iot/CHANGELOG.md#v1170-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.9.0](service/iotdeviceadvisor/CHANGELOG.md#v190-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.15.0](service/iotsitewise/CHANGELOG.md#v1150-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.13.0](service/iotwireless/CHANGELOG.md#v1130-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.14.0](service/lambda/CHANGELOG.md#v1140-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.14.0](service/macie2/CHANGELOG.md#v1140-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.8.0](service/mgn/CHANGELOG.md#v180-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubrefactorspaces`: [v1.0.0](service/migrationhubrefactorspaces/CHANGELOG.md#v100-2021-11-30)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.4.0](service/opensearch/CHANGELOG.md#v140-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.11.0](service/outposts/CHANGELOG.md#v1110-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.12.0](service/personalize/CHANGELOG.md#v1120-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/personalizeruntime`: [v1.7.0](service/personalizeruntime/CHANGELOG.md#v170-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.12.0](service/pinpoint/CHANGELOG.md#v1120-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.7.0](service/proton/CHANGELOG.md#v170-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.15.0](service/quicksight/CHANGELOG.md#v1150-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rbin`: [v1.0.0](service/rbin/CHANGELOG.md#v100-2021-11-30)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.13.0](service/rds/CHANGELOG.md#v1130-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.16.0](service/redshift/CHANGELOG.md#v1160-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rum`: [v1.0.0](service/rum/CHANGELOG.md#v100-2021-11-30)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.20.0](service/s3/CHANGELOG.md#v1200-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.15.0](service/s3control/CHANGELOG.md#v1150-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.13.0](service/sqs/CHANGELOG.md#v1130-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.17.0](service/ssm/CHANGELOG.md#v1170-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.11.0](service/sts/CHANGELOG.md#v1110-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.10.0](service/textract/CHANGELOG.md#v1100-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.8.0](service/timestreamquery/CHANGELOG.md#v180-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.8.0](service/timestreamwrite/CHANGELOG.md#v180-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.1.0](service/transcribestreaming/CHANGELOG.md#v110-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.8.0](service/translate/CHANGELOG.md#v180-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.9.0](service/wellarchitected/CHANGELOG.md#v190-2021-11-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.11.0](service/workspaces/CHANGELOG.md#v1110-2021-11-30)
- * **Feature**: API client updated
-
-# Release (2021-11-19)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.11.1
- * **Bug Fix**: Fixed a bug that prevented aws.EndpointResolverWithOptionsFunc from satisfying the aws.EndpointResolverWithOptions interface.
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.8.0](service/amplifybackend/CHANGELOG.md#v180-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.10.0](service/apigateway/CHANGELOG.md#v1100-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appconfig`: [v1.7.0](service/appconfig/CHANGELOG.md#v170-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appconfigdata`: [v1.0.0](service/appconfigdata/CHANGELOG.md#v100-2021-11-19)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.8.0](service/applicationinsights/CHANGELOG.md#v180-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.10.0](service/appstream/CHANGELOG.md#v1100-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.12.0](service/auditmanager/CHANGELOG.md#v1120-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.11.0](service/batch/CHANGELOG.md#v1110-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.14.0](service/chime/CHANGELOG.md#v1140-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.1.0](service/chimesdkmeetings/CHANGELOG.md#v110-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.14.0](service/cloudformation/CHANGELOG.md#v1140-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.10.0](service/cloudtrail/CHANGELOG.md#v1100-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.12.0](service/cloudwatch/CHANGELOG.md#v1120-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.12.0](service/connect/CHANGELOG.md#v1120-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.12.0](service/databasemigrationservice/CHANGELOG.md#v1120-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.13.0](service/databrew/CHANGELOG.md#v1130-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.10.0](service/devopsguru/CHANGELOG.md#v1100-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/drs`: [v1.0.0](service/drs/CHANGELOG.md#v100-2021-11-19)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.8.0](service/dynamodbstreams/CHANGELOG.md#v180-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.23.0](service/ec2/CHANGELOG.md#v1230-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.14.0](service/eks/CHANGELOG.md#v1140-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.14.0](service/forecast/CHANGELOG.md#v1140-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.10.0](service/ivs/CHANGELOG.md#v1100-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.10.0](service/kafka/CHANGELOG.md#v1100-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.16.0](service/kendra/CHANGELOG.md#v1160-2021-11-19)
- * **Announcement**: Fix API modeling bug incorrectly generating `DocumentAttributeValue` type as a union instead of a structure. This update corrects this bug by correcting the `DocumentAttributeValue` type to be a `struct` instead of an `interface`. This change also removes the `DocumentAttributeValueMember` types. To migrate to this change your application using service/kendra will need to be updated to use struct members in `DocumentAttributeValue` instead of `DocumentAttributeValueMember` types.
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.11.0](service/kms/CHANGELOG.md#v1110-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.13.0](service/lambda/CHANGELOG.md#v1130-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.13.0](service/lexmodelsv2/CHANGELOG.md#v1130-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.9.0](service/lexruntimev2/CHANGELOG.md#v190-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.9.0](service/location/CHANGELOG.md#v190-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.15.0](service/mediaconvert/CHANGELOG.md#v1150-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.14.0](service/medialive/CHANGELOG.md#v1140-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.7.0](service/mgn/CHANGELOG.md#v170-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/migrationhubstrategy`: [v1.0.0](service/migrationhubstrategy/CHANGELOG.md#v100-2021-11-19)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.9.0](service/qldb/CHANGELOG.md#v190-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.9.0](service/qldbsession/CHANGELOG.md#v190-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.15.0](service/redshift/CHANGELOG.md#v1150-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.12.0](service/sns/CHANGELOG.md#v1120-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.16.0](service/ssm/CHANGELOG.md#v1160-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.12.0](service/transfer/CHANGELOG.md#v1120-2021-11-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.14.0](service/wafv2/CHANGELOG.md#v1140-2021-11-19)
- * **Feature**: API client updated
-
-# Release (2021-11-12)
-
-## General Highlights
-* **Feature**: Service clients now support custom endpoints that have an initial URI path defined.
-* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature.
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.9.0](service/backup/CHANGELOG.md#v190-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.10.0](service/batch/CHANGELOG.md#v1100-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmeetings`: [v1.0.0](service/chimesdkmeetings/CHANGELOG.md#v100-2021-11-12)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.12.0](service/computeoptimizer/CHANGELOG.md#v1120-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.11.0](service/connect/CHANGELOG.md#v1110-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.12.0](service/docdb/CHANGELOG.md#v1120-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.8.0](service/dynamodb/CHANGELOG.md#v180-2021-11-12)
- * **Documentation**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.22.0](service/ec2/CHANGELOG.md#v1220-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.12.0](service/ecs/CHANGELOG.md#v1120-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.9.0](service/gamelift/CHANGELOG.md#v190-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.9.0](service/greengrassv2/CHANGELOG.md#v190-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.10.0](service/health/CHANGELOG.md#v1100-2021-11-12)
- * **Documentation**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.9.0](service/identitystore/CHANGELOG.md#v190-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.12.0](service/iotwireless/CHANGELOG.md#v1120-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.11.0](service/neptune/CHANGELOG.md#v1110-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.12.0](service/rds/CHANGELOG.md#v1120-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/resiliencehub`: [v1.0.0](service/resiliencehub/CHANGELOG.md#v100-2021-11-12)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.8.0](service/resourcegroupstaggingapi/CHANGELOG.md#v180-2021-11-12)
- * **Documentation**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.14.0](service/s3control/CHANGELOG.md#v1140-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.19.0](service/sagemaker/CHANGELOG.md#v1190-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.10.0](service/sagemakerruntime/CHANGELOG.md#v1100-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.7.0](service/ssmincidents/CHANGELOG.md#v170-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.11.0](service/transcribe/CHANGELOG.md#v1110-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/translate`: [v1.7.0](service/translate/CHANGELOG.md#v170-2021-11-12)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.13.0](service/wafv2/CHANGELOG.md#v1130-2021-11-12)
- * **Feature**: Updated service to latest API model.
-
-# Release (2021-11-06)
-
-## General Highlights
-* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream`: [v1.0.0](aws/protocol/eventstream/CHANGELOG.md#v100-2021-11-06)
- * **Announcement**: Support has been added for AWS EventStream APIs for Kinesis, S3, and Transcribe Streaming. Support for the Lex Runtime V2 EventStream API will be added in a future release.
- * **Release**: Protocol support has been added for AWS event stream.
-* `github.com/aws/aws-sdk-go-v2/internal/endpoints/v2`: [v2.0.0](internal/endpoints/v2/CHANGELOG.md#v200-2021-11-06)
- * **Release**: Endpoint Variant Model Support
-* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.6.0](service/applicationinsights/CHANGELOG.md#v160-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.8.0](service/appstream/CHANGELOG.md#v180-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.11.0](service/auditmanager/CHANGELOG.md#v1110-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.14.0](service/autoscaling/CHANGELOG.md#v1140-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.13.0](service/chime/CHANGELOG.md#v1130-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.4.0](service/chimesdkidentity/CHANGELOG.md#v140-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.4.0](service/chimesdkmessaging/CHANGELOG.md#v140-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.10.0](service/cloudfront/CHANGELOG.md#v1100-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.7.0](service/codecommit/CHANGELOG.md#v170-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.10.0](service/connect/CHANGELOG.md#v1100-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.7.0](service/connectcontactlens/CHANGELOG.md#v170-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/connectparticipant`: [v1.6.0](service/connectparticipant/CHANGELOG.md#v160-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.10.0](service/databasemigrationservice/CHANGELOG.md#v1100-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.8.0](service/datasync/CHANGELOG.md#v180-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.11.0](service/docdb/CHANGELOG.md#v1110-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.9.0](service/ebs/CHANGELOG.md#v190-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.21.0](service/ec2/CHANGELOG.md#v1210-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.9.0](service/ecr/CHANGELOG.md#v190-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.11.0](service/ecs/CHANGELOG.md#v1110-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.12.0](service/eks/CHANGELOG.md#v1120-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.13.0](service/elasticache/CHANGELOG.md#v1130-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.9.0](service/elasticsearchservice/CHANGELOG.md#v190-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.8.0](service/emrcontainers/CHANGELOG.md#v180-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.4.0](service/finspace/CHANGELOG.md#v140-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.12.0](service/fsx/CHANGELOG.md#v1120-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.8.0](service/gamelift/CHANGELOG.md#v180-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.9.0](service/health/CHANGELOG.md#v190-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.12.0](service/iam/CHANGELOG.md#v1120-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/internal/eventstreamtesting`: [v1.0.0](service/internal/eventstreamtesting/CHANGELOG.md#v100-2021-11-06)
- * **Release**: Protocol support has been added for AWS event stream.
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.13.0](service/iotsitewise/CHANGELOG.md#v1130-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.14.0](service/kendra/CHANGELOG.md#v1140-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.8.0](service/kinesis/CHANGELOG.md#v180-2021-11-06)
- * **Feature**: Support has been added for the SubscribeToShard API.
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.9.0](service/kms/CHANGELOG.md#v190-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.12.0](service/lightsail/CHANGELOG.md#v1120-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.13.0](service/macie2/CHANGELOG.md#v1130-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.6.0](service/mgn/CHANGELOG.md#v160-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.10.0](service/neptune/CHANGELOG.md#v1100-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/networkmanager`: [v1.6.0](service/networkmanager/CHANGELOG.md#v160-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.6.0](service/nimble/CHANGELOG.md#v160-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.3.0](service/opensearch/CHANGELOG.md#v130-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.14.0](service/quicksight/CHANGELOG.md#v1140-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.11.0](service/rds/CHANGELOG.md#v1110-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.10.0](service/rekognition/CHANGELOG.md#v1100-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.9.0](service/route53resolver/CHANGELOG.md#v190-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.18.0](service/s3/CHANGELOG.md#v1180-2021-11-06)
- * **Feature**: Support has been added for the SelectObjectContent API.
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.13.0](service/s3control/CHANGELOG.md#v1130-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.18.0](service/sagemaker/CHANGELOG.md#v1180-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.11.0](service/servicediscovery/CHANGELOG.md#v1110-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.6.0](service/ssmincidents/CHANGELOG.md#v160-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sso`: [v1.6.0](service/sso/CHANGELOG.md#v160-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.8.0](service/storagegateway/CHANGELOG.md#v180-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.7.0](service/support/CHANGELOG.md#v170-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.8.0](service/textract/CHANGELOG.md#v180-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.10.0](service/transcribe/CHANGELOG.md#v1100-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/transcribestreaming`: [v1.0.0](service/transcribestreaming/CHANGELOG.md#v100-2021-11-06)
- * **Release**: New AWS service client module
- * **Feature**: Support has been added for the StartStreamTranscription and StartMedicalStreamTranscription APIs.
-* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.6.0](service/waf/CHANGELOG.md#v160-2021-11-06)
- * **Feature**: Updated service to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.2.0](service/wisdom/CHANGELOG.md#v120-2021-11-06)
- * **Feature**: Updated service to latest API model.
-
-# Release (2021-10-21)
-
-## General Highlights
-* **Feature**: Updated to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.10.0
- * **Feature**: Adds dynamic signing middleware that switches to unsigned payload when TLS is enabled.
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.8.0](service/appflow/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.8.0](service/applicationautoscaling/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.13.0](service/autoscaling/CHANGELOG.md#v1130-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.3.0](service/chimesdkmessaging/CHANGELOG.md#v130-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.11.0](service/cloudformation/CHANGELOG.md#v1110-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.7.0](service/cloudsearch/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.7.0](service/cloudtrail/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.9.0](service/cloudwatch/CHANGELOG.md#v190-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.7.0](service/cloudwatchevents/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.8.0](service/cloudwatchlogs/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codedeploy`: [v1.7.0](service/codedeploy/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.10.0](service/configservice/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.7.0](service/dataexchange/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.9.0](service/directconnect/CHANGELOG.md#v190-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.10.0](service/docdb/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.6.0](service/dynamodb/CHANGELOG.md#v160-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.20.0](service/ec2/CHANGELOG.md#v1200-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.8.0](service/ecr/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.10.0](service/ecs/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.9.0](service/efs/CHANGELOG.md#v190-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.12.0](service/elasticache/CHANGELOG.md#v1120-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.7.0](service/elasticloadbalancing/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.10.0](service/elasticloadbalancingv2/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.10.0](service/emr/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.8.0](service/eventbridge/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glacier`: [v1.6.0](service/glacier/CHANGELOG.md#v160-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.13.0](service/glue/CHANGELOG.md#v1130-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.8.0](service/ivs/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.13.0](service/kendra/CHANGELOG.md#v1130-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.7.0](service/kinesis/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.7.0](service/kinesisanalyticsv2/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.8.0](service/kms/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.10.0](service/lambda/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.13.0](service/mediaconvert/CHANGELOG.md#v1130-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.9.0](service/mediapackage/CHANGELOG.md#v190-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.10.0](service/mediapackagevod/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.9.0](service/mediatailor/CHANGELOG.md#v190-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.9.0](service/neptune/CHANGELOG.md#v190-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/panorama`: [v1.0.0](service/panorama/CHANGELOG.md#v100-2021-10-21)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.13.0](service/quicksight/CHANGELOG.md#v1130-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.10.0](service/rds/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.12.0](service/redshift/CHANGELOG.md#v1120-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.10.0](service/robomaker/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.17.0](service/s3/CHANGELOG.md#v1170-2021-10-21)
- * **Feature**: Updates S3 streaming operations - PutObject, UploadPart, WriteGetObjectResponse to use unsigned payload signing auth when TLS is enabled.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.17.0](service/sagemaker/CHANGELOG.md#v1170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.12.0](service/securityhub/CHANGELOG.md#v1120-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sfn`: [v1.6.0](service/sfn/CHANGELOG.md#v160-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.9.0](service/sns/CHANGELOG.md#v190-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.10.0](service/sqs/CHANGELOG.md#v1100-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.7.0](service/storagegateway/CHANGELOG.md#v170-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.8.0](service/sts/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/swf`: [v1.6.0](service/swf/CHANGELOG.md#v160-2021-10-21)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.8.0](service/workmail/CHANGELOG.md#v180-2021-10-21)
- * **Feature**: API client updated
-
-# Release (2021-10-11)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.6.0](feature/ec2/imds/CHANGELOG.md#v160-2021-10-11)
-  * **Feature**: Respect passed in Context Deadline/Timeout. Updates the IMDS Client operations to not override the passed in Context's Deadline or Timeout options. If a Client operation is called with a Context with a Deadline or Timeout, the client will no longer override it with the client's default timeout.
- * **Bug Fix**: Fix IMDS client's response handling and operation timeout race. Fixes #1253
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.5.0](service/amplifybackend/CHANGELOG.md#v150-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.7.0](service/applicationautoscaling/CHANGELOG.md#v170-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.3.0](service/apprunner/CHANGELOG.md#v130-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.6.0](service/backup/CHANGELOG.md#v160-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.11.0](service/chime/CHANGELOG.md#v1110-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.11.0](service/codebuild/CHANGELOG.md#v1110-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.10.0](service/databrew/CHANGELOG.md#v1100-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.19.0](service/ec2/CHANGELOG.md#v1190-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.8.0](service/efs/CHANGELOG.md#v180-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.9.0](service/elasticloadbalancingv2/CHANGELOG.md#v190-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.7.0](service/firehose/CHANGELOG.md#v170-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.10.0](service/frauddetector/CHANGELOG.md#v1100-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.10.0](service/fsx/CHANGELOG.md#v1100-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.12.0](service/glue/CHANGELOG.md#v1120-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/grafana`: [v1.0.0](service/grafana/CHANGELOG.md#v100-2021-10-11)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.8.0](service/iotevents/CHANGELOG.md#v180-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.12.0](service/kendra/CHANGELOG.md#v1120-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.7.0](service/kms/CHANGELOG.md#v170-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.9.0](service/lexmodelsv2/CHANGELOG.md#v190-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.6.0](service/lexruntimev2/CHANGELOG.md#v160-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.6.0](service/location/CHANGELOG.md#v160-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.12.0](service/mediaconvert/CHANGELOG.md#v1120-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.10.0](service/medialive/CHANGELOG.md#v1100-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.16.0](service/sagemaker/CHANGELOG.md#v1160-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.7.0](service/secretsmanager/CHANGELOG.md#v170-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.11.0](service/securityhub/CHANGELOG.md#v1110-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.12.0](service/ssm/CHANGELOG.md#v1120-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.6.0](service/ssooidc/CHANGELOG.md#v160-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.7.0](service/synthetics/CHANGELOG.md#v170-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.6.0](service/textract/CHANGELOG.md#v160-2021-10-11)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.7.0](service/workmail/CHANGELOG.md#v170-2021-10-11)
- * **Feature**: API client updated
-
-# Release (2021-09-30)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/account`: [v1.0.0](service/account/CHANGELOG.md#v100-2021-09-30)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.6.0](service/amp/CHANGELOG.md#v160-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.7.0](service/appintegrations/CHANGELOG.md#v170-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudcontrol`: [v1.0.0](service/cloudcontrol/CHANGELOG.md#v100-2021-09-30)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudhsmv2`: [v1.5.0](service/cloudhsmv2/CHANGELOG.md#v150-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.8.0](service/connect/CHANGELOG.md#v180-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dataexchange`: [v1.6.0](service/dataexchange/CHANGELOG.md#v160-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.8.0](service/elasticloadbalancingv2/CHANGELOG.md#v180-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.11.0](service/imagebuilder/CHANGELOG.md#v1110-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.9.0](service/lambda/CHANGELOG.md#v190-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.11.0](service/macie2/CHANGELOG.md#v1110-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.7.0](service/networkfirewall/CHANGELOG.md#v170-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.8.0](service/pinpoint/CHANGELOG.md#v180-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sesv2`: [v1.6.0](service/sesv2/CHANGELOG.md#v160-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.8.0](service/transfer/CHANGELOG.md#v180-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/voiceid`: [v1.0.0](service/voiceid/CHANGELOG.md#v100-2021-09-30)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wisdom`: [v1.0.0](service/wisdom/CHANGELOG.md#v100-2021-09-30)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workmail`: [v1.6.0](service/workmail/CHANGELOG.md#v160-2021-09-30)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.7.0](service/workspaces/CHANGELOG.md#v170-2021-09-30)
- * **Feature**: API client updated
-
-# Release (2021-09-24)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.2.4](feature/dynamodb/expression/CHANGELOG.md#v124-2021-09-24)
- * **Documentation**: Fixes typo in NameBuilder.NamesList example documentation to use the correct variable name.
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.6.0](service/appmesh/CHANGELOG.md#v160-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.7.0](service/appsync/CHANGELOG.md#v170-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.9.0](service/auditmanager/CHANGELOG.md#v190-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.5.0](service/codecommit/CHANGELOG.md#v150-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.8.0](service/comprehend/CHANGELOG.md#v180-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.8.0](service/databasemigrationservice/CHANGELOG.md#v180-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.18.0](service/ec2/CHANGELOG.md#v1180-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.7.0](service/ecr/CHANGELOG.md#v170-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.7.0](service/elasticsearchservice/CHANGELOG.md#v170-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.10.0](service/iam/CHANGELOG.md#v1100-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.6.0](service/identitystore/CHANGELOG.md#v160-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.10.0](service/imagebuilder/CHANGELOG.md#v1100-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.13.0](service/iot/CHANGELOG.md#v1130-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.7.0](service/iotevents/CHANGELOG.md#v170-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.1.0](service/kafkaconnect/CHANGELOG.md#v110-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.6.0](service/lakeformation/CHANGELOG.md#v160-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.8.0](service/lexmodelsv2/CHANGELOG.md#v180-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.5.0](service/lexruntimev2/CHANGELOG.md#v150-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.8.0](service/licensemanager/CHANGELOG.md#v180-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.11.0](service/mediaconvert/CHANGELOG.md#v1110-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.9.0](service/mediapackagevod/CHANGELOG.md#v190-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.8.0](service/mediatailor/CHANGELOG.md#v180-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.1.0](service/opensearch/CHANGELOG.md#v110-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.12.0](service/quicksight/CHANGELOG.md#v1120-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.11.0](service/ssm/CHANGELOG.md#v1110-2021-09-24)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.10.0](service/wafv2/CHANGELOG.md#v1100-2021-09-24)
- * **Feature**: API client updated
-
-# Release (2021-09-17)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.10.0](service/chime/CHANGELOG.md#v1100-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.10.1](service/cloudformation/CHANGELOG.md#v1101-2021-09-17)
- * **Documentation**: Updated API client documentation.
-* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.7.0](service/comprehend/CHANGELOG.md#v170-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.17.0](service/ec2/CHANGELOG.md#v1170-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/ecr`: [v1.6.0](service/ecr/CHANGELOG.md#v160-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.12.0](service/iot/CHANGELOG.md#v1120-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/kafkaconnect`: [v1.0.0](service/kafkaconnect/CHANGELOG.md#v100-2021-09-17)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.7.0](service/lexmodelsv2/CHANGELOG.md#v170-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.4.0](service/lexruntimev2/CHANGELOG.md#v140-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.10.0](service/macie2/CHANGELOG.md#v1100-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.8.0](service/mediapackagevod/CHANGELOG.md#v180-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.6.0](service/networkfirewall/CHANGELOG.md#v160-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/pinpoint`: [v1.7.0](service/pinpoint/CHANGELOG.md#v170-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.11.0](service/quicksight/CHANGELOG.md#v1110-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.9.0](service/rds/CHANGELOG.md#v190-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.9.0](service/robomaker/CHANGELOG.md#v190-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.16.0](service/s3/CHANGELOG.md#v1160-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.15.0](service/sagemaker/CHANGELOG.md#v1150-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.5.0](service/ssooidc/CHANGELOG.md#v150-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.8.0](service/transcribe/CHANGELOG.md#v180-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.9.0](service/wafv2/CHANGELOG.md#v190-2021-09-17)
- * **Feature**: Updated API client and endpoints to latest revision.
-
-# Release (2021-09-10)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.4.1](credentials/CHANGELOG.md#v141-2021-09-10)
- * **Documentation**: Fixes the AssumeRoleProvider's documentation for using custom TokenProviders.
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.5.0](service/amp/CHANGELOG.md#v150-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.7.0](service/braket/CHANGELOG.md#v170-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.2.0](service/chimesdkidentity/CHANGELOG.md#v120-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.2.0](service/chimesdkmessaging/CHANGELOG.md#v120-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.7.0](service/codegurureviewer/CHANGELOG.md#v170-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.10.0](service/eks/CHANGELOG.md#v1100-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.11.0](service/elasticache/CHANGELOG.md#v1110-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.9.0](service/emr/CHANGELOG.md#v190-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.10.0](service/forecast/CHANGELOG.md#v1100-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.9.0](service/frauddetector/CHANGELOG.md#v190-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kafka`: [v1.7.0](service/kafka/CHANGELOG.md#v170-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.4.0](service/lookoutequipment/CHANGELOG.md#v140-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.8.0](service/mediapackage/CHANGELOG.md#v180-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/opensearch`: [v1.0.0](service/opensearch/CHANGELOG.md#v100-2021-09-10)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.8.0](service/outposts/CHANGELOG.md#v180-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.7.0](service/ram/CHANGELOG.md#v170-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.14.0](service/sagemaker/CHANGELOG.md#v1140-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.9.0](service/servicediscovery/CHANGELOG.md#v190-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.5.0](service/ssmcontacts/CHANGELOG.md#v150-2021-09-10)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/xray`: [v1.6.0](service/xray/CHANGELOG.md#v160-2021-09-10)
- * **Feature**: API client updated
-
-# Release (2021-09-02)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.8.0](config/CHANGELOG.md#v180-2021-09-02)
- * **Feature**: Add support for S3 Multi-Region Access Point ARNs.
-* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.7.0](service/accessanalyzer/CHANGELOG.md#v170-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.8.0](service/acmpca/CHANGELOG.md#v180-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.8.0](service/cloud9/CHANGELOG.md#v180-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.10.0](service/cloudformation/CHANGELOG.md#v1100-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.6.0](service/cloudtrail/CHANGELOG.md#v160-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.10.0](service/codebuild/CHANGELOG.md#v1100-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.9.0](service/computeoptimizer/CHANGELOG.md#v190-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.9.0](service/configservice/CHANGELOG.md#v190-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.7.0](service/ebs/CHANGELOG.md#v170-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.16.0](service/ec2/CHANGELOG.md#v1160-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.7.0](service/efs/CHANGELOG.md#v170-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.8.0](service/emr/CHANGELOG.md#v180-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.6.0](service/firehose/CHANGELOG.md#v160-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.8.0](service/frauddetector/CHANGELOG.md#v180-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.9.0](service/fsx/CHANGELOG.md#v190-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.7.0](service/internal/s3shared/CHANGELOG.md#v170-2021-09-02)
- * **Feature**: Add support for S3 Multi-Region Access Point ARNs.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.11.0](service/iot/CHANGELOG.md#v1110-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotjobsdataplane`: [v1.5.0](service/iotjobsdataplane/CHANGELOG.md#v150-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.7.0](service/ivs/CHANGELOG.md#v170-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.6.0](service/kms/CHANGELOG.md#v160-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.9.0](service/lexmodelbuildingservice/CHANGELOG.md#v190-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.7.0](service/mediatailor/CHANGELOG.md#v170-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.2.0](service/memorydb/CHANGELOG.md#v120-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.5.0](service/mwaa/CHANGELOG.md#v150-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.6.0](service/polly/CHANGELOG.md#v160-2021-09-02)
- * **Feature**: API client updated
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.10.0](service/quicksight/CHANGELOG.md#v1100-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.15.0](service/s3/CHANGELOG.md#v1150-2021-09-02)
- * **Feature**: API client updated
- * **Feature**: Add support for S3 Multi-Region Access Point ARNs.
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.11.0](service/s3control/CHANGELOG.md#v1110-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.7.0](service/sagemakerruntime/CHANGELOG.md#v170-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/schemas`: [v1.6.0](service/schemas/CHANGELOG.md#v160-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.10.0](service/securityhub/CHANGELOG.md#v1100-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.5.0](service/servicecatalogappregistry/CHANGELOG.md#v150-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.9.0](service/sqs/CHANGELOG.md#v190-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.4.0](service/ssmincidents/CHANGELOG.md#v140-2021-09-02)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.7.0](service/transfer/CHANGELOG.md#v170-2021-09-02)
- * **Feature**: API client updated
-
-# Release (2021-08-27)
-
-## General Highlights
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.4.0](credentials/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Adds support for Tags and TransitiveTagKeys to stscreds.AssumeRoleProvider. Closes https://github.com/aws/aws-sdk-go-v2/issues/723
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue`: [v1.2.0](feature/dynamodb/attributevalue/CHANGELOG.md#v120-2021-08-27)
- * **Bug Fix**: Fix unmarshaler's decoding of AttributeValueMemberN into a type that is a string alias.
-* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.7.0](service/acmpca/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.5.0](service/amplify/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.4.0](service/amplifybackend/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.7.0](service/apigateway/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/apigatewaymanagementapi`: [v1.4.0](service/apigatewaymanagementapi/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.7.0](service/appflow/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/applicationinsights`: [v1.4.0](service/applicationinsights/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.2.0](service/apprunner/CHANGELOG.md#v120-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/appstream`: [v1.6.0](service/appstream/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.6.0](service/appsync/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.6.0](service/athena/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.8.0](service/auditmanager/CHANGELOG.md#v180-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/autoscalingplans`: [v1.5.0](service/autoscalingplans/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/backup`: [v1.5.0](service/backup/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.7.0](service/batch/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.6.0](service/braket/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.1.0](service/chimesdkidentity/CHANGELOG.md#v110-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.1.0](service/chimesdkmessaging/CHANGELOG.md#v110-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.5.0](service/cloudtrail/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.6.0](service/cloudwatchevents/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.5.0](service/codeartifact/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.9.0](service/codebuild/CHANGELOG.md#v190-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/codecommit`: [v1.4.0](service/codecommit/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.5.0](service/codeguruprofiler/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/codestarnotifications`: [v1.4.0](service/codestarnotifications/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.5.0](service/cognitoidentity/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.6.0](service/cognitoidentityprovider/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/comprehend`: [v1.6.0](service/comprehend/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.8.0](service/computeoptimizer/CHANGELOG.md#v180-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/connectcontactlens`: [v1.5.0](service/connectcontactlens/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.9.0](service/customerprofiles/CHANGELOG.md#v190-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.7.0](service/databasemigrationservice/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.6.0](service/datasync/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.4.0](service/dax/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.5.0](service/directoryservice/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/dlm`: [v1.5.0](service/dlm/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/dynamodbstreams`: [v1.4.0](service/dynamodbstreams/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.15.0](service/ec2/CHANGELOG.md#v1150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/ecrpublic`: [v1.5.0](service/ecrpublic/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.6.0](service/efs/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.9.0](service/eks/CHANGELOG.md#v190-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.6.0](service/emrcontainers/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.7.0](service/eventbridge/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.2.0](service/finspace/CHANGELOG.md#v120-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.2.0](service/finspacedata/CHANGELOG.md#v120-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/firehose`: [v1.5.0](service/firehose/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.7.0](service/fms/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.9.0](service/forecast/CHANGELOG.md#v190-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/forecastquery`: [v1.4.0](service/forecastquery/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.7.0](service/frauddetector/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.8.0](service/fsx/CHANGELOG.md#v180-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/gamelift`: [v1.6.0](service/gamelift/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.11.0](service/glue/CHANGELOG.md#v1110-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.6.0](service/groundstation/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/guardduty`: [v1.5.0](service/guardduty/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.7.0](service/health/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.6.0](service/healthlake/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.10.0](service/iot/CHANGELOG.md#v1100-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/iot1clickdevicesservice`: [v1.4.0](service/iot1clickdevicesservice/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.5.0](service/iotanalytics/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/iotdataplane`: [v1.4.0](service/iotdataplane/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/iotfleethub`: [v1.5.0](service/iotfleethub/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.11.0](service/iotsitewise/CHANGELOG.md#v1110-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/ivs`: [v1.6.0](service/ivs/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.5.0](service/lakeformation/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.6.0](service/lexmodelsv2/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.3.0](service/lexruntimev2/CHANGELOG.md#v130-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.7.0](service/licensemanager/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.10.0](service/lightsail/CHANGELOG.md#v1100-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutequipment`: [v1.3.0](service/lookoutequipment/CHANGELOG.md#v130-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.3.0](service/lookoutmetrics/CHANGELOG.md#v130-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.9.0](service/macie2/CHANGELOG.md#v190-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.10.0](service/mediaconvert/CHANGELOG.md#v1100-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/mediapackage`: [v1.7.0](service/mediapackage/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.7.0](service/mediapackagevod/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.5.0](service/mq/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/networkfirewall`: [v1.5.0](service/networkfirewall/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.7.0](service/outposts/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.6.0](service/pi/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/pinpointsmsvoice`: [v1.4.0](service/pinpointsmsvoice/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.5.0](service/polly/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.6.0](service/qldb/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/qldbsession`: [v1.5.0](service/qldbsession/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.6.0](service/ram/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.8.0](service/rekognition/CHANGELOG.md#v180-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi`: [v1.5.0](service/resourcegroupstaggingapi/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.8.0](service/robomaker/CHANGELOG.md#v180-2021-08-27)
- * **Bug Fix**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.1.0](service/route53recoverycontrolconfig/CHANGELOG.md#v110-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.7.0](service/route53resolver/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.14.0](service/s3/CHANGELOG.md#v1140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.10.0](service/s3control/CHANGELOG.md#v1100-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.5.0](service/s3outposts/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalog`: [v1.5.0](service/servicecatalog/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/servicecatalogappregistry`: [v1.4.0](service/servicecatalogappregistry/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/signer`: [v1.5.0](service/signer/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/ssooidc`: [v1.4.0](service/ssooidc/CHANGELOG.md#v140-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.6.0](service/storagegateway/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.6.0](service/synthetics/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.5.0](service/textract/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.7.0](service/transcribe/CHANGELOG.md#v170-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.6.0](service/transfer/CHANGELOG.md#v160-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/wafregional`: [v1.5.0](service/wafregional/CHANGELOG.md#v150-2021-08-27)
- * **Feature**: Updated API model to latest revision.
-
-# Release (2021-08-19)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/apigateway`: [v1.6.0](service/apigateway/CHANGELOG.md#v160-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/apigatewayv2`: [v1.5.0](service/apigatewayv2/CHANGELOG.md#v150-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.6.0](service/appflow/CHANGELOG.md#v160-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/applicationautoscaling`: [v1.5.0](service/applicationautoscaling/CHANGELOG.md#v150-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.6.0](service/cloud9/CHANGELOG.md#v160-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/clouddirectory`: [v1.4.0](service/clouddirectory/CHANGELOG.md#v140-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.6.0](service/cloudwatchlogs/CHANGELOG.md#v160-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.8.0](service/codebuild/CHANGELOG.md#v180-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.7.0](service/configservice/CHANGELOG.md#v170-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.8.0](service/costexplorer/CHANGELOG.md#v180-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/customerprofiles`: [v1.8.0](service/customerprofiles/CHANGELOG.md#v180-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.8.0](service/databrew/CHANGELOG.md#v180-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/directoryservice`: [v1.4.0](service/directoryservice/CHANGELOG.md#v140-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.14.0](service/ec2/CHANGELOG.md#v1140-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.9.0](service/elasticache/CHANGELOG.md#v190-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.6.0](service/emr/CHANGELOG.md#v160-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.10.0](service/iotsitewise/CHANGELOG.md#v1100-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.7.0](service/lambda/CHANGELOG.md#v170-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.6.0](service/licensemanager/CHANGELOG.md#v160-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/memorydb`: [v1.0.0](service/memorydb/CHANGELOG.md#v100-2021-08-19)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.8.0](service/quicksight/CHANGELOG.md#v180-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.10.0](service/route53/CHANGELOG.md#v1100-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.6.0](service/route53resolver/CHANGELOG.md#v160-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.13.0](service/s3/CHANGELOG.md#v1130-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.12.0](service/sagemaker/CHANGELOG.md#v1120-2021-08-19)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerruntime`: [v1.5.0](service/sagemakerruntime/CHANGELOG.md#v150-2021-08-19)
- * **Feature**: API client updated
-
-# Release (2021-08-12)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.3.1](feature/cloudfront/sign/CHANGELOG.md#v131-2021-08-12)
- * **Bug Fix**: Update to not escape HTML when encoding the policy.
-* `github.com/aws/aws-sdk-go-v2/service/athena`: [v1.5.0](service/athena/CHANGELOG.md#v150-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.11.0](service/autoscaling/CHANGELOG.md#v1110-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.8.0](service/chime/CHANGELOG.md#v180-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkidentity`: [v1.0.0](service/chimesdkidentity/CHANGELOG.md#v100-2021-08-12)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chimesdkmessaging`: [v1.0.0](service/chimesdkmessaging/CHANGELOG.md#v100-2021-08-12)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.7.0](service/codebuild/CHANGELOG.md#v170-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.6.0](service/connect/CHANGELOG.md#v160-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ebs`: [v1.5.0](service/ebs/CHANGELOG.md#v150-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.8.0](service/ecs/CHANGELOG.md#v180-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.5.0](service/lexmodelsv2/CHANGELOG.md#v150-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.9.0](service/lightsail/CHANGELOG.md#v190-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/nimble`: [v1.3.0](service/nimble/CHANGELOG.md#v130-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.7.0](service/rekognition/CHANGELOG.md#v170-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.9.0](service/route53/CHANGELOG.md#v190-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/snowdevicemanagement`: [v1.0.0](service/snowdevicemanagement/CHANGELOG.md#v100-2021-08-12)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.9.0](service/ssm/CHANGELOG.md#v190-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.5.0](service/synthetics/CHANGELOG.md#v150-2021-08-12)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.7.0](service/wafv2/CHANGELOG.md#v170-2021-08-12)
- * **Feature**: API client updated
-
-# Release (2021-08-04)
-
-## General Highlights
-* **Feature**: adds error handling for defered close calls
-* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.8.0
- * **Bug Fix**: Corrected an issue where the retryer was not using the last attempt's ResultMetadata as the bases for the return result from the stack. ([#1345](https://github.com/aws/aws-sdk-go-v2/pull/1345))
-* `github.com/aws/aws-sdk-go-v2/feature/dynamodb/expression`: [v1.2.0](feature/dynamodb/expression/CHANGELOG.md#v120-2021-08-04)
- * **Feature**: Add IsSet helper for ConditionBuilder and KeyConditionBuilder ([#1329](https://github.com/aws/aws-sdk-go-v2/pull/1329))
-* `github.com/aws/aws-sdk-go-v2/service/accessanalyzer`: [v1.5.2](service/accessanalyzer/CHANGELOG.md#v152-2021-08-04)
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.3.1](service/amp/CHANGELOG.md#v131-2021-08-04)
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/appintegrations`: [v1.5.0](service/appintegrations/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.4.2](service/appmesh/CHANGELOG.md#v142-2021-08-04)
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/appsync`: [v1.5.0](service/appsync/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/auditmanager`: [v1.7.0](service/auditmanager/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/batch`: [v1.6.0](service/batch/CHANGELOG.md#v160-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.5.2](service/braket/CHANGELOG.md#v152-2021-08-04)
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.7.0](service/chime/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.8.0](service/cloudformation/CHANGELOG.md#v180-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.7.0](service/cloudwatch/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.6.0](service/codebuild/CHANGELOG.md#v160-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/codeguruprofiler`: [v1.4.2](service/codeguruprofiler/CHANGELOG.md#v142-2021-08-04)
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.5.0](service/cognitoidentityprovider/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.7.0](service/computeoptimizer/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.7.0](service/databrew/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.7.0](service/directconnect/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.13.0](service/ec2/CHANGELOG.md#v1130-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.7.0](service/ecs/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.6.0](service/elasticloadbalancingv2/CHANGELOG.md#v160-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/emr`: [v1.5.0](service/emr/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/emrcontainers`: [v1.5.0](service/emrcontainers/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.6.0](service/eventbridge/CHANGELOG.md#v160-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.10.0](service/glue/CHANGELOG.md#v1100-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.5.0](service/greengrassv2/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/groundstation`: [v1.5.2](service/groundstation/CHANGELOG.md#v152-2021-08-04)
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.8.0](service/iam/CHANGELOG.md#v180-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/identitystore`: [v1.4.0](service/identitystore/CHANGELOG.md#v140-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.8.0](service/imagebuilder/CHANGELOG.md#v180-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.9.0](service/iot/CHANGELOG.md#v190-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.4.0](service/iotanalytics/CHANGELOG.md#v140-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.9.0](service/iotsitewise/CHANGELOG.md#v190-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.8.0](service/iotwireless/CHANGELOG.md#v180-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.10.0](service/kendra/CHANGELOG.md#v1100-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.6.0](service/lambda/CHANGELOG.md#v160-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.7.0](service/lexmodelbuildingservice/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.4.0](service/lexmodelsv2/CHANGELOG.md#v140-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.4.0](service/location/CHANGELOG.md#v140-2021-08-04)
- * **Feature**: Updated to latest API model.
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.9.0](service/mediaconvert/CHANGELOG.md#v190-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.8.0](service/medialive/CHANGELOG.md#v180-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.3.1](service/mgn/CHANGELOG.md#v131-2021-08-04)
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.7.0](service/personalize/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.2.0](service/proton/CHANGELOG.md#v120-2021-08-04)
- * **Feature**: Updated to latest API model.
- * **Bug Fix**: Fixed an issue that caused one or more API operations to fail when attempting to resolve the service endpoint. ([#1349](https://github.com/aws/aws-sdk-go-v2/pull/1349))
-* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.5.0](service/qldb/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.7.0](service/quicksight/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.7.0](service/rds/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.10.0](service/redshift/CHANGELOG.md#v1100-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.5.0](service/redshiftdata/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/robomaker`: [v1.7.0](service/robomaker/CHANGELOG.md#v170-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.8.0](service/route53/CHANGELOG.md#v180-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycluster`: [v1.0.0](service/route53recoverycluster/CHANGELOG.md#v100-2021-08-04)
- * **Release**: New AWS service client module
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53recoverycontrolconfig`: [v1.0.0](service/route53recoverycontrolconfig/CHANGELOG.md#v100-2021-08-04)
- * **Release**: New AWS service client module
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53recoveryreadiness`: [v1.0.0](service/route53recoveryreadiness/CHANGELOG.md#v100-2021-08-04)
- * **Release**: New AWS service client module
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.12.0](service/s3/CHANGELOG.md#v1120-2021-08-04)
- * **Feature**: Add `HeadObject` presign support. ([#1346](https://github.com/aws/aws-sdk-go-v2/pull/1346))
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.9.0](service/s3control/CHANGELOG.md#v190-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3outposts`: [v1.4.0](service/s3outposts/CHANGELOG.md#v140-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.11.0](service/sagemaker/CHANGELOG.md#v1110-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/secretsmanager`: [v1.5.0](service/secretsmanager/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.8.0](service/securityhub/CHANGELOG.md#v180-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/shield`: [v1.6.0](service/shield/CHANGELOG.md#v160-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.3.0](service/ssmcontacts/CHANGELOG.md#v130-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.2.0](service/ssmincidents/CHANGELOG.md#v120-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssoadmin`: [v1.5.0](service/ssoadmin/CHANGELOG.md#v150-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/synthetics`: [v1.4.0](service/synthetics/CHANGELOG.md#v140-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/textract`: [v1.4.0](service/textract/CHANGELOG.md#v140-2021-08-04)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.6.0](service/transcribe/CHANGELOG.md#v160-2021-08-04)
- * **Feature**: Updated to latest API model.
-
-# Release (2021-07-15)
-
-## General Highlights
-* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.5.0](config/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
-* `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`: [v1.3.0](feature/ec2/imds/CHANGELOG.md#v130-2021-07-15)
- * **Feature**: Support has been added for EC2 IPv6-enabled Instance Metadata Service Endpoints.
-* `github.com/aws/aws-sdk-go-v2/service/acm`: [v1.5.0](service/acm/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.3.0](service/amp/CHANGELOG.md#v130-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.4.0](service/amplify/CHANGELOG.md#v140-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.3.0](service/amplifybackend/CHANGELOG.md#v130-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.10.0](service/autoscaling/CHANGELOG.md#v1100-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.6.0](service/chime/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.7.0](service/cloudformation/CHANGELOG.md#v170-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.7.0](service/cloudfront/CHANGELOG.md#v170-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.5.0](service/cloudsearch/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.6.0](service/cloudwatch/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/databasemigrationservice`: [v1.6.0](service/databasemigrationservice/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/devopsguru`: [v1.6.0](service/devopsguru/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/directconnect`: [v1.6.0](service/directconnect/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.8.0](service/docdb/CHANGELOG.md#v180-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.12.0](service/ec2/CHANGELOG.md#v1120-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.8.0](service/eks/CHANGELOG.md#v180-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.8.0](service/elasticache/CHANGELOG.md#v180-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.5.0](service/elasticbeanstalk/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.5.0](service/elasticloadbalancing/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.5.0](service/elasticloadbalancingv2/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/fms`: [v1.6.0](service/fms/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/frauddetector`: [v1.6.0](service/frauddetector/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.9.0](service/glue/CHANGELOG.md#v190-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/health`: [v1.6.0](service/health/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/healthlake`: [v1.5.0](service/healthlake/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.7.0](service/iam/CHANGELOG.md#v170-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.7.0](service/imagebuilder/CHANGELOG.md#v170-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.8.0](service/iot/CHANGELOG.md#v180-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.8.0](service/iotsitewise/CHANGELOG.md#v180-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.9.0](service/kendra/CHANGELOG.md#v190-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/lambda`: [v1.5.0](service/lambda/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelbuildingservice`: [v1.6.0](service/lexmodelbuildingservice/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.8.0](service/lightsail/CHANGELOG.md#v180-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.5.1](service/macie/CHANGELOG.md#v151-2021-07-15)
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.8.1](service/macie2/CHANGELOG.md#v181-2021-07-15)
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.8.0](service/mediaconvert/CHANGELOG.md#v180-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.5.0](service/mediatailor/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/mgn`: [v1.3.0](service/mgn/CHANGELOG.md#v130-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/mq`: [v1.4.0](service/mq/CHANGELOG.md#v140-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.7.0](service/neptune/CHANGELOG.md#v170-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.6.0](service/outposts/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/pricing`: [v1.5.1](service/pricing/CHANGELOG.md#v151-2021-07-15)
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.6.0](service/rds/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.9.0](service/redshift/CHANGELOG.md#v190-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.10.0](service/sagemaker/CHANGELOG.md#v1100-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.5.0](service/ses/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.7.0](service/sns/CHANGELOG.md#v170-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.7.0](service/sqs/CHANGELOG.md#v170-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.8.0](service/ssm/CHANGELOG.md#v180-2021-07-15)
- * **Feature**: Updated service model to latest version.
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/storagegateway`: [v1.5.0](service/storagegateway/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: Updated service model to latest version.
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.6.0](service/sts/CHANGELOG.md#v160-2021-07-15)
- * **Feature**: The ErrorCode method on generated service error types has been corrected to match the API model.
- * **Documentation**: Updated service model to latest revision.
-* `github.com/aws/aws-sdk-go-v2/service/wellarchitected`: [v1.5.0](service/wellarchitected/CHANGELOG.md#v150-2021-07-15)
- * **Feature**: Updated service model to latest version.
-
-# Release (2021-07-01)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.1.0](internal/ini/CHANGELOG.md#v110-2021-07-01)
- * **Feature**: Support for `:`, `=`, `[`, `]` being present in expression values.
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.9.0](service/autoscaling/CHANGELOG.md#v190-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/databrew`: [v1.6.0](service/databrew/CHANGELOG.md#v160-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.11.0](service/ec2/CHANGELOG.md#v1110-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.8.0](service/glue/CHANGELOG.md#v180-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.8.0](service/kendra/CHANGELOG.md#v180-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.7.0](service/mediaconvert/CHANGELOG.md#v170-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediapackagevod`: [v1.6.0](service/mediapackagevod/CHANGELOG.md#v160-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.8.0](service/redshift/CHANGELOG.md#v180-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.9.0](service/sagemaker/CHANGELOG.md#v190-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.7.0](service/servicediscovery/CHANGELOG.md#v170-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.6.0](service/sqs/CHANGELOG.md#v160-2021-07-01)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.2.0](service/ssmcontacts/CHANGELOG.md#v120-2021-07-01)
- * **Feature**: API client updated
-
-# Release (2021-06-25)
-
-## General Highlights
-* **Feature**: Updated `github.com/aws/smithy-go` to latest version
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.7.0
- * **Feature**: Adds configuration values for enabling endpoint discovery.
- * **Bug Fix**: Keep Object-Lock headers a header when presigning Sigv4 signing requests
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.4.0](config/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: Adds configuration setting for enabling endpoint discovery.
-* `github.com/aws/aws-sdk-go-v2/credentials`: [v1.3.0](credentials/CHANGELOG.md#v130-2021-06-25)
- * **Bug Fix**: Fixed example usages of aws.CredentialsCache ([#1275](https://github.com/aws/aws-sdk-go-v2/pull/1275))
-* `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`: [v1.2.0](feature/cloudfront/sign/CHANGELOG.md#v120-2021-06-25)
- * **Feature**: Add UnmarshalJSON for AWSEpochTime to correctly unmarshal AWSEpochTime, ([#1298](https://github.com/aws/aws-sdk-go-v2/pull/1298))
-* `github.com/aws/aws-sdk-go-v2/internal/configsources`: [v1.0.0](internal/configsources/CHANGELOG.md#v100-2021-06-25)
- * **Release**: Release new modules
-* `github.com/aws/aws-sdk-go-v2/service/amp`: [v1.2.0](service/amp/CHANGELOG.md#v120-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/amplify`: [v1.3.0](service/amplify/CHANGELOG.md#v130-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/amplifybackend`: [v1.2.0](service/amplifybackend/CHANGELOG.md#v120-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appflow`: [v1.5.0](service/appflow/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/appmesh`: [v1.4.0](service/appmesh/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/chime`: [v1.5.0](service/chime/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloud9`: [v1.5.0](service/cloud9/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudformation`: [v1.6.0](service/cloudformation/CHANGELOG.md#v160-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.6.0](service/cloudfront/CHANGELOG.md#v160-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudsearch`: [v1.4.0](service/cloudsearch/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatch`: [v1.5.0](service/cloudwatch/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchevents`: [v1.5.0](service/cloudwatchevents/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codebuild`: [v1.5.0](service/codebuild/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/codegurureviewer`: [v1.5.0](service/codegurureviewer/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentity`: [v1.4.0](service/cognitoidentity/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.4.0](service/cognitoidentityprovider/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.5.0](service/connect/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dax`: [v1.3.0](service/dax/CHANGELOG.md#v130-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.7.0](service/docdb/CHANGELOG.md#v170-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/dynamodb`: [v1.4.0](service/dynamodb/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: Adds support for endpoint discovery.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.10.0](service/ec2/CHANGELOG.md#v1100-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.7.0](service/elasticache/CHANGELOG.md#v170-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticbeanstalk`: [v1.4.0](service/elasticbeanstalk/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancing`: [v1.4.0](service/elasticloadbalancing/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2`: [v1.4.0](service/elasticloadbalancingv2/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eventbridge`: [v1.5.0](service/eventbridge/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/greengrass`: [v1.5.0](service/greengrass/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/greengrassv2`: [v1.4.0](service/greengrassv2/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.6.0](service/iam/CHANGELOG.md#v160-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery`: [v1.0.0](service/internal/endpoint-discovery/CHANGELOG.md#v100-2021-06-25)
- * **Release**: Release new modules
- * **Feature**: Module supporting endpoint-discovery across all service clients.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.7.0](service/iot/CHANGELOG.md#v170-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotanalytics`: [v1.3.0](service/iotanalytics/CHANGELOG.md#v130-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.7.0](service/kendra/CHANGELOG.md#v170-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kms`: [v1.4.0](service/kms/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.3.0](service/lexmodelsv2/CHANGELOG.md#v130-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexruntimev2`: [v1.2.0](service/lexruntimev2/CHANGELOG.md#v120-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.5.0](service/licensemanager/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.2.0](service/lookoutmetrics/CHANGELOG.md#v120-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/managedblockchain`: [v1.4.0](service/managedblockchain/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.6.0](service/mediaconnect/CHANGELOG.md#v160-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.7.0](service/medialive/CHANGELOG.md#v170-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediatailor`: [v1.4.0](service/mediatailor/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.6.0](service/neptune/CHANGELOG.md#v160-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.1.0](service/proton/CHANGELOG.md#v110-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.6.0](service/quicksight/CHANGELOG.md#v160-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ram`: [v1.5.0](service/ram/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.5.0](service/rds/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshift`: [v1.7.0](service/redshift/CHANGELOG.md#v170-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/redshiftdata`: [v1.4.0](service/redshiftdata/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.7.0](service/route53/CHANGELOG.md#v170-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.8.0](service/sagemaker/CHANGELOG.md#v180-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakerfeaturestoreruntime`: [v1.4.0](service/sagemakerfeaturestoreruntime/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.7.0](service/securityhub/CHANGELOG.md#v170-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ses`: [v1.4.0](service/ses/CHANGELOG.md#v140-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/snowball`: [v1.5.0](service/snowball/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.6.0](service/sns/CHANGELOG.md#v160-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.5.0](service/sqs/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sts`: [v1.5.0](service/sts/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/timestreamquery`: [v1.3.0](service/timestreamquery/CHANGELOG.md#v130-2021-06-25)
- * **Feature**: Adds support for endpoint discovery.
-* `github.com/aws/aws-sdk-go-v2/service/timestreamwrite`: [v1.3.0](service/timestreamwrite/CHANGELOG.md#v130-2021-06-25)
- * **Feature**: Adds support for endpoint discovery.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.5.0](service/transfer/CHANGELOG.md#v150-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/waf`: [v1.3.0](service/waf/CHANGELOG.md#v130-2021-06-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/wafv2`: [v1.6.0](service/wafv2/CHANGELOG.md#v160-2021-06-25)
- * **Feature**: API client updated
-
-# Release (2021-06-11)
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.7.0](service/autoscaling/CHANGELOG.md#v170-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/cloudtrail`: [v1.3.2](service/cloudtrail/CHANGELOG.md#v132-2021-06-11)
- * **Documentation**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/cognitoidentityprovider`: [v1.3.3](service/cognitoidentityprovider/CHANGELOG.md#v133-2021-06-11)
- * **Documentation**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.6.0](service/eks/CHANGELOG.md#v160-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.6.0](service/fsx/CHANGELOG.md#v160-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/glue`: [v1.6.0](service/glue/CHANGELOG.md#v160-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.6.0](service/kendra/CHANGELOG.md#v160-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.7.0](service/macie2/CHANGELOG.md#v170-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/medialive`: [v1.6.0](service/medialive/CHANGELOG.md#v160-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/pi`: [v1.4.0](service/pi/CHANGELOG.md#v140-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/proton`: [v1.0.0](service/proton/CHANGELOG.md#v100-2021-06-11)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.3.1](service/qldb/CHANGELOG.md#v131-2021-06-11)
- * **Documentation**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/rds`: [v1.4.2](service/rds/CHANGELOG.md#v142-2021-06-11)
- * **Documentation**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.7.0](service/sagemaker/CHANGELOG.md#v170-2021-06-11)
- * **Feature**: Updated to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.4.1](service/transfer/CHANGELOG.md#v141-2021-06-11)
- * **Documentation**: Updated to latest API model.
-
-# Release (2021-06-04)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/acmpca`: [v1.5.0](service/acmpca/CHANGELOG.md#v150-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.6.0](service/autoscaling/CHANGELOG.md#v160-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/braket`: [v1.4.0](service/braket/CHANGELOG.md#v140-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/cloudfront`: [v1.5.2](service/cloudfront/CHANGELOG.md#v152-2021-06-04)
- * **Documentation**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/datasync`: [v1.4.0](service/datasync/CHANGELOG.md#v140-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/devicefarm`: [v1.3.0](service/devicefarm/CHANGELOG.md#v130-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/docdb`: [v1.6.0](service/docdb/CHANGELOG.md#v160-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.9.0](service/ec2/CHANGELOG.md#v190-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.5.0](service/ecs/CHANGELOG.md#v150-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.7.0](service/forecast/CHANGELOG.md#v170-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/fsx`: [v1.5.0](service/fsx/CHANGELOG.md#v150-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.5.1](service/iam/CHANGELOG.md#v151-2021-06-04)
- * **Documentation**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/internal/s3shared`: [v1.4.0](service/internal/s3shared/CHANGELOG.md#v140-2021-06-04)
- * **Feature**: The handling of AccessPoint and Outpost ARNs have been updated.
-* `github.com/aws/aws-sdk-go-v2/service/iotevents`: [v1.4.0](service/iotevents/CHANGELOG.md#v140-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ioteventsdata`: [v1.3.0](service/ioteventsdata/CHANGELOG.md#v130-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.6.0](service/iotsitewise/CHANGELOG.md#v160-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.6.0](service/iotwireless/CHANGELOG.md#v160-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/kendra`: [v1.5.0](service/kendra/CHANGELOG.md#v150-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.6.1](service/lightsail/CHANGELOG.md#v161-2021-06-04)
- * **Documentation**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/location`: [v1.2.0](service/location/CHANGELOG.md#v120-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/mwaa`: [v1.2.0](service/mwaa/CHANGELOG.md#v120-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/outposts`: [v1.4.0](service/outposts/CHANGELOG.md#v140-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/polly`: [v1.3.0](service/polly/CHANGELOG.md#v130-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/qldb`: [v1.3.0](service/qldb/CHANGELOG.md#v130-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/resourcegroups`: [v1.3.2](service/resourcegroups/CHANGELOG.md#v132-2021-06-04)
- * **Documentation**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.6.2](service/route53/CHANGELOG.md#v162-2021-06-04)
- * **Documentation**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53resolver`: [v1.4.2](service/route53resolver/CHANGELOG.md#v142-2021-06-04)
- * **Documentation**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.10.0](service/s3/CHANGELOG.md#v1100-2021-06-04)
- * **Feature**: The handling of AccessPoint and Outpost ARNs have been updated.
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.7.0](service/s3control/CHANGELOG.md#v170-2021-06-04)
- * **Feature**: The handling of AccessPoint and Outpost ARNs have been updated.
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/servicediscovery`: [v1.5.0](service/servicediscovery/CHANGELOG.md#v150-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sns`: [v1.5.0](service/sns/CHANGELOG.md#v150-2021-06-04)
- * **Feature**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/sqs`: [v1.4.2](service/sqs/CHANGELOG.md#v142-2021-06-04)
- * **Documentation**: Updated service client to latest API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.6.2](service/ssm/CHANGELOG.md#v162-2021-06-04)
- * **Documentation**: Updated service client to latest API model.
-
-# Release (2021-05-25)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs`: [v1.4.0](service/cloudwatchlogs/CHANGELOG.md#v140-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/commander`: [v1.1.0](service/commander/CHANGELOG.md#v110-2021-05-25)
- * **Feature**: Deprecated module. The API client was incorrectly named. Use AWS Systems Manager Incident Manager (ssmincidents) instead.
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.5.0](service/computeoptimizer/CHANGELOG.md#v150-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/costexplorer`: [v1.6.0](service/costexplorer/CHANGELOG.md#v160-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.8.0](service/ec2/CHANGELOG.md#v180-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/efs`: [v1.4.0](service/efs/CHANGELOG.md#v140-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/forecast`: [v1.6.0](service/forecast/CHANGELOG.md#v160-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.6.0](service/iot/CHANGELOG.md#v160-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/opsworkscm`: [v1.4.0](service/opsworkscm/CHANGELOG.md#v140-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.5.0](service/quicksight/CHANGELOG.md#v150-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.9.0](service/s3/CHANGELOG.md#v190-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/ssmincidents`: [v1.0.0](service/ssmincidents/CHANGELOG.md#v100-2021-05-25)
- * **Release**: New AWS service client module
-* `github.com/aws/aws-sdk-go-v2/service/transfer`: [v1.4.0](service/transfer/CHANGELOG.md#v140-2021-05-25)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/workspaces`: [v1.4.0](service/workspaces/CHANGELOG.md#v140-2021-05-25)
- * **Feature**: API client updated
-
-# Release (2021-05-20)
-
-## General Highlights
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.6.0
- * **Feature**: `internal/ini`: This package has been migrated to a separate module at `github.com/aws/aws-sdk-go-v2/internal/ini`.
-* `github.com/aws/aws-sdk-go-v2/config`: [v1.3.0](config/CHANGELOG.md#v130-2021-05-20)
- * **Feature**: SSO credentials can now be defined alongside other credential providers within the same configuration profile.
- * **Bug Fix**: Profile names were incorrectly normalized to lower-case, which could result in unexpected profile configurations.
-* `github.com/aws/aws-sdk-go-v2/internal/ini`: [v1.0.0](internal/ini/CHANGELOG.md#v100-2021-05-20)
- * **Release**: The `github.com/aws/aws-sdk-go-v2/internal/ini` package is now a Go Module.
-* `github.com/aws/aws-sdk-go-v2/service/applicationcostprofiler`: [v1.0.0](service/applicationcostprofiler/CHANGELOG.md#v100-2021-05-20)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/apprunner`: [v1.0.0](service/apprunner/CHANGELOG.md#v100-2021-05-20)
- * **Release**: New AWS service client module
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/autoscaling`: [v1.5.0](service/autoscaling/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/computeoptimizer`: [v1.4.0](service/computeoptimizer/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/detective`: [v1.6.0](service/detective/CHANGELOG.md#v160-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.5.0](service/eks/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticache`: [v1.6.0](service/elasticache/CHANGELOG.md#v160-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/elasticsearchservice`: [v1.4.0](service/elasticsearchservice/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iam`: [v1.5.0](service/iam/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/imagebuilder`: [v1.5.0](service/imagebuilder/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.5.0](service/iot/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotdeviceadvisor`: [v1.4.0](service/iotdeviceadvisor/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/iotsitewise`: [v1.5.0](service/iotsitewise/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.4.0](service/kinesis/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.3.0](service/kinesisanalytics/CHANGELOG.md#v130-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.4.0](service/kinesisanalyticsv2/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lexmodelsv2`: [v1.2.0](service/lexmodelsv2/CHANGELOG.md#v120-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/licensemanager`: [v1.4.0](service/licensemanager/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/lightsail`: [v1.6.0](service/lightsail/CHANGELOG.md#v160-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/macie`: [v1.4.0](service/macie/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/macie2`: [v1.6.0](service/macie2/CHANGELOG.md#v160-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/mediaconnect`: [v1.5.0](service/mediaconnect/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/neptune`: [v1.5.0](service/neptune/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/personalize`: [v1.5.0](service/personalize/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/quicksight`: [v1.4.0](service/quicksight/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/rekognition`: [v1.5.0](service/rekognition/CHANGELOG.md#v150-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.8.0](service/s3/CHANGELOG.md#v180-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemaker`: [v1.6.0](service/sagemaker/CHANGELOG.md#v160-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/sagemakera2iruntime`: [v1.3.0](service/sagemakera2iruntime/CHANGELOG.md#v130-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/securityhub`: [v1.6.0](service/securityhub/CHANGELOG.md#v160-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/support`: [v1.3.0](service/support/CHANGELOG.md#v130-2021-05-20)
- * **Feature**: API client updated
-* `github.com/aws/aws-sdk-go-v2/service/transcribe`: [v1.4.0](service/transcribe/CHANGELOG.md#v140-2021-05-20)
- * **Feature**: API client updated
-
-# Release (2021-05-14)
-
-## General Highlights
-* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
-* **Dependency Update**: Updated to the latest SDK module versions
-
-## Module Highlights
-* `github.com/aws/aws-sdk-go-v2`: v1.5.0
- * **Feature**: `AddSDKAgentKey` and `AddSDKAgentKeyValue` in `aws/middleware` package have been updated to direct metadata to `User-Agent` HTTP header.
-* `github.com/aws/aws-sdk-go-v2/service/codeartifact`: [v1.3.0](service/codeartifact/CHANGELOG.md#v130-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/commander`: [v1.0.0](service/commander/CHANGELOG.md#v100-2021-05-14)
- * **Release**: New AWS service client module
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/configservice`: [v1.5.0](service/configservice/CHANGELOG.md#v150-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/connect`: [v1.4.0](service/connect/CHANGELOG.md#v140-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/ec2`: [v1.7.0](service/ec2/CHANGELOG.md#v170-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/ecs`: [v1.4.0](service/ecs/CHANGELOG.md#v140-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/eks`: [v1.4.0](service/eks/CHANGELOG.md#v140-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/finspace`: [v1.0.0](service/finspace/CHANGELOG.md#v100-2021-05-14)
- * **Release**: New AWS service client module
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/finspacedata`: [v1.0.0](service/finspacedata/CHANGELOG.md#v100-2021-05-14)
- * **Release**: New AWS service client module
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/iot`: [v1.4.0](service/iot/CHANGELOG.md#v140-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/iotwireless`: [v1.5.0](service/iotwireless/CHANGELOG.md#v150-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/kinesis`: [v1.3.0](service/kinesis/CHANGELOG.md#v130-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalytics`: [v1.2.0](service/kinesisanalytics/CHANGELOG.md#v120-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/kinesisanalyticsv2`: [v1.3.0](service/kinesisanalyticsv2/CHANGELOG.md#v130-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/lakeformation`: [v1.3.0](service/lakeformation/CHANGELOG.md#v130-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/lookoutmetrics`: [v1.1.0](service/lookoutmetrics/CHANGELOG.md#v110-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/mediaconvert`: [v1.5.0](service/mediaconvert/CHANGELOG.md#v150-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/route53`: [v1.6.0](service/route53/CHANGELOG.md#v160-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3`: [v1.7.0](service/s3/CHANGELOG.md#v170-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/s3control`: [v1.6.0](service/s3control/CHANGELOG.md#v160-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssm`: [v1.6.0](service/ssm/CHANGELOG.md#v160-2021-05-14)
- * **Feature**: Updated to latest service API model.
-* `github.com/aws/aws-sdk-go-v2/service/ssmcontacts`: [v1.0.0](service/ssmcontacts/CHANGELOG.md#v100-2021-05-14)
- * **Release**: New AWS service client module
- * **Feature**: Updated to latest service API model.
-
-# Release 2021-05-06
-
-## Breaking change
-* `service/ec2` - v1.6.0
- * This release contains a breaking change to the Amazon EC2 API client. API number(int/int64/etc) and boolean members were changed from value, to pointer type. Your applications using the EC2 API client will fail to compile after upgrading for all members that were updated. To migrate to this module you'll need to update your application to use pointers for all number and boolean members in the API client module. The SDK provides helper utilities to convert between value and pointer types. For example the [aws.Bool](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Bool) function to get the address from a bool literal. Similar utilities are available for all other primitive types in the [aws](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws) package.
-
-## Service Client Highlights
-* `service/acmpca` - v1.3.0
- * Feature: API client updated
-* `service/apigateway` - v1.3.0
- * Feature: API client updated
-* `service/auditmanager` - v1.4.0
- * Feature: API client updated
-* `service/chime` - v1.3.0
- * Feature: API client updated
-* `service/cloudformation` - v1.4.0
- * Feature: API client updated
-* `service/cloudfront` - v1.4.0
- * Feature: API client updated
-* `service/codegurureviewer` - v1.3.0
- * Feature: API client updated
-* `service/connect` - v1.3.0
- * Feature: API client updated
-* `service/customerprofiles` - v1.5.0
- * Feature: API client updated
-* `service/devopsguru` - v1.3.0
- * Feature: API client updated
-* `service/docdb` - v1.4.0
- * Feature: API client updated
-* `service/ec2` - v1.6.0
- * Bug Fix: Fix incorrectly modeled Amazon EC2 number and boolean members in structures. The Amazon EC2 API client has been updated with a breaking change to fix all structure number and boolean members to be pointer types instead of value types. Fixes [#1107](https://github.com/aws/aws-sdk-go-v2/issues/1107), [#1178](https://github.com/aws/aws-sdk-go-v2/issues/1178), and [#1190](https://github.com/aws/aws-sdk-go-v2/issues/1190). This breaking change is made within the major version of the client's module, because the client operations failed and were unusable with value type number and boolean members with the EC2 API.
- * Feature: API client updated
-* `service/ecs` - v1.3.0
- * Feature: API client updated
-* `service/eks` - v1.3.0
- * Feature: API client updated
-* `service/forecast` - v1.4.0
- * Feature: API client updated
-* `service/glue` - v1.4.0
- * Feature: API client updated
-* `service/health` - v1.3.0
- * Feature: API client updated
-* `service/iotsitewise` - v1.3.0
- * Feature: API client updated
-* `service/iotwireless` - v1.4.0
- * Feature: API client updated
-* `service/kafka` - v1.3.0
- * Feature: API client updated
-* `service/kinesisanalyticsv2` - v1.2.0
- * Feature: API client updated
-* `service/macie2` - v1.4.0
- * Feature: API client updated
-* `service/marketplacecatalog` - v1.2.0
- * Feature: API client updated
-* `service/mediaconvert` - v1.4.0
- * Feature: API client updated
-* `service/mediapackage` - v1.4.0
- * Feature: API client updated
-* `service/mediapackagevod` - v1.3.0
- * Feature: API client updated
-* `service/mturk` - v1.2.0
- * Feature: API client updated
-* `service/nimble` - v1.0.0
- * Feature: API client updated
-* `service/organizations` - v1.3.0
- * Feature: API client updated
-* `service/personalize` - v1.3.0
- * Feature: API client updated
-* `service/robomaker` - v1.4.0
- * Feature: API client updated
-* `service/route53` - v1.5.0
- * Feature: API client updated
-* `service/s3` - v1.6.0
- * Bug Fix: Fix PutObject and UploadPart unseekable stream documentation link to point to the correct location.
- * Feature: API client updated
-* `service/sagemaker` - v1.4.0
- * Feature: API client updated
-* `service/securityhub` - v1.4.0
- * Feature: API client updated
-* `service/servicediscovery` - v1.3.0
- * Feature: API client updated
-* `service/snowball` - v1.3.0
- * Feature: API client updated
-* `service/sns` - v1.3.0
- * Feature: API client updated
-* `service/ssm` - v1.5.0
- * Feature: API client updated
-## Core SDK Highlights
-* Dependency Update: Update smithy-go dependency to v1.4.0
-* Dependency Update: Updated SDK dependencies to their latest versions.
-* `aws` - v1.4.0
- * Feature: Add support for FIPS global partition endpoints ([#1242](https://github.com/aws/aws-sdk-go-v2/pull/1242))
-
-# Release 2021-04-23
-## Service Client Highlights
-* `service/cloudformation` - v1.3.2
- * Documentation: Service Documentation Updates
-* `service/cognitoidentityprovider` - v1.2.3
- * Documentation: Service Documentation Updates
-* `service/costexplorer` - v1.4.0
- * Feature: Service API Updates
-* `service/databasemigrationservice` - v1.3.0
- * Feature: Service API Updates
-* `service/detective` - v1.4.0
- * Feature: Service API Updates
-* `service/elasticache` - v1.4.0
- * Feature: Service API Updates
-* `service/forecast` - v1.3.0
- * Feature: Service API Updates
-* `service/groundstation` - v1.3.0
- * Feature: Service API Updates
-* `service/kendra` - v1.3.0
- * Feature: Service API Updates
-* `service/redshift` - v1.5.0
- * Feature: Service API Updates
-* `service/savingsplans` - v1.2.0
- * Feature: Service API Updates
-* `service/securityhub` - v1.3.0
- * Feature: Service API Updates
-## Core SDK Highlights
-* Dependency Update: Updated SDK dependencies to their latest versions.
-* `feature/rds/auth` - v1.0.0
- * Feature: Add Support for Amazon RDS IAM Authentication
-
-# Release 2021-04-14
-## Service Client Highlights
-* `service/codebuild` - v1.3.0
- * Feature: API client updated
-* `service/codestarconnections` - v1.2.0
- * Feature: API client updated
-* `service/comprehendmedical` - v1.2.0
- * Feature: API client updated
-* `service/configservice` - v1.4.0
- * Feature: API client updated
-* `service/ec2` - v1.5.0
- * Feature: API client updated
-* `service/fsx` - v1.3.0
- * Feature: API client updated
-* `service/lightsail` - v1.4.0
- * Feature: API client updated
-* `service/mediaconnect` - v1.3.0
- * Feature: API client updated
-* `service/rds` - v1.3.0
- * Feature: API client updated
-* `service/redshift` - v1.4.0
- * Feature: API client updated
-* `service/shield` - v1.3.0
- * Feature: API client updated
-* `service/sts` - v1.3.0
- * Feature: API client updated
-## Core SDK Highlights
-* Dependency Update: Updated SDK dependencies to their latest versions.
-
-# Release 2021-04-08
-## Service Client Highlights
-* Feature: API model sync
-* `service/lookoutequipment` - v1.0.0
- * v1 Release: new service client
-* `service/mgn` - v1.0.0
- * v1 Release: new service client
-## Core SDK Highlights
-* Dependency Update: smithy-go version bump
-* Dependency Update: Updated SDK dependencies to their latest versions.
-
-# Release 2021-04-01
-## Service Client Highlights
-* Bug Fix: Fix URL Path and RawQuery of resolved endpoint being ignored by the API client's request serialization.
- * Fixes [issue#1191](https://github.com/aws/aws-sdk-go-v2/issues/1191)
-* Refactored internal endpoints model for accessors
-* Feature: updated to latest models
-* New services
- * `service/location` - v1.0.0
- * `service/lookoutmetrics` - v1.0.0
-## Core SDK Highlights
-* Dependency Update: update smithy-go module
-* Dependency Update: Updated SDK dependencies to their latest versions.
-
-# Release 2021-03-18
-## Service Client Highlights
-* Bug Fix: Updated presign URLs to no longer include the X-Amz-User-Agent header
-* Feature: Update API model
-* Add New supported API
-* `service/internal/s3shared` - v1.2.0
- * Feature: Support for S3 Object Lambda
-* `service/s3` - v1.3.0
- * Bug Fix: Adds documentation to the PutObject and UploadPart operations Body member how to upload unseekable objects to an Amazon S3 Bucket.
- * Feature: S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request
-* `service/s3control` - v1.3.0
- * Feature: S3 Object Lambda is a new S3 feature that enables users to apply their own custom code to process the output of a standard S3 GET request by automatically invoking a Lambda function with a GET request
-## Core SDK Highlights
-* Dependency Update: Updated SDK dependencies to their latest versions.
-* `aws` - v1.3.0
- * Feature: Add helper to V4 signer package to swap compute payload hash middleware with unsigned payload middleware
-* `feature/s3/manager` - v1.1.0
- * Bug Fix: Add support for Amazon S3 Object Lambda feature.
- * Feature: Updates for S3 Object Lambda feature
-
-# Release 2021-03-12
-## Service Client Highlights
-* Bug Fix: Fixed a bug that could cause union shape types to be deserialized incorrectly
-* Bug Fix: Fixed a bug where unboxed shapes that were marked as required were not serialized and sent over the wire, causing an API error from the service.
-* Bug Fix: Fixed a bug with generated API Paginators' handling of nil input parameters causing a panic.
-* Dependency Update: update smithy-go dependency
-* `service/detective` - v1.1.2
- * Bug Fix: Fix deserialization of API response timestamp member.
-* `service/docdb` - v1.2.0
- * Feature: Client now support presigned URL generation for CopyDBClusterSnapshot and CreateDBCluster operations by specifying the target SourceRegion
-* `service/neptune` - v1.2.0
- * Feature: Client now support presigned URL generation for CopyDBClusterSnapshot and CreateDBCluster operations by specifying the target SourceRegion
-* `service/s3` - v1.2.1
- * Bug Fix: Fixed an issue where ListObjectsV2 and ListParts paginators could loop infinitely
- * Bug Fix: Fixed key encoding when addressing S3 Access Points
-## Core SDK Highlights
-* Dependency Update: Updated SDK dependencies to their latest versions.
-* `config` - v1.1.2
- * Bug Fix: Fixed a panic when using WithEC2IMDSRegion without a specified IMDS client
-
-# Release 2021-02-09
-## Service Client Highlights
-* `service/s3` - v1.2.0
- * Feature: adds support for s3 vpc endpoint interface [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
-* `service/s3control` - v1.2.0
- * Feature: adds support for s3 vpc endpoint interface [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
-## Core SDK Highlights
-* Dependency Update: Updated SDK dependencies to their latest versions.
-* `aws` - v1.2.0
- * Feature: support to add endpoint source on context. Adds getter/setter for the endpoint source [#1113](https://github.com/aws/aws-sdk-go-v2/pull/1113)
-* `config` - v1.1.1
- * Bug Fix: Only Validate SSO profile configuration when attempting to use SSO credentials [#1103](https://github.com/aws/aws-sdk-go-v2/pull/1103)
- * Bug Fix: Environment credentials were not taking precedence over AWS_PROFILE [#1103](https://github.com/aws/aws-sdk-go-v2/pull/1103)
-
-# Release 2021-01-29
-## Service Client Highlights
-* Bug Fix: A serialization bug has been fixed that caused some service operations with empty inputs to not be serialized correctly ([#1071](https://github.com/aws/aws-sdk-go-v2/pull/1071))
-* Bug Fix: Fixes a bug that could cause a waiter to fail when comparing types ([#1083](https://github.com/aws/aws-sdk-go-v2/pull/1083))
-## Core SDK Highlights
-* Feature: EndpointResolverFromURL helpers have been added for constructing a service EndpointResolver type ([#1066](https://github.com/aws/aws-sdk-go-v2/pull/1066))
-* Dependency Update: Updated SDK dependencies to their latest versions.
-* `aws` - v1.1.0
- * Feature: Add support for specifying the EndpointSource on aws.Endpoint types ([#1070](https://github.com/aws/aws-sdk-go-v2/pull/1070/))
-* `config` - v1.1.0
- * Feature: Add Support for AWS Single Sign-On (SSO) credential provider ([#1072](https://github.com/aws/aws-sdk-go-v2/pull/1072))
-* `credentials` - v1.1.0
- * Feature: Add AWS Single Sign-On (SSO) credential provider ([#1072](https://github.com/aws/aws-sdk-go-v2/pull/1072))
-
-# Release 2021-01-19
-
-We are excited to announce the [General Availability](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-version-2-general-availability/)
-(GA) release of the [AWS SDK for Go version 2 (v2)](https://github.com/aws/aws-sdk-go-v2).
-This release follows the [Release candidate](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-version-2-v2-release-candidate)
-of the AWS SDK for Go v2. Version 2 incorporates customer feedback from version 1 and takes advantage of modern Go language features.
-
-## Breaking Changes
-* `aws`: Updated Config.Retryer member to be a func that returns aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
- * Updates the SDK's references to Config.Retryer to be a function that returns aws.Retryer value. This ensures that custom retry options specified in the `aws.Config` are scoped to individual client instances.
- * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer.
- * Removes duplicate `Retryer` interface from `retry` package. Single definition is `aws.Retryer` now.
-* `aws/middleware`: Updates `AddAttemptClockSkewMiddleware` to use appropriate `AddRecordResponseTiming` naming ([#1031](https://github.com/aws/aws-sdk-go-v2/pull/1031))
- * Removes `ResponseMetadata` struct type, and adds its members to middleware metadata directly, to improve discoverability.
-* `config`: Updated the `WithRetryer` helper to take a function that returns an aws.Retryer ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
- * All API clients created with the config will call the `Config.Retryer` function to get an aws.Retryer.
-* `API Clients`: Fix SDK's API client enum constant name generation to have expected casing ([#1020](https://github.com/aws/aws-sdk-go-v2/pull/1020))
- * This updates of the generated enum const value names in API client's `types` package to have the expected casing. Prior to this, enum names were being generated with lowercase names instead of camel case.
-* `API Clients`: Updates SDK's API client request middleware stack values to be scoped to individual operation call ([#1019](https://github.com/aws/aws-sdk-go-v2/pull/1019))
- * The API client request middleware stack values were mistakenly allowed to escape to nested API operation calls. This broke the SDK's presigners.
- * Stack values that should not escape are now scoped to the individual operation call.
-* `Multiple API Clients`: Unexported the API client's `WithEndpointResolver` this type wasn't intended to be exported ([#1051](https://github.com/aws/aws-sdk-go-v2/pull/1051))
- * Using the `aws.Config.EndpointResolver` member for setting custom endpoint resolver instead.
-
-## New Features
-* `service/sts`: Add support for presigning GetCallerIdentity operation ([#1030](https://github.com/aws/aws-sdk-go-v2/pull/1030))
- * Adds a PresignClient to the `sts` API client module. Use PresignGetCallerIdentity to create presigned URLs for the GetCallerIdentity operation.
- * Fixes [#1021](https://github.com/aws/aws-sdk-go-v2/issues/1021)
-* `aws/retry`: Add package documentation for retry package ([#1033](https://github.com/aws/aws-sdk-go-v2/pull/1033))
- * Adds documentation for the retry package
-
-## Bug Fixes
-* `Multiple API Clients`: Fix SDK's generated serde for unmodeled operation input/output ([#1050](https://github.com/aws/aws-sdk-go-v2/pull/1050))
- * Fixes [#1047](https://github.com/aws/aws-sdk-go-v2/issues/1047) by fixing how the SDK generated serialization and deserialization of API operations that did not have modeled input or output types. This caused the SDK to incorrectly attempt to deserialize response documents that were either empty, or contained unexpected data.
-* `service/s3`: Fix Tagging parameter not serialized correctly for presigned PutObject requests ([#1017](https://github.com/aws/aws-sdk-go-v2/pull/1017))
- * Fixes the Tagging parameter incorrectly being serialized to the URL's query string instead of being signed as a HTTP request header.
- * When using PresignPutObject make sure to add all signed headers returned by the method to your down stream's HTTP client's request. These headers must be included in the request, or the request will fail with signature errors.
- * Fixes [#1016](https://github.com/aws/aws-sdk-go-v2/issues/1016)
-* `service/s3`: Fix Unmarshaling `GetObjectAcl` operation's Grantee type response ([#1034](https://github.com/aws/aws-sdk-go-v2/pull/1034))
- * Updates the SDK's codegen for correctly deserializing XML attributes in tags with XML namespaces.
- * Fixes [#1013](https://github.com/aws/aws-sdk-go-v2/issues/1013)
-* `service/s3`: Fix Unmarshaling `GetBucketLocation` operation's response ([#1027](https://github.com/aws/aws-sdk-go-v2/pull/1027))
- * Fixes [#908](https://github.com/aws/aws-sdk-go-v2/issues/908)
-
-## Migrating from v2 preview SDK's v0.31.0 to v1.0.0
-
-### aws.Config Retryer member
-
-If your application sets the `Config.Retryer` member the application will need
-to be updated to set a function that returns an `aws.Retryer`. In addition, if
-your application used the `config.WithRetryer` helper a function that returns
-an `aws.Retryer` needs to be used.
-
-If your application used the `retry.Retryer` type, update to using the
-`aws.Retryer` type instead.
-
-### API Client enum value names
-
-If your application used the enum values in the API Client's `types` package between v0.31.0 and the latest version of the client module you may need to update the naming of the enum value. The enum value names were updated to use camel case instead of being lowercased.
-
-# Release 2020-12-23
-
-We’re happy to announce the Release Candidate (RC) of the AWS SDK for Go v2.
-This RC follows the developer preview release of the AWS SDK for Go v2. The SDK
-has undergone a major rewrite from the v1 code base to incorporate your
-feedback and to take advantage of modern Go language features.
-
-## Documentation
-* Developer Guide: https://aws.github.io/aws-sdk-go-v2/docs/
-* API Reference docs: https://pkg.go.dev/github.com/aws/aws-sdk-go-v2
-* Migration Guide: https://aws.github.io/aws-sdk-go-v2/docs/migrating/
-
-## Breaking Changes
-* Dependency `github.com/awslabs/smithy-go` has been relocated to `github.com/aws/smithy-go`
- * The `smithy-go` repository was moved from the `awslabs` GitHub organization to `aws`.
- * `xml`, `httpbinding`, and `json` package relocated under `encoding` package.
-* The module `ec2imds` moved to `feature/ec2/imds` path ([#984](https://github.com/aws/aws-sdk-go-v2/pull/984))
- * Moves the `ec2imds` feature module to be in common location as other SDK features.
-* `aws/signer/v4`: Refactor AWS Sigv4 Signer and options types to allow function options ([#955](https://github.com/aws/aws-sdk-go-v2/pull/955))
- * Fixes [#917](https://github.com/aws/aws-sdk-go-v2/issues/917), [#960](https://github.com/aws/aws-sdk-go-v2/issues/960), [#958](https://github.com/aws/aws-sdk-go-v2/issues/958)
-* `aws`: CredentialCache type updated to require constructor function ([#946](https://github.com/aws/aws-sdk-go-v2/pull/946))
- * Fixes [#940](https://github.com/aws/aws-sdk-go-v2/issues/940)
-* `credentials`: ExpiryWindow and Jitter moved from credential provider to `CredentialCache` ([#946](https://github.com/aws/aws-sdk-go-v2/pull/946))
- * Moves ExpiryWindow and Jitter options to common option of the `CredentialCache` instead of duplicated across providers.
- * Fixes [#940](https://github.com/aws/aws-sdk-go-v2/issues/940)
-* `config`: Ensure shared credentials file has precedence over shared config file ([#990](https://github.com/aws/aws-sdk-go-v2/pull/990))
- * The shared config file was incorrectly overriding the shared credentials file when merging values.
-* `config`: Add `context.Context` to `LoadDefaultConfig` ([#951](https://github.com/aws/aws-sdk-go-v2/pull/951))
- * Updates `config#LoadDefaultConfig` function to take `context.Context` as well as functional options for the `config#LoadOptions` type.
- * Fixes [#926](https://github.com/aws/aws-sdk-go-v2/issues/926), [#819](https://github.com/aws/aws-sdk-go-v2/issues/819)
-* `aws`: Rename `NoOpRetryer` to `NopRetryer` to have consistent naming with rest of SDK ([#987](https://github.com/aws/aws-sdk-go-v2/pull/987))
- * Fixes [#878](https://github.com/aws/aws-sdk-go-v2/issues/878)
-* `service/s3control`: Change `S3InitiateRestoreObjectOperation.ExpirationInDays` from value to pointer type ([#988](https://github.com/aws/aws-sdk-go-v2/pull/988))
-* `aws`: `ReaderSeekerCloser` and `WriteAtBuffer` have been relocated to `feature/s3/manager`.
-
-## New Features
-* *Waiters*: Add Waiter utilities for API clients ([aws/smithy-go#237](https://github.com/aws/smithy-go/pull/237))
- * Your application can now use Waiter utilities to wait for AWS resources.
-* `feature/dynamodb/attributevalue`: Add Amazon DynamoDB Attribute value marshaler utility ([#948](https://github.com/aws/aws-sdk-go-v2/pull/948))
- * Adds a utility for marshaling Go types to and from Amazon DynamoDB AttributeValues.
- * Also includes utility for converting from Amazon DynamoDB Streams AttributeValues to Amazon DynamoDB AttributeValues.
-* `feature/dynamodbstreams/attributevalue`: Add Amazon DynamoDB Streams Attribute value marshaler utility ([#948](https://github.com/aws/aws-sdk-go-v2/pull/948))
- * Adds a utility for marshaling Go types to and from Amazon DynamoDB Streams AttributeValues.
- * Also includes utility for converting from Amazon DynamoDB AttributeValues to Amazon DynamoDB Streams AttributeValues.
-* `feature/dynamodb/expression`: Add Amazon DynamoDB expression utility ([#981](https://github.com/aws/aws-sdk-go-v2/pull/981))
- * Adds the expression utility to the SDK for easily building Amazon DynamoDB operation expressions in code.
-
-## Bug Fixes
-* `service/s3`: Fix Presigner to configure client correctly for Amazon S3 ([#969](https://github.com/aws/aws-sdk-go-v2/pull/969))
-* `service/s3`: Fix deserialization of CompleteMultipartUpload ([#965](https://github.com/aws/aws-sdk-go-v2/pull/965))
- * Fixes [#927](https://github.com/aws/aws-sdk-go-v2/issues/927)
-* `codegen`: Fix API client union serialization ([#979](https://github.com/aws/aws-sdk-go-v2/pull/979))
- * Fixes [#978](https://github.com/aws/aws-sdk-go-v2/issues/978)
-
-## Service Client Highlights
-* API Clients have been bumped to version `v0.31.0`
-* Regenerate API Clients from updated API models adding waiter utilities, and union parameters.
-* `codegen`:
- * Add documentation to union API parameters describing valid member types, and usage example ([aws/smithy-go#239](https://github.com/aws/smithy-go/pull/239))
- * Normalize Metadata header map keys to be lower case ([aws/smithy-go#241](https://github.com/aws/smithy-go/pull/241)), ([#982](https://github.com/aws/aws-sdk-go-v2/pull/982))
- * Fixes [#376](https://github.com/aws/aws-sdk-go-v2/issues/376) Amazon S3 Metadata parameters keys are always returned as lower case.
- * Fix API client deserialization of XML based responses ([aws/smithy-go#245](https://github.com/aws/smithy-go/pull/245)), ([#992](https://github.com/aws/aws-sdk-go-v2/pull/992))
- * Fixes [#910](https://github.com/aws/aws-sdk-go-v2/issues/910)
-* `service/s3`, `service/s3control`:
- * Add support for reading `s3_use_arn_region` from shared config file ([#991](https://github.com/aws/aws-sdk-go-v2/pull/991))
- * Add Utility for getting RequestID and HostID of response ([#983](https://github.com/aws/aws-sdk-go-v2/pull/983))
-
-
-## Other changes
-* Updates branch `HEAD` points from `master` to `main`.
- * This should not impact your application, but if you have pull requests or forks of the SDK you may need to update the upstream branch your fork is based off of.
-
-## Migrating from v2 preview SDK's v0.30.0 to v0.31.0 release candidate
-
-### smithy-go module relocation
-
-If your application uses `smithy-go` utilities for request pipeline your application will need to be updated to refer to the new import path of `github.com/aws/smithy-go`. If your application did *not* use `smithy-go` utilities directly, your application will update automatically.
-
-### EC2 IMDS module relocation
-
-If your application used the `ec2imds` module, it has been relocated to `feature/ec2/imds`. Your application will need to update to the new import path, `github.com/aws/aws-sdk-go-v2/feature/ec2/imds`.
-
-### CredentialsCache Constructor and ExpiryWindow Options
-
-The `aws#CredentialsCache` type was updated, and a new constructor function, `NewCredentialsCache` was added. This function needs to be used to initialize the `CredentialCache`. The constructor also has function options to specify additional configuration, e.g. ExpiryWindow and Jitter.
-
-If your application was specifying the `ExpiryWindow` with the `credentials/stscreds#AssumeRoleOptions`, `credentials/stscreds#WebIdentityRoleOptions`, `credentials/processcreds#Options`, or `credentials/ec2rolecreds#Options` types the `ExpiryWindow` option will need to be specified on the `CredentialsCache` constructor instead.
-
-### AWS Sigv4 Signer Refactor
-
-The `aws/signer/v4` package's `Signer.SignHTTP` and `Signer.PresignHTTP` methods were updated to take functional options. If your application provided a custom implementation for API client's `HTTPSignerV4` or `HTTPPresignerV4` interfaces, that implementation will need to be updated for the new function signature.
-
-### Configuration Loading
-
-The `config#LoadDefaultConfig` function has been updated to require a `context.Context` as the first parameter, with additional optional function options as variadic additional arguments. Your application will need to update its usage of `LoadDefaultConfig` to pass in `context.Context` as the first parameter. If your application used the `With...` helpers those should continue to work without issue.
-
-The v2 SDK corrects its behavior to be inline with the AWS CLI and other AWS SDKs. Refer to https://docs.aws.amazon.com/credref/latest/refdocs/overview.html for more information on how to use the shared config and credentials files.
-
-
-# Release 2020-11-30
-
-## Breaking Change
-* `codegen`: Add support for slice and maps generated with value members instead of pointer ([#887](https://github.com/aws/aws-sdk-go-v2/pull/887))
- * This update allow the SDK's code generation to be aware of API shapes and members that are not nullable, and can be rendered as value types by the code generation instead of pointer types.
- * Several API client parameter types will change from pointer members to value members for slice, map, number and bool member types.
- * See Migration notes for migrating to v0.30.0 with this change.
-* `aws/transport/http`: Move aws.BuildableHTTPClient to HTTP transport package ([#898](https://github.com/aws/aws-sdk-go-v2/pull/898))
- * Moves the `BuildableHTTPClient` from the SDK's `aws` package to the `aws/transport/http` package as `BuildableClient`, along with other HTTP specific utilities.
-* `feature/cloudfront/sign`: Add CloudFront sign feature as module ([#884](https://github.com/aws/aws-sdk-go-v2/pull/884))
- * Moves `service/cloudfront/sign` package out of the `cloudfront` module, and into its own module as `github.com/aws/aws-sdk-go-v2/feature/cloudfront/sign`.
-
-## New Features
-* `config`: Add a WithRetryer provider helper to the config loader ([#897](https://github.com/aws/aws-sdk-go-v2/pull/897))
- * Adds a `WithRetryer` configuration provider to the config loader as a convenience helper to set the `Retryer` on the `aws.Config` when its being loaded.
-* `config`: Default to TLS 1.2 for HTTPS requests ([#892](https://github.com/aws/aws-sdk-go-v2/pull/892))
- * Updates the SDK's default HTTP client to use TLS 1.2 as the minimum TLS version for all HTTPS requests by default.
-
-## Bug Fixes
-* `config`: Fix AWS_CA_BUNDLE usage while loading default config ([#912](https://github.com/aws/aws-sdk-go-v2/pull/))
- * Fixes the `LoadDefaultConfig`'s configuration provider order to correctly load a custom HTTP client prior to configuring the client for `AWS_CA_BUNDLE` environment variable.
-* `service/s3`: Fix signature mismatch error for s3 ([#913](https://github.com/aws/aws-sdk-go-v2/pull/913))
- * Fixes ([#883](https://github.com/aws/aws-sdk-go-v2/issues/883))
-* `service/s3control`:
- * Fix HostPrefix addition behavior for s3control ([#882](https://github.com/aws/aws-sdk-go-v2/pull/882))
- * Fixes ([#863](https://github.com/aws/aws-sdk-go-v2/issues/863))
- * Fix s3control error deserializer ([#875](https://github.com/aws/aws-sdk-go-v2/pull/875))
- * Fixes ([#864](https://github.com/aws/aws-sdk-go-v2/issues/864))
-
-## Service Client Highlights
-* Pagination support has been added to supported APIs. See [Using Operation Paginators](https://aws.github.io/aws-sdk-go-v2/docs/making-requests/#using-operation-paginators) in the Developer Guide. ([#885](https://github.com/aws/aws-sdk-go-v2/pull/885))
-* Logging support has been added to service clients. See [Logging](https://aws.github.io/aws-sdk-go-v2/docs/configuring-sdk/logging/) in the Developer Guide. ([#872](https://github.com/aws/aws-sdk-go-v2/pull/872))
-* `service`: Add support for pre-signed URL clients for S3, RDS, EC2 service ([#888](https://github.com/aws/aws-sdk-go-v2/pull/888))
- * `service/s3`: operations `PutObject` and `GetObject` are now supported with s3 pre-signed url client.
- * `service/ec2`: operation `CopySnapshot` is now supported with ec2 pre-signed url client.
- * `service/rds`: operations `CopyDBSnapshot`, `CreateDBInstanceReadReplica`, `CopyDBClusterSnapshot`, `CreateDBCluster` are now supported with rds pre-signed url client.
-* `service/s3`: Add support for S3 access point and S3 on outposts access point ARNs ([#870](https://github.com/aws/aws-sdk-go-v2/pull/870))
-* `service/s3control`: Adds support for S3 on outposts access point and S3 on outposts bucket ARNs ([#870](https://github.com/aws/aws-sdk-go-v2/pull/870))
-
-## Migrating from v2 preview SDK's v0.29.0 to v0.30.0
-
-### aws.BuildableHTTPClient move
-The `aws`'s `BuildableHTTPClient` HTTP client implementation was moved to `aws/transport/http` as `BuildableClient`. If your application used the `aws.BuildableHTTPClient` type, update it to use the `BuildableClient` in the `aws/transport/http` package.
-
-### Slice and Map API member types
-This release includes several code generation updates for API client's slice map members. Using API modeling metadata the Slice and map members are now generated as value types instead of pointer types. For your application this means that for these types, the SDK no longer will have pointer member types, and have value member types.
-
-To migrate to this change you'll need to remove the pointer handling for slice and map members, and instead use value type handling of the member values.
-
-### Boolean and Number API member types
-Similar to the slice and map API member types being generated as value, the SDK's code generation now has metadata where the SDK can generate boolean and number members as value type instead of pointer types.
-
-To migrate to this change you'll need to remove the pointer handling for numbers and boolean member types, and instead use value handling.
-
-# Release 2020-10-30
-
-## New Features
-* Adds HostnameImmutable flag on aws.Endpoint to direct SDK if the associated endpoint is modifiable.([#848](https://github.com/aws/aws-sdk-go-v2/pull/848))
-
-## Bug Fixes
-* Fix SDK handling of xml based services - xml namespaces ([#858](https://github.com/aws/aws-sdk-go-v2/pull/858))
- * Fixes ([#850](https://github.com/aws/aws-sdk-go-v2/issues/850))
-
-## Service Client Highlights
-* API Clients have been bumped to version `v0.29.0`
- * Regenerate API Clients from update API models.
-* Improve client doc generation.
-
-## Core SDK Highlights
-* Dependency Update: Updated SDK dependencies to their latest versions.
-
-## Migrating from v2 preview SDK's v0.28.0 to v0.29.0
-* API Clients ResolverOptions type renamed to EndpointResolverOptions
-
-# Release 2020-10-26
-
-## New Features
-* `service/s3`: Add support for Accelerate, and Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
-* `service/s3control`: Add support for Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
-
-## Service Client Highlights
-* API Clients have been bumped to version `v0.28.0`
- * Regenerate API Clients from update API models.
-* `service/s3`: Add support for Accelerate, and Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
-* `service/s3control`: Add support for Dualstack ([#836](https://github.com/aws/aws-sdk-go-v2/pull/836))
-* `service/route53`: Fix sanitizeURL customization to handle leading slash(`/`) [#846](https://github.com/aws/aws-sdk-go-v2/pull/846)
- * Fixes [#843](https://github.com/aws/aws-sdk-go-v2/issues/843)
-* `service/route53`: Fix codegen to correctly look for operations that need sanitize url ([#851](https://github.com/aws/aws-sdk-go-v2/pull/851))
-
-## Core SDK Highlights
-* `aws/protocol/restjson`: Fix unexpected JSON error response deserialization ([#837](https://github.com/aws/aws-sdk-go-v2/pull/837))
- * Fixes [#832](https://github.com/aws/aws-sdk-go-v2/issues/832)
-* `example/service/s3/listobjects`: Add example for Amazon S3 ListObjectsV2 ([#838](https://github.com/aws/aws-sdk-go-v2/pull/838))
-
-# Release 2020-10-16
-
-## New Features
-* `feature/s3/manager`:
- * Initial `v0.1.0` release
- * Add the Amazon S3 Upload and Download transfer manager ([#802](https://github.com/aws/aws-sdk-go-v2/pull/802))
-
-## Service Client Highlights
-* Clients have been bumped to version `v0.27.0`
-* `service/machinelearning`: Add customization for setting client endpoint with PredictEndpoint value if set ([#782](https://github.com/aws/aws-sdk-go-v2/pull/782))
-* `service/s3`: Fix empty response body deserialization in case of error response ([#801](https://github.com/aws/aws-sdk-go-v2/pull/801))
- * Fixes xml deserialization util to correctly handle empty response body in case of an error response.
-* `service/s3`: Add customization to auto fill Content-Md5 request header for Amazon S3 operations ([#812](https://github.com/aws/aws-sdk-go-v2/pull/812))
-* `service/s3`: Add fallback to using HTTP status code for error code ([#818](https://github.com/aws/aws-sdk-go-v2/pull/818))
- * Adds falling back to using the HTTP status code to create a API Error code when not error code is received from the service, such as HeadObject.
-* `service/route53`: Add support for deserialzing `InvalidChangeBatch` API error ([#792](https://github.com/aws/aws-sdk-go-v2/pull/792))
-* `codegen`: Remove API client `Options` getter methods ([#788](https://github.com/aws/aws-sdk-go-v2/pull/788))
-* `codegen`: Regenerate API Client modeled endpoints ([#791](https://github.com/aws/aws-sdk-go-v2/pull/791))
-* `codegen`: Sort API Client struct member paramaters by required and alphabetical ([#787](https://github.com/aws/aws-sdk-go-v2/pull/787))
-* `codegen`: Add package docs to API client modules ([#821](https://github.com/aws/aws-sdk-go-v2/pull/821))
-* `codegen`: Rename `smithy-go`'s `smithy.OperationError` to `smithy.OperationInvokeError`.
-
-## Core SDK Highlights
-* `config`:
- * Bumped to `v0.2.0`
- * Refactor Config Module, Add Config Package Documentation and Examples, Improve Overall SDK Readme ([#822](https://github.com/aws/aws-sdk-go-v2/pull/822))
-* `credentials`:
- * Bumped to `v0.1.2`
- * Strip Monotonic Clock Readings when Comparing Credential Expiry Time ([#789](https://github.com/aws/aws-sdk-go-v2/pull/789))
-* `ec2imds`:
- * Bumped to `v0.1.2`
- * Fix refreshing API token if expired ([#789](https://github.com/aws/aws-sdk-go-v2/pull/789))
-
-## Migrating from v0.26.0 to v0.27.0
-
-#### Configuration
-
-The `config` module's exported types were trimmed down to add clarity and reduce confusion. Additional changes to the `config` module' helpers.
-
-* Refactored `WithCredentialsProvider`, `WithHTTPClient`, and `WithEndpointResolver` to functions instead of structs.
-* Removed `MFATokenFuncProvider`, use `AssumeRoleCredentialOptionsProvider` for setting options for `stscreds.AssumeRoleOptions`.
-* Renamed `WithWebIdentityCredentialProviderOptions` to `WithWebIdentityRoleCredentialOptions`
-* Renamed `AssumeRoleCredentialProviderOptions` to `AssumeRoleCredentialOptionsProvider`
-* Renamed `EndpointResolverFuncProvider` to `EndpointResolverProvider`
-
-#### API Client
-* API Client `Options` type getter methods have been removed. Use the struct members instead.
-* The error returned by API Client operations was renamed from `smithy.OperationError` to `smithy.OperationInvokeError`.
-
-# Release 2020-09-30
-
-## Service Client Highlights
-* Service clients have been bumped to `v0.26.0` simplify the documentation experience when using [pkg.go.dev](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2).
-* `service/s3`: Disable automatic decompression of getting Amazon S3 objects with the `Content-Encoding: gzip` metadata header. ([#748](https://github.com/aws/aws-sdk-go-v2/pull/748))
- * This changes the SDK's default behavior with regard to making S3 API calls. The client will no longer automatically set the `Accept-Encoding` HTTP request header, nor will it automatically decompress the gzipped response when the `Content-Encoding: gzip` response header was received.
- * If you'd like the client to sent the `Accept-Encoding: gzip` request header, you can add this header to the API operation method call with the [SetHeaderValue](https://pkg.go.dev/github.com/awslabs/smithy-go/transport/http#SetHeaderValue). middleware helper.
-* `service/cloudfront/sign`: Fix cloudfront example usage of SignWithPolicy ([#673](https://github.com/aws/aws-sdk-go-v2/pull/673))
- * Fixes [#671](https://github.com/aws/aws-sdk-go-v2/issues/671) documentation typo by correcting the usage of `SignWithPolicy`.
-
-## Core SDK Highlights
-* SDK core module released at `v0.26.0`
-* `config` module released at `v0.1.1`
-* `credentials` module released at `v0.1.1`
-* `ec2imds` module released at `v0.1.1`
-
-
-# Release 2020-09-28
-## Announcements
-We’re happy to share the updated clients for the v0.25.0 preview version of the AWS SDK for Go V2.
-
-The updated clients leverage new developments and advancements within AWS and the Go software ecosystem at large since
-our original preview announcement. Using the new clients will be a bit different than before. The key differences are:
-simplified API operation invocation, performance improvements, support for error wrapping, and a new middleware architecture.
-So below we have a guided walkthrough to help try it out and share your feedback in order to better influence the features
-you’d like to see in the GA version.
-
-See [Announcement Blog Post](https://aws.amazon.com/blogs/developer/client-updates-in-the-preview-version-of-the-aws-sdk-for-go-v2/) for more details.
-
-## Service Client Highlights
-* Initial service clients released at version `v0.1.0`
-## Core SDK Highlights
-* SDK core module released at `v0.25.0`
-* `config` module released at `v0.1.0`
-* `credentials` module released at `v0.1.0`
-* `ec2imds` module released at `v0.1.0`
-
-## Migrating from v2 preview SDK's v0.24.0 to v0.25.0
-
-#### Design changes
-
-The v2 preview SDK `v0.25.0` release represents a significant stepping stone bringing the v2 SDK closer to its target design and usability. This release includes significant breaking changes to the v2 preview SDK. The updates in the `v0.25.0` release focus on refactoring and modularization of the SDK’s API clients to use the new [client design](https://github.com/aws/aws-sdk-go-v2/issues/438), updated request pipeline (aka [middleware](https://pkg.go.dev/github.com/awslabs/smithy-go/middleware)), refactored [credential providers](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials), and [configuration loading](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config) packages.
-
-We've also bumped the minimum supported Go version with this release. Starting with v0.25.0 the SDK requires a minimum version of Go `v1.15`.
-
-As a part of the refactoring done to v2 preview SDK some components have not been included in this update. The following is a non exhaustive list of features that are not available.
-
-* API Paginators - [#439](https://github.com/aws/aws-sdk-go-v2/issues/439)
-* API Waiters - [#442](https://github.com/aws/aws-sdk-go-v2/issues/442)
-* Presign URL - [#794](https://github.com/aws/aws-sdk-go-v2/issues/794)
-* Amazon S3 Upload and Download manager - [#802](https://github.com/aws/aws-sdk-go-v2/pull/802)
-* Amazon DynamoDB's AttributeValue marshaler, and Expression package - [#790](https://github.com/aws/aws-sdk-go-v2/issues/790)
-* Debug Logging - [#594](https://github.com/aws/aws-sdk-go-v2/issues/594)
-
-We expect additional breaking changes to the v2 preview SDK in the coming releases. We expect these changes to focus on organizational, naming, and hardening the SDK's design for future feature capabilities after it is released for general availability.
-
-
-#### Relocated Packages
-
-In this release packages within the SDK were relocated, and in some cases those packages were converted to Go modules. The following is a list of packages have were relocated.
-
-* `github.com/aws/aws-sdk-go-v2/aws/external` => `github.com/aws/aws-sdk-go-v2/config` module
-* `github.com/aws/aws-sdk-go-v2/aws/ec2metadata` => `github.com/aws/aws-sdk-go-v2/ec2imds` module
-
-The `github.com/aws/aws-sdk-go-v2/credentials` module contains refactored credentials providers.
-
-* `github.com/aws/aws-sdk-go-v2/ec2rolecreds` => `github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds`
-* `github.com/aws/aws-sdk-go-v2/endpointcreds` => `github.com/aws/aws-sdk-go-v2/credentials/endpointcreds`
-* `github.com/aws/aws-sdk-go-v2/processcreds` => `github.com/aws/aws-sdk-go-v2/credentials/processcreds`
-* `github.com/aws/aws-sdk-go-v2/stscreds` => `github.com/aws/aws-sdk-go-v2/credentials/stscreds`
-
-
-#### Modularization
-
-New modules were added to the v2 preview SDK to allow the components to be versioned independently from each other. This allows your application to depend on specific versions of an API client module, and take discrete updates from the SDK core and other API client modules as desired.
-
-* [github.com/aws/aws-sdk-go-v2/config](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/config)
-* [github.com/aws/aws-sdk-go-v2/credentials](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/credentials)
-* Module for each API client, e.g. [github.com/aws/aws-sdk-go-v2/service/s3](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3)
-
-
-#### API Clients
-
-The following is a list of the major changes to the API client modules
-
-* Removed paginators: we plan to add these back once they are implemented to integrate with the SDK's new API client design.
-* Removed waiters: we need to further investigate how the V2 SDK should expose waiters, and how their behavior should be modeled.
-* API Clients are now Go modules. When migrating to the v2 preview SDK `v0.25.0`, you'll need to add the API client's module to your application's go.mod file.
-* API parameter nested types have been moved to a `types` package within the API client's module, e.g. `github.com/aws/aws-sdk-go-v2/service/s3/types` These types were moved to improve documentation and discovery of the API client, operation, and input/output types. For example Amazon S3's ListObject's operation [ListObjectOutput.Contents](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3/#ListObjectsOutput) input parameter is a slice of [types.Object](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3/types#Object).
-* The client operation method has been renamed, removing the `Request` suffix. The method now invokes the operation instead of constructing a request, which needed to be invoked separately. The operation methods were also expanded to include functional options for providing operation specific configuration, such as modifying the request pipeline.
-
-```go
-result, err := client.Scan(context.TODO(), &dynamodb.ScanInput{
- TableName: aws.String("exampleTable"),
-}, func(o *Options) {
- // Limit operation calls to only 1 attempt.
- o.Retryer = retry.AddWithMaxAttempts(o.Retryer, 1)
-})
-```
-
-
-#### Configuration
-
-In addition to the `github.com/aws/aws-sdk-go-v2/aws/external` package being made a module at `github.com/aws/aws-sdk-go-v2/config`, the `LoadDefaultAWSConfig` function was renamed to `LoadDefaultConfig`.
-
-The `github.com/aws/aws-sdk-go-v2/aws/defaults` package has been removed. Its components have been migrated to the `github.com/aws/aws-sdk-go-v2/aws` package, and `github.com/aws/aws-sdk-go-v2/config` module.
-
-
-#### Error Handling
-
-The `github.com/aws/aws-sdk-go-v2/aws/awserr` package was removed as a part of the SDK error handling refactor. The SDK now uses typed errors built around [Go v1.13](https://golang.org/doc/go1.13#error_wrapping)'s [errors.As](https://pkg.go.dev/errors#As) and [errors.Unwrap](https://pkg.go.dev/errors#Unwrap) features. All SDK error types that wrap other errors implement the `Unwrap` method. Generic v2 preview SDK errors created with `fmt.Errorf` use `%w` to wrap the underlying error.
-
-The SDK API clients now include generated public error types for errors modeled for an API. The SDK will automatically deserialize the error response from the API into the appropriate error type. Your application should use `errors.As` to check if the returned error matches one it is interested in. Your application can also use the generic interface [smithy.APIError](https://pkg.go.dev/github.com/awslabs/smithy-go/#APIError) to test if the API client's operation method returned an API error, but not check against a specific error.
-
-API client errors returned to the caller will use error wrapping to layer the error values. This allows underlying error types to be specific to their use case, and the SDK's more generic error types to wrap the underlying error.
-
-For example, if an [Amazon DynamoDB](https://aws.amazon.com/dynamodb/) [Scan](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/dynamodb#Scan) operation call cannot find the `TableName` requested, the error returned will contain [dynamodb.ResourceNotFoundException](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/dynamodb/types#ResourceNotFoundException). The SDK will return this error value wrapped in a couple layers, with each layer adding additional contextual information such as [ResponseError](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/transport/http#ResponseError) for AWS HTTP response error metadata , and [smithy.OperationError](https://pkg.go.dev/github.com/awslabs/smithy-go/#OperationError) for API operation call metadata.
-
-```go
-result, err := client.Scan(context.TODO(), params)
-if err != nil {
- // To get a specific API error
- var notFoundErr *types.ResourceNotFoundException
- if errors.As(err, ¬FoundErr) {
- log.Printf("scan failed because the table was not found, %v",
- notFoundErr.ErrorMessage())
- }
-
- // To get any API error
- var apiErr smithy.APIError
- if errors.As(err, &apiErr) {
- log.Printf("scan failed because of an API error, Code: %v, Message: %v",
- apiErr.ErrorCode(), apiErr.ErrorMessage())
- }
-
- // To get the AWS response metadata, such as RequestID
- var respErr *awshttp.ResponseError // Using import alias "awshttp" for package github.com/aws/aws-sdk-go-v2/aws/transport/http
- if errors.As(err, &respErr) {
- log.Printf("scan failed with HTTP status code %v, Request ID %v and error %v",
- respErr.HTTPStatusCode(), respErr.ServiceRequestID(), respErr)
- }
-
- return err
-}
-```
-
-Logging an error value will include information from each wrapped error. For example, the following is a mock error logged for a Scan operation call that failed because the table was not found.
-
-> 2020/10/15 16:03:37 operation error DynamoDB: Scan, https response error StatusCode: 400, RequestID: ABCREQUESTID123, ResourceNotFoundException: Requested resource not found
-
-
-#### Endpoints
-
-The `github.com/aws/aws-sdk-go-v2/aws/endpoints` has been removed from the SDK, along with all exported endpoint definitions and iteration behavior. Each generated API client now includes its own endpoint definition internally to the module.
-
-API clients can optionally be configured with a generic [aws.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#EndpointResolver) via the [aws.Config.EndpointResolver](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws#Config.EndpointResolver). If the API client is not configured with a custom endpoint resolver it will defer to the endpoint resolver the client module was generated with.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md b/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md
deleted file mode 100644
index 3b6446687..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,4 +0,0 @@
-## Code of Conduct
-This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
-For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
-opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md b/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md
deleted file mode 100644
index c2fc3b8f5..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/CONTRIBUTING.md
+++ /dev/null
@@ -1,178 +0,0 @@
-# Contributing to the AWS SDK for Go
-
-Thank you for your interest in contributing to the AWS SDK for Go!
-We work hard to provide a high-quality and useful SDK, and we greatly value
-feedback and contributions from our community. Whether it's a bug report,
-new feature, correction, or additional documentation, we welcome your issues
-and pull requests. Please read through this document before submitting any
-[issues] or [pull requests][pr] to ensure we have all the necessary information to
-effectively respond to your bug report or contribution.
-
-Jump To:
-
-* [Bug Reports](#bug-reports)
-* [Feature Requests](#feature-requests)
-* [Code Contributions](#code-contributions)
-
-
-## How to contribute
-
-*Before you send us a pull request, please be sure that:*
-
-1. You're working from the latest source on the master branch.
-2. You check existing open, and recently closed, pull requests to be sure
- that someone else hasn't already addressed the problem.
-3. You create an issue before working on a contribution that will take a
- significant amount of your time.
-
-*Creating a Pull Request*
-
-1. Fork the repository.
-2. In your fork, make your change in a branch that's based on this repo's master branch.
-3. Commit the change to your fork, using a clear and descriptive commit message.
-4. Create a pull request, answering any questions in the pull request form.
-
-For contributions that will take a significant amount of time, open a new
-issue to pitch your idea before you get started. Explain the problem and
-describe the content you want to see added to the documentation. Let us know
-if you'll write it yourself or if you'd like us to help. We'll discuss your
-proposal with you and let you know whether we're likely to accept it.
-
-## Bug Reports
-
-You can file bug reports against the SDK on the [GitHub issues][issues] page.
-
-If you are filing a report for a bug or regression in the SDK, it's extremely
-helpful to provide as much information as possible when opening the original
-issue. This helps us reproduce and investigate the possible bug without having
-to wait for this extra information to be provided. Please read the following
-guidelines prior to filing a bug report.
-
-1. Search through existing [issues][] to ensure that your specific issue has
- not yet been reported. If it is a common issue, it is likely there is
- already a bug report for your problem.
-
-2. Ensure that you have tested the latest version of the SDK. Although you
- may have an issue against an older version of the SDK, we cannot provide
- bug fixes for old versions. It's also possible that the bug may have been
- fixed in the latest release.
-
-3. Provide as much information about your environment, SDK version, and
- relevant dependencies as possible. For example, let us know what version
- of Go you are using, which and version of the operating system, and the
- the environment your code is running in. e.g Container.
-
-4. Provide a minimal test case that reproduces your issue or any error
- information you related to your problem. We can provide feedback much
- more quickly if we know what operations you are calling in the SDK. If
- you cannot provide a full test case, provide as much code as you can
- to help us diagnose the problem. Any relevant information should be provided
- as well, like whether this is a persistent issue, or if it only occurs
- some of the time.
-
-## Feature Requests
-
-Open an [issue][issues] with the following:
-
-* A short, descriptive title. Ideally, other community members should be able
- to get a good idea of the feature just from reading the title.
-* A detailed description of the the proposed feature.
- * Why it should be added to the SDK.
- * If possible, example code to illustrate how it should work.
-* Use Markdown to make the request easier to read;
-* If you intend to implement this feature, indicate that you'd like to the issue to be assigned to you.
-
-## Code Contributions
-
-We are always happy to receive code and documentation contributions to the SDK.
-Please be aware of the following notes prior to opening a pull request:
-
-1. The SDK is released under the [Apache license][license]. Any code you submit
- will be released under that license. For substantial contributions, we may
- ask you to sign a [Contributor License Agreement (CLA)][cla].
-
-2. If you would like to implement support for a significant feature that is not
- yet available in the SDK, please talk to us beforehand to avoid any
- duplication of effort.
-
-3. Wherever possible, pull requests should contain tests as appropriate.
- Bugfixes should contain tests that exercise the corrected behavior (i.e., the
- test should fail without the bugfix and pass with it), and new features
- should be accompanied by tests exercising the feature.
-
-4. Pull requests that contain failing tests will not be merged until the test
- failures are addressed. Pull requests that cause a significant drop in the
- SDK's test coverage percentage are unlikely to be merged until tests have
- been added.
-
-5. The JSON files under the SDK's `models` folder are sourced from outside the SDK.
- Such as `models/apis/ec2/2016-11-15/api.json`. We will not accept pull requests
- directly on these models. If you discover an issue with the models please
- create a [GitHub issue][issues] describing the issue.
-
-### Testing
-
-To run the tests locally, running the `make unit` command will `go get` the
-SDK's testing dependencies, and run vet, link and unit tests for the SDK.
-
-```
-make unit
-```
-
-Standard go testing functionality is supported as well. To test SDK code that
-is tagged with `codegen` you'll need to set the build tag in the go test
-command. The `make unit` command will do this automatically.
-
-```
-go test -tags codegen ./private/...
-```
-
-See the `Makefile` for additional testing tags that can be used in testing.
-
-To test on multiple platform the SDK includes several DockerFiles under the
-`awstesting/sandbox` folder, and associated make recipes to to execute
-unit testing within environments configured for specific Go versions.
-
-```
-make sandbox-test-go18
-```
-
-To run all sandbox environments use the following make recipe
-
-```
-# Optionally update the Go tip that will be used during the batch testing
-make update-aws-golang-tip
-
-# Run all SDK tests for supported Go versions in sandboxes
-make sandbox-test
-```
-
-In addition the sandbox environment include make recipes for interactive modes
-so you can run command within the Docker container and context of the SDK.
-
-```
-make sandbox-go18
-```
-
-### Changelog Documents
-
-You can see all release changes in the `CHANGELOG.md` file at the root of the
-repository. The release notes added to this file will contain service client
-updates, and major SDK changes. When submitting a pull request please include an entry in `CHANGELOG_PENDING.md` under the appropriate changelog type so your changelog entry is included on the following release.
-
-#### Changelog Types
-
-* `SDK Features` - For major additive features, internal changes that have
-outward impact, or updates to the SDK foundations. This will result in a minor
-version change.
-* `SDK Enhancements` - For minor additive features or incremental sized changes.
-This will result in a patch version change.
-* `SDK Bugs` - For minor changes that resolve an issue. This will result in a
-patch version change.
-
-[issues]: https://github.com/aws/aws-sdk-go/issues
-[pr]: https://github.com/aws/aws-sdk-go/pulls
-[license]: http://aws.amazon.com/apache2.0/
-[cla]: http://en.wikipedia.org/wiki/Contributor_License_Agreement
-[releasenotes]: https://github.com/aws/aws-sdk-go/releases
-
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md b/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md
deleted file mode 100644
index 8490c7d67..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/DESIGN.md
+++ /dev/null
@@ -1,15 +0,0 @@
-Open Discussions
----
-The following issues are currently open for community feedback.
-All discourse must adhere to the [Code of Conduct] policy.
-
-* [Refactoring API Client Paginators](https://github.com/aws/aws-sdk-go-v2/issues/439)
-* [Refactoring API Client Waiters](https://github.com/aws/aws-sdk-go-v2/issues/442)
-* [Refactoring API Client Enums and Types to Discrete Packages](https://github.com/aws/aws-sdk-go-v2/issues/445)
-* [SDK Modularization](https://github.com/aws/aws-sdk-go-v2/issues/444)
-
-Past Discussions
----
-The issues listed here are for documentation purposes, and is used to capture issues and their associated discussions.
-
-[Code of Conduct]: https://github.com/aws/aws-sdk-go-v2/blob/master/CODE_OF_CONDUCT.md
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/Makefile b/vendor/github.com/aws/aws-sdk-go-v2/Makefile
deleted file mode 100644
index 4b761e771..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/Makefile
+++ /dev/null
@@ -1,519 +0,0 @@
-# Lint rules to ignore
-LINTIGNORESINGLEFIGHT='internal/sync/singleflight/singleflight.go:.+error should be the last type'
-LINT_IGNORE_S3MANAGER_INPUT='feature/s3/manager/upload.go:.+struct field SSEKMSKeyId should be SSEKMSKeyID'
-
-UNIT_TEST_TAGS=
-BUILD_TAGS=-tags "example,codegen,integration,ec2env,perftest"
-
-SMITHY_GO_SRC ?= $(shell pwd)/../smithy-go
-
-SDK_MIN_GO_VERSION ?= 1.15
-
-EACHMODULE_FAILFAST ?= true
-EACHMODULE_FAILFAST_FLAG=-fail-fast=${EACHMODULE_FAILFAST}
-
-EACHMODULE_CONCURRENCY ?= 1
-EACHMODULE_CONCURRENCY_FLAG=-c ${EACHMODULE_CONCURRENCY}
-
-EACHMODULE_SKIP ?=
-EACHMODULE_SKIP_FLAG=-skip="${EACHMODULE_SKIP}"
-
-EACHMODULE_FLAGS=${EACHMODULE_CONCURRENCY_FLAG} ${EACHMODULE_FAILFAST_FLAG} ${EACHMODULE_SKIP_FLAG}
-
-# SDK's Core and client packages that are compatible with Go 1.9+.
-SDK_CORE_PKGS=./aws/... ./internal/...
-SDK_CLIENT_PKGS=./service/...
-SDK_COMPA_PKGS=${SDK_CORE_PKGS} ${SDK_CLIENT_PKGS}
-
-# SDK additional packages that are used for development of the SDK.
-SDK_EXAMPLES_PKGS=
-SDK_ALL_PKGS=${SDK_COMPA_PKGS} ${SDK_EXAMPLES_PKGS}
-
-RUN_NONE=-run NONE
-RUN_INTEG=-run '^TestInteg_'
-
-CODEGEN_RESOURCES_PATH=$(shell pwd)/codegen/smithy-aws-go-codegen/src/main/resources/software/amazon/smithy/aws/go/codegen
-CODEGEN_API_MODELS_PATH=$(shell pwd)/codegen/sdk-codegen/aws-models
-ENDPOINTS_JSON=${CODEGEN_RESOURCES_PATH}/endpoints.json
-ENDPOINT_PREFIX_JSON=${CODEGEN_RESOURCES_PATH}/endpoint-prefix.json
-
-LICENSE_FILE=$(shell pwd)/LICENSE.txt
-
-SMITHY_GO_VERSION ?=
-PRE_RELEASE_VERSION ?=
-RELEASE_MANIFEST_FILE ?=
-RELEASE_CHGLOG_DESC_FILE ?=
-
-REPOTOOLS_VERSION ?= latest
-REPOTOOLS_MODULE = github.com/awslabs/aws-go-multi-module-repository-tools
-REPOTOOLS_CMD_ANNOTATE_STABLE_GEN = ${REPOTOOLS_MODULE}/cmd/annotatestablegen@${REPOTOOLS_VERSION}
-REPOTOOLS_CMD_MAKE_RELATIVE = ${REPOTOOLS_MODULE}/cmd/makerelative@${REPOTOOLS_VERSION}
-REPOTOOLS_CMD_CALCULATE_RELEASE = ${REPOTOOLS_MODULE}/cmd/calculaterelease@${REPOTOOLS_VERSION}
-REPOTOOLS_CMD_UPDATE_REQUIRES = ${REPOTOOLS_MODULE}/cmd/updaterequires@${REPOTOOLS_VERSION}
-REPOTOOLS_CMD_UPDATE_MODULE_METADATA = ${REPOTOOLS_MODULE}/cmd/updatemodulemeta@${REPOTOOLS_VERSION}
-REPOTOOLS_CMD_GENERATE_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/generatechangelog@${REPOTOOLS_VERSION}
-REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
-REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION}
-REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY = ${REPOTOOLS_MODULE}/cmd/editmoduledependency@${REPOTOOLS_VERSION}
-
-REPOTOOLS_CALCULATE_RELEASE_VERBOSE ?= false
-REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG=-v=${REPOTOOLS_CALCULATE_RELEASE_VERBOSE}
-
-REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS ?=
-
-ifneq ($(PRE_RELEASE_VERSION),)
- REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION}
-endif
-
-.PHONY: all
-all: generate unit
-
-###################
-# Code Generation #
-###################
-.PHONY: generate smithy-generate smithy-build smithy-build-% smithy-clean smithy-go-publish-local format \
-gen-config-asserts gen-repo-mod-replace gen-mod-replace-smithy gen-mod-dropreplace-smithy-% gen-aws-ptrs tidy-modules-% \
-add-module-license-files sync-models sync-endpoints-model sync-endpoints.json clone-v1-models gen-internal-codegen \
-sync-api-models copy-attributevalue-feature min-go-version-% update-requires smithy-annotate-stable \
-update-module-metadata download-modules-%
-
-generate: smithy-generate update-requires gen-repo-mod-replace update-module-metadata smithy-annotate-stable \
-gen-config-asserts gen-internal-codegen copy-attributevalue-feature gen-mod-dropreplace-smithy-. min-go-version-. \
-tidy-modules-. add-module-license-files gen-aws-ptrs format
-
-smithy-generate:
- cd codegen && ./gradlew clean build -Plog-tests && ./gradlew clean
-
-smithy-build:
- cd codegen && ./gradlew clean build -Plog-tests
-
-smithy-build-%:
- @# smithy-build- command that uses the pattern to define build filter that
- @# the smithy API model service id starts with. Strips off the
- @# "smithy-build-".
- @#
- @# e.g. smithy-build-com.amazonaws.rds
- @# e.g. smithy-build-com.amazonaws.rds#AmazonRDSv19
- cd codegen && \
- SMITHY_GO_BUILD_API="$(subst smithy-build-,,$@)" ./gradlew clean build -Plog-tests
-
-smithy-annotate-stable:
- go run ${REPOTOOLS_CMD_ANNOTATE_STABLE_GEN}
-
-smithy-clean:
- cd codegen && ./gradlew clean
-
-smithy-go-publish-local:
- rm -rf /tmp/smithy-go-local
- git clone https://github.com/aws/smithy-go /tmp/smithy-go-local
- make -C /tmp/smithy-go-local smithy-clean smithy-publish-local
-
-format:
- gofmt -w -s .
-
-gen-config-asserts:
- @echo "Generating SDK config package implementor assertions"
- cd config \
- && go mod tidy \
- && go generate
-
-gen-internal-codegen:
- @echo "Generating internal/codegen"
- cd internal/codegen \
- && go generate
-
-gen-repo-mod-replace:
- @echo "Generating go.mod replace for repo modules"
- go run ${REPOTOOLS_CMD_MAKE_RELATIVE}
-
-gen-mod-replace-smithy-%:
- @# gen-mod-replace-smithy- command that uses the pattern to define build filter that
- @# for modules to add replace to. Strips off the "gen-mod-replace-smithy-".
- @#
- @# SMITHY_GO_SRC environment variable is the path to add replace to
- @#
- @# e.g. gen-mod-replace-smithy-service_ssooidc
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst gen-mod-replace-smithy-,,$@)) ${EACHMODULE_FLAGS} \
- "go mod edit -replace github.com/aws/smithy-go=${SMITHY_GO_SRC}"
-
-gen-mod-dropreplace-smithy-%:
- @# gen-mod-dropreplace-smithy- command that uses the pattern to define build filter that
- @# for modules to add replace to. Strips off the "gen-mod-dropreplace-smithy-".
- @#
- @# e.g. gen-mod-dropreplace-smithy-service_ssooidc
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst gen-mod-dropreplace-smithy-,,$@)) ${EACHMODULE_FLAGS} \
- "go mod edit -dropreplace github.com/aws/smithy-go"
-
-gen-aws-ptrs:
- cd aws && go generate
-
-tidy-modules-%:
- @# tidy command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "tidy-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. tidy-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst tidy-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go mod tidy"
-
-download-modules-%:
- @# download command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "download-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. download-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst download-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go mod download all"
-
-add-module-license-files:
- cd internal/repotools/cmd/eachmodule && \
- go run . -skip-root \
- "cp $(LICENSE_FILE) ."
-
-sync-models: sync-endpoints-model sync-api-models
-
-sync-endpoints-model: sync-endpoints.json
-
-sync-endpoints.json:
- [[ ! -z "${ENDPOINTS_MODEL}" ]] && cp ${ENDPOINTS_MODEL} ${ENDPOINTS_JSON} || echo "ENDPOINTS_MODEL not set, must not be empty"
-
-clone-v1-models:
- rm -rf /tmp/aws-sdk-go-model-sync
- git clone https://github.com/aws/aws-sdk-go.git --depth 1 /tmp/aws-sdk-go-model-sync
-
-sync-api-models:
- cd internal/repotools/cmd/syncAPIModels && \
- go run . \
- -m ${API_MODELS} \
- -o ${CODEGEN_API_MODELS_PATH}
-
-copy-attributevalue-feature:
- cd ./feature/dynamodbstreams/attributevalue && \
- find . -name "*.go" | grep -v "doc.go" | xargs -I % rm % && \
- find ../../dynamodb/attributevalue -name "*.go" | grep -v "doc.go" | xargs -I % cp % . && \
- ls *.go | grep -v "convert.go" | grep -v "doc.go" | \
- xargs -I % sed -i.bk -E 's:github.com/aws/aws-sdk-go-v2/(service|feature)/dynamodb:github.com/aws/aws-sdk-go-v2/\1/dynamodbstreams:g' % && \
- ls *.go | grep -v "convert.go" | grep -v "doc.go" | \
- xargs -I % sed -i.bk 's:DynamoDB:DynamoDBStreams:g' % && \
- ls *.go | grep -v "doc.go" | \
- xargs -I % sed -i.bk 's:dynamodb\.:dynamodbstreams.:g' % && \
- sed -i.bk 's:streams\.:ddbtypes.:g' "convert.go" && \
- sed -i.bk 's:ddb\.:streams.:g' "convert.go" && \
- sed -i.bk 's:ddbtypes\.:ddb.:g' "convert.go" &&\
- sed -i.bk 's:Streams::g' "convert.go" && \
- rm -rf ./*.bk && \
- go mod tidy && \
- gofmt -w -s . && \
- go test .
-
-min-go-version-%:
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst min-go-version-,,$@)) ${EACHMODULE_FLAGS} \
- "go mod edit -go=${SDK_MIN_GO_VERSION}"
-
-update-requires:
- go run ${REPOTOOLS_CMD_UPDATE_REQUIRES}
-
-update-module-metadata:
- go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA}
-
-################
-# Unit Testing #
-################
-.PHONY: unit unit-race unit-test unit-race-test unit-race-modules-% unit-modules-% build build-modules-% \
-go-build-modules-% test test-race-modules-% test-modules-% cachedep cachedep-modules-% api-diff-modules-%
-
-unit: lint unit-modules-.
-unit-race: lint unit-race-modules-.
-
-unit-test: test-modules-.
-unit-race-test: test-race-modules-.
-
-unit-race-modules-%:
- @# unit command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "unit-race-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. unit-race-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst unit-race-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go vet ${BUILD_TAGS} --all ./..." \
- "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \
- "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..."
-
-
-unit-modules-%:
- @# unit command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "unit-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. unit-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst unit-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go vet ${BUILD_TAGS} --all ./..." \
- "go test ${BUILD_TAGS} ${RUN_NONE} ./..." \
- "go test -timeout=1m ${UNIT_TEST_TAGS} ./..."
-
-build: build-modules-.
-
-build-modules-%:
- @# build command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "build-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. build-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst build-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go test ${BUILD_TAGS} ${RUN_NONE} ./..."
-
-go-build-modules-%:
- @# build command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "build-modules-" and
- @# replaces all "_" with "/".
- @#
- @# Validates that all modules in the repo have buildable Go files.
- @#
- @# e.g. go-build-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst go-build-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go build ${BUILD_TAGS} ./..."
-
-test: test-modules-.
-
-test-race-modules-%:
- @# Test command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "test-race-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. test-race-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst test-race-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./..."
-
-test-modules-%:
- @# Test command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "test-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. test-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst test-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go test -timeout=1m ${UNIT_TEST_TAGS} ./..."
-
-cachedep: cachedep-modules-.
-
-cachedep-modules-%:
- @# build command that uses the pattern to define the root path that the
- @# module caching will start from. Strips off the "cachedep-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. cachedep-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst cachedep-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go mod download"
-
-api-diff-modules-%:
- @# Command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "api-diff-modules-" and
- @# replaces all "_" with "/".
- @#
- @# Requires golang.org/x/exp/cmd/gorelease to be available in the GOPATH.
- @#
- @# e.g. api-diff-modules-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst api-diff-modules-,,$@)) \
- -fail-fast=true \
- -c 1 \
- -skip="internal/repotools" \
- "$$(go env GOPATH)/bin/gorelease"
-
-##############
-# CI Testing #
-##############
-.PHONY: ci-test ci-test-no-generate ci-test-generate-validate
-
-ci-test: generate unit-race ci-test-generate-validate
-ci-test-no-generate: unit-race
-
-ci-test-generate-validate:
- @echo "CI test validate no generated code changes"
- git update-index --assume-unchanged go.mod go.sum
- git add . -A
- gitstatus=`git diff --cached --ignore-space-change`; \
- echo "$$gitstatus"; \
- if [ "$$gitstatus" != "" ] && [ "$$gitstatus" != "skipping validation" ]; then echo "$$gitstatus"; exit 1; fi
- git update-index --no-assume-unchanged go.mod go.sum
-
-ci-lint: ci-lint-.
-
-ci-lint-%:
- @# Run golangci-lint command that uses the pattern to define the root path that the
- @# module check will start from. Strips off the "ci-lint-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. ci-lint-internal_protocoltest
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst ci-lint-,,$@)) \
- -fail-fast=false \
- -c 1 \
- -skip="internal/repotools" \
- "golangci-lint run"
-
-ci-lint-install:
- @# Installs golangci-lint at GoPATH.
- @# This should be used to run golangci-lint locally.
- @#
- go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
-
-#######################
-# Integration Testing #
-#######################
-.PHONY: integration integ-modules-% cleanup-integ-buckets
-
-integration: integ-modules-service
-
-integ-modules-%:
- @# integration command that uses the pattern to define the root path that
- @# the module testing will start from. Strips off the "integ-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. test-modules-service_dynamodb
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst integ-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go test -timeout=10m -tags "integration" -v ${RUN_INTEG} -count 1 ./..."
-
-cleanup-integ-buckets:
- @echo "Cleaning up SDK integration resources"
- go run -tags "integration" ./internal/awstesting/cmd/bucket_cleanup/main.go "aws-sdk-go-integration"
-
-##############
-# Benchmarks #
-##############
-.PHONY: bench bench-modules-%
-
-bench: bench-modules-.
-
-bench-modules-%:
- @# benchmark command that uses the pattern to define the root path that
- @# the module testing will start from. Strips off the "bench-modules-" and
- @# replaces all "_" with "/".
- @#
- @# e.g. bench-modules-service_dynamodb
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst bench-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go test -timeout=10m -bench . --benchmem ${BUILD_TAGS} ${RUN_NONE} ./..."
-
-
-#####################
-# Release Process #
-#####################
-.PHONY: preview-release pre-release-validation release
-
-ls-changes:
- go run ${REPOTOOLS_CMD_CHANGELOG} ls
-
-preview-release:
- go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS}
-
-pre-release-validation:
- @if [[ -z "${RELEASE_MANIFEST_FILE}" ]]; then \
- echo "RELEASE_MANIFEST_FILE is required to specify the file to write the release manifest" && false; \
- fi
- @if [[ -z "${RELEASE_CHGLOG_DESC_FILE}" ]]; then \
- echo "RELEASE_CHGLOG_DESC_FILE is required to specify the file to write the release notes" && false; \
- fi
-
-release: pre-release-validation
- go run ${REPOTOOLS_CMD_CALCULATE_RELEASE} -o ${RELEASE_MANIFEST_FILE} ${REPOTOOLS_CALCULATE_RELEASE_VERBOSE_FLAG} ${REPOTOOLS_CALCULATE_RELEASE_ADDITIONAL_ARGS}
- go run ${REPOTOOLS_CMD_UPDATE_REQUIRES} -release ${RELEASE_MANIFEST_FILE}
- go run ${REPOTOOLS_CMD_UPDATE_MODULE_METADATA} -release ${RELEASE_MANIFEST_FILE}
- go run ${REPOTOOLS_CMD_GENERATE_CHANGELOG} -release ${RELEASE_MANIFEST_FILE} -o ${RELEASE_CHGLOG_DESC_FILE}
- go run ${REPOTOOLS_CMD_CHANGELOG} rm -all
- go run ${REPOTOOLS_CMD_TAG_RELEASE} -release ${RELEASE_MANIFEST_FILE}
-
-##############
-# Repo Tools #
-##############
-.PHONY: install-repotools
-
-install-repotools:
- go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
-
-set-smithy-go-version:
- @if [[ -z "${SMITHY_GO_VERSION}" ]]; then \
- echo "SMITHY_GO_VERSION is required to update SDK's smithy-go module dependency version" && false; \
- fi
- go run ${REPOTOOLS_CMD_EDIT_MODULE_DEPENDENCY} -s "github.com/aws/smithy-go" -v "${SMITHY_GO_VERSION}"
-
-##################
-# Linting/Verify #
-##################
-.PHONY: verify lint vet vet-modules-% sdkv1check
-
-verify: lint vet sdkv1check
-
-lint:
- @echo "go lint SDK and vendor packages"
- @lint=`golint ./...`; \
- dolint=`echo "$$lint" | grep -E -v \
- -e ${LINT_IGNORE_S3MANAGER_INPUT} \
- -e ${LINTIGNORESINGLEFIGHT}`; \
- echo "$$dolint"; \
- if [ "$$dolint" != "" ]; then exit 1; fi
-
-vet: vet-modules-.
-
-vet-modules-%:
- cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst vet-modules-,,$@)) ${EACHMODULE_FLAGS} \
- "go vet ${BUILD_TAGS} --all ./..."
-
-sdkv1check:
- @echo "Checking for usage of AWS SDK for Go v1"
- @sdkv1usage=`go list -test -f '''{{ if not .Standard }}{{ range $$_, $$name := .Imports }} * {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ range $$_, $$name := .TestImports }} *: {{ $$.ImportPath }} -> {{ $$name }}{{ print "\n" }}{{ end }}{{ end}}''' ./... | sort -u | grep '''/aws-sdk-go/'''`; \
- echo "$$sdkv1usage"; \
- if [ "$$sdkv1usage" != "" ]; then exit 1; fi
-
-list-deps: list-deps-.
-
-list-deps-%:
- @# command that uses the pattern to define the root path that the
- @# module testing will start from. Strips off the "list-deps-" and
- @# replaces all "_" with "/".
- @#
- @# Trim output to only include stdout for list of dependencies only.
- @# make list-deps 2>&-
- @#
- @# e.g. list-deps-internal_protocoltest
- @cd ./internal/repotools/cmd/eachmodule \
- && go run . -p $(subst _,/,$(subst list-deps-,,$@)) ${EACHMODULE_FLAGS} \
- "go list -m all | grep -v 'github.com/aws/aws-sdk-go-v2'" | sort -u
-
-###################
-# Sandbox Testing #
-###################
-.PHONY: sandbox-tests sandbox-build-% sandbox-run-% sandbox-test-% update-aws-golang-tip
-
-sandbox-tests: sandbox-test-go1.15 sandbox-test-go1.16 sandbox-test-go1.17 sandbox-test-gotip
-
-sandbox-build-%:
- @# sandbox-build-go1.17
- @# sandbox-build-gotip
- docker build \
- -f ./internal/awstesting/sandbox/Dockerfile.test.$(subst sandbox-build-,,$@) \
- -t "aws-sdk-go-$(subst sandbox-build-,,$@)" .
-sandbox-run-%: sandbox-build-%
- @# sandbox-run-go1.17
- @# sandbox-run-gotip
- docker run -i -t "aws-sdk-go-$(subst sandbox-run-,,$@)" bash
-sandbox-test-%: sandbox-build-%
- @# sandbox-test-go1.17
- @# sandbox-test-gotip
- docker run -t "aws-sdk-go-$(subst sandbox-test-,,$@)"
-
-update-aws-golang-tip:
- docker build --no-cache=true -f ./internal/awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
index 5f14d1162..899129ecc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
+++ b/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
@@ -1,3 +1,3 @@
AWS SDK for Go
-Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/README.md b/vendor/github.com/aws/aws-sdk-go-v2/README.md
deleted file mode 100644
index cda17f77d..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/README.md
+++ /dev/null
@@ -1,157 +0,0 @@
-# AWS SDK for Go v2
-
-[](https://github.com/aws/aws-sdk-go-v2/actions/workflows/go.yml)[](https://github.com/aws/aws-sdk-go-v2/actions/workflows/codegen.yml) [](https://aws.github.io/aws-sdk-go-v2/docs/) [](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) [](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) [](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt)
-
-
-`aws-sdk-go-v2` is the v2 AWS SDK for the Go programming language.
-
-The v2 SDK requires a minimum version of `Go 1.15`.
-
-Checkout out the [release notes](https://github.com/aws/aws-sdk-go-v2/blob/main/CHANGELOG.md) for information about the latest bug
-fixes, updates, and features added to the SDK.
-
-Jump To:
-* [Getting Started](#getting-started)
-* [Getting Help](#getting-help)
-* [Contributing](#feedback-and-contributing)
-* [More Resources](#resources)
-
-## Maintenance and support for SDK major versions
-
-For information about maintenance and support for SDK major versions and their underlying dependencies, see the
-following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide:
-
-* [AWS SDKs and Tools Maintenance Policy](https://docs.aws.amazon.com/credref/latest/refdocs/maint-policy.html)
-* [AWS SDKs and Tools Version Support Matrix](https://docs.aws.amazon.com/credref/latest/refdocs/version-support-matrix.html)
-
-## Getting started
-To get started working with the SDK setup your project for Go modules, and retrieve the SDK dependencies with `go get`.
-This example shows how you can use the v2 SDK to make an API request using the SDK's [Amazon DynamoDB] client.
-
-###### Initialize Project
-```sh
-$ mkdir ~/helloaws
-$ cd ~/helloaws
-$ go mod init helloaws
-```
-###### Add SDK Dependencies
-```sh
-$ go get github.com/aws/aws-sdk-go-v2/aws
-$ go get github.com/aws/aws-sdk-go-v2/config
-$ go get github.com/aws/aws-sdk-go-v2/service/dynamodb
-```
-
-###### Write Code
-In your preferred editor add the following content to `main.go`
-
-```go
-package main
-
-import (
- "context"
- "fmt"
- "log"
-
- "github.com/aws/aws-sdk-go-v2/aws"
- "github.com/aws/aws-sdk-go-v2/config"
- "github.com/aws/aws-sdk-go-v2/service/dynamodb"
-)
-
-func main() {
- // Using the SDK's default configuration, loading additional config
- // and credentials values from the environment variables, shared
- // credentials, and shared configuration files
- cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2"))
- if err != nil {
- log.Fatalf("unable to load SDK config, %v", err)
- }
-
- // Using the Config value, create the DynamoDB client
- svc := dynamodb.NewFromConfig(cfg)
-
- // Build the request with its input parameters
- resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{
- Limit: aws.Int32(5),
- })
- if err != nil {
- log.Fatalf("failed to list tables, %v", err)
- }
-
- fmt.Println("Tables:")
- for _, tableName := range resp.TableNames {
- fmt.Println(tableName)
- }
-}
-```
-
-###### Compile and Execute
-```sh
-$ go run .
-Table:
-tableOne
-tableTwo
-```
-
-## Getting Help
-
-Please use these community resources for getting help. We use the GitHub issues
-for tracking bugs and feature requests.
-
-* Ask a question on [StackOverflow](http://stackoverflow.com/) and tag it with the [`aws-sdk-go`](http://stackoverflow.com/questions/tagged/aws-sdk-go) tag.
-* Open a support ticket with [AWS Support](http://docs.aws.amazon.com/awssupport/latest/user/getting-started.html).
-* If you think you may have found a bug, please open an [issue](https://github.com/aws/aws-sdk-go-v2/issues/new/choose).
-
-This SDK implements AWS service APIs. For general issues regarding the AWS services and their limitations, you may also take a look at the [Amazon Web Services Discussion Forums](https://forums.aws.amazon.com/).
-
-### Opening Issues
-
-If you encounter a bug with the AWS SDK for Go we would like to hear about it.
-Search the [existing issues][Issues] and see
-if others are also experiencing the issue before opening a new issue. Please
-include the version of AWS SDK for Go, Go language, and OS you’re using. Please
-also include reproduction case when appropriate.
-
-The GitHub issues are intended for bug reports and feature requests. For help
-and questions with using AWS SDK for Go please make use of the resources listed
-in the [Getting Help](#getting-help) section.
-Keeping the list of open issues lean will help us respond in a timely manner.
-
-## Feedback and contributing
-
-The v2 SDK will use GitHub [Issues] to track feature requests and issues with the SDK. In addition, we'll use GitHub [Projects] to track large tasks spanning multiple pull requests, such as refactoring the SDK's internal request lifecycle. You can provide feedback to us in several ways.
-
-**GitHub issues**. To provide feedback or report bugs, file GitHub [Issues] on the SDK. This is the preferred mechanism to give feedback so that other users can engage in the conversation, +1 issues, etc. Issues you open will be evaluated, and included in our roadmap for the GA launch.
-
-**Contributing**. You can open pull requests for fixes or additions to the AWS SDK for Go 2.0. All pull requests must be submitted under the Apache 2.0 license and will be reviewed by an SDK team member before being merged in. Accompanying unit tests, where possible, are appreciated.
-
-## Resources
-
-[SDK Developer Guide](https://aws.github.io/aws-sdk-go-v2/docs/) - Use this document to learn how to get started and
-use the AWS SDK for Go V2.
-
-[SDK Migration Guide](https://aws.github.io/aws-sdk-go-v2/docs/migrating/) - Use this document to learn how to migrate to V2 from the AWS SDK for Go.
-
-[SDK API Reference Documentation](https://pkg.go.dev/mod/github.com/aws/aws-sdk-go-v2) - Use this
-document to look up all API operation input and output parameters for AWS
-services supported by the SDK. The API reference also includes documentation of
-the SDK, and examples how to using the SDK, service client API operations, and
-API operation require parameters.
-
-[Service Documentation](https://aws.amazon.com/documentation/) - Use this
-documentation to learn how to interface with AWS services. These guides are
-great for getting started with a service, or when looking for more
-information about a service. While this document is not required for coding,
-services may supply helpful samples to look out for.
-
-[Forum](https://forums.aws.amazon.com/forum.jspa?forumID=293) - Ask questions, get help, and give feedback
-
-[Issues] - Report issues, submit pull requests, and get involved
- (see [Apache 2.0 License][license])
-
-[Dep]: https://github.com/golang/dep
-[Issues]: https://github.com/aws/aws-sdk-go-v2/issues
-[Projects]: https://github.com/aws/aws-sdk-go-v2/projects
-[CHANGELOG]: https://github.com/aws/aws-sdk-go-v2/blob/master/CHANGELOG.md
-[Amazon DynamoDB]: https://aws.amazon.com/dynamodb/
-[design]: https://github.com/aws/aws-sdk-go-v2/blob/master/DESIGN.md
-[license]: http://aws.amazon.com/apache2.0/
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go
new file mode 100644
index 000000000..6504a2186
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/accountid_endpoint_mode.go
@@ -0,0 +1,18 @@
+package aws
+
+// AccountIDEndpointMode controls how a resolved AWS account ID is handled for endpoint routing.
+type AccountIDEndpointMode string
+
+const (
+ // AccountIDEndpointModeUnset indicates the AWS account ID will not be used for endpoint routing
+ AccountIDEndpointModeUnset AccountIDEndpointMode = ""
+
+ // AccountIDEndpointModePreferred indicates the AWS account ID will be used for endpoint routing if present
+ AccountIDEndpointModePreferred = "preferred"
+
+ // AccountIDEndpointModeRequired indicates an error will be returned if the AWS account ID is not resolved from identity
+ AccountIDEndpointModeRequired = "required"
+
+ // AccountIDEndpointModeDisabled indicates the AWS account ID will be ignored during endpoint routing
+ AccountIDEndpointModeDisabled = "disabled"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go
new file mode 100644
index 000000000..4152caade
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/checksum.go
@@ -0,0 +1,33 @@
+package aws
+
+// RequestChecksumCalculation controls request checksum calculation workflow
+type RequestChecksumCalculation int
+
+const (
+ // RequestChecksumCalculationUnset is the unset value for RequestChecksumCalculation
+ RequestChecksumCalculationUnset RequestChecksumCalculation = iota
+
+ // RequestChecksumCalculationWhenSupported indicates request checksum will be calculated
+ // if the operation supports input checksums
+ RequestChecksumCalculationWhenSupported
+
+ // RequestChecksumCalculationWhenRequired indicates request checksum will be calculated
+ // if required by the operation or if user elects to set a checksum algorithm in request
+ RequestChecksumCalculationWhenRequired
+)
+
+// ResponseChecksumValidation controls response checksum validation workflow
+type ResponseChecksumValidation int
+
+const (
+ // ResponseChecksumValidationUnset is the unset value for ResponseChecksumValidation
+ ResponseChecksumValidationUnset ResponseChecksumValidation = iota
+
+ // ResponseChecksumValidationWhenSupported indicates response checksum will be validated
+ // if the operation supports output checksums
+ ResponseChecksumValidationWhenSupported
+
+ // ResponseChecksumValidationWhenRequired indicates response checksum will only
+ // be validated if the operation requires output checksum validation
+ ResponseChecksumValidationWhenRequired
+)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
index 20153586b..3219517da 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/config.go
@@ -6,6 +6,7 @@ import (
smithybearer "github.com/aws/smithy-go/auth/bearer"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
)
// HTTPClient provides the interface to provide custom HTTPClients. Generally
@@ -68,6 +69,12 @@ type Config struct {
//
// See the `aws.EndpointResolverWithOptions` documentation for additional
// usage information.
+ //
+ // Deprecated: with the release of endpoint resolution v2 in API clients,
+ // EndpointResolver and EndpointResolverWithOptions are deprecated.
+ // Providing a value for this field will likely prevent you from using
+ // newer endpoint-related service features. See API client options
+ // EndpointResolverV2 and BaseEndpoint.
EndpointResolverWithOptions EndpointResolverWithOptions
// RetryMaxAttempts specifies the maximum number attempts an API client
@@ -132,6 +139,71 @@ type Config struct {
// `config.LoadDefaultConfig`. You should not populate this structure
// programmatically, or rely on the values here within your applications.
RuntimeEnvironment RuntimeEnvironment
+
+ // AppId is an optional application specific identifier that can be set.
+ // When set it will be appended to the User-Agent header of every request
+ // in the form of App/{AppId}. This variable is sourced from environment
+ // variable AWS_SDK_UA_APP_ID or the shared config profile attribute sdk_ua_app_id.
+ // See https://docs.aws.amazon.com/sdkref/latest/guide/settings-reference.html for
+ // more information on environment variables and shared config settings.
+ AppID string
+
+ // BaseEndpoint is an intermediary transfer location to a service specific
+ // BaseEndpoint on a service's Options.
+ BaseEndpoint *string
+
+ // DisableRequestCompression toggles if an operation request could be
+ // compressed or not. Will be set to false by default. This variable is sourced from
+ // environment variable AWS_DISABLE_REQUEST_COMPRESSION or the shared config profile attribute
+ // disable_request_compression
+ DisableRequestCompression bool
+
+ // RequestMinCompressSizeBytes sets the inclusive min bytes of a request body that could be
+ // compressed. Will be set to 10240 by default and must be within 0 and 10485760 bytes inclusively.
+ // This variable is sourced from environment variable AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES or
+ // the shared config profile attribute request_min_compression_size_bytes
+ RequestMinCompressSizeBytes int64
+
+ // Controls how a resolved AWS account ID is handled for endpoint routing.
+ AccountIDEndpointMode AccountIDEndpointMode
+
+ // RequestChecksumCalculation determines when request checksum calculation is performed.
+ //
+ // There are two possible values for this setting:
+ //
+ // 1. RequestChecksumCalculationWhenSupported (default): The checksum is always calculated
+ // if the operation supports it, regardless of whether the user sets an algorithm in the request.
+ //
+ // 2. RequestChecksumCalculationWhenRequired: The checksum is only calculated if the user
+ // explicitly sets a checksum algorithm in the request.
+ //
+ // This setting is sourced from the environment variable AWS_REQUEST_CHECKSUM_CALCULATION
+ // or the shared config profile attribute "request_checksum_calculation".
+ RequestChecksumCalculation RequestChecksumCalculation
+
+ // ResponseChecksumValidation determines when response checksum validation is performed
+ //
+ // There are two possible values for this setting:
+ //
+ // 1. ResponseChecksumValidationWhenSupported (default): The checksum is always validated
+ // if the operation supports it, regardless of whether the user sets the validation mode to ENABLED in request.
+ //
+ // 2. ResponseChecksumValidationWhenRequired: The checksum is only validated if the user
+ // explicitly sets the validation mode to ENABLED in the request
+ // This variable is sourced from environment variable AWS_RESPONSE_CHECKSUM_VALIDATION or
+ // the shared config profile attribute "response_checksum_validation".
+ ResponseChecksumValidation ResponseChecksumValidation
+
+ // Registry of HTTP interceptors.
+ Interceptors smithyhttp.InterceptorRegistry
+
+ // Priority list of preferred auth scheme IDs.
+ AuthSchemePreference []string
+
+ // ServiceOptions provides service specific configuration options that will be applied
+ // when constructing clients for specific services. Each callback function receives the service ID
+ // and the service's Options struct, allowing for dynamic configuration based on the service.
+ ServiceOptions []func(string, any)
}
// NewConfig returns a new Config pointer that can be chained with builder
@@ -140,8 +212,7 @@ func NewConfig() *Config {
return &Config{}
}
-// Copy will return a shallow copy of the Config object. If any additional
-// configurations are provided they will be merged into the new config returned.
+// Copy will return a shallow copy of the Config object.
func (c Config) Copy() Config {
cp := c
return cp
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
index 9e9525231..623890e8d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credential_cache.go
@@ -172,12 +172,29 @@ func (p *CredentialsCache) getCreds() (Credentials, bool) {
return *c, true
}
+// ProviderSources returns a list of where the underlying credential provider
+// has been sourced, if available. Returns empty if the provider doesn't implement
+// the interface
+func (p *CredentialsCache) ProviderSources() []CredentialSource {
+ asSource, ok := p.provider.(CredentialProviderSource)
+ if !ok {
+ return []CredentialSource{}
+ }
+ return asSource.ProviderSources()
+}
+
// Invalidate will invalidate the cached credentials. The next call to Retrieve
// will cause the provider's Retrieve method to be called.
func (p *CredentialsCache) Invalidate() {
p.creds.Store((*Credentials)(nil))
}
+// IsCredentialsProvider returns whether credential provider wrapped by CredentialsCache
+// matches the target provider type.
+func (p *CredentialsCache) IsCredentialsProvider(target CredentialsProvider) bool {
+ return IsCredentialsProvider(p.provider, target)
+}
+
// HandleFailRefreshCredentialsCacheStrategy is an interface for
// CredentialsCache to allow CredentialsProvider how failed to refresh
// credentials is handled.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
index 24c8ce4a7..4ad2ee440 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/credentials.go
@@ -3,6 +3,7 @@ package aws
import (
"context"
"fmt"
+ "reflect"
"time"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
@@ -69,6 +70,56 @@ func (AnonymousCredentials) Retrieve(context.Context) (Credentials, error) {
fmt.Errorf("the AnonymousCredentials is not a valid credential provider, and cannot be used to sign AWS requests with")
}
+// CredentialSource is the source of the credential provider.
+// A provider can have multiple credential sources: For example, a provider that reads a profile, calls ECS to
+// get credentials and then assumes a role using STS will have all these as part of its provider chain.
+type CredentialSource int
+
+const (
+ // CredentialSourceUndefined is the sentinel zero value
+ CredentialSourceUndefined CredentialSource = iota
+ // CredentialSourceCode credentials resolved from code, cli parameters, session object, or client instance
+ CredentialSourceCode
+ // CredentialSourceEnvVars credentials resolved from environment variables
+ CredentialSourceEnvVars
+ // CredentialSourceEnvVarsSTSWebIDToken credentials resolved from environment variables for assuming a role with STS using a web identity token
+ CredentialSourceEnvVarsSTSWebIDToken
+ // CredentialSourceSTSAssumeRole credentials resolved from STS using AssumeRole
+ CredentialSourceSTSAssumeRole
+ // CredentialSourceSTSAssumeRoleSaml credentials resolved from STS using assume role with SAML
+ CredentialSourceSTSAssumeRoleSaml
+ // CredentialSourceSTSAssumeRoleWebID credentials resolved from STS using assume role with web identity
+ CredentialSourceSTSAssumeRoleWebID
+ // CredentialSourceSTSFederationToken credentials resolved from STS using a federation token
+ CredentialSourceSTSFederationToken
+	// CredentialSourceSTSSessionToken credentials resolved from STS using a session token
+ CredentialSourceSTSSessionToken
+ // CredentialSourceProfile credentials resolved from a config file(s) profile with static credentials
+ CredentialSourceProfile
+ // CredentialSourceProfileSourceProfile credentials resolved from a source profile in a config file(s) profile
+ CredentialSourceProfileSourceProfile
+ // CredentialSourceProfileNamedProvider credentials resolved from a named provider in a config file(s) profile (like EcsContainer)
+ CredentialSourceProfileNamedProvider
+ // CredentialSourceProfileSTSWebIDToken credentials resolved from configuration for assuming a role with STS using web identity token in a config file(s) profile
+ CredentialSourceProfileSTSWebIDToken
+ // CredentialSourceProfileSSO credentials resolved from an SSO session in a config file(s) profile
+ CredentialSourceProfileSSO
+ // CredentialSourceSSO credentials resolved from an SSO session
+ CredentialSourceSSO
+ // CredentialSourceProfileSSOLegacy credentials resolved from an SSO session in a config file(s) profile using legacy format
+ CredentialSourceProfileSSOLegacy
+ // CredentialSourceSSOLegacy credentials resolved from an SSO session using legacy format
+ CredentialSourceSSOLegacy
+ // CredentialSourceProfileProcess credentials resolved from a process in a config file(s) profile
+ CredentialSourceProfileProcess
+ // CredentialSourceProcess credentials resolved from a process
+ CredentialSourceProcess
+ // CredentialSourceHTTP credentials resolved from an HTTP endpoint
+ CredentialSourceHTTP
+ // CredentialSourceIMDS credentials resolved from the instance metadata service (IMDS)
+ CredentialSourceIMDS
+)
+
// A Credentials is the AWS credentials value for individual credential fields.
type Credentials struct {
// AWS Access key ID
@@ -89,6 +140,9 @@ type Credentials struct {
// The time the credentials will expire at. Should be ignored if CanExpire
// is false.
Expires time.Time
+
+ // The ID of the account for the credentials.
+ AccountID string
}
// Expired returns if the credentials have expired.
@@ -121,6 +175,13 @@ type CredentialsProvider interface {
Retrieve(ctx context.Context) (Credentials, error)
}
+// CredentialProviderSource allows any credential provider to track
+// all providers where a credential provider were sourced. For example, if the credentials came from a
+// call to a role specified in the profile, this method will give the whole breadcrumb trail
+type CredentialProviderSource interface {
+ ProviderSources() []CredentialSource
+}
+
// CredentialsProviderFunc provides a helper wrapping a function value to
// satisfy the CredentialsProvider interface.
type CredentialsProviderFunc func(context.Context) (Credentials, error)
@@ -129,3 +190,41 @@ type CredentialsProviderFunc func(context.Context) (Credentials, error)
func (fn CredentialsProviderFunc) Retrieve(ctx context.Context) (Credentials, error) {
return fn(ctx)
}
+
+type isCredentialsProvider interface {
+ IsCredentialsProvider(CredentialsProvider) bool
+}
+
+// IsCredentialsProvider returns whether the target CredentialProvider is the same type as provider when comparing the
+// implementation type.
+//
+// If provider has a method IsCredentialsProvider(CredentialsProvider) bool it will be responsible for validating
+// whether target matches the credential provider type.
+//
+// When comparing the CredentialProvider implementations provider and target for equality, the following rules are used:
+//
+// If provider is of type T and target is of type V, true if type *T is the same as type *V, otherwise false
+// If provider is of type *T and target is of type V, true if type *T is the same as type *V, otherwise false
+// If provider is of type T and target is of type *V, true if type *T is the same as type *V, otherwise false
+// If provider is of type *T and target is of type *V, true if type *T is the same as type *V, otherwise false
+func IsCredentialsProvider(provider, target CredentialsProvider) bool {
+ if target == nil || provider == nil {
+ return provider == target
+ }
+
+ if x, ok := provider.(isCredentialsProvider); ok {
+ return x.IsCredentialsProvider(target)
+ }
+
+ targetType := reflect.TypeOf(target)
+ if targetType.Kind() != reflect.Ptr {
+ targetType = reflect.PtrTo(targetType)
+ }
+
+ providerType := reflect.TypeOf(provider)
+ if providerType.Kind() != reflect.Ptr {
+ providerType = reflect.PtrTo(providerType)
+ }
+
+ return targetType.AssignableTo(providerType)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
index aa10a9b40..99edbf3ee 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/endpoints.go
@@ -70,6 +70,10 @@ func GetUseFIPSEndpoint(options ...interface{}) (value FIPSEndpointState, found
// The SDK will automatically resolve these endpoints per API client using an
// internal endpoint resolvers. If you'd like to provide custom endpoint
// resolving behavior you can implement the EndpointResolver interface.
+//
+// Deprecated: This structure was used with the global [EndpointResolver]
+// interface, which has been deprecated in favor of service-specific endpoint
+// resolution. See the deprecation docs on that interface for more information.
type Endpoint struct {
// The base URL endpoint the SDK API clients will use to make API calls to.
// The SDK will suffix URI path and query elements to this endpoint.
@@ -124,6 +128,8 @@ type Endpoint struct {
}
// EndpointSource is the endpoint source type.
+//
+// Deprecated: The global [Endpoint] structure is deprecated.
type EndpointSource int
const (
@@ -161,19 +167,25 @@ func (e *EndpointNotFoundError) Unwrap() error {
// API clients will fallback to attempting to resolve the endpoint using its
// internal default endpoint resolver.
//
-// Deprecated: See EndpointResolverWithOptions
+// Deprecated: The global endpoint resolution interface is deprecated. The API
+// for endpoint resolution is now unique to each service and is set via the
+// EndpointResolverV2 field on service client options. Setting a value for
+// EndpointResolver on aws.Config or service client options will prevent you
+// from using any endpoint-related service features released after the
+// introduction of EndpointResolverV2. You may also encounter broken or
+// unexpected behavior when using the old global interface with services that
+// use many endpoint-related customizations such as S3.
type EndpointResolver interface {
ResolveEndpoint(service, region string) (Endpoint, error)
}
// EndpointResolverFunc wraps a function to satisfy the EndpointResolver interface.
//
-// Deprecated: See EndpointResolverWithOptionsFunc
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
type EndpointResolverFunc func(service, region string) (Endpoint, error)
// ResolveEndpoint calls the wrapped function and returns the results.
-//
-// Deprecated: See EndpointResolverWithOptions.ResolveEndpoint
func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint, error) {
return e(service, region)
}
@@ -184,11 +196,17 @@ func (e EndpointResolverFunc) ResolveEndpoint(service, region string) (Endpoint,
// available. If the EndpointResolverWithOptions returns an EndpointNotFoundError error,
// API clients will fallback to attempting to resolve the endpoint using its
// internal default endpoint resolver.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
type EndpointResolverWithOptions interface {
ResolveEndpoint(service, region string, options ...interface{}) (Endpoint, error)
}
// EndpointResolverWithOptionsFunc wraps a function to satisfy the EndpointResolverWithOptions interface.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [EndpointResolver].
type EndpointResolverWithOptionsFunc func(service, region string, options ...interface{}) (Endpoint, error)
// ResolveEndpoint calls the wrapped function and returns the results.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
index 5ca68262b..b72921f87 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/go_module_metadata.go
@@ -3,4 +3,4 @@
package aws
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.16.16"
+const goModuleVersion = "1.38.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
index e6e87ac77..d66f0960a 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/metadata.go
@@ -2,6 +2,7 @@ package middleware
import (
"context"
+
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/smithy-go/middleware"
@@ -42,12 +43,13 @@ func (s RegisterServiceMetadata) HandleInitialize(
// service metadata keys for storing and lookup of runtime stack information.
type (
- serviceIDKey struct{}
- signingNameKey struct{}
- signingRegionKey struct{}
- regionKey struct{}
- operationNameKey struct{}
- partitionIDKey struct{}
+ serviceIDKey struct{}
+ signingNameKey struct{}
+ signingRegionKey struct{}
+ regionKey struct{}
+ operationNameKey struct{}
+ partitionIDKey struct{}
+ requiresLegacyEndpointsKey struct{}
)
// GetServiceID retrieves the service id from the context.
@@ -63,6 +65,9 @@ func GetServiceID(ctx context.Context) (v string) {
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
+//
+// Deprecated: This value is unstable. The resolved signing name is available
+// in the signer properties object passed to the signer.
func GetSigningName(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, signingNameKey{}).(string)
return v
@@ -72,6 +77,9 @@ func GetSigningName(ctx context.Context) (v string) {
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
+//
+// Deprecated: This value is unstable. The resolved signing region is available
+// in the signer properties object passed to the signer.
func GetSigningRegion(ctx context.Context) (v string) {
v, _ = middleware.GetStackValue(ctx, signingRegionKey{}).(string)
return v
@@ -104,10 +112,32 @@ func GetPartitionID(ctx context.Context) string {
return v
}
-// SetSigningName set or modifies the signing name on the context.
+// GetRequiresLegacyEndpoints the flag used to indicate if legacy endpoint
+// customizations need to be executed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetRequiresLegacyEndpoints(ctx context.Context) bool {
+ v, _ := middleware.GetStackValue(ctx, requiresLegacyEndpointsKey{}).(bool)
+ return v
+}
+
+// SetRequiresLegacyEndpoints set or modifies the flag indicated that
+// legacy endpoint customizations are needed.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetRequiresLegacyEndpoints(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, requiresLegacyEndpointsKey{}, value)
+}
+
+// SetSigningName set or modifies the sigv4 or sigv4a signing name on the context.
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
+//
+// Deprecated: This value is unstable. Use WithSigV4SigningName client option
+// funcs instead.
func SetSigningName(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, signingNameKey{}, value)
}
@@ -116,6 +146,9 @@ func SetSigningName(ctx context.Context, value string) context.Context {
//
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
+//
+// Deprecated: This value is unstable. Use WithSigV4SigningRegion client option
+// funcs instead.
func SetSigningRegion(ctx context.Context, value string) context.Context {
return middleware.WithStackValue(ctx, signingRegionKey{}, value)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
index 9bd0dfb15..6d5f0079c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/middleware.go
@@ -139,16 +139,16 @@ func AddRecordResponseTiming(stack *middleware.Stack) error {
// raw response within the response metadata.
type rawResponseKey struct{}
-// addRawResponse middleware adds raw response on to the metadata
-type addRawResponse struct{}
+// AddRawResponse middleware adds raw response on to the metadata
+type AddRawResponse struct{}
// ID the identifier for the ClientRequestID
-func (m *addRawResponse) ID() string {
+func (m *AddRawResponse) ID() string {
return "AddRawResponseToMetadata"
}
// HandleDeserialize adds raw response on the middleware metadata
-func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+func (m AddRawResponse) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
@@ -159,7 +159,7 @@ func (m addRawResponse) HandleDeserialize(ctx context.Context, in middleware.Des
// AddRawResponseToMetadata adds middleware to the middleware stack that
// store raw response on to the metadata.
func AddRawResponseToMetadata(stack *middleware.Stack) error {
- return stack.Deserialize.Add(&addRawResponse{}, middleware.Before)
+ return stack.Deserialize.Add(&AddRawResponse{}, middleware.Before)
}
// GetRawResponse returns raw response set on metadata
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go
new file mode 100644
index 000000000..3f6aaf231
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/recursion_detection.go
@@ -0,0 +1,94 @@
+package middleware
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "os"
+)
+
+const envAwsLambdaFunctionName = "AWS_LAMBDA_FUNCTION_NAME"
+const envAmznTraceID = "_X_AMZN_TRACE_ID"
+const amznTraceIDHeader = "X-Amzn-Trace-Id"
+
+// AddRecursionDetection adds recursionDetection to the middleware stack
+func AddRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&RecursionDetection{}, middleware.After)
+}
+
+// RecursionDetection detects Lambda environment and sets its X-Ray trace ID to request header if absent
+// to avoid recursion invocation in Lambda
+type RecursionDetection struct{}
+
+// ID returns the middleware identifier
+func (m *RecursionDetection) ID() string {
+ return "RecursionDetection"
+}
+
+// HandleBuild detects Lambda environment and adds its trace ID to request header if absent
+func (m *RecursionDetection) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ _, hasLambdaEnv := os.LookupEnv(envAwsLambdaFunctionName)
+ xAmznTraceID, hasTraceID := os.LookupEnv(envAmznTraceID)
+ value := req.Header.Get(amznTraceIDHeader)
+ // only set the X-Amzn-Trace-Id header when it is not set initially, the
+ // current environment is Lambda and the _X_AMZN_TRACE_ID env variable exists
+ if value != "" || !hasLambdaEnv || !hasTraceID {
+ return next.HandleBuild(ctx, in)
+ }
+
+ req.Header.Set(amznTraceIDHeader, percentEncode(xAmznTraceID))
+ return next.HandleBuild(ctx, in)
+}
+
+func percentEncode(s string) string {
+ upperhex := "0123456789ABCDEF"
+ hexCount := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEncode(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return s
+ }
+
+ required := len(s) + 2*hexCount
+ t := make([]byte, required)
+ j := 0
+ for i := 0; i < len(s); i++ {
+ if c := s[i]; shouldEncode(c) {
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ } else {
+ t[j] = c
+ j++
+ }
+ }
+ return string(t)
+}
+
+func shouldEncode(c byte) bool {
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ return false
+ }
+ switch c {
+ case '-', '=', ';', ':', '+', '&', '[', ']', '{', '}', '"', '\'', ',':
+ return false
+ default:
+ return true
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
index 7ce48c611..128b60a73 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/request_id_retriever.go
@@ -4,6 +4,7 @@ import (
"context"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -11,18 +12,22 @@ import (
func AddRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
// add error wrapper middleware before operation deserializers so that it can wrap the error response
// returned by operation deserializers
- return stack.Deserialize.Insert(&requestIDRetriever{}, "OperationDeserializer", middleware.Before)
+ return stack.Deserialize.Insert(&RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
}
-type requestIDRetriever struct {
+// RequestIDRetriever middleware captures the AWS service request ID from the
+// raw response.
+type RequestIDRetriever struct {
}
// ID returns the middleware identifier
-func (m *requestIDRetriever) ID() string {
+func (m *RequestIDRetriever) ID() string {
return "RequestIDRetriever"
}
-func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+// HandleDeserialize pulls the AWS request ID from the response, storing it in
+// operation metadata.
+func (m *RequestIDRetriever) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
@@ -41,6 +46,9 @@ func (m *requestIDRetriever) HandleDeserialize(ctx context.Context, in middlewar
if v := resp.Header.Get(h); len(v) != 0 {
// set reqID on metadata for successful responses.
SetRequestIDMetadata(&metadata, v)
+
+ span, _ := tracing.GetSpan(ctx)
+ span.SetProperty("aws.request_id", v)
break
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
index 285b2bba8..6ee3391be 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/middleware/user_agent.go
@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"runtime"
+ "sort"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
@@ -30,8 +31,12 @@ const (
FrameworkMetadata
AdditionalMetadata
ApplicationIdentifier
+ FeatureMetadata2
)
+// Hardcoded value to specify which version of the user agent we're using
+const uaMetadata = "ua/2.1"
+
func (k SDKAgentKeyType) string() string {
switch k {
case APIMetadata:
@@ -50,6 +55,8 @@ func (k SDKAgentKeyType) string() string {
return "lib"
case ApplicationIdentifier:
return "app"
+ case FeatureMetadata2:
+ return "m"
case AdditionalMetadata:
fallthrough
default:
@@ -59,12 +66,107 @@ func (k SDKAgentKeyType) string() string {
const execEnvVar = `AWS_EXECUTION_ENV`
-// requestUserAgent is a build middleware that set the User-Agent for the request.
-type requestUserAgent struct {
+var validChars = map[rune]bool{
+ '!': true, '#': true, '$': true, '%': true, '&': true, '\'': true, '*': true, '+': true,
+ '-': true, '.': true, '^': true, '_': true, '`': true, '|': true, '~': true,
+}
+
+// UserAgentFeature enumerates tracked SDK features.
+type UserAgentFeature string
+
+// Enumerates UserAgentFeature.
+const (
+ UserAgentFeatureResourceModel UserAgentFeature = "A" // n/a (we don't generate separate resource types)
+
+ UserAgentFeatureWaiter = "B"
+ UserAgentFeaturePaginator = "C"
+
+ UserAgentFeatureRetryModeLegacy = "D" // n/a (equivalent to standard)
+ UserAgentFeatureRetryModeStandard = "E"
+ UserAgentFeatureRetryModeAdaptive = "F"
+
+ UserAgentFeatureS3Transfer = "G"
+ UserAgentFeatureS3CryptoV1N = "H" // n/a (crypto client is external)
+ UserAgentFeatureS3CryptoV2 = "I" // n/a
+ UserAgentFeatureS3ExpressBucket = "J"
+ UserAgentFeatureS3AccessGrants = "K" // not yet implemented
+
+ UserAgentFeatureGZIPRequestCompression = "L"
+
+ UserAgentFeatureProtocolRPCV2CBOR = "M"
+
+ UserAgentFeatureAccountIDEndpoint = "O" // DO NOT IMPLEMENT: rules output is not currently defined. SDKs should not parse endpoints for feature information.
+ UserAgentFeatureAccountIDModePreferred = "P"
+ UserAgentFeatureAccountIDModeDisabled = "Q"
+ UserAgentFeatureAccountIDModeRequired = "R"
+
+ UserAgentFeatureRequestChecksumCRC32 = "U"
+ UserAgentFeatureRequestChecksumCRC32C = "V"
+ UserAgentFeatureRequestChecksumCRC64 = "W"
+ UserAgentFeatureRequestChecksumSHA1 = "X"
+ UserAgentFeatureRequestChecksumSHA256 = "Y"
+ UserAgentFeatureRequestChecksumWhenSupported = "Z"
+ UserAgentFeatureRequestChecksumWhenRequired = "a"
+ UserAgentFeatureResponseChecksumWhenSupported = "b"
+ UserAgentFeatureResponseChecksumWhenRequired = "c"
+
+ UserAgentFeatureDynamoDBUserAgent = "d" // not yet implemented
+
+ UserAgentFeatureCredentialsCode = "e"
+ UserAgentFeatureCredentialsJvmSystemProperties = "f" // n/a (this is not a JVM sdk)
+ UserAgentFeatureCredentialsEnvVars = "g"
+ UserAgentFeatureCredentialsEnvVarsStsWebIDToken = "h"
+ UserAgentFeatureCredentialsStsAssumeRole = "i"
+ UserAgentFeatureCredentialsStsAssumeRoleSaml = "j" // not yet implemented
+ UserAgentFeatureCredentialsStsAssumeRoleWebID = "k"
+ UserAgentFeatureCredentialsStsFederationToken = "l" // not yet implemented
+ UserAgentFeatureCredentialsStsSessionToken = "m" // not yet implemented
+ UserAgentFeatureCredentialsProfile = "n"
+ UserAgentFeatureCredentialsProfileSourceProfile = "o"
+ UserAgentFeatureCredentialsProfileNamedProvider = "p"
+ UserAgentFeatureCredentialsProfileStsWebIDToken = "q"
+ UserAgentFeatureCredentialsProfileSso = "r"
+ UserAgentFeatureCredentialsSso = "s"
+ UserAgentFeatureCredentialsProfileSsoLegacy = "t"
+ UserAgentFeatureCredentialsSsoLegacy = "u"
+ UserAgentFeatureCredentialsProfileProcess = "v"
+ UserAgentFeatureCredentialsProcess = "w"
+ UserAgentFeatureCredentialsBoto2ConfigFile = "x" // n/a (this is not boto/Python)
+ UserAgentFeatureCredentialsAwsSdkStore = "y" // n/a (this is used by .NET based sdk)
+ UserAgentFeatureCredentialsHTTP = "z"
+ UserAgentFeatureCredentialsIMDS = "0"
+)
+
+var credentialSourceToFeature = map[aws.CredentialSource]UserAgentFeature{
+ aws.CredentialSourceCode: UserAgentFeatureCredentialsCode,
+ aws.CredentialSourceEnvVars: UserAgentFeatureCredentialsEnvVars,
+ aws.CredentialSourceEnvVarsSTSWebIDToken: UserAgentFeatureCredentialsEnvVarsStsWebIDToken,
+ aws.CredentialSourceSTSAssumeRole: UserAgentFeatureCredentialsStsAssumeRole,
+ aws.CredentialSourceSTSAssumeRoleSaml: UserAgentFeatureCredentialsStsAssumeRoleSaml,
+ aws.CredentialSourceSTSAssumeRoleWebID: UserAgentFeatureCredentialsStsAssumeRoleWebID,
+ aws.CredentialSourceSTSFederationToken: UserAgentFeatureCredentialsStsFederationToken,
+ aws.CredentialSourceSTSSessionToken: UserAgentFeatureCredentialsStsSessionToken,
+ aws.CredentialSourceProfile: UserAgentFeatureCredentialsProfile,
+ aws.CredentialSourceProfileSourceProfile: UserAgentFeatureCredentialsProfileSourceProfile,
+ aws.CredentialSourceProfileNamedProvider: UserAgentFeatureCredentialsProfileNamedProvider,
+ aws.CredentialSourceProfileSTSWebIDToken: UserAgentFeatureCredentialsProfileStsWebIDToken,
+ aws.CredentialSourceProfileSSO: UserAgentFeatureCredentialsProfileSso,
+ aws.CredentialSourceSSO: UserAgentFeatureCredentialsSso,
+ aws.CredentialSourceProfileSSOLegacy: UserAgentFeatureCredentialsProfileSsoLegacy,
+ aws.CredentialSourceSSOLegacy: UserAgentFeatureCredentialsSsoLegacy,
+ aws.CredentialSourceProfileProcess: UserAgentFeatureCredentialsProfileProcess,
+ aws.CredentialSourceProcess: UserAgentFeatureCredentialsProcess,
+ aws.CredentialSourceHTTP: UserAgentFeatureCredentialsHTTP,
+ aws.CredentialSourceIMDS: UserAgentFeatureCredentialsIMDS,
+}
+
+// RequestUserAgent is a build middleware that set the User-Agent for the request.
+type RequestUserAgent struct {
sdkAgent, userAgent *smithyhttp.UserAgentBuilder
+ features map[UserAgentFeature]struct{}
}
-// newRequestUserAgent returns a new requestUserAgent which will set the User-Agent and X-Amz-User-Agent for the
+// NewRequestUserAgent returns a new RequestUserAgent which will set the User-Agent and X-Amz-User-Agent for the
// request.
//
// User-Agent example:
@@ -74,14 +176,16 @@ type requestUserAgent struct {
// X-Amz-User-Agent example:
//
// aws-sdk-go-v2/1.2.3 md/GOOS/linux md/GOARCH/amd64 lang/go/1.15
-func newRequestUserAgent() *requestUserAgent {
+func NewRequestUserAgent() *RequestUserAgent {
userAgent, sdkAgent := smithyhttp.NewUserAgentBuilder(), smithyhttp.NewUserAgentBuilder()
addProductName(userAgent)
+ addUserAgentMetadata(userAgent)
addProductName(sdkAgent)
- r := &requestUserAgent{
+ r := &RequestUserAgent{
sdkAgent: sdkAgent,
userAgent: userAgent,
+ features: map[UserAgentFeature]struct{}{},
}
addSDKMetadata(r)
@@ -89,7 +193,7 @@ func newRequestUserAgent() *requestUserAgent {
return r
}
-func addSDKMetadata(r *requestUserAgent) {
+func addSDKMetadata(r *RequestUserAgent) {
r.AddSDKAgentKey(OperatingSystemMetadata, getNormalizedOSName())
r.AddSDKAgentKeyValue(LanguageMetadata, "go", languageVersion)
r.AddSDKAgentKeyValue(AdditionalMetadata, "GOOS", runtime.GOOS)
@@ -103,6 +207,10 @@ func addProductName(builder *smithyhttp.UserAgentBuilder) {
builder.AddKeyValue(aws.SDKName, aws.SDKVersion)
}
+func addUserAgentMetadata(builder *smithyhttp.UserAgentBuilder) {
+ builder.AddKey(uaMetadata)
+}
+
// AddUserAgentKey retrieves a requestUserAgent from the provided stack, or initializes one.
func AddUserAgentKey(key string) func(*middleware.Stack) error {
return func(stack *middleware.Stack) error {
@@ -157,18 +265,18 @@ func AddRequestUserAgentMiddleware(stack *middleware.Stack) error {
return err
}
-func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error) {
- id := (*requestUserAgent)(nil).ID()
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*RequestUserAgent, error) {
+ id := (*RequestUserAgent)(nil).ID()
bm, ok := stack.Build.Get(id)
if !ok {
- bm = newRequestUserAgent()
+ bm = NewRequestUserAgent()
err := stack.Build.Add(bm, middleware.After)
if err != nil {
return nil, err
}
}
- requestUserAgent, ok := bm.(*requestUserAgent)
+ requestUserAgent, ok := bm.(*RequestUserAgent)
if !ok {
return nil, fmt.Errorf("%T for %s middleware did not match expected type", bm, id)
}
@@ -177,34 +285,48 @@ func getOrAddRequestUserAgent(stack *middleware.Stack) (*requestUserAgent, error
}
// AddUserAgentKey adds the component identified by name to the User-Agent string.
-func (u *requestUserAgent) AddUserAgentKey(key string) {
- u.userAgent.AddKey(key)
+func (u *RequestUserAgent) AddUserAgentKey(key string) {
+ u.userAgent.AddKey(strings.Map(rules, key))
}
// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
-func (u *requestUserAgent) AddUserAgentKeyValue(key, value string) {
- u.userAgent.AddKeyValue(key, value)
+func (u *RequestUserAgent) AddUserAgentKeyValue(key, value string) {
+ u.userAgent.AddKeyValue(strings.Map(rules, key), strings.Map(rules, value))
}
-// AddUserAgentKey adds the component identified by name to the User-Agent string.
-func (u *requestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
+// AddUserAgentFeature adds the feature ID to the tracking list to be emitted
+// in the final User-Agent string.
+func (u *RequestUserAgent) AddUserAgentFeature(feature UserAgentFeature) {
+ u.features[feature] = struct{}{}
+}
+
+// AddSDKAgentKey adds the component identified by name to the User-Agent string.
+func (u *RequestUserAgent) AddSDKAgentKey(keyType SDKAgentKeyType, key string) {
// TODO: should target sdkAgent
- u.userAgent.AddKey(keyType.string() + "/" + key)
+ u.userAgent.AddKey(keyType.string() + "/" + strings.Map(rules, key))
}
-// AddUserAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
-func (u *requestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
+// AddSDKAgentKeyValue adds the key identified by the given name and value to the User-Agent string.
+func (u *RequestUserAgent) AddSDKAgentKeyValue(keyType SDKAgentKeyType, key, value string) {
// TODO: should target sdkAgent
- u.userAgent.AddKeyValue(keyType.string()+"/"+key, value)
+ u.userAgent.AddKeyValue(keyType.string(), strings.Map(rules, key)+"#"+strings.Map(rules, value))
+}
+
+// AddCredentialsSource adds the credential source as a feature on the User-Agent string
+func (u *RequestUserAgent) AddCredentialsSource(source aws.CredentialSource) {
+ x, ok := credentialSourceToFeature[source]
+ if ok {
+ u.AddUserAgentFeature(x)
+ }
}
// ID the name of the middleware.
-func (u *requestUserAgent) ID() string {
+func (u *RequestUserAgent) ID() string {
return "UserAgent"
}
// HandleBuild adds or appends the constructed user agent to the request.
-func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+func (u *RequestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
out middleware.BuildOutput, metadata middleware.Metadata, err error,
) {
switch req := in.Request.(type) {
@@ -219,12 +341,15 @@ func (u *requestUserAgent) HandleBuild(ctx context.Context, in middleware.BuildI
return next.HandleBuild(ctx, in)
}
-func (u *requestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
+func (u *RequestUserAgent) addHTTPUserAgent(request *smithyhttp.Request) {
const userAgent = "User-Agent"
+ if len(u.features) > 0 {
+ updateHTTPHeader(request, userAgent, buildFeatureMetrics(u.features))
+ }
updateHTTPHeader(request, userAgent, u.userAgent.Build())
}
-func (u *requestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
+func (u *RequestUserAgent) addHTTPSDKAgent(request *smithyhttp.Request) {
const sdkAgent = "X-Amz-User-Agent"
updateHTTPHeader(request, sdkAgent, u.sdkAgent.Build())
}
@@ -241,3 +366,26 @@ func updateHTTPHeader(request *smithyhttp.Request, header string, value string)
}
request.Header[header] = append(request.Header[header][:0], current)
}
+
+func rules(r rune) rune {
+ switch {
+ case r >= '0' && r <= '9':
+ return r
+ case r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z':
+ return r
+ case validChars[r]:
+ return r
+ default:
+ return '-'
+ }
+}
+
+func buildFeatureMetrics(features map[UserAgentFeature]struct{}) string {
+ fs := make([]string, 0, len(features))
+ for f := range features {
+ fs = append(fs, string(f))
+ }
+
+ sort.Strings(fs)
+ return fmt.Sprintf("%s/%s", FeatureMetadata2.string(), strings.Join(fs, ","))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
index 9d7d3a0cb..6669a3ddf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/array.go
@@ -1,8 +1,8 @@
package query
import (
- "fmt"
"net/url"
+ "strconv"
)
// Array represents the encoding of Query lists and sets. A Query array is a
@@ -21,41 +21,41 @@ type Array struct {
// keys for each element in the list. For example, an entry might have the
// key "ParentStructure.ListName.member.MemberName.1".
//
- // While this is currently represented as a string that gets added to, it
- // could also be represented as a stack that only gets condensed into a
- // string when a finalized key is created. This could potentially reduce
- // allocations.
+ // When the array is not flat, the prefix will contain the memberName; otherwise the memberName is ignored.
prefix string
- // Whether the list is flat or not. A list that is not flat will produce the
- // following entry to the url.Values for a given entry:
- // ListName.MemberName.1=value
- // A list that is flat will produce the following:
- // ListName.1=value
- flat bool
- // The location name of the member. In most cases this should be "member".
- memberName string
// Elements are stored in values, so we keep track of the list size here.
size int32
+ // Empty lists are encoded as "="; if we add a value later we will
+ // remove this encoding.
+ emptyValue Value
}
func newArray(values url.Values, prefix string, flat bool, memberName string) *Array {
+ emptyValue := newValue(values, prefix, flat)
+ emptyValue.String("")
+
+ if !flat {
+ // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
+ prefix = prefix + keySeparator + memberName
+ }
+
return &Array{
values: values,
prefix: prefix,
- flat: flat,
- memberName: memberName,
+ emptyValue: emptyValue,
}
}
// Value adds a new element to the Query Array. Returns a Value type used to
// encode the array element.
func (a *Array) Value() Value {
+ if a.size == 0 {
+ delete(a.values, a.emptyValue.key)
+ }
+
// Query lists start a 1, so adjust the size first
a.size++
- prefix := a.prefix
- if !a.flat {
- prefix = fmt.Sprintf("%s.%s", prefix, a.memberName)
- }
// Lists can't have flat members
- return newValue(a.values, fmt.Sprintf("%s.%d", prefix, a.size), false)
+ // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
+ return newValue(a.values, a.prefix+keySeparator+strconv.FormatInt(int64(a.size), 10), false)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
index 6a99d4ea8..305a8ace3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/object.go
@@ -1,9 +1,6 @@
package query
-import (
- "fmt"
- "net/url"
-)
+import "net/url"
// Object represents the encoding of Query structures and unions. A Query
// object is a representation of a mapping of string keys to arbitrary
@@ -41,6 +38,12 @@ func (o *Object) Key(name string) Value {
return o.key(name, false)
}
+// KeyWithValues adds the given named key to the Query object.
+// Returns a Value encoder that should be used to encode a Query list of values.
+func (o *Object) KeyWithValues(name string) Value {
+ return o.keyWithValues(name, false)
+}
+
// FlatKey adds the given named key to the Query object.
// Returns a Value encoder that should be used to encode a Query value type. The
// value will be flattened if it is a map or array.
@@ -50,7 +53,16 @@ func (o *Object) FlatKey(name string) Value {
func (o *Object) key(name string, flatValue bool) Value {
if o.prefix != "" {
- return newValue(o.values, fmt.Sprintf("%s.%s", o.prefix, name), flatValue)
+ // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
+ return newValue(o.values, o.prefix+keySeparator+name, flatValue)
}
return newValue(o.values, name, flatValue)
}
+
+func (o *Object) keyWithValues(name string, flatValue bool) Value {
+ if o.prefix != "" {
+ // This uses string concatenation in place of fmt.Sprintf as fmt.Sprintf has a much higher resource overhead
+ return newAppendValue(o.values, o.prefix+keySeparator+name, flatValue)
+ }
+ return newAppendValue(o.values, name, flatValue)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
index 302525ab1..8063c592d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/query/value.go
@@ -7,6 +7,8 @@ import (
"github.com/aws/smithy-go/encoding/httpbinding"
)
+const keySeparator = "."
+
// Value represents a Query Value type.
type Value struct {
// The query values to add the value to.
@@ -27,6 +29,15 @@ func newValue(values url.Values, key string, flat bool) Value {
}
}
+func newAppendValue(values url.Values, key string, flat bool) Value {
+ return Value{
+ values: values,
+ key: key,
+ flat: flat,
+ queryValue: httpbinding.NewQueryValue(values, key, true),
+ }
+}
+
func newBaseValue(values url.Values) Value {
return Value{
values: values,
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
index c228f7d87..6975ce652 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/protocol/xml/error_utils.go
@@ -21,26 +21,18 @@ func GetErrorResponseComponents(r io.Reader, noErrorWrapping bool) (ErrorCompone
if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
}
- return ErrorComponents{
- Code: errResponse.Code,
- Message: errResponse.Message,
- RequestID: errResponse.RequestID,
- }, nil
+ return ErrorComponents(errResponse), nil
}
var errResponse wrappedErrorResponse
if err := xml.NewDecoder(r).Decode(&errResponse); err != nil && err != io.EOF {
return ErrorComponents{}, fmt.Errorf("error while deserializing xml error response: %w", err)
}
- return ErrorComponents{
- Code: errResponse.Code,
- Message: errResponse.Message,
- RequestID: errResponse.RequestID,
- }, nil
+ return ErrorComponents(errResponse), nil
}
// noWrappedErrorResponse represents the error response body with
-// no internal ...
+// wrapped within Error
type wrappedErrorResponse struct {
Code string `xml:"Error>Code"`
Message string `xml:"Error>Message"`
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
new file mode 100644
index 000000000..8c7836410
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/none.go
@@ -0,0 +1,20 @@
+package ratelimit
+
+import "context"
+
+// None implements a no-op rate limiter which effectively disables client-side
+// rate limiting (also known as "retry quotas").
+//
+// GetToken does nothing and always returns a nil error. The returned
+// token-release function does nothing, and always returns a nil error.
+//
+// AddTokens does nothing and always returns a nil error.
+var None = &none{}
+
+type none struct{}
+
+func (*none) GetToken(ctx context.Context, cost uint) (func() error, error) {
+ return func() error { return nil }, nil
+}
+
+func (*none) AddTokens(v uint) error { return nil }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
index 12a3f0c4f..d89090ad3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/ratelimit/token_rate_limit.go
@@ -30,10 +30,6 @@ func NewTokenRateLimit(tokens uint) *TokenRateLimit {
}
}
-func isTimeoutError(error) bool {
- return false
-}
-
type canceledError struct {
Err error
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go
new file mode 100644
index 000000000..bfa5bf7d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/attempt_metrics.go
@@ -0,0 +1,51 @@
+package retry
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type attemptMetrics struct {
+ Attempts metrics.Int64Counter
+ Errors metrics.Int64Counter
+
+ AttemptDuration metrics.Float64Histogram
+}
+
+func newAttemptMetrics(meter metrics.Meter) (*attemptMetrics, error) {
+ m := &attemptMetrics{}
+ var err error
+
+ m.Attempts, err = meter.Int64Counter("client.call.attempts", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "{attempt}"
+ o.Description = "The number of attempts for an individual operation"
+ })
+ if err != nil {
+ return nil, err
+ }
+ m.Errors, err = meter.Int64Counter("client.call.errors", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "{error}"
+ o.Description = "The number of errors for an operation"
+ })
+ if err != nil {
+ return nil, err
+ }
+ m.AttemptDuration, err = meter.Float64Histogram("client.call.attempt_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "The time it takes to connect to the service, send the request, and get back HTTP status code and headers (including time queued waiting to be sent)"
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
+
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
index 3326289a1..5549922ab 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/middleware.go
@@ -2,17 +2,22 @@ package retry
import (
"context"
+ "errors"
"fmt"
"strconv"
"strings"
"time"
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/smithy-go"
+
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddle "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/smithy-go/logging"
- "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/metrics"
smithymiddle "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
"github.com/aws/smithy-go/transport/http"
)
@@ -35,10 +40,17 @@ type Attempt struct {
// attempts are reached.
LogAttempts bool
+ // A Meter instance for recording retry-related metrics.
+ OperationMeter metrics.Meter
+
retryer aws.RetryerV2
requestCloner RequestCloner
}
+// skewThreshold is the threshold at which certain kinds of errors are
+// considered to be probably caused by clock skew.
+const skewThreshold = 4 * time.Minute
+
// NewAttemptMiddleware returns a new Attempt retry middleware.
func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optFns ...func(*Attempt)) *Attempt {
m := &Attempt{
@@ -48,6 +60,10 @@ func NewAttemptMiddleware(retryer aws.Retryer, requestCloner RequestCloner, optF
for _, fn := range optFns {
fn(m)
}
+ if m.OperationMeter == nil {
+ m.OperationMeter = metrics.NopMeterProvider{}.Meter("")
+ }
+
return m
}
@@ -73,6 +89,11 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn
maxAttempts := r.retryer.MaxAttempts()
releaseRetryToken := nopRelease
+ retryMetrics, err := newAttemptMetrics(r.OperationMeter)
+ if err != nil {
+ return out, metadata, err
+ }
+
for {
attemptNum++
attemptInput := in
@@ -86,8 +107,29 @@ func (r *Attempt) HandleFinalize(ctx context.Context, in smithymiddle.FinalizeIn
AttemptClockSkew: attemptClockSkew,
})
+ // Setting clock skew to be used on other context (like signing)
+ ctx = internalcontext.SetAttemptSkewContext(ctx, attemptClockSkew)
+
var attemptResult AttemptResult
+
+ attemptCtx, span := tracing.StartSpan(attemptCtx, "Attempt", func(o *tracing.SpanOptions) {
+ o.Properties.Set("operation.attempt", attemptNum)
+ })
+ retryMetrics.Attempts.Add(ctx, 1, withOperationMetadata(ctx))
+
+ start := sdk.NowTime()
out, attemptResult, releaseRetryToken, err = r.handleAttempt(attemptCtx, attemptInput, releaseRetryToken, next)
+ elapsed := sdk.NowTime().Sub(start)
+
+ retryMetrics.AttemptDuration.Record(ctx, float64(elapsed)/1e9, withOperationMetadata(ctx))
+ if err != nil {
+ retryMetrics.Errors.Add(ctx, 1, withOperationMetadata(ctx), func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("exception.type", errorType(err))
+ })
+ }
+
+ span.End()
+
attemptClockSkew, _ = awsmiddle.GetAttemptSkew(attemptResult.ResponseMetadata)
// AttemptResult Retried states that the attempt was not successful, and
@@ -185,6 +227,8 @@ func (r *Attempt) handleAttempt(
return out, attemptResult, nopRelease, err
}
+ err = wrapAsClockSkew(ctx, err)
+
//------------------------------
// Is Retryable and Should Retry
//------------------------------
@@ -216,7 +260,7 @@ func (r *Attempt) handleAttempt(
// Get a retry token that will be released after the
releaseRetryToken, retryTokenErr := r.retryer.GetRetryToken(ctx, err)
if retryTokenErr != nil {
- return out, attemptResult, nopRelease, retryTokenErr
+ return out, attemptResult, nopRelease, errors.Join(err, retryTokenErr)
}
//------------------------------
@@ -240,6 +284,37 @@ func (r *Attempt) handleAttempt(
return out, attemptResult, releaseRetryToken, err
}
+// errors that, if detected when we know there's a clock skew,
+// can be retried and have a high chance of success
+var possibleSkewCodes = map[string]struct{}{
+ "InvalidSignatureException": {},
+ "SignatureDoesNotMatch": {},
+ "AuthFailure": {},
+}
+
+var definiteSkewCodes = map[string]struct{}{
+ "RequestExpired": {},
+ "RequestInTheFuture": {},
+ "RequestTimeTooSkewed": {},
+}
+
+// wrapAsClockSkew checks if this error could be related to a clock skew
+// error and if so, wraps the error.
+func wrapAsClockSkew(ctx context.Context, err error) error {
+ var v interface{ ErrorCode() string }
+ if !errors.As(err, &v) {
+ return err
+ }
+ if _, ok := definiteSkewCodes[v.ErrorCode()]; ok {
+ return &retryableClockSkewError{Err: err}
+ }
+ _, isPossibleSkewCode := possibleSkewCodes[v.ErrorCode()]
+ if skew := internalcontext.GetAttemptSkewContext(ctx); skew > skewThreshold && isPossibleSkewCode {
+ return &retryableClockSkewError{Err: err}
+ }
+ return err
+}
+
// MetricsHeader attaches SDK request metric header for retries to the transport
type MetricsHeader struct{}
@@ -292,7 +367,7 @@ type retryMetadataKey struct{}
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
- metadata, ok = middleware.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
+ metadata, ok = smithymiddle.GetStackValue(ctx, retryMetadataKey{}).(retryMetadata)
return metadata, ok
}
@@ -301,7 +376,7 @@ func getRetryMetadata(ctx context.Context) (metadata retryMetadata, ok bool) {
// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
// to clear all stack values.
func setRetryMetadata(ctx context.Context, metadata retryMetadata) context.Context {
- return middleware.WithStackValue(ctx, retryMetadataKey{}, metadata)
+ return smithymiddle.WithStackValue(ctx, retryMetadataKey{}, metadata)
}
// AddRetryMiddlewaresOptions is the set of options that can be passed to
@@ -321,11 +396,23 @@ func AddRetryMiddlewares(stack *smithymiddle.Stack, options AddRetryMiddlewaresO
middleware.LogAttempts = options.LogRetryAttempts
})
- if err := stack.Finalize.Add(attempt, smithymiddle.After); err != nil {
+ // index retry to before signing, if signing exists
+ if err := stack.Finalize.Insert(attempt, "Signing", smithymiddle.Before); err != nil {
return err
}
- if err := stack.Finalize.Add(&MetricsHeader{}, smithymiddle.After); err != nil {
+
+ if err := stack.Finalize.Insert(&MetricsHeader{}, attempt.ID(), smithymiddle.After); err != nil {
return err
}
return nil
}
+
+// Determines the value of exception.type for metrics purposes. We prefer an
+// API-specific error code, otherwise it's just the Go type for the value.
+func errorType(err error) string {
+ var terr smithy.APIError
+ if errors.As(err, &terr) {
+ return terr.ErrorCode()
+ }
+ return fmt.Sprintf("%T", err)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
index c695e6fe5..1b485f998 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/retryable_error.go
@@ -2,6 +2,7 @@ package retry
import (
"errors"
+ "fmt"
"net"
"net/url"
"strings"
@@ -95,12 +96,33 @@ func (r RetryableConnectionError) IsErrorRetryable(err error) aws.Ternary {
var timeoutErr interface{ Timeout() bool }
var urlErr *url.Error
var netOpErr *net.OpError
+ var dnsError *net.DNSError
+
+ if errors.As(err, &dnsError) {
+ // NXDOMAIN errors should not be retried
+ if dnsError.IsNotFound {
+ return aws.BoolTernary(false)
+ }
+
+ // If !dnsError.Temporary(), the error may still be retryable
+ // (!Temporary() does not imply !retryable), so fall through to the
+ // remaining checks.
+ if dnsError.Temporary() {
+ return aws.BoolTernary(true)
+ }
+ }
switch {
case errors.As(err, &conErr) && conErr.ConnectionError():
retryable = true
+ case strings.Contains(err.Error(), "use of closed network connection"):
+ fallthrough
case strings.Contains(err.Error(), "connection reset"):
+ // The errors "connection reset" and "use of closed network connection"
+ // are effectively the same. It appears to be the difference between
+ // sync and async read of TCP RST in the stdlib's net.Conn read loop.
+ // see #2737
retryable = true
case errors.As(err, &urlErr):
@@ -184,3 +206,23 @@ func (r RetryableErrorCode) IsErrorRetryable(err error) aws.Ternary {
return aws.TrueTernary
}
+
+// retryableClockSkewError marks errors that can be caused by clock skew
+// (difference between server time and client time).
+// This is returned when there's certain confidence that adjusting the client time
+// could allow a retry to succeed
+type retryableClockSkewError struct{ Err error }
+
+func (e *retryableClockSkewError) Error() string {
+ return fmt.Sprintf("Probable clock skew error: %v", e.Err)
+}
+
+// Unwrap returns the wrapped error.
+func (e *retryableClockSkewError) Unwrap() error {
+ return e.Err
+}
+
+// RetryableError allows the retryer to retry this request
+func (e *retryableClockSkewError) RetryableError() bool {
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
index 25abffc81..d5ea93222 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retry/standard.go
@@ -123,6 +123,17 @@ type StandardOptions struct {
// Provides the rate limiting strategy for rate limiting attempt retries
// across all attempts the retryer is being used with.
+ //
+ // A RateLimiter operates as a token bucket with a set capacity, where
+ // attempt failures events consume tokens. A retry attempt that attempts to
+ // consume more tokens than what's available results in operation failure.
+ // The default implementation is parameterized as follows:
+ // - a capacity of 500 (DefaultRetryRateTokens)
+ // - a retry caused by a timeout costs 10 tokens (DefaultRetryCost)
+ // - a retry caused by other errors costs 5 tokens (DefaultRetryTimeoutCost)
+ // - an operation that succeeds on the 1st attempt adds 1 token (DefaultNoRetryIncrement)
+ //
+ // You can disable rate limiting by setting this field to ratelimit.None.
RateLimiter RateLimiter
// The cost to deduct from the RateLimiter's token bucket per retry.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
index 6777e21ef..b0ba4cb2f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/retryer.go
@@ -54,7 +54,7 @@ type Retryer interface {
MaxAttempts() int
// RetryDelay returns the delay that should be used before retrying the
- // attempt. Will return error if the if the delay could not be determined.
+ // attempt. Will return error if the delay could not be determined.
RetryDelay(attempt int, opErr error) (time.Duration, error)
// GetRetryToken attempts to deduct the retry cost from the retry token pool.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
index 85a1d8f03..d99b32ceb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4/headers.go
@@ -4,9 +4,11 @@ package v4
var IgnoredHeaders = Rules{
ExcludeList{
MapRule{
- "Authorization": struct{}{},
- "User-Agent": struct{}{},
- "X-Amzn-Trace-Id": struct{}{},
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ "X-Amzn-Trace-Id": struct{}{},
+ "Expect": struct{}{},
+ "Transfer-Encoding": struct{}{},
},
},
}
@@ -44,9 +46,9 @@ var RequiredSignedHeaders = Rules{
"X-Amz-Grant-Write-Acp": struct{}{},
"X-Amz-Metadata-Directive": struct{}{},
"X-Amz-Mfa": struct{}{},
- "X-Amz-Request-Payer": struct{}{},
"X-Amz-Server-Side-Encryption": struct{}{},
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Context": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
"X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
index db8377ae5..8a46220a3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/middleware.go
@@ -12,8 +12,10 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
v4Internal "github.com/aws/aws-sdk-go-v2/aws/signer/internal/v4"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -57,7 +59,7 @@ func (e *SigningError) Unwrap() error {
// S3 PutObject API allows unsigned payload signing auth usage when TLS is enabled, and uses this middleware to
// dynamically switch between unsigned and signed payload based on TLS state for request.
func UseDynamicPayloadSigningMiddleware(stack *middleware.Stack) error {
- _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{})
+ _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &dynamicPayloadSigningMiddleware{})
return err
}
@@ -70,27 +72,25 @@ func (m *dynamicPayloadSigningMiddleware) ID() string {
return computePayloadHashMiddlewareID
}
-// HandleBuild sets a resolver that directs to the payload sha256 compute handler.
-func (m *dynamicPayloadSigningMiddleware) HandleBuild(
- ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+// HandleFinalize delegates SHA256 computation according to whether the request
+// is TLS-enabled.
+func (m *dynamicPayloadSigningMiddleware) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
- out middleware.BuildOutput, metadata middleware.Metadata, err error,
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
}
- // if TLS is enabled, use unsigned payload when supported
if req.IsHTTPS() {
- return (&unsignedPayload{}).HandleBuild(ctx, in, next)
+ return (&UnsignedPayload{}).HandleFinalize(ctx, in, next)
}
-
- // else fall back to signed payload
- return (&computePayloadSHA256{}).HandleBuild(ctx, in, next)
+ return (&ComputePayloadSHA256{}).HandleFinalize(ctx, in, next)
}
-// unsignedPayload sets the SigV4 request payload hash to unsigned.
+// UnsignedPayload sets the SigV4 request payload hash to unsigned.
//
// Will not set the Unsigned Payload magic SHA value, if a SHA has already been
// stored in the context. (e.g. application pre-computed SHA256 before making
@@ -98,39 +98,32 @@ func (m *dynamicPayloadSigningMiddleware) HandleBuild(
//
// This middleware does not check the X-Amz-Content-Sha256 header, if that
// header is serialized a middleware must translate it into the context.
-type unsignedPayload struct{}
+type UnsignedPayload struct{}
// AddUnsignedPayloadMiddleware adds unsignedPayload to the operation
// middleware stack
func AddUnsignedPayloadMiddleware(stack *middleware.Stack) error {
- return stack.Build.Add(&unsignedPayload{}, middleware.After)
+ return stack.Finalize.Insert(&UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
}
// ID returns the unsignedPayload identifier
-func (m *unsignedPayload) ID() string {
+func (m *UnsignedPayload) ID() string {
return computePayloadHashMiddlewareID
}
-// HandleBuild sets the payload hash to be an unsigned payload
-func (m *unsignedPayload) HandleBuild(
- ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+// HandleFinalize sets the payload hash magic value to the unsigned sentinel.
+func (m *UnsignedPayload) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
- out middleware.BuildOutput, metadata middleware.Metadata, err error,
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
- // This should not compute the content SHA256 if the value is already
- // known. (e.g. application pre-computed SHA256 before making API call).
- // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if
- // that header is provided a middleware must translate it into the context.
- contentSHA := GetPayloadHash(ctx)
- if len(contentSHA) == 0 {
- contentSHA = v4Internal.UnsignedPayload
+ if GetPayloadHash(ctx) == "" {
+ ctx = SetPayloadHash(ctx, v4Internal.UnsignedPayload)
}
-
- ctx = SetPayloadHash(ctx, contentSHA)
- return next.HandleBuild(ctx, in)
+ return next.HandleFinalize(ctx, in)
}
-// computePayloadSHA256 computes SHA256 payload hash to sign.
+// ComputePayloadSHA256 computes SHA256 payload hash to sign.
//
// Will not set the Unsigned Payload magic SHA value, if a SHA has already been
// stored in the context. (e.g. application pre-computed SHA256 before making
@@ -138,32 +131,40 @@ func (m *unsignedPayload) HandleBuild(
//
// This middleware does not check the X-Amz-Content-Sha256 header, if that
// header is serialized a middleware must translate it into the context.
-type computePayloadSHA256 struct{}
+type ComputePayloadSHA256 struct{}
// AddComputePayloadSHA256Middleware adds computePayloadSHA256 to the
// operation middleware stack
func AddComputePayloadSHA256Middleware(stack *middleware.Stack) error {
- return stack.Build.Add(&computePayloadSHA256{}, middleware.After)
+ return stack.Finalize.Insert(&ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
}
// RemoveComputePayloadSHA256Middleware removes computePayloadSHA256 from the
// operation middleware stack
func RemoveComputePayloadSHA256Middleware(stack *middleware.Stack) error {
- _, err := stack.Build.Remove(computePayloadHashMiddlewareID)
+ _, err := stack.Finalize.Remove(computePayloadHashMiddlewareID)
return err
}
// ID is the middleware name
-func (m *computePayloadSHA256) ID() string {
+func (m *ComputePayloadSHA256) ID() string {
return computePayloadHashMiddlewareID
}
-// HandleBuild compute the payload hash for the request payload
-func (m *computePayloadSHA256) HandleBuild(
- ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+// HandleFinalize computes the payload hash for the request, storing it to the
+// context. This is a no-op if a caller has previously set that value.
+func (m *ComputePayloadSHA256) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
- out middleware.BuildOutput, metadata middleware.Metadata, err error,
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
+ if GetPayloadHash(ctx) != "" {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ _, span := tracing.StartSpan(ctx, "ComputePayloadSHA256")
+ defer span.End()
+
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &HashComputationError{
@@ -171,14 +172,6 @@ func (m *computePayloadSHA256) HandleBuild(
}
}
- // This should not compute the content SHA256 if the value is already
- // known. (e.g. application pre-computed SHA256 before making API call)
- // Does not have any tight coupling to the X-Amz-Content-Sha256 header, if
- // that header is provided a middleware must translate it into the context.
- if contentSHA := GetPayloadHash(ctx); len(contentSHA) != 0 {
- return next.HandleBuild(ctx, in)
- }
-
hash := sha256.New()
if stream := req.GetStream(); stream != nil {
_, err = io.Copy(hash, stream)
@@ -197,7 +190,8 @@ func (m *computePayloadSHA256) HandleBuild(
ctx = SetPayloadHash(ctx, hex.EncodeToString(hash.Sum(nil)))
- return next.HandleBuild(ctx, in)
+ span.End()
+ return next.HandleFinalize(ctx, in)
}
// SwapComputePayloadSHA256ForUnsignedPayloadMiddleware replaces the
@@ -206,38 +200,38 @@ func (m *computePayloadSHA256) HandleBuild(
// Use this to disable computing the Payload SHA256 checksum and instead use
// UNSIGNED-PAYLOAD for the SHA256 value.
func SwapComputePayloadSHA256ForUnsignedPayloadMiddleware(stack *middleware.Stack) error {
- _, err := stack.Build.Swap(computePayloadHashMiddlewareID, &unsignedPayload{})
+ _, err := stack.Finalize.Swap(computePayloadHashMiddlewareID, &UnsignedPayload{})
return err
}
-// contentSHA256Header sets the X-Amz-Content-Sha256 header value to
+// ContentSHA256Header sets the X-Amz-Content-Sha256 header value to
// the Payload hash stored in the context.
-type contentSHA256Header struct{}
+type ContentSHA256Header struct{}
// AddContentSHA256HeaderMiddleware adds ContentSHA256Header to the
// operation middleware stack
func AddContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
- return stack.Build.Insert(&contentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After)
+ return stack.Finalize.Insert(&ContentSHA256Header{}, computePayloadHashMiddlewareID, middleware.After)
}
// RemoveContentSHA256HeaderMiddleware removes contentSHA256Header middleware
// from the operation middleware stack
func RemoveContentSHA256HeaderMiddleware(stack *middleware.Stack) error {
- _, err := stack.Build.Remove((*contentSHA256Header)(nil).ID())
+ _, err := stack.Finalize.Remove((*ContentSHA256Header)(nil).ID())
return err
}
// ID returns the ContentSHA256HeaderMiddleware identifier
-func (m *contentSHA256Header) ID() string {
+func (m *ContentSHA256Header) ID() string {
return "SigV4ContentSHA256Header"
}
-// HandleBuild sets the X-Amz-Content-Sha256 header value to the Payload hash
+// HandleFinalize sets the X-Amz-Content-Sha256 header value to the Payload hash
// stored in the context.
-func (m *contentSHA256Header) HandleBuild(
- ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+func (m *ContentSHA256Header) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
- out middleware.BuildOutput, metadata middleware.Metadata, err error,
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
@@ -245,25 +239,35 @@ func (m *contentSHA256Header) HandleBuild(
}
req.Header.Set(v4Internal.ContentSHAKey, GetPayloadHash(ctx))
-
- return next.HandleBuild(ctx, in)
+ return next.HandleFinalize(ctx, in)
}
-// SignHTTPRequestMiddlewareOptions is the configuration options for the SignHTTPRequestMiddleware middleware.
+// SignHTTPRequestMiddlewareOptions is the configuration options for
+// [SignHTTPRequestMiddleware].
+//
+// Deprecated: [SignHTTPRequestMiddleware] is deprecated.
type SignHTTPRequestMiddlewareOptions struct {
CredentialsProvider aws.CredentialsProvider
Signer HTTPSigner
LogSigning bool
}
-// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4 HTTP Signing
+// SignHTTPRequestMiddleware is a `FinalizeMiddleware` implementation for SigV4
+// HTTP Signing.
+//
+// Deprecated: AWS service clients no longer use this middleware. Signing as an
+// SDK operation is now performed through an internal per-service middleware
+// which opaquely selects and uses the signer from the resolved auth scheme.
type SignHTTPRequestMiddleware struct {
credentialsProvider aws.CredentialsProvider
signer HTTPSigner
logSigning bool
}
-// NewSignHTTPRequestMiddleware constructs a SignHTTPRequestMiddleware using the given Signer for signing requests
+// NewSignHTTPRequestMiddleware constructs a [SignHTTPRequestMiddleware] using
+// the given [Signer] for signing requests.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *SignHTTPRequestMiddleware {
return &SignHTTPRequestMiddleware{
credentialsProvider: options.CredentialsProvider,
@@ -272,12 +276,17 @@ func NewSignHTTPRequestMiddleware(options SignHTTPRequestMiddlewareOptions) *Sig
}
}
-// ID is the SignHTTPRequestMiddleware identifier
+// ID is the SignHTTPRequestMiddleware identifier.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
func (s *SignHTTPRequestMiddleware) ID() string {
return "Signing"
}
-// HandleFinalize will take the provided input and sign the request using the SigV4 authentication scheme
+// HandleFinalize will take the provided input and sign the request using the
+// SigV4 authentication scheme.
+//
+// Deprecated: SignHTTPRequestMiddleware is deprecated.
func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
@@ -301,11 +310,23 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
return out, metadata, &SigningError{Err: fmt.Errorf("failed to retrieve credentials: %w", err)}
}
- err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(),
+ signerOptions := []func(o *SignerOptions){
func(o *SignerOptions) {
o.Logger = middleware.GetLogger(ctx)
o.LogSigning = s.logSigning
+ },
+ }
+
+ // existing DisableURIPathEscaping is equivalent in purpose
+ // to authentication scheme property DisableDoubleEncoding
+ disableDoubleEncoding, overridden := internalauth.GetDisableDoubleEncoding(ctx)
+ if overridden {
+ signerOptions = append(signerOptions, func(o *SignerOptions) {
+ o.DisableURIPathEscaping = disableDoubleEncoding
})
+ }
+
+ err = s.signer.SignHTTP(ctx, credentials, req.Request, payloadHash, signingName, signingRegion, sdk.NowTime(), signerOptions...)
if err != nil {
return out, metadata, &SigningError{Err: fmt.Errorf("failed to sign http request, %w", err)}
}
@@ -315,21 +336,24 @@ func (s *SignHTTPRequestMiddleware) HandleFinalize(ctx context.Context, in middl
return next.HandleFinalize(ctx, in)
}
-type streamingEventsPayload struct{}
+// StreamingEventsPayload signs input event stream messages.
+type StreamingEventsPayload struct{}
// AddStreamingEventsPayload adds the streamingEventsPayload middleware to the stack.
func AddStreamingEventsPayload(stack *middleware.Stack) error {
- return stack.Build.Add(&streamingEventsPayload{}, middleware.After)
+ return stack.Finalize.Add(&StreamingEventsPayload{}, middleware.Before)
}
-func (s *streamingEventsPayload) ID() string {
+// ID identifies the middleware.
+func (s *StreamingEventsPayload) ID() string {
return computePayloadHashMiddlewareID
}
-func (s *streamingEventsPayload) HandleBuild(
- ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+// HandleFinalize marks the input stream to be signed with SigV4.
+func (s *StreamingEventsPayload) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
) (
- out middleware.BuildOutput, metadata middleware.Metadata, err error,
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
contentSHA := GetPayloadHash(ctx)
if len(contentSHA) == 0 {
@@ -338,7 +362,7 @@ func (s *streamingEventsPayload) HandleBuild(
ctx = SetPayloadHash(ctx, contentSHA)
- return next.HandleBuild(ctx, in)
+ return next.HandleFinalize(ctx, in)
}
// GetSignedRequestSignature attempts to extract the signature of the request.
@@ -348,8 +372,9 @@ func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
const authHeaderSignatureElem = "Signature="
if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
- ps := strings.Split(auth, ", ")
+ ps := strings.Split(auth, ",")
for _, p := range ps {
+ p = strings.TrimSpace(p)
if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
sig := p[len(authHeaderSignatureElem):]
if len(sig) == 0 {
@@ -371,13 +396,8 @@ func haveCredentialProvider(p aws.CredentialsProvider) bool {
if p == nil {
return false
}
- switch p.(type) {
- case aws.AnonymousCredentials,
- *aws.AnonymousCredentials:
- return false
- }
- return true
+ return !aws.IsCredentialsProvider(p, (*aws.AnonymousCredentials)(nil))
}
type payloadHashKey struct{}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
index 66aa2bd6a..32875e077 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/stream.go
@@ -59,7 +59,7 @@ func (s *StreamSigner) GetSignature(ctx context.Context, headers, payload []byte
prevSignature := s.prevSignature
- st := v4Internal.NewSigningTime(signingTime)
+ st := v4Internal.NewSigningTime(signingTime.UTC())
sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
index afd069c1f..7ed91d5ba 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/signer/v4/v4.go
@@ -1,48 +1,41 @@
-// Package v4 implements signing for AWS V4 signer
+// Package v4 implements the AWS signature version 4 algorithm (commonly known
+// as SigV4).
//
-// Provides request signing for request that need to be signed with
-// AWS V4 Signatures.
+// For more information about SigV4, see [Signing AWS API requests] in the IAM
+// user guide.
//
-// # Standalone Signer
+// While this implementation CAN work in an external context, it is developed
+// primarily for SDK use and you may encounter fringe behaviors around header
+// canonicalization.
//
-// Generally using the signer outside of the SDK should not require any additional
+// # Pre-escaping a request URI
//
-// The signer does this by taking advantage of the URL.EscapedPath method. If your request URI requires
+// AWS v4 signature validation requires that the canonical string's URI path
+// component must be the escaped form of the HTTP request's path.
+//
+// The Go HTTP client will perform escaping automatically on the HTTP request.
+// This may cause signature validation errors because the request differs from
+// the URI path or query from which the signature was generated.
//
-// additional escaping you many need to use the URL.Opaque to define what the raw URI should be sent
-// to the service as.
+// Because of this, we recommend that you explicitly escape the request when
+// using this signer outside of the SDK to prevent possible signature mismatch.
+// This can be done by setting URL.Opaque on the request. The signer will
+// prefer that value, falling back to the return of URL.EscapedPath if unset.
//
-// The signer will first check the URL.Opaque field, and use its value if set.
-// The signer does require the URL.Opaque field to be set in the form of:
+// When setting URL.Opaque you must do so in the form of:
//
// "///"
//
// // e.g.
// "//example.com/some/path"
//
-// The leading "//" and hostname are required or the URL.Opaque escaping will
-// not work correctly.
+// The leading "//" and hostname are required or the escaping will not work
+// correctly.
//
-// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
-// method and using the returned value.
-//
-// AWS v4 signature validation requires that the canonical string's URI path
-// element must be the URI escaped form of the HTTP request's path.
-// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+// The TestStandaloneSign unit test provides a complete example of using the
+// signer outside of the SDK and pre-escaping the URI path.
//
-// The Go HTTP client will perform escaping automatically on the request. Some
-// of these escaping may cause signature validation errors because the HTTP
-// request differs from the URI path or query that the signature was generated.
-// https://golang.org/pkg/net/url/#URL.EscapedPath
-//
-// Because of this, it is recommended that when using the signer outside of the
-// SDK that explicitly escaping the request prior to being signed is preferable,
-// and will help prevent signature validation errors. This can be done by setting
-// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
-// call URL.EscapedPath() if Opaque is not set.
-//
-// Test `TestStandaloneSign` provides a complete example of using the signer
-// outside of the SDK and pre-escaping the URI path.
+// [Signing AWS API requests]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-signing.html
package v4
import (
@@ -68,6 +61,9 @@ import (
const (
signingAlgorithm = "AWS4-HMAC-SHA256"
authorizationHeader = "Authorization"
+
+ // Version of signing v4
+ Version = "SigV4"
)
// HTTPSigner is an interface to a SigV4 signer that can sign HTTP requests
@@ -103,6 +99,11 @@ type SignerOptions struct {
// This will enable logging of the canonical request, the string to sign, and for presigning the subsequent
// presigned URL.
LogSigning bool
+
+ // Disables setting the session token on the request as part of signing
+ // through X-Amz-Security-Token. This is needed for variations of v4 that
+ // present the token elsewhere.
+ DisableSessionToken bool
}
// Signer applies AWS v4 signing to given request. Use this to sign requests
@@ -136,6 +137,7 @@ type httpSigner struct {
DisableHeaderHoisting bool
DisableURIPathEscaping bool
+ DisableSessionToken bool
}
func (s *httpSigner) Build() (signedRequest, error) {
@@ -284,6 +286,7 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht
Time: v4Internal.NewSigningTime(signingTime.UTC()),
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
+ DisableSessionToken: options.DisableSessionToken,
KeyDerivator: s.keyDerivator,
}
@@ -335,7 +338,7 @@ func (s Signer) SignHTTP(ctx context.Context, credentials aws.Credentials, r *ht
//
// expires := 20 * time.Minute
// query := req.URL.Query()
-// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10)
+// query.Set("X-Amz-Expires", strconv.FormatInt(int64(expires/time.Second), 10))
// req.URL.RawQuery = query.Encode()
//
// This method does not modify the provided request.
@@ -360,6 +363,7 @@ func (s *Signer) PresignHTTP(
IsPreSign: true,
DisableHeaderHoisting: options.DisableHeaderHoisting,
DisableURIPathEscaping: options.DisableURIPathEscaping,
+ DisableSessionToken: options.DisableSessionToken,
KeyDerivator: s.keyDerivator,
}
@@ -390,7 +394,18 @@ func (s *httpSigner) buildCredentialScope() string {
func buildQuery(r v4Internal.Rule, header http.Header) (url.Values, http.Header) {
query := url.Values{}
unsignedHeaders := http.Header{}
+
+ // A list of headers to be converted to lower case to mitigate a limitation from S3
+ lowerCaseHeaders := map[string]string{
+ "X-Amz-Expected-Bucket-Owner": "x-amz-expected-bucket-owner", // see #2508
+ "X-Amz-Request-Payer": "x-amz-request-payer", // see #2764
+ }
+
for k, h := range header {
+ if newKey, ok := lowerCaseHeaders[k]; ok {
+ k = newKey
+ }
+
if r.IsValid(k) {
query[k] = h
} else {
@@ -502,7 +517,8 @@ func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Val
if s.IsPreSign {
query.Set(v4Internal.AmzAlgorithmKey, signingAlgorithm)
- if sessionToken := s.Credentials.SessionToken; len(sessionToken) > 0 {
+ sessionToken := s.Credentials.SessionToken
+ if !s.DisableSessionToken && len(sessionToken) > 0 {
query.Set("X-Amz-Security-Token", sessionToken)
}
@@ -512,7 +528,7 @@ func (s *httpSigner) setRequiredSigningFields(headers http.Header, query url.Val
headers[v4Internal.AmzDateKey] = append(headers[v4Internal.AmzDateKey][:0], amzDate)
- if len(s.Credentials.SessionToken) > 0 {
+ if !s.DisableSessionToken && len(s.Credentials.SessionToken) > 0 {
headers[v4Internal.AmzSecurityTokenKey] = append(headers[v4Internal.AmzSecurityTokenKey][:0], s.Credentials.SessionToken)
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
index 26d90719b..8d7c35a9e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/client.go
@@ -1,13 +1,16 @@
package http
import (
+ "context"
"crypto/tls"
- "github.com/aws/aws-sdk-go-v2/aws"
"net"
"net/http"
"reflect"
"sync"
"time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go/tracing"
)
// Defaults for the HTTPTransportBuilder.
@@ -179,7 +182,7 @@ func defaultHTTPTransport() *http.Transport {
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
- DialContext: dialer.DialContext,
+ DialContext: traceDialContext(dialer.DialContext),
TLSHandshakeTimeout: DefaultHTTPTransportTLSHandleshakeTimeout,
MaxIdleConns: DefaultHTTPTransportMaxIdleConns,
MaxIdleConnsPerHost: DefaultHTTPTransportMaxIdleConnsPerHost,
@@ -194,6 +197,35 @@ func defaultHTTPTransport() *http.Transport {
return tr
}
+type dialContext func(ctx context.Context, network, addr string) (net.Conn, error)
+
+func traceDialContext(dc dialContext) dialContext {
+ return func(ctx context.Context, network, addr string) (net.Conn, error) {
+ span, _ := tracing.GetSpan(ctx)
+ span.SetProperty("net.peer.name", addr)
+
+ conn, err := dc(ctx, network, addr)
+ if err != nil {
+ return conn, err
+ }
+
+ raddr := conn.RemoteAddr()
+ if raddr == nil {
+ return conn, err
+ }
+
+ host, port, err := net.SplitHostPort(raddr.String())
+ if err != nil { // don't blow up just because we couldn't parse
+ span.SetProperty("net.peer.addr", raddr.String())
+ } else {
+ span.SetProperty("net.peer.host", host)
+ span.SetProperty("net.peer.port", port)
+ }
+
+ return conn, err
+ }
+}
+
// shallowCopyStruct creates a shallow copy of the passed in source struct, and
// returns that copy of the same struct type.
func shallowCopyStruct(src interface{}) interface{} {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
index 8fd14cecd..a1ad20fe3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/aws/transport/http/response_error_middleware.go
@@ -12,18 +12,20 @@ import (
func AddResponseErrorMiddleware(stack *middleware.Stack) error {
// add error wrapper middleware before request id retriever middleware so that it can wrap the error response
// returned by operation deserializers
- return stack.Deserialize.Insert(&responseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+ return stack.Deserialize.Insert(&ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
}
-type responseErrorWrapper struct {
+// ResponseErrorWrapper wraps operation errors with ResponseError.
+type ResponseErrorWrapper struct {
}
// ID returns the middleware identifier
-func (m *responseErrorWrapper) ID() string {
+func (m *ResponseErrorWrapper) ID() string {
return "ResponseErrorWrapper"
}
-func (m *responseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+// HandleDeserialize wraps the stack error with smithyhttp.ResponseError.
+func (m *ResponseErrorWrapper) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
) {
out, metadata, err = next.HandleDeserialize(ctx, in)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml b/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml
deleted file mode 100644
index b11df5082..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-version: 0.2
-
-phases:
- build:
- commands:
- - echo Build started on `date`
- - export GOPATH=/go
- - export SDK_CODEBUILD_ROOT=`pwd`
- - make ci-test-no-generate
- post_build:
- commands:
- - echo Build completed on `date`
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
index 825337fdd..98546207b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/CHANGELOG.md
@@ -1,3 +1,740 @@
+# v1.29.15 (2025-06-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.14 (2025-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.13 (2025-04-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.12 (2025-03-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.11 (2025-03-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.10 (2025-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.9 (2025-03-04.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.8 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.7 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.6 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.5 (2025-02-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.4 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.3 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.2 (2025-01-24)
+
+* **Bug Fix**: Fix env config naming and usage of deprecated ioutil
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.29.1 (2025-01-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.0 (2025-01-15)
+
+* **Feature**: S3 client behavior is updated to always calculate a checksum by default for operations that support it (such as PutObject or UploadPart), or require it (such as DeleteObjects). The checksum algorithm used by default now becomes CRC32. Checksum behavior can be configured using `when_supported` and `when_required` options - in code using RequestChecksumCalculation, in shared config using request_checksum_calculation, or as env variable using AWS_REQUEST_CHECKSUM_CALCULATION. The S3 client attempts to validate response checksums for all S3 API operations that support checksums. However, if the SDK has not implemented the specified checksum algorithm then this validation is skipped. Checksum validation behavior can be configured using `when_supported` and `when_required` options - in code using ResponseChecksumValidation, in shared config using response_checksum_validation, or as env variable using AWS_RESPONSE_CHECKSUM_VALIDATION.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.11 (2025-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.10 (2025-01-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.9 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.8 (2025-01-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.7 (2024-12-19)
+
+* **Bug Fix**: Fix improper use of printf-style functions.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.4 (2024-11-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.3 (2024-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.1 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2024-10-16)
+
+* **Feature**: Adds the LoadOptions hook `WithBaseEndpoint` for setting global endpoint override in-code.
+
+# v1.27.43 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.42 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.41 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.40 (2024-10-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.39 (2024-09-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.38 (2024-09-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.37 (2024-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.36 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.35 (2024-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.34 (2024-09-16)
+
+* **Bug Fix**: Read `AWS_CONTAINER_CREDENTIALS_FULL_URI` env variable if set when reading a profile with `credential_source`. Also ensure `AWS_CONTAINER_CREDENTIALS_RELATIVE_URI` is always read before it
+
+# v1.27.33 (2024-09-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.32 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.31 (2024-08-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.30 (2024-08-23)
+
+* **Bug Fix**: Don't fail credentials unit tests if credentials are found on a file
+
+# v1.27.29 (2024-08-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.28 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.27 (2024-07-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.26 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.25 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.24 (2024-07-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.23 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.22 (2024-06-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.21 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.20 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.19 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.18 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.17 (2024-06-03)
+
+* **Documentation**: Add deprecation docs to global endpoint resolution interfaces. These APIs were previously deprecated with the introduction of service-specific endpoint resolution (EndpointResolverV2 and BaseEndpoint on service client options).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.16 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.15 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.14 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.13 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.12 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.11 (2024-04-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.10 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.6 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.5 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.4 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.3 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2024-01-22)
+
+* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.4 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2023-12-08)
+
+* **Bug Fix**: Correct loading of [services *] sections into shared config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2023-12-07)
+
+* **Feature**: Support modeled request compression. The only algorithm supported at this time is `gzip`.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.12 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.11 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.10 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.9 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.8 (2023-11-28.3)
+
+* **Bug Fix**: Correct resolution of S3Express auth disable toggle.
+
+# v1.25.7 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.6 (2023-11-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.5 (2023-11-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.3 (2023-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.2 (2023-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.1 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2023-11-14)
+
+* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2023-11-13)
+
+* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2023-11-09.2)
+
+* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.3 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.2 (2023-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2023-11-06)
+
+* No change notes available for this release.
+
+# v1.22.0 (2023-11-02)
+
+* **Feature**: Add env and shared config settings for disabling IMDSv1 fallback.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2023-10-24)
+
+* No change notes available for this release.
+
+# v1.19.0 (2023-10-16)
+
+* **Feature**: Modify logic of retrieving user agent appID from env config
+
+# v1.18.45 (2023-10-12)
+
+* **Bug Fix**: Fail to load config if an explicitly provided profile doesn't exist.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.44 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.43 (2023-10-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.42 (2023-09-22)
+
+* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
+* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.41 (2023-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.40 (2023-09-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.39 (2023-09-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.38 (2023-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.37 (2023-08-23)
+
+* No change notes available for this release.
+
+# v1.18.36 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.35 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.34 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.33 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.32 (2023-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.29 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.28 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.27 (2023-06-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.26 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.25 (2023-05-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.24 (2023-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.23 (2023-05-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.22 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.21 (2023-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.20 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.19 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.18 (2023-03-16)
+
+* **Bug Fix**: Allow RoleARN to be set as functional option on STS WebIdentityRoleOptions. Fixes aws/aws-sdk-go-v2#2015.
+
+# v1.18.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.16 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.15 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.14 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.13 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.12 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.11 (2023-02-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2022-12-15)
+
+* **Bug Fix**: Unify logic between shared config and in finding home directory
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2022-08-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2022-08-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-08-14)
+
+* **Feature**: Add alternative mechanism for determining the user's `$HOME` or `%USERPROFILE%` location when the environment variables are not present.
+
+# v1.16.1 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-08-10)
+
+* **Feature**: Adds support for the following settings in the `~/.aws/credentials` file: `sso_account_id`, `sso_region`, `sso_role_name`, `sso_start_url`, and `ca_bundle`.
+
+# v1.15.17 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.16 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.15 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.14 (2022-07-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.13 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2022-06-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2022-05-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2022-05-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.6 (2022-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2022-05-09)
+
+* **Bug Fix**: Fixes a bug in LoadDefaultConfig to correctly assign ConfigSources so all config resolvers have access to the config sources. This fixes the feature/ec2/imds client not having configuration applied via config.LoadOptions such as EC2IMDSClientEnableState. PR [#1682](https://github.com/aws/aws-sdk-go-v2/pull/1682)
+
+# v1.15.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.15.1 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
index 79f067017..09d9b6311 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/config.go
@@ -2,18 +2,11 @@ package config
import (
"context"
+ "os"
"github.com/aws/aws-sdk-go-v2/aws"
)
-// defaultLoaders are a slice of functions that will read external configuration
-// sources for configuration values. These values are read by the AWSConfigResolvers
-// using interfaces to extract specific information from the external configuration.
-var defaultLoaders = []loader{
- loadEnvConfig,
- loadSharedConfigIgnoreNotExist,
-}
-
// defaultAWSConfigResolvers are a slice of functions that will resolve external
// configuration values into AWS configuration values.
//
@@ -72,6 +65,30 @@ var defaultAWSConfigResolvers = []awsConfigResolver{
// implementations depend on or can be configured with earlier resolved
// configuration options.
resolveCredentials,
+
+ // Sets the resolved bearer authentication token API clients will use for
+ // httpBearerAuth authentication scheme.
+ resolveBearerAuthToken,
+
+ // Sets the sdk app ID if present in env var or shared config profile
+ resolveAppID,
+
+ resolveBaseEndpoint,
+
+ // Sets the DisableRequestCompression if present in env var or shared config profile
+ resolveDisableRequestCompression,
+
+ // Sets the RequestMinCompressSizeBytes if present in env var or shared config profile
+ resolveRequestMinCompressSizeBytes,
+
+ // Sets the AccountIDEndpointMode if present in env var or shared config profile
+ resolveAccountIDEndpointMode,
+
+ // Sets the RequestChecksumCalculation if present in env var or shared config profile
+ resolveRequestChecksumCalculation,
+
+ // Sets the ResponseChecksumValidation if present in env var or shared config profile
+ resolveResponseChecksumValidation,
}
// A Config represents a generic configuration value or set of values. This type
@@ -137,17 +154,10 @@ func (cs configs) ResolveAWSConfig(ctx context.Context, resolvers []awsConfigRes
for _, fn := range resolvers {
if err := fn(ctx, &cfg, cs); err != nil {
- // TODO provide better error?
return aws.Config{}, err
}
}
- var sources []interface{}
- for _, s := range cs {
- sources = append(sources, s)
- }
- cfg.ConfigSources = sources
-
return cfg, nil
}
@@ -169,13 +179,12 @@ func (cs configs) ResolveConfig(f func(configs []interface{}) error) error {
// The custom configurations must satisfy the respective providers for their data
// or the custom data will be ignored by the resolvers and config loaders.
//
-// cfg, err := config.LoadDefaultConfig( context.TODO(),
-// WithSharedConfigProfile("test-profile"),
-// )
-// if err != nil {
-// panic(fmt.Sprintf("failed loading config, %v", err))
-// }
-//
+// cfg, err := config.LoadDefaultConfig( context.TODO(),
+// config.WithSharedConfigProfile("test-profile"),
+// )
+// if err != nil {
+// panic(fmt.Sprintf("failed loading config, %v", err))
+// }
//
// The default configuration sources are:
// * Environment Variables
@@ -191,7 +200,7 @@ func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error)
// assign Load Options to configs
var cfgCpy = configs{options}
- cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, defaultLoaders)
+ cfgCpy, err = cfgCpy.AppendFromLoaders(ctx, resolveConfigLoaders(&options))
if err != nil {
return aws.Config{}, err
}
@@ -203,3 +212,17 @@ func LoadDefaultConfig(ctx context.Context, optFns ...func(*LoadOptions) error)
return cfg, nil
}
+
+func resolveConfigLoaders(options *LoadOptions) []loader {
+ loaders := make([]loader, 2)
+ loaders[0] = loadEnvConfig
+
+ // specification of a profile should cause a load failure if it doesn't exist
+ if os.Getenv(awsProfileEnv) != "" || options.SharedConfigProfile != "" {
+ loaders[1] = loadSharedConfig
+ } else {
+ loaders[1] = loadSharedConfigIgnoreNotExist
+ }
+
+ return loaders
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
index 31648ffb5..aab7164e2 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/doc.go
@@ -15,6 +15,6 @@
// take precedence over the default environment and shared config sources used by the SDK. If one or more Config sources
// implement the same provider interface, priority will be handled by the order in which the sources were passed in.
//
-// A number of helpers (prefixed by ``With``) are provided in this package that implement their respective provider
+// A number of helpers (prefixed by “With“) are provided in this package that implement their respective provider
// interface. These helpers should be used for overriding configuration programmatically at runtime.
package config
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
index 18c8e0121..9db507e38 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/env_config.go
@@ -5,13 +5,13 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"os"
"strconv"
"strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
)
// CredentialsSourceName provides a name of the provider when config is
@@ -20,73 +20,89 @@ const CredentialsSourceName = "EnvConfigCredentials"
// Environment variables that will be read for configuration values.
const (
- awsAccessKeyIDEnvVar = "AWS_ACCESS_KEY_ID"
- awsAccessKeyEnvVar = "AWS_ACCESS_KEY"
+ awsAccessKeyIDEnv = "AWS_ACCESS_KEY_ID"
+ awsAccessKeyEnv = "AWS_ACCESS_KEY"
- awsSecretAccessKeyEnvVar = "AWS_SECRET_ACCESS_KEY"
- awsSecretKeyEnvVar = "AWS_SECRET_KEY"
+ awsSecretAccessKeyEnv = "AWS_SECRET_ACCESS_KEY"
+ awsSecretKeyEnv = "AWS_SECRET_KEY"
- awsSessionTokenEnvVar = "AWS_SESSION_TOKEN"
+ awsSessionTokenEnv = "AWS_SESSION_TOKEN"
- awsContainerCredentialsEndpointEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
- awsContainerCredentialsRelativePathEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
- awsContainerPProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
+ awsContainerCredentialsFullURIEnv = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+ awsContainerCredentialsRelativeURIEnv = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+ awsContainerAuthorizationTokenEnv = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
- awsRegionEnvVar = "AWS_REGION"
- awsDefaultRegionEnvVar = "AWS_DEFAULT_REGION"
+ awsRegionEnv = "AWS_REGION"
+ awsDefaultRegionEnv = "AWS_DEFAULT_REGION"
- awsProfileEnvVar = "AWS_PROFILE"
- awsDefaultProfileEnvVar = "AWS_DEFAULT_PROFILE"
+ awsProfileEnv = "AWS_PROFILE"
+ awsDefaultProfileEnv = "AWS_DEFAULT_PROFILE"
- awsSharedCredentialsFileEnvVar = "AWS_SHARED_CREDENTIALS_FILE"
+ awsSharedCredentialsFileEnv = "AWS_SHARED_CREDENTIALS_FILE"
- awsConfigFileEnvVar = "AWS_CONFIG_FILE"
+ awsConfigFileEnv = "AWS_CONFIG_FILE"
- awsCustomCABundleEnvVar = "AWS_CA_BUNDLE"
+ awsCABundleEnv = "AWS_CA_BUNDLE"
- awsWebIdentityTokenFilePathEnvVar = "AWS_WEB_IDENTITY_TOKEN_FILE"
+ awsWebIdentityTokenFileEnv = "AWS_WEB_IDENTITY_TOKEN_FILE"
- awsRoleARNEnvVar = "AWS_ROLE_ARN"
- awsRoleSessionNameEnvVar = "AWS_ROLE_SESSION_NAME"
+ awsRoleARNEnv = "AWS_ROLE_ARN"
+ awsRoleSessionNameEnv = "AWS_ROLE_SESSION_NAME"
- awsEnableEndpointDiscoveryEnvVar = "AWS_ENABLE_ENDPOINT_DISCOVERY"
+ awsEnableEndpointDiscoveryEnv = "AWS_ENABLE_ENDPOINT_DISCOVERY"
- awsS3UseARNRegionEnvVar = "AWS_S3_USE_ARN_REGION"
+ awsS3UseARNRegionEnv = "AWS_S3_USE_ARN_REGION"
- awsEc2MetadataServiceEndpointModeEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"
+ awsEc2MetadataServiceEndpointModeEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE"
- awsEc2MetadataServiceEndpointEnvVar = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
+ awsEc2MetadataServiceEndpointEnv = "AWS_EC2_METADATA_SERVICE_ENDPOINT"
- awsEc2MetadataDisabled = "AWS_EC2_METADATA_DISABLED"
+ awsEc2MetadataDisabledEnv = "AWS_EC2_METADATA_DISABLED"
+ awsEc2MetadataV1DisabledEnv = "AWS_EC2_METADATA_V1_DISABLED"
- awsS3DisableMultiRegionAccessPointEnvVar = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS"
+ awsS3DisableMultiRegionAccessPointsEnv = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS"
- awsUseDualStackEndpoint = "AWS_USE_DUALSTACK_ENDPOINT"
+ awsUseDualStackEndpointEnv = "AWS_USE_DUALSTACK_ENDPOINT"
- awsUseFIPSEndpoint = "AWS_USE_FIPS_ENDPOINT"
+ awsUseFIPSEndpointEnv = "AWS_USE_FIPS_ENDPOINT"
- awsDefaultMode = "AWS_DEFAULTS_MODE"
+ awsDefaultsModeEnv = "AWS_DEFAULTS_MODE"
- awsRetryMaxAttempts = "AWS_MAX_ATTEMPTS"
- awsRetryMode = "AWS_RETRY_MODE"
+ awsMaxAttemptsEnv = "AWS_MAX_ATTEMPTS"
+ awsRetryModeEnv = "AWS_RETRY_MODE"
+ awsSdkUaAppIDEnv = "AWS_SDK_UA_APP_ID"
+
+ awsIgnoreConfiguredEndpointURLEnv = "AWS_IGNORE_CONFIGURED_ENDPOINT_URLS"
+ awsEndpointURLEnv = "AWS_ENDPOINT_URL"
+
+ awsDisableRequestCompressionEnv = "AWS_DISABLE_REQUEST_COMPRESSION"
+ awsRequestMinCompressionSizeBytesEnv = "AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES"
+
+ awsS3DisableExpressSessionAuthEnv = "AWS_S3_DISABLE_EXPRESS_SESSION_AUTH"
+
+ awsAccountIDEnv = "AWS_ACCOUNT_ID"
+ awsAccountIDEndpointModeEnv = "AWS_ACCOUNT_ID_ENDPOINT_MODE"
+
+ awsRequestChecksumCalculation = "AWS_REQUEST_CHECKSUM_CALCULATION"
+ awsResponseChecksumValidation = "AWS_RESPONSE_CHECKSUM_VALIDATION"
)
var (
credAccessEnvKeys = []string{
- awsAccessKeyIDEnvVar,
- awsAccessKeyEnvVar,
+ awsAccessKeyIDEnv,
+ awsAccessKeyEnv,
}
credSecretEnvKeys = []string{
- awsSecretAccessKeyEnvVar,
- awsSecretKeyEnvVar,
+ awsSecretAccessKeyEnv,
+ awsSecretKeyEnv,
}
regionEnvKeys = []string{
- awsRegionEnvVar,
- awsDefaultRegionEnvVar,
+ awsRegionEnv,
+ awsDefaultRegionEnv,
}
profileEnvKeys = []string{
- awsProfileEnvVar,
- awsDefaultProfileEnvVar,
+ awsProfileEnv,
+ awsDefaultProfileEnv,
}
)
@@ -205,6 +221,11 @@ type EnvConfig struct {
// AWS_EC2_METADATA_DISABLED=true
EC2IMDSClientEnableState imds.ClientEnableState
+ // Specifies if EC2 IMDSv1 fallback is disabled.
+ //
+ // AWS_EC2_METADATA_V1_DISABLED=true
+ EC2IMDSv1Disabled *bool
+
// Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
//
// AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
@@ -248,6 +269,41 @@ type EnvConfig struct {
//
// aws_retry_mode=standard
RetryMode aws.RetryMode
+
+ // aws sdk app ID that can be added to user agent header string
+ AppID string
+
+ // Flag used to disable configured endpoints.
+ IgnoreConfiguredEndpoints *bool
+
+ // Value to contain configured endpoints to be propagated to
+ // corresponding endpoint resolution field.
+ BaseEndpoint string
+
+ // determine if request compression is allowed, default to false
+ // retrieved from env var AWS_DISABLE_REQUEST_COMPRESSION
+ DisableRequestCompression *bool
+
+ // inclusive threshold request body size to trigger compression,
+ // default to 10240 and must be within 0 and 10485760 bytes inclusive
+ // retrieved from env var AWS_REQUEST_MIN_COMPRESSION_SIZE_BYTES
+ RequestMinCompressSizeBytes *int64
+
+ // Whether S3Express auth is disabled.
+ //
+ // This will NOT prevent requests from being made to S3Express buckets, it
+ // will only bypass the modified endpoint routing and signing behaviors
+ // associated with the feature.
+ S3DisableExpressAuth *bool
+
+ // Indicates whether account ID will be required/ignored in endpoint2.0 routing
+ AccountIDEndpointMode aws.AccountIDEndpointMode
+
+ // Indicates whether request checksum should be calculated
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+
+ // Indicates whether response checksum should be validated
+ ResponseChecksumValidation aws.ResponseChecksumValidation
}
// loadEnvConfig reads configuration values from the OS's environment variables.
@@ -267,61 +323,95 @@ func NewEnvConfig() (EnvConfig, error) {
setStringFromEnvVal(&creds.AccessKeyID, credAccessEnvKeys)
setStringFromEnvVal(&creds.SecretAccessKey, credSecretEnvKeys)
if creds.HasKeys() {
- creds.SessionToken = os.Getenv(awsSessionTokenEnvVar)
+ creds.AccountID = os.Getenv(awsAccountIDEnv)
+ creds.SessionToken = os.Getenv(awsSessionTokenEnv)
cfg.Credentials = creds
}
- cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsEndpointEnvVar)
- cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativePathEnvVar)
- cfg.ContainerAuthorizationToken = os.Getenv(awsContainerPProviderAuthorizationEnvVar)
+ cfg.ContainerCredentialsEndpoint = os.Getenv(awsContainerCredentialsFullURIEnv)
+ cfg.ContainerCredentialsRelativePath = os.Getenv(awsContainerCredentialsRelativeURIEnv)
+ cfg.ContainerAuthorizationToken = os.Getenv(awsContainerAuthorizationTokenEnv)
setStringFromEnvVal(&cfg.Region, regionEnvKeys)
setStringFromEnvVal(&cfg.SharedConfigProfile, profileEnvKeys)
- cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnvVar)
- cfg.SharedConfigFile = os.Getenv(awsConfigFileEnvVar)
+ cfg.SharedCredentialsFile = os.Getenv(awsSharedCredentialsFileEnv)
+ cfg.SharedConfigFile = os.Getenv(awsConfigFileEnv)
- cfg.CustomCABundle = os.Getenv(awsCustomCABundleEnvVar)
+ cfg.CustomCABundle = os.Getenv(awsCABundleEnv)
- cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFilePathEnvVar)
+ cfg.WebIdentityTokenFilePath = os.Getenv(awsWebIdentityTokenFileEnv)
- cfg.RoleARN = os.Getenv(awsRoleARNEnvVar)
- cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnvVar)
+ cfg.RoleARN = os.Getenv(awsRoleARNEnv)
+ cfg.RoleSessionName = os.Getenv(awsRoleSessionNameEnv)
- if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnvVar}); err != nil {
+ cfg.AppID = os.Getenv(awsSdkUaAppIDEnv)
+
+ if err := setBoolPtrFromEnvVal(&cfg.DisableRequestCompression, []string{awsDisableRequestCompressionEnv}); err != nil {
+ return cfg, err
+ }
+ if err := setInt64PtrFromEnvVal(&cfg.RequestMinCompressSizeBytes, []string{awsRequestMinCompressionSizeBytesEnv}, smithyrequestcompression.MaxRequestMinCompressSizeBytes); err != nil {
+ return cfg, err
+ }
+
+ if err := setEndpointDiscoveryTypeFromEnvVal(&cfg.EnableEndpointDiscovery, []string{awsEnableEndpointDiscoveryEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnv}); err != nil {
return cfg, err
}
- if err := setBoolPtrFromEnvVal(&cfg.S3UseARNRegion, []string{awsS3UseARNRegionEnvVar}); err != nil {
+ setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabledEnv})
+ if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnv}); err != nil {
+ return cfg, err
+ }
+ cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnv)
+ if err := setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, []string{awsEc2MetadataV1DisabledEnv}); err != nil {
return cfg, err
}
- setEC2IMDSClientEnableState(&cfg.EC2IMDSClientEnableState, []string{awsEc2MetadataDisabled})
- if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, []string{awsEc2MetadataServiceEndpointModeEnvVar}); err != nil {
+ if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointsEnv}); err != nil {
return cfg, err
}
- cfg.EC2IMDSEndpoint = os.Getenv(awsEc2MetadataServiceEndpointEnvVar)
- if err := setBoolPtrFromEnvVal(&cfg.S3DisableMultiRegionAccessPoints, []string{awsS3DisableMultiRegionAccessPointEnvVar}); err != nil {
+ if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpointEnv}); err != nil {
return cfg, err
}
- if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, []string{awsUseDualStackEndpoint}); err != nil {
+ if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpointEnv}); err != nil {
return cfg, err
}
- if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, []string{awsUseFIPSEndpoint}); err != nil {
+ if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultsModeEnv}); err != nil {
return cfg, err
}
- if err := setDefaultsModeFromEnvVal(&cfg.DefaultsMode, []string{awsDefaultMode}); err != nil {
+ if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsMaxAttemptsEnv}); err != nil {
+ return cfg, err
+ }
+ if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryModeEnv}); err != nil {
return cfg, err
}
- if err := setIntFromEnvVal(&cfg.RetryMaxAttempts, []string{awsRetryMaxAttempts}); err != nil {
+ setStringFromEnvVal(&cfg.BaseEndpoint, []string{awsEndpointURLEnv})
+
+ if err := setBoolPtrFromEnvVal(&cfg.IgnoreConfiguredEndpoints, []string{awsIgnoreConfiguredEndpointURLEnv}); err != nil {
return cfg, err
}
- if err := setRetryModeFromEnvVal(&cfg.RetryMode, []string{awsRetryMode}); err != nil {
+
+ if err := setBoolPtrFromEnvVal(&cfg.S3DisableExpressAuth, []string{awsS3DisableExpressSessionAuthEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setAIDEndPointModeFromEnvVal(&cfg.AccountIDEndpointMode, []string{awsAccountIDEndpointModeEnv}); err != nil {
+ return cfg, err
+ }
+
+ if err := setRequestChecksumCalculationFromEnvVal(&cfg.RequestChecksumCalculation, []string{awsRequestChecksumCalculation}); err != nil {
+ return cfg, err
+ }
+ if err := setResponseChecksumValidationFromEnvVal(&cfg.ResponseChecksumValidation, []string{awsResponseChecksumValidation}); err != nil {
return cfg, err
}
@@ -335,6 +425,36 @@ func (c EnvConfig) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool,
return c.DefaultsMode, true, nil
}
+func (c EnvConfig) getAppID(context.Context) (string, bool, error) {
+ return c.AppID, len(c.AppID) > 0, nil
+}
+
+func (c EnvConfig) getDisableRequestCompression(context.Context) (bool, bool, error) {
+ if c.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *c.DisableRequestCompression, true, nil
+}
+
+func (c EnvConfig) getRequestMinCompressSizeBytes(context.Context) (int64, bool, error) {
+ if c.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *c.RequestMinCompressSizeBytes, true, nil
+}
+
+func (c EnvConfig) getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error) {
+ return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil
+}
+
+func (c EnvConfig) getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error) {
+ return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil
+}
+
+func (c EnvConfig) getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error) {
+ return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil
+}
+
// GetRetryMaxAttempts returns the value of AWS_MAX_ATTEMPTS if was specified,
// and not 0.
func (c EnvConfig) GetRetryMaxAttempts(ctx context.Context) (int, bool, error) {
@@ -409,6 +529,67 @@ func setEC2IMDSEndpointMode(mode *imds.EndpointModeState, keys []string) error {
return nil
}
+func setAIDEndPointModeFromEnvVal(m *aws.AccountIDEndpointMode, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ switch value {
+ case "preferred":
+ *m = aws.AccountIDEndpointModePreferred
+ case "required":
+ *m = aws.AccountIDEndpointModeRequired
+ case "disabled":
+ *m = aws.AccountIDEndpointModeDisabled
+ default:
+ return fmt.Errorf("invalid value for environment variable, %s=%s, must be preferred/required/disabled", k, value)
+ }
+ break
+ }
+ return nil
+}
+
+func setRequestChecksumCalculationFromEnvVal(m *aws.RequestChecksumCalculation, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ switch strings.ToLower(value) {
+ case checksumWhenSupported:
+ *m = aws.RequestChecksumCalculationWhenSupported
+ case checksumWhenRequired:
+ *m = aws.RequestChecksumCalculationWhenRequired
+ default:
+ return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value)
+ }
+ }
+ return nil
+}
+
+func setResponseChecksumValidationFromEnvVal(m *aws.ResponseChecksumValidation, keys []string) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ switch strings.ToLower(value) {
+ case checksumWhenSupported:
+ *m = aws.ResponseChecksumValidationWhenSupported
+ case checksumWhenRequired:
+ *m = aws.ResponseChecksumValidationWhenRequired
+ default:
+ return fmt.Errorf("invalid value for environment variable, %s=%s, must be when_supported/when_required", k, value)
+ }
+
+ }
+ return nil
+}
+
// GetRegion returns the AWS Region if set in the environment. Returns an empty
// string if not set.
func (c EnvConfig) getRegion(ctx context.Context) (string, bool, error) {
@@ -465,13 +646,41 @@ func (c EnvConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
return nil, false, nil
}
- b, err := ioutil.ReadFile(c.CustomCABundle)
+ b, err := os.ReadFile(c.CustomCABundle)
if err != nil {
return nil, false, err
}
return bytes.NewReader(b), true, nil
}
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+func (c EnvConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+ if c.IgnoreConfiguredEndpoints == nil {
+ return false, false, nil
+ }
+
+ return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c EnvConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+ return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use
+// with configured endpoints.
+func (c EnvConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+ if endpt := os.Getenv(fmt.Sprintf("%s_%s", awsEndpointURLEnv, normalizeEnv(sdkID))); endpt != "" {
+ return endpt, true, nil
+ }
+ return "", false, nil
+}
+
+func normalizeEnv(sdkID string) string {
+ upper := strings.ToUpper(sdkID)
+ return strings.ReplaceAll(upper, " ", "_")
+}
+
// GetS3UseARNRegion returns whether to allow ARNs to direct the region
// the S3 client's requests are sent to.
func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err error) {
@@ -482,9 +691,9 @@ func (c EnvConfig) GetS3UseARNRegion(ctx context.Context) (value, ok bool, err e
return *c.S3UseARNRegion, true, nil
}
-// GetS3DisableMultRegionAccessPoints returns whether to disable multi-region access point
+// GetS3DisableMultiRegionAccessPoints returns whether to disable multi-region access point
// support for the S3 client.
-func (c EnvConfig) GetS3DisableMultRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
+func (c EnvConfig) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (value, ok bool, err error) {
if c.S3DisableMultiRegionAccessPoints == nil {
return false, false, nil
}
@@ -563,6 +772,30 @@ func setBoolPtrFromEnvVal(dst **bool, keys []string) error {
return nil
}
+func setInt64PtrFromEnvVal(dst **int64, keys []string, max int64) error {
+ for _, k := range keys {
+ value := os.Getenv(k)
+ if len(value) == 0 {
+ continue
+ }
+
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return fmt.Errorf("invalid value for env var, %s=%s, need int64", k, value)
+ } else if v < 0 || v > max {
+ return fmt.Errorf("invalid range for env var min request compression size bytes %q, must be within 0 and 10485760 inclusively", v)
+ }
+ if *dst == nil {
+ *dst = new(int64)
+ }
+
+ **dst = v
+ break
+ }
+
+ return nil
+}
+
func setEndpointDiscoveryTypeFromEnvVal(dst *aws.EndpointDiscoveryEnableState, keys []string) error {
for _, k := range keys {
value := os.Getenv(k)
@@ -663,3 +896,23 @@ func (c EnvConfig) GetEC2IMDSEndpoint() (string, bool, error) {
return c.EC2IMDSEndpoint, true, nil
}
+
+// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option
+// resolver interface.
+func (c EnvConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+ if c.EC2IMDSv1Disabled == nil {
+ return false, false
+ }
+
+ return *c.EC2IMDSv1Disabled, true
+}
+
+// GetS3DisableExpressAuth returns the configured value for
+// [EnvConfig.S3DisableExpressAuth].
+func (c EnvConfig) GetS3DisableExpressAuth() (value, ok bool) {
+ if c.S3DisableExpressAuth == nil {
+ return false, false
+ }
+
+ return *c.S3DisableExpressAuth, true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
index 7de7b15a7..f4b653dd8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/go_module_metadata.go
@@ -3,4 +3,4 @@
package config
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.15.1"
+const goModuleVersion = "1.29.15"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
index 22e6019fb..0810ecf16 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/load_options.go
@@ -11,6 +11,7 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ smithybearer "github.com/aws/smithy-go/auth/bearer"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
@@ -28,6 +29,9 @@ type LoadOptions struct {
// Credentials object to use when signing requests.
Credentials aws.CredentialsProvider
+ // Token provider for authentication operations with bearer authentication.
+ BearerAuthTokenProvider smithybearer.TokenProvider
+
// HTTPClient the SDK's API clients will use to invoke HTTP requests.
HTTPClient HTTPClient
@@ -128,6 +132,14 @@ type LoadOptions struct {
// aws.CredentialsCacheOptions
CredentialsCacheOptions func(*aws.CredentialsCacheOptions)
+ // BearerAuthTokenCacheOptions is a function for setting the smithy-go
+ // auth/bearer#TokenCacheOptions
+ BearerAuthTokenCacheOptions func(*smithybearer.TokenCacheOptions)
+
+ // SSOTokenProviderOptions is a function for setting the
+ // credentials/ssocreds.SSOTokenProviderOptions
+ SSOTokenProviderOptions func(*ssocreds.SSOTokenProviderOptions)
+
// ProcessCredentialOptions is a function for setting
// the processcreds.Options
ProcessCredentialOptions func(*processcreds.Options)
@@ -160,6 +172,10 @@ type LoadOptions struct {
// the region, the client's requests are sent to.
S3UseARNRegion *bool
+ // S3DisableMultiRegionAccessPoints specifies if the S3 service should disable
+ // the S3 Multi-Region access points feature.
+ S3DisableMultiRegionAccessPoints *bool
+
// EnableEndpointDiscovery specifies if endpoint discovery is enable for
// the client.
EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
@@ -187,6 +203,31 @@ type LoadOptions struct {
// Specifies the SDK configuration mode for defaults.
DefaultsModeOptions DefaultsModeOptions
+
+ // The sdk app ID retrieved from env var or shared config to be added to request user agent header
+ AppID string
+
+ // Specifies whether an operation request could be compressed
+ DisableRequestCompression *bool
+
+ // The inclusive min bytes of a request body that could be compressed
+ RequestMinCompressSizeBytes *int64
+
+ // Whether S3 Express auth is disabled.
+ S3DisableExpressAuth *bool
+
+ // Whether account id should be built into endpoint resolution
+ AccountIDEndpointMode aws.AccountIDEndpointMode
+
+ // Specify if request checksum should be calculated
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+
+ // Specifies if response checksum should be validated
+ ResponseChecksumValidation aws.ResponseChecksumValidation
+
+ // Service endpoint override. This value is not necessarily final and is
+ // passed to the service's EndpointResolverV2 for further delegation.
+ BaseEndpoint string
}
func (o LoadOptions) getDefaultsMode(ctx context.Context) (aws.DefaultsMode, bool, error) {
@@ -229,6 +270,52 @@ func (o LoadOptions) getRegion(ctx context.Context) (string, bool, error) {
return o.Region, true, nil
}
+// getAppID returns AppID from config's LoadOptions
+func (o LoadOptions) getAppID(ctx context.Context) (string, bool, error) {
+ return o.AppID, len(o.AppID) > 0, nil
+}
+
+// getDisableRequestCompression returns DisableRequestCompression from config's LoadOptions
+func (o LoadOptions) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
+ if o.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *o.DisableRequestCompression, true, nil
+}
+
+// getRequestMinCompressSizeBytes returns RequestMinCompressSizeBytes from config's LoadOptions
+func (o LoadOptions) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
+ if o.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *o.RequestMinCompressSizeBytes, true, nil
+}
+
+func (o LoadOptions) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) {
+ return o.AccountIDEndpointMode, len(o.AccountIDEndpointMode) > 0, nil
+}
+
+func (o LoadOptions) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) {
+ return o.RequestChecksumCalculation, o.RequestChecksumCalculation > 0, nil
+}
+
+func (o LoadOptions) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) {
+ return o.ResponseChecksumValidation, o.ResponseChecksumValidation > 0, nil
+}
+
+func (o LoadOptions) getBaseEndpoint(context.Context) (string, bool, error) {
+ return o.BaseEndpoint, o.BaseEndpoint != "", nil
+}
+
+// GetServiceBaseEndpoint satisfies (internal/configsources).ServiceBaseEndpointProvider.
+//
+// The sdkID value is unused because LoadOptions only supports setting a GLOBAL
+// endpoint override. In-code, per-service endpoint overrides are performed via
+// functional options in service client space.
+func (o LoadOptions) GetServiceBaseEndpoint(context.Context, string) (string, bool, error) {
+ return o.BaseEndpoint, o.BaseEndpoint != "", nil
+}
+
// WithRegion is a helper function to construct functional options
// that sets Region on config's LoadOptions. Setting the region to
// an empty string, will result in the region value being ignored.
@@ -241,6 +328,70 @@ func WithRegion(v string) LoadOptionsFunc {
}
}
+// WithAppID is a helper function to construct functional options
+// that sets AppID on config's LoadOptions.
+func WithAppID(ID string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.AppID = ID
+ return nil
+ }
+}
+
+// WithDisableRequestCompression is a helper function to construct functional options
+// that sets DisableRequestCompression on config's LoadOptions.
+func WithDisableRequestCompression(DisableRequestCompression *bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if DisableRequestCompression == nil {
+ return nil
+ }
+ o.DisableRequestCompression = DisableRequestCompression
+ return nil
+ }
+}
+
+// WithRequestMinCompressSizeBytes is a helper function to construct functional options
+// that sets RequestMinCompressSizeBytes on config's LoadOptions.
+func WithRequestMinCompressSizeBytes(RequestMinCompressSizeBytes *int64) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if RequestMinCompressSizeBytes == nil {
+ return nil
+ }
+ o.RequestMinCompressSizeBytes = RequestMinCompressSizeBytes
+ return nil
+ }
+}
+
+// WithAccountIDEndpointMode is a helper function to construct functional options
+// that sets AccountIDEndpointMode on config's LoadOptions
+func WithAccountIDEndpointMode(m aws.AccountIDEndpointMode) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if m != "" {
+ o.AccountIDEndpointMode = m
+ }
+ return nil
+ }
+}
+
+// WithRequestChecksumCalculation is a helper function to construct functional options
+// that sets RequestChecksumCalculation on config's LoadOptions
+func WithRequestChecksumCalculation(c aws.RequestChecksumCalculation) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ if c > 0 {
+ o.RequestChecksumCalculation = c
+ }
+ return nil
+ }
+}
+
+// WithResponseChecksumValidation is a helper function to construct functional options
+// that sets ResponseChecksumValidation on config's LoadOptions
+func WithResponseChecksumValidation(v aws.ResponseChecksumValidation) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.ResponseChecksumValidation = v
+ return nil
+ }
+}
+
// getDefaultRegion returns DefaultRegion from config's LoadOptions
func (o LoadOptions) getDefaultRegion(ctx context.Context) (string, bool, error) {
if len(o.DefaultRegion) == 0 {
@@ -451,6 +602,73 @@ func WithCredentialsCacheOptions(v func(*aws.CredentialsCacheOptions)) LoadOptio
}
}
+// getBearerAuthTokenProvider returns the credentials value
+func (o LoadOptions) getBearerAuthTokenProvider(ctx context.Context) (smithybearer.TokenProvider, bool, error) {
+ if o.BearerAuthTokenProvider == nil {
+ return nil, false, nil
+ }
+
+ return o.BearerAuthTokenProvider, true, nil
+}
+
+// WithBearerAuthTokenProvider is a helper function to construct functional options
+// that sets Credential provider value on config's LoadOptions. If credentials
+// provider is set to nil, the credentials provider value will be ignored.
+// If multiple WithBearerAuthTokenProvider calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenProvider(v smithybearer.TokenProvider) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.BearerAuthTokenProvider = v
+ return nil
+ }
+}
+
+// getBearerAuthTokenCacheOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions
+func (o LoadOptions) getBearerAuthTokenCacheOptions(ctx context.Context) (func(*smithybearer.TokenCacheOptions), bool, error) {
+ if o.BearerAuthTokenCacheOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.BearerAuthTokenCacheOptions, true, nil
+}
+
+// WithBearerAuthTokenCacheOptions is a helper function to construct functional options
+// that sets a function to modify the TokenCacheOptions the smithy-go
+// auth/bearer#TokenCache will be configured with, if the TokenCache is used by
+// the configuration loader.
+//
+// If multiple WithBearerAuthTokenCacheOptions calls are made, the last call overrides
+// the previous call values.
+func WithBearerAuthTokenCacheOptions(v func(*smithybearer.TokenCacheOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.BearerAuthTokenCacheOptions = v
+ return nil
+ }
+}
+
+// getSSOTokenProviderOptionsProvider returns the wrapped function to set smithybearer.TokenCacheOptions
+func (o LoadOptions) getSSOTokenProviderOptions(ctx context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error) {
+ if o.SSOTokenProviderOptions == nil {
+ return nil, false, nil
+ }
+
+ return o.SSOTokenProviderOptions, true, nil
+}
+
+// WithSSOTokenProviderOptions is a helper function to construct functional
+// options that sets a function to modify the SSOtokenProviderOptions the SDK's
+// credentials/ssocreds#SSOProvider will be configured with, if the
+// SSOTokenProvider is used by the configuration loader.
+//
+// If multiple WithSSOTokenProviderOptions calls are made, the last call overrides
+// the previous call values.
+func WithSSOTokenProviderOptions(v func(*ssocreds.SSOTokenProviderOptions)) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.SSOTokenProviderOptions = v
+ return nil
+ }
+}
+
// getProcessCredentialOptions returns the wrapped function to set processcreds.Options
func (o LoadOptions) getProcessCredentialOptions(ctx context.Context) (func(*processcreds.Options), bool, error) {
if o.ProcessCredentialOptions == nil {
@@ -675,7 +893,14 @@ func (o LoadOptions) getEndpointResolver(ctx context.Context) (aws.EndpointResol
// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
// are made, the last call overrides the previous call values.
//
-// Deprecated: See WithEndpointResolverWithOptions
+// Deprecated: The global endpoint resolution interface is deprecated. The API
+// for endpoint resolution is now unique to each service and is set via the
+// EndpointResolverV2 field on service client options. Use of
+// WithEndpointResolver or WithEndpointResolverWithOptions will prevent you
+// from using any endpoint-related service features released after the
+// introduction of EndpointResolverV2. You may also encounter broken or
+// unexpected behavior when using the old global interface with services that
+// use many endpoint-related customizations such as S3.
func WithEndpointResolver(v aws.EndpointResolver) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.EndpointResolver = v
@@ -695,6 +920,9 @@ func (o LoadOptions) getEndpointResolverWithOptions(ctx context.Context) (aws.En
// that sets the EndpointResolverWithOptions on LoadOptions. If the EndpointResolverWithOptions is set to nil,
// the EndpointResolver value is ignored. If multiple WithEndpointResolver calls
// are made, the last call overrides the previous call values.
+//
+// Deprecated: The global endpoint resolution interface is deprecated. See
+// deprecation docs on [WithEndpointResolver].
func WithEndpointResolverWithOptions(v aws.EndpointResolverWithOptions) LoadOptionsFunc {
return func(o *LoadOptions) error {
o.EndpointResolverWithOptions = v
@@ -780,6 +1008,26 @@ func WithS3UseARNRegion(v bool) LoadOptionsFunc {
}
}
+// GetS3DisableMultiRegionAccessPoints returns whether to disable
+// the S3 multi-region access points feature.
+func (o LoadOptions) GetS3DisableMultiRegionAccessPoints(ctx context.Context) (v bool, found bool, err error) {
+ if o.S3DisableMultiRegionAccessPoints == nil {
+ return false, false, nil
+ }
+ return *o.S3DisableMultiRegionAccessPoints, true, nil
+}
+
+// WithS3DisableMultiRegionAccessPoints is a helper function to construct functional options
+// that can be used to set S3DisableMultiRegionAccessPoints on LoadOptions.
+// If multiple WithS3DisableMultiRegionAccessPoints calls are made, the last call overrides
+// the previous call values.
+func WithS3DisableMultiRegionAccessPoints(v bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.S3DisableMultiRegionAccessPoints = &v
+ return nil
+ }
+}
+
// GetEnableEndpointDiscovery returns if the EnableEndpointDiscovery flag is set.
func (o LoadOptions) GetEnableEndpointDiscovery(ctx context.Context) (value aws.EndpointDiscoveryEnableState, ok bool, err error) {
if o.EnableEndpointDiscovery == aws.EndpointDiscoveryUnset {
@@ -924,3 +1172,38 @@ func WithDefaultsMode(mode aws.DefaultsMode, optFns ...func(options *DefaultsMod
return nil
}
}
+
+// GetS3DisableExpressAuth returns the configured value for
+// [EnvConfig.S3DisableExpressAuth].
+func (o LoadOptions) GetS3DisableExpressAuth() (value, ok bool) {
+ if o.S3DisableExpressAuth == nil {
+ return false, false
+ }
+
+ return *o.S3DisableExpressAuth, true
+}
+
+// WithS3DisableExpressAuth sets [LoadOptions.S3DisableExpressAuth]
+// to the value provided.
+func WithS3DisableExpressAuth(v bool) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.S3DisableExpressAuth = &v
+ return nil
+ }
+}
+
+// WithBaseEndpoint is a helper function to construct functional options that
+// sets BaseEndpoint on config's LoadOptions. Empty values have no effect, and
+// subsequent calls to this API override previous ones.
+//
+// This is an in-code setting, therefore, any value set using this hook takes
+// precedence over and will override ALL environment and shared config
+// directives that set endpoint URLs. Functional options on service clients
+// have higher specificity, and functional options that modify the value of
+// BaseEndpoint on a client will take precedence over this setting.
+func WithBaseEndpoint(v string) LoadOptionsFunc {
+ return func(o *LoadOptions) error {
+ o.BaseEndpoint = v
+ return nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
index 3f12df1bf..a8ff40d84 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/provider.go
@@ -12,6 +12,7 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
+ smithybearer "github.com/aws/smithy-go/auth/bearer"
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
)
@@ -121,6 +122,160 @@ func getRegion(ctx context.Context, configs configs) (value string, found bool,
return
}
+// IgnoreConfiguredEndpointsProvider is needed to search for all providers
+// that provide a flag to disable configured endpoints.
+type IgnoreConfiguredEndpointsProvider interface {
+ GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error)
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+ value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+type baseEndpointProvider interface {
+ getBaseEndpoint(ctx context.Context) (string, bool, error)
+}
+
+func getBaseEndpoint(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(baseEndpointProvider); ok {
+ value, found, err = p.getBaseEndpoint(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+type servicesObjectProvider interface {
+ getServicesObject(ctx context.Context) (map[string]map[string]string, bool, error)
+}
+
+func getServicesObject(ctx context.Context, configs configs) (value map[string]map[string]string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(servicesObjectProvider); ok {
+ value, found, err = p.getServicesObject(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// appIDProvider provides access to the sdk app ID value
+type appIDProvider interface {
+ getAppID(ctx context.Context) (string, bool, error)
+}
+
+func getAppID(ctx context.Context, configs configs) (value string, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(appIDProvider); ok {
+ value, found, err = p.getAppID(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// disableRequestCompressionProvider provides access to the DisableRequestCompression
+type disableRequestCompressionProvider interface {
+ getDisableRequestCompression(context.Context) (bool, bool, error)
+}
+
+func getDisableRequestCompression(ctx context.Context, configs configs) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(disableRequestCompressionProvider); ok {
+ value, found, err = p.getDisableRequestCompression(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// requestMinCompressSizeBytesProvider provides access to the MinCompressSizeBytes
+type requestMinCompressSizeBytesProvider interface {
+ getRequestMinCompressSizeBytes(context.Context) (int64, bool, error)
+}
+
+func getRequestMinCompressSizeBytes(ctx context.Context, configs configs) (value int64, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(requestMinCompressSizeBytesProvider); ok {
+ value, found, err = p.getRequestMinCompressSizeBytes(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// accountIDEndpointModeProvider provides access to the AccountIDEndpointMode
+type accountIDEndpointModeProvider interface {
+ getAccountIDEndpointMode(context.Context) (aws.AccountIDEndpointMode, bool, error)
+}
+
+func getAccountIDEndpointMode(ctx context.Context, configs configs) (value aws.AccountIDEndpointMode, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(accountIDEndpointModeProvider); ok {
+ value, found, err = p.getAccountIDEndpointMode(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// requestChecksumCalculationProvider provides access to the RequestChecksumCalculation
+type requestChecksumCalculationProvider interface {
+ getRequestChecksumCalculation(context.Context) (aws.RequestChecksumCalculation, bool, error)
+}
+
+func getRequestChecksumCalculation(ctx context.Context, configs configs) (value aws.RequestChecksumCalculation, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(requestChecksumCalculationProvider); ok {
+ value, found, err = p.getRequestChecksumCalculation(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// responseChecksumValidationProvider provides access to the ResponseChecksumValidation
+type responseChecksumValidationProvider interface {
+ getResponseChecksumValidation(context.Context) (aws.ResponseChecksumValidation, bool, error)
+}
+
+func getResponseChecksumValidation(ctx context.Context, configs configs) (value aws.ResponseChecksumValidation, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(responseChecksumValidationProvider); ok {
+ value, found, err = p.getResponseChecksumValidation(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
// ec2IMDSRegionProvider provides access to the ec2 imds region
// configuration value
type ec2IMDSRegionProvider interface {
@@ -185,6 +340,73 @@ func getCredentialsCacheOptionsProvider(ctx context.Context, configs configs) (
return
}
+// bearerAuthTokenProviderProvider provides access to the bearer authentication
+// token external configuration value.
+type bearerAuthTokenProviderProvider interface {
+ getBearerAuthTokenProvider(context.Context) (smithybearer.TokenProvider, bool, error)
+}
+
+// getBearerAuthTokenProvider searches the config sources for a
+// bearerAuthTokenProviderProvider and returns the value if found. Returns an
+// error if a provider fails before a value is found.
+func getBearerAuthTokenProvider(ctx context.Context, configs configs) (p smithybearer.TokenProvider, found bool, err error) {
+ for _, cfg := range configs {
+ if provider, ok := cfg.(bearerAuthTokenProviderProvider); ok {
+ p, found, err = provider.getBearerAuthTokenProvider(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// bearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for
+// setting the smithy-go auth/bearer#TokenCacheOptions.
+type bearerAuthTokenCacheOptionsProvider interface {
+ getBearerAuthTokenCacheOptions(context.Context) (func(*smithybearer.TokenCacheOptions), bool, error)
+}
+
+// getBearerAuthTokenCacheOptionsProvider is an interface for retrieving a function for
+// setting the smithy-go auth/bearer#TokenCacheOptions.
+func getBearerAuthTokenCacheOptions(ctx context.Context, configs configs) (
+ f func(*smithybearer.TokenCacheOptions), found bool, err error,
+) {
+ for _, config := range configs {
+ if p, ok := config.(bearerAuthTokenCacheOptionsProvider); ok {
+ f, found, err = p.getBearerAuthTokenCacheOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ssoTokenProviderOptionsProvider is an interface for retrieving a function for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+type ssoTokenProviderOptionsProvider interface {
+ getSSOTokenProviderOptions(context.Context) (func(*ssocreds.SSOTokenProviderOptions), bool, error)
+}
+
+// getSSOTokenProviderOptions is an interface for retrieving a function for
+// setting the SDK's credentials/ssocreds#SSOTokenProviderOptions.
+func getSSOTokenProviderOptions(ctx context.Context, configs configs) (
+ f func(*ssocreds.SSOTokenProviderOptions), found bool, err error,
+) {
+ for _, config := range configs {
+ if p, ok := config.(ssoTokenProviderOptionsProvider); ok {
+ f, found, err = p.getSSOTokenProviderOptions(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ssoTokenProviderOptionsProvider
+
// processCredentialOptions is an interface for retrieving a function for setting
// the processcreds.Options.
type processCredentialOptions interface {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
index 4a8024769..a68bd0993 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve.go
@@ -21,9 +21,14 @@ import (
// This should be used as the first resolver in the slice of resolvers when
// resolving external configuration.
func resolveDefaultAWSConfig(ctx context.Context, cfg *aws.Config, cfgs configs) error {
+ var sources []interface{}
+ for _, s := range cfgs {
+ sources = append(sources, s)
+ }
+
*cfg = aws.Config{
- Credentials: aws.AnonymousCredentials{},
- Logger: logging.NewStandardLogger(os.Stderr),
+ Logger: logging.NewStandardLogger(os.Stderr),
+ ConfigSources: sources,
}
return nil
}
@@ -100,6 +105,113 @@ func resolveRegion(ctx context.Context, cfg *aws.Config, configs configs) error
return nil
}
+func resolveBaseEndpoint(ctx context.Context, cfg *aws.Config, configs configs) error {
+ var downcastCfgSources []interface{}
+ for _, cs := range configs {
+ downcastCfgSources = append(downcastCfgSources, interface{}(cs))
+ }
+
+ if val, found, err := GetIgnoreConfiguredEndpoints(ctx, downcastCfgSources); found && val && err == nil {
+ cfg.BaseEndpoint = nil
+ return nil
+ }
+
+ v, found, err := getBaseEndpoint(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ return nil
+ }
+ cfg.BaseEndpoint = aws.String(v)
+ return nil
+}
+
+// resolveAppID extracts the sdk app ID from the configs slice's SharedConfig or env var
+func resolveAppID(ctx context.Context, cfg *aws.Config, configs configs) error {
+ ID, _, err := getAppID(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ cfg.AppID = ID
+ return nil
+}
+
+// resolveDisableRequestCompression extracts the DisableRequestCompression from the configs slice's
+// SharedConfig or EnvConfig
+func resolveDisableRequestCompression(ctx context.Context, cfg *aws.Config, configs configs) error {
+ disable, _, err := getDisableRequestCompression(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ cfg.DisableRequestCompression = disable
+ return nil
+}
+
+// resolveRequestMinCompressSizeBytes extracts the RequestMinCompressSizeBytes from the configs slice's
+// SharedConfig or EnvConfig
+func resolveRequestMinCompressSizeBytes(ctx context.Context, cfg *aws.Config, configs configs) error {
+ minBytes, found, err := getRequestMinCompressSizeBytes(ctx, configs)
+ if err != nil {
+ return err
+ }
+ // must set a default min size 10240 if not configured
+ if !found {
+ minBytes = 10240
+ }
+ cfg.RequestMinCompressSizeBytes = minBytes
+ return nil
+}
+
+// resolveAccountIDEndpointMode extracts the AccountIDEndpointMode from the configs slice's
+// SharedConfig or EnvConfig
+func resolveAccountIDEndpointMode(ctx context.Context, cfg *aws.Config, configs configs) error {
+ m, found, err := getAccountIDEndpointMode(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ m = aws.AccountIDEndpointModePreferred
+ }
+
+ cfg.AccountIDEndpointMode = m
+ return nil
+}
+
+// resolveRequestChecksumCalculation extracts the RequestChecksumCalculation from the configs slice's
+// SharedConfig or EnvConfig
+func resolveRequestChecksumCalculation(ctx context.Context, cfg *aws.Config, configs configs) error {
+ c, found, err := getRequestChecksumCalculation(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ c = aws.RequestChecksumCalculationWhenSupported
+ }
+ cfg.RequestChecksumCalculation = c
+ return nil
+}
+
+// resolveResponseValidation extracts the ResponseChecksumValidation from the configs slice's
+// SharedConfig or EnvConfig
+func resolveResponseChecksumValidation(ctx context.Context, cfg *aws.Config, configs configs) error {
+ c, found, err := getResponseChecksumValidation(ctx, configs)
+ if err != nil {
+ return err
+ }
+
+ if !found {
+ c = aws.ResponseChecksumValidationWhenSupported
+ }
+ cfg.ResponseChecksumValidation = c
+ return nil
+}
+
// resolveDefaultRegion extracts the first instance of a default region and sets `aws.Config.Region` to the default
// region if region had not been resolved from other sources.
func resolveDefaultRegion(ctx context.Context, cfg *aws.Config, configs configs) error {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go
new file mode 100644
index 000000000..a8ebb3c0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_bearer_token.go
@@ -0,0 +1,122 @@
+package config
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/credentials/ssocreds"
+ "github.com/aws/aws-sdk-go-v2/service/ssooidc"
+ smithybearer "github.com/aws/smithy-go/auth/bearer"
+)
+
+// resolveBearerAuthToken extracts a token provider from the config sources.
+//
+// If an explicit bearer authentication token provider is not found the
+// resolver will fallback to resolving token provider via other config sources
+// such as SharedConfig.
+func resolveBearerAuthToken(ctx context.Context, cfg *aws.Config, configs configs) error {
+ found, err := resolveBearerAuthTokenProvider(ctx, cfg, configs)
+ if found || err != nil {
+ return err
+ }
+
+ return resolveBearerAuthTokenProviderChain(ctx, cfg, configs)
+}
+
+// resolveBearerAuthTokenProvider extracts the first instance of
+// BearerAuthTokenProvider from the config sources.
+//
+// The resolved BearerAuthTokenProvider will be wrapped in a cache to ensure
+// the Token is only refreshed when needed. This also protects the
+// TokenProvider so it can be used concurrently.
+//
+// Config providers used:
+// * bearerAuthTokenProviderProvider
+func resolveBearerAuthTokenProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
+ tokenProvider, found, err := getBearerAuthTokenProvider(ctx, configs)
+ if !found || err != nil {
+ return false, err
+ }
+
+ cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
+ ctx, configs, tokenProvider)
+ if err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
+
+func resolveBearerAuthTokenProviderChain(ctx context.Context, cfg *aws.Config, configs configs) (err error) {
+ _, sharedConfig, _ := getAWSConfigSources(configs)
+
+ var provider smithybearer.TokenProvider
+
+ if sharedConfig.SSOSession != nil {
+ provider, err = resolveBearerAuthSSOTokenProvider(
+ ctx, cfg, sharedConfig.SSOSession, configs)
+ }
+
+ if err == nil && provider != nil {
+ cfg.BearerAuthTokenProvider, err = wrapWithBearerAuthTokenCache(
+ ctx, configs, provider)
+ }
+
+ return err
+}
+
+func resolveBearerAuthSSOTokenProvider(ctx context.Context, cfg *aws.Config, session *SSOSession, configs configs) (*ssocreds.SSOTokenProvider, error) {
+ ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+ }
+
+ var optFns []func(*ssocreds.SSOTokenProviderOptions)
+ if found {
+ optFns = append(optFns, ssoTokenProviderOptionsFn)
+ }
+
+ cachePath, err := ssocreds.StandardCachedTokenFilepath(session.Name)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get SSOTokenProvider's cache path, %w", err)
+ }
+
+ client := ssooidc.NewFromConfig(*cfg)
+ provider := ssocreds.NewSSOTokenProvider(client, cachePath, optFns...)
+
+ return provider, nil
+}
+
+// wrapWithBearerAuthTokenCache will wrap provider with an smithy-go
+// bearer/auth#TokenCache with the provided options if the provider is not
+// already a TokenCache.
+func wrapWithBearerAuthTokenCache(
+ ctx context.Context,
+ cfgs configs,
+ provider smithybearer.TokenProvider,
+ optFns ...func(*smithybearer.TokenCacheOptions),
+) (smithybearer.TokenProvider, error) {
+ _, ok := provider.(*smithybearer.TokenCache)
+ if ok {
+ return provider, nil
+ }
+
+ tokenCacheConfigOptions, optionsFound, err := getBearerAuthTokenCacheOptions(ctx, cfgs)
+ if err != nil {
+ return nil, err
+ }
+
+ opts := make([]func(*smithybearer.TokenCacheOptions), 0, 2+len(optFns))
+ opts = append(opts, func(o *smithybearer.TokenCacheOptions) {
+ o.RefreshBeforeExpires = 5 * time.Minute
+ o.RetrieveBearerTokenTimeout = 30 * time.Second
+ })
+ opts = append(opts, optFns...)
+ if optionsFound {
+ opts = append(opts, tokenCacheConfigOptions)
+ }
+
+ return smithybearer.NewTokenCache(provider, opts...), nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
index 42904ed74..b00259df0 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/resolve_credentials.go
@@ -3,7 +3,10 @@ package config
import (
"context"
"fmt"
+ "io/ioutil"
+ "net"
"net/url"
+ "os"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
@@ -15,39 +18,56 @@ import (
"github.com/aws/aws-sdk-go-v2/credentials/stscreds"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/service/sso"
+ "github.com/aws/aws-sdk-go-v2/service/ssooidc"
"github.com/aws/aws-sdk-go-v2/service/sts"
)
const (
// valid credential source values
- credSourceEc2Metadata = "Ec2InstanceMetadata"
- credSourceEnvironment = "Environment"
- credSourceECSContainer = "EcsContainer"
+ credSourceEc2Metadata = "Ec2InstanceMetadata"
+ credSourceEnvironment = "Environment"
+ credSourceECSContainer = "EcsContainer"
+ httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
)
+// direct representation of the IPv4 address for the ECS container
+// "169.254.170.2"
+var ecsContainerIPv4 net.IP = []byte{
+ 169, 254, 170, 2,
+}
+
+// direct representation of the IPv4 address for the EKS container
+// "169.254.170.23"
+var eksContainerIPv4 net.IP = []byte{
+ 169, 254, 170, 23,
+}
+
+// direct representation of the IPv6 address for the EKS container
+// "fd00:ec2::23"
+var eksContainerIPv6 net.IP = []byte{
+ 0xFD, 0, 0xE, 0xC2,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0x23,
+}
+
var (
ecsContainerEndpoint = "http://169.254.170.2" // not constant to allow for swapping during unit-testing
)
-// resolveCredentials extracts a credential provider from slice of config sources.
+// resolveCredentials extracts a credential provider from slice of config
+// sources.
//
-// If an explict credential provider is not found the resolver will fallback to resolving
-// credentials by extracting a credential provider from EnvConfig and SharedConfig.
+// If an explicit credential provider is not found the resolver will fallback
+// to resolving credentials by extracting a credential provider from EnvConfig
+// and SharedConfig.
func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
found, err := resolveCredentialProvider(ctx, cfg, configs)
- if err != nil {
+ if found || err != nil {
return err
}
- if found {
- return nil
- }
- err = resolveCredentialChain(ctx, cfg, configs)
- if err != nil {
- return err
- }
-
- return nil
+ return resolveCredentialChain(ctx, cfg, configs)
}
// resolveCredentialProvider extracts the first instance of Credentials from the
@@ -61,12 +81,9 @@ func resolveCredentials(ctx context.Context, cfg *aws.Config, configs configs) e
// * credentialsProviderProvider
func resolveCredentialProvider(ctx context.Context, cfg *aws.Config, configs configs) (bool, error) {
credProvider, found, err := getCredentialsProvider(ctx, configs)
- if err != nil {
+ if !found || err != nil {
return false, err
}
- if !found {
- return false, nil
- }
cfg.Credentials, err = wrapWithCredentialsCache(ctx, configs, credProvider)
if err != nil {
@@ -95,13 +112,15 @@ func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs config
switch {
case sharedProfileSet:
- err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+ ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
case envConfig.Credentials.HasKeys():
- cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+ ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVars)
+ cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)}
case len(envConfig.WebIdentityTokenFilePath) > 0:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceEnvVarsSTSWebIDToken)
err = assumeWebIdentity(ctx, cfg, envConfig.WebIdentityTokenFilePath, envConfig.RoleARN, envConfig.RoleSessionName, configs)
default:
- err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
+ ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig, other)
}
if err != nil {
return err
@@ -116,53 +135,71 @@ func resolveCredentialChain(ctx context.Context, cfg *aws.Config, configs config
return nil
}
-func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (err error) {
-
+func resolveCredsFromProfile(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedConfig *SharedConfig, configs configs) (ctx2 context.Context, err error) {
switch {
case sharedConfig.Source != nil:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSourceProfile)
// Assume IAM role with credentials source from a different profile.
- err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)
+ ctx, err = resolveCredsFromProfile(ctx, cfg, envConfig, sharedConfig.Source, configs)
case sharedConfig.Credentials.HasKeys():
// Static Credentials from Shared Config/Credentials file.
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfile)
cfg.Credentials = credentials.StaticCredentialsProvider{
- Value: sharedConfig.Credentials,
+ Value: sharedConfig.Credentials,
+ Source: getCredentialSources(ctx),
}
case len(sharedConfig.CredentialSource) != 0:
- err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileNamedProvider)
+ ctx, err = resolveCredsFromSource(ctx, cfg, envConfig, sharedConfig, configs)
case len(sharedConfig.WebIdentityTokenFile) != 0:
// Credentials from Assume Web Identity token require an IAM Role, and
// that roll will be assumed. May be wrapped with another assume role
// via SourceProfile.
- return assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSTSWebIDToken)
+ return ctx, assumeWebIdentity(ctx, cfg, sharedConfig.WebIdentityTokenFile, sharedConfig.RoleARN, sharedConfig.RoleSessionName, configs)
case sharedConfig.hasSSOConfiguration():
+ if sharedConfig.hasLegacySSOConfiguration() {
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSOLegacy)
+ ctx = addCredentialSource(ctx, aws.CredentialSourceSSOLegacy)
+ } else {
+ ctx = addCredentialSource(ctx, aws.CredentialSourceSSO)
+ }
+ if sharedConfig.SSOSession != nil {
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileSSO)
+ }
err = resolveSSOCredentials(ctx, cfg, sharedConfig, configs)
case len(sharedConfig.CredentialProcess) != 0:
// Get credentials from CredentialProcess
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProfileProcess)
+ ctx = addCredentialSource(ctx, aws.CredentialSourceProcess)
err = processCredentials(ctx, cfg, sharedConfig, configs)
- case len(envConfig.ContainerCredentialsEndpoint) != 0:
- err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
-
case len(envConfig.ContainerCredentialsRelativePath) != 0:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP)
err = resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+ case len(envConfig.ContainerCredentialsEndpoint) != 0:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP)
+ err = resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
+
default:
+ ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS)
err = resolveEC2RoleCredentials(ctx, cfg, configs)
}
if err != nil {
- return err
+ return ctx, err
}
if len(sharedConfig.RoleARN) > 0 {
- return credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
+ return ctx, credsFromAssumeRole(ctx, cfg, sharedConfig, configs)
}
- return nil
+ return ctx, nil
}
func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *SharedConfig, configs configs) error {
@@ -180,7 +217,34 @@ func resolveSSOCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *S
}
cfgCopy := cfg.Copy()
- cfgCopy.Region = sharedConfig.SSORegion
+
+ options = append(options, func(o *ssocreds.Options) {
+ o.CredentialSources = getCredentialSources(ctx)
+ })
+
+ if sharedConfig.SSOSession != nil {
+ ssoTokenProviderOptionsFn, found, err := getSSOTokenProviderOptions(ctx, configs)
+ if err != nil {
+ return fmt.Errorf("failed to get SSOTokenProviderOptions from config sources, %w", err)
+ }
+ var optFns []func(*ssocreds.SSOTokenProviderOptions)
+ if found {
+ optFns = append(optFns, ssoTokenProviderOptionsFn)
+ }
+ cfgCopy.Region = sharedConfig.SSOSession.SSORegion
+ cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedConfig.SSOSession.Name)
+ if err != nil {
+ return err
+ }
+ oidcClient := ssooidc.NewFromConfig(cfgCopy)
+ tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath, optFns...)
+ options = append(options, func(o *ssocreds.Options) {
+ o.SSOTokenProvider = tokenProvider
+ o.CachedTokenFilepath = cachedPath
+ })
+ } else {
+ cfgCopy.Region = sharedConfig.SSORegion
+ }
cfg.Credentials = ssocreds.New(sso.NewFromConfig(cfgCopy), sharedConfig.SSOAccountID, sharedConfig.SSORoleName, sharedConfig.SSOStartURL, options...)
@@ -202,11 +266,45 @@ func processCredentials(ctx context.Context, cfg *aws.Config, sharedConfig *Shar
opts = append(opts, options)
}
+ opts = append(opts, func(o *processcreds.Options) {
+ o.CredentialSources = getCredentialSources(ctx)
+ })
+
cfg.Credentials = processcreds.NewProvider(sharedConfig.CredentialProcess, opts...)
return nil
}
+// isAllowedHost allows host to be loopback or known ECS/EKS container IPs
+//
+// host can either be an IP address OR an unresolved hostname - resolution will
+// be automatically performed in the latter case
+func isAllowedHost(host string) (bool, error) {
+ if ip := net.ParseIP(host); ip != nil {
+ return isIPAllowed(ip), nil
+ }
+
+ addrs, err := lookupHostFn(host)
+ if err != nil {
+ return false, err
+ }
+
+ for _, addr := range addrs {
+ if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func isIPAllowed(ip net.IP) bool {
+ return ip.IsLoopback() ||
+ ip.Equal(ecsContainerIPv4) ||
+ ip.Equal(eksContainerIPv4) ||
+ ip.Equal(eksContainerIPv6)
+}
+
func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpointURL, authToken string, configs configs) error {
var resolveErr error
@@ -217,10 +315,12 @@ func resolveLocalHTTPCredProvider(ctx context.Context, cfg *aws.Config, endpoint
host := parsed.Hostname()
if len(host) == 0 {
resolveErr = fmt.Errorf("unable to parse host from local HTTP cred provider URL")
- } else if isLoopback, loopbackErr := isLoopbackHost(host); loopbackErr != nil {
- resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, loopbackErr)
- } else if !isLoopback {
- resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback hosts are allowed", host)
+ } else if parsed.Scheme == "http" {
+ if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
+ resolveErr = fmt.Errorf("failed to resolve host %q, %v", host, allowHostErr)
+ } else if !isAllowedHost {
+ resolveErr = fmt.Errorf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed", host)
+ }
}
}
@@ -237,10 +337,21 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke
if len(authToken) != 0 {
options.AuthorizationToken = authToken
}
+ if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
+ options.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
+ var contents []byte
+ var err error
+ if contents, err = ioutil.ReadFile(authFilePath); err != nil {
+ return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
+ }
+ return string(contents), nil
+ })
+ }
options.APIOptions = cfg.APIOptions
if cfg.Retryer != nil {
options.Retryer = cfg.Retryer()
}
+ options.CredentialSources = getCredentialSources(ctx)
},
}
@@ -264,25 +375,31 @@ func resolveHTTPCredProvider(ctx context.Context, cfg *aws.Config, url, authToke
return nil
}
-func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (err error) {
+func resolveCredsFromSource(ctx context.Context, cfg *aws.Config, envConfig *EnvConfig, sharedCfg *SharedConfig, configs configs) (context.Context, error) {
switch sharedCfg.CredentialSource {
case credSourceEc2Metadata:
- return resolveEC2RoleCredentials(ctx, cfg, configs)
+ ctx = addCredentialSource(ctx, aws.CredentialSourceIMDS)
+ return ctx, resolveEC2RoleCredentials(ctx, cfg, configs)
case credSourceEnvironment:
- cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials}
+ ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP)
+ cfg.Credentials = credentials.StaticCredentialsProvider{Value: envConfig.Credentials, Source: getCredentialSources(ctx)}
case credSourceECSContainer:
- if len(envConfig.ContainerCredentialsRelativePath) == 0 {
- return fmt.Errorf("EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set")
+ ctx = addCredentialSource(ctx, aws.CredentialSourceHTTP)
+ if len(envConfig.ContainerCredentialsRelativePath) != 0 {
+ return ctx, resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+ }
+ if len(envConfig.ContainerCredentialsEndpoint) != 0 {
+ return ctx, resolveLocalHTTPCredProvider(ctx, cfg, envConfig.ContainerCredentialsEndpoint, envConfig.ContainerAuthorizationToken, configs)
}
- return resolveHTTPCredProvider(ctx, cfg, ecsContainerURI(envConfig.ContainerCredentialsRelativePath), envConfig.ContainerAuthorizationToken, configs)
+ return ctx, fmt.Errorf("EcsContainer was specified as the credential_source, but neither 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' or AWS_CONTAINER_CREDENTIALS_FULL_URI' was set")
default:
- return fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
+ return ctx, fmt.Errorf("credential_source values must be EcsContainer, Ec2InstanceMetadata, or Environment")
}
- return nil
+ return ctx, nil
}
func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs configs) error {
@@ -301,6 +418,7 @@ func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs con
if o.Client == nil {
o.Client = imds.NewFromConfig(*cfg)
}
+ o.CredentialSources = getCredentialSources(ctx)
})
provider := ec2rolecreds.New(optFns...)
@@ -309,7 +427,6 @@ func resolveEC2RoleCredentials(ctx context.Context, cfg *aws.Config, configs con
if err != nil {
return err
}
-
return nil
}
@@ -369,10 +486,6 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro
return fmt.Errorf("token file path is not set")
}
- if len(roleARN) == 0 {
- return fmt.Errorf("role ARN is not set")
- }
-
optFns := []func(*stscreds.WebIdentityRoleOptions){
func(options *stscreds.WebIdentityRoleOptions) {
options.RoleSessionName = sessionName
@@ -383,11 +496,33 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro
if err != nil {
return err
}
+
if found {
optFns = append(optFns, optFn)
}
- provider := stscreds.NewWebIdentityRoleProvider(sts.NewFromConfig(*cfg), roleARN, stscreds.IdentityTokenFile(filepath), optFns...)
+ opts := stscreds.WebIdentityRoleOptions{
+ RoleARN: roleARN,
+ }
+
+ optFns = append(optFns, func(options *stscreds.WebIdentityRoleOptions) {
+ options.CredentialSources = getCredentialSources(ctx)
+ })
+
+ for _, fn := range optFns {
+ fn(&opts)
+ }
+
+ if len(opts.RoleARN) == 0 {
+ return fmt.Errorf("role ARN is not set")
+ }
+
+ client := opts.Client
+ if client == nil {
+ client = sts.NewFromConfig(*cfg)
+ }
+
+ provider := stscreds.NewWebIdentityRoleProvider(client, roleARN, stscreds.IdentityTokenFile(filepath), optFns...)
cfg.Credentials = provider
@@ -395,6 +530,8 @@ func assumeWebIdentity(ctx context.Context, cfg *aws.Config, filepath string, ro
}
func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *SharedConfig, configs configs) (err error) {
+ // resolve credentials early
+ credentialSources := getCredentialSources(ctx)
optFns := []func(*stscreds.AssumeRoleOptions){
func(options *stscreds.AssumeRoleOptions) {
options.RoleSessionName = sharedCfg.RoleSessionName
@@ -412,6 +549,9 @@ func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *Shared
if len(sharedCfg.MFASerial) != 0 {
options.SerialNumber = aws.String(sharedCfg.MFASerial)
}
+
+ // add existing credential chain
+ options.CredentialSources = credentialSources
},
}
@@ -434,7 +574,6 @@ func credsFromAssumeRole(ctx context.Context, cfg *aws.Config, sharedCfg *Shared
return AssumeRoleTokenProviderNotSetError{}
}
}
-
cfg.Credentials = stscreds.NewAssumeRoleProvider(sts.NewFromConfig(*cfg), sharedCfg.RoleARN, optFns...)
return nil
@@ -454,7 +593,7 @@ func wrapWithCredentialsCache(
return provider, nil
}
- credCacheOptions, found, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
+ credCacheOptions, optionsFound, err := getCredentialsCacheOptionsProvider(ctx, cfgs)
if err != nil {
return nil, err
}
@@ -462,9 +601,27 @@ func wrapWithCredentialsCache(
// force allocation of a new slice if the additional options are
// needed, to prevent overwriting the passed in slice of options.
optFns = optFns[:len(optFns):len(optFns)]
- if found {
+ if optionsFound {
optFns = append(optFns, credCacheOptions)
}
return aws.NewCredentialsCache(provider, optFns...), nil
}
+
+// credentialSource stores the chain of providers that was used to create an instance of
+// a credentials provider on the context
+type credentialSource struct{}
+
+func addCredentialSource(ctx context.Context, source aws.CredentialSource) context.Context {
+ existing, ok := ctx.Value(credentialSource{}).([]aws.CredentialSource)
+ if !ok {
+ existing = []aws.CredentialSource{source}
+ } else {
+ existing = append(existing, source)
+ }
+ return context.WithValue(ctx, credentialSource{}, existing)
+}
+
+func getCredentialSources(ctx context.Context) []aws.CredentialSource {
+ return ctx.Value(credentialSource{}).([]aws.CredentialSource)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
index 4c43a165d..00b071fe6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/config/shared_config.go
@@ -15,13 +15,24 @@ import (
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/feature/ec2/imds"
"github.com/aws/aws-sdk-go-v2/internal/ini"
+ "github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
"github.com/aws/smithy-go/logging"
+ smithyrequestcompression "github.com/aws/smithy-go/private/requestcompression"
)
const (
- // Prefix to use for filtering profiles
+ // Prefix to use for filtering profiles. The profile prefix should only
+ // exist in the shared config file, not the credentials file.
profilePrefix = `profile `
+ // Prefix to be used for SSO sections. These are supposed to only exist in
+ // the shared config file, not the credentials file.
+ ssoSectionPrefix = `sso-session `
+
+ // Prefix for services section. It is referenced in profile via the services
+ // parameter to configure clients for service-specific parameters.
+ servicesPrefix = `services `
+
// string equivalent for boolean
endpointDiscoveryDisabled = `false`
endpointDiscoveryEnabled = `true`
@@ -42,10 +53,13 @@ const (
roleDurationSecondsKey = "duration_seconds" // optional
// AWS Single Sign-On (AWS SSO) group
+ ssoSessionNameKey = "sso_session"
+
+ ssoRegionKey = "sso_region"
+ ssoStartURLKey = "sso_start_url"
+
ssoAccountIDKey = "sso_account_id"
- ssoRegionKey = "sso_region"
ssoRoleNameKey = "sso_role_name"
- ssoStartURL = "sso_start_url"
// Additional Config fields
regionKey = `region`
@@ -66,6 +80,8 @@ const (
ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
+ ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
+
// Use DualStack Endpoint Resolution
useDualStackEndpoint = "use_dualstack_endpoint"
@@ -86,6 +102,27 @@ const (
retryModeKey = "retry_mode"
caBundleKey = "ca_bundle"
+
+ sdkAppID = "sdk_ua_app_id"
+
+ ignoreConfiguredEndpoints = "ignore_configured_endpoint_urls"
+
+ endpointURL = "endpoint_url"
+
+ servicesSectionKey = "services"
+
+ disableRequestCompression = "disable_request_compression"
+ requestMinCompressionSizeBytes = "request_min_compression_size_bytes"
+
+ s3DisableExpressSessionAuthKey = "s3_disable_express_session_auth"
+
+ accountIDKey = "aws_account_id"
+ accountIDEndpointMode = "account_id_endpoint_mode"
+
+ requestChecksumCalculationKey = "request_checksum_calculation"
+ responseChecksumValidationKey = "response_checksum_validation"
+ checksumWhenSupported = "when_supported"
+ checksumWhenRequired = "when_required"
)
// defaultSharedConfigProfile allows for swapping the default profile for testing
@@ -99,7 +136,7 @@ var defaultSharedConfigProfile = DefaultSharedConfigProfile
// - Linux/Unix: $HOME/.aws/credentials
// - Windows: %USERPROFILE%\.aws\credentials
func DefaultSharedCredentialsFilename() string {
- return filepath.Join(userHomeDir(), ".aws", "credentials")
+ return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "credentials")
}
// DefaultSharedConfigFilename returns the SDK's default file path for
@@ -110,7 +147,7 @@ func DefaultSharedCredentialsFilename() string {
// - Linux/Unix: $HOME/.aws/config
// - Windows: %USERPROFILE%\.aws\config
func DefaultSharedConfigFilename() string {
- return filepath.Join(userHomeDir(), ".aws", "config")
+ return filepath.Join(shareddefaults.UserHomeDir(), ".aws", "config")
}
// DefaultSharedConfigFiles is a slice of the default shared config files that
@@ -119,12 +156,44 @@ var DefaultSharedConfigFiles = []string{
DefaultSharedConfigFilename(),
}
-// DefaultSharedCredentialsFiles is a slice of the default shared credentials files that
-// the will be used in order to load the SharedConfig.
+// DefaultSharedCredentialsFiles is a slice of the default shared credentials
+// files that the will be used in order to load the SharedConfig.
var DefaultSharedCredentialsFiles = []string{
DefaultSharedCredentialsFilename(),
}
+// SSOSession provides the shared configuration parameters of the sso-session
+// section.
+type SSOSession struct {
+ Name string
+ SSORegion string
+ SSOStartURL string
+}
+
+func (s *SSOSession) setFromIniSection(section ini.Section) {
+ updateString(&s.Name, section, ssoSessionNameKey)
+ updateString(&s.SSORegion, section, ssoRegionKey)
+ updateString(&s.SSOStartURL, section, ssoStartURLKey)
+}
+
+// Services contains values configured in the services section
+// of the AWS configuration file.
+type Services struct {
+ // Services section values
+ // {"serviceId": {"key": "value"}}
+ // e.g. {"s3": {"endpoint_url": "example.com"}}
+ ServiceValues map[string]map[string]string
+}
+
+func (s *Services) setFromIniSection(section ini.Section) {
+ if s.ServiceValues == nil {
+ s.ServiceValues = make(map[string]map[string]string)
+ }
+ for _, service := range section.List() {
+ s.ServiceValues[service] = section.Map(service)
+ }
+}
+
// SharedConfig represents the configuration fields of the SDK config files.
type SharedConfig struct {
Profile string
@@ -144,10 +213,17 @@ type SharedConfig struct {
CredentialProcess string
WebIdentityTokenFile string
+ // SSO session options
+ SSOSessionName string
+ SSOSession *SSOSession
+
+ // Legacy SSO session options
+ SSORegion string
+ SSOStartURL string
+
+ // SSO fields not used
SSOAccountID string
- SSORegion string
SSORoleName string
- SSOStartURL string
RoleARN string
ExternalID string
@@ -188,6 +264,12 @@ type SharedConfig struct {
// ec2_metadata_service_endpoint=http://fd00:ec2::254
EC2IMDSEndpoint string
+ // Specifies that IMDS clients should not fallback to IMDSv1 if token
+ // requests fail.
+ //
+ // ec2_metadata_v1_disabled=true
+ EC2IMDSv1Disabled *bool
+
// Specifies if the S3 service should disable support for Multi-Region
// access-points
//
@@ -237,6 +319,44 @@ type SharedConfig struct {
//
// ca_bundle=$HOME/my_custom_ca_bundle
CustomCABundle string
+
+ // aws sdk app ID that can be added to user agent header string
+ AppID string
+
+ // Flag used to disable configured endpoints.
+ IgnoreConfiguredEndpoints *bool
+
+ // Value to contain configured endpoints to be propagated to
+ // corresponding endpoint resolution field.
+ BaseEndpoint string
+
+ // Services section config.
+ ServicesSectionName string
+ Services Services
+
+ // determine if request compression is allowed, default to false
+ // retrieved from config file's profile field disable_request_compression
+ DisableRequestCompression *bool
+
+ // inclusive threshold request body size to trigger compression,
+ // default to 10240 and must be within 0 and 10485760 bytes inclusive
+ // retrieved from config file's profile field request_min_compression_size_bytes
+ RequestMinCompressSizeBytes *int64
+
+ // Whether S3Express auth is disabled.
+ //
+ // This will NOT prevent requests from being made to S3Express buckets, it
+ // will only bypass the modified endpoint routing and signing behaviors
+ // associated with the feature.
+ S3DisableExpressAuth *bool
+
+ AccountIDEndpointMode aws.AccountIDEndpointMode
+
+ // RequestChecksumCalculation indicates if the request checksum should be calculated
+ RequestChecksumCalculation aws.RequestChecksumCalculation
+
+ // ResponseChecksumValidation indicates if the response checksum should be validated
+ ResponseChecksumValidation aws.ResponseChecksumValidation
}
func (c SharedConfig) getDefaultsMode(ctx context.Context) (value aws.DefaultsMode, ok bool, err error) {
@@ -326,6 +446,16 @@ func (c SharedConfig) GetEC2IMDSEndpoint() (string, bool, error) {
return c.EC2IMDSEndpoint, true, nil
}
+// GetEC2IMDSV1FallbackDisabled implements an EC2IMDSV1FallbackDisabled option
+// resolver interface.
+func (c SharedConfig) GetEC2IMDSV1FallbackDisabled() (bool, bool) {
+ if c.EC2IMDSv1Disabled == nil {
+ return false, false
+ }
+
+ return *c.EC2IMDSv1Disabled, true
+}
+
// GetUseDualStackEndpoint returns whether the service's dual-stack endpoint should be
// used for requests.
func (c SharedConfig) GetUseDualStackEndpoint(ctx context.Context) (value aws.DualStackEndpointState, found bool, err error) {
@@ -346,6 +476,16 @@ func (c SharedConfig) GetUseFIPSEndpoint(ctx context.Context) (value aws.FIPSEnd
return c.UseFIPSEndpoint, true, nil
}
+// GetS3DisableExpressAuth returns the configured value for
+// [SharedConfig.S3DisableExpressAuth].
+func (c SharedConfig) GetS3DisableExpressAuth() (value, ok bool) {
+ if c.S3DisableExpressAuth == nil {
+ return false, false
+ }
+
+ return *c.S3DisableExpressAuth, true
+}
+
// GetCustomCABundle returns the custom CA bundle's PEM bytes if the file was
func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error) {
if len(c.CustomCABundle) == 0 {
@@ -359,6 +499,45 @@ func (c SharedConfig) getCustomCABundle(context.Context) (io.Reader, bool, error
return bytes.NewReader(b), true, nil
}
+// getAppID returns the sdk app ID if set in shared config profile
+func (c SharedConfig) getAppID(context.Context) (string, bool, error) {
+ return c.AppID, len(c.AppID) > 0, nil
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+func (c SharedConfig) GetIgnoreConfiguredEndpoints(context.Context) (bool, bool, error) {
+ if c.IgnoreConfiguredEndpoints == nil {
+ return false, false, nil
+ }
+
+ return *c.IgnoreConfiguredEndpoints, true, nil
+}
+
+func (c SharedConfig) getBaseEndpoint(context.Context) (string, bool, error) {
+ return c.BaseEndpoint, len(c.BaseEndpoint) > 0, nil
+}
+
+// GetServiceBaseEndpoint is used to retrieve a normalized SDK ID for use
+// with configured endpoints.
+func (c SharedConfig) GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error) {
+ if service, ok := c.Services.ServiceValues[normalizeShared(sdkID)]; ok {
+ if endpt, ok := service[endpointURL]; ok {
+ return endpt, true, nil
+ }
+ }
+ return "", false, nil
+}
+
+func normalizeShared(sdkID string) string {
+ lower := strings.ToLower(sdkID)
+ return strings.ReplaceAll(lower, " ", "_")
+}
+
+func (c SharedConfig) getServicesObject(context.Context) (map[string]map[string]string, bool, error) {
+ return c.Services.ServiceValues, c.Services.ServiceValues != nil, nil
+}
+
// loadSharedConfigIgnoreNotExist is an alias for loadSharedConfig with the
// addition of ignoring when none of the files exist or when the profile
// is not found in any of the files.
@@ -463,7 +642,6 @@ type LoadSharedConfigOptions struct {
//
// You can read more about shared config and credentials file location at
// https://docs.aws.amazon.com/credref/latest/refdocs/file-location.html#file-location
-//
func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func(*LoadSharedConfigOptions)) (SharedConfig, error) {
var option LoadSharedConfigOptions
for _, fn := range optFns {
@@ -485,7 +663,7 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func
}
// check for profile prefix and drop duplicates or invalid profiles
- err = processConfigSections(ctx, configSections, option.Logger)
+ err = processConfigSections(ctx, &configSections, option.Logger)
if err != nil {
return SharedConfig{}, err
}
@@ -497,18 +675,19 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func
}
// check for profile prefix and drop duplicates or invalid profiles
- err = processCredentialsSections(ctx, credentialsSections, option.Logger)
+ err = processCredentialsSections(ctx, &credentialsSections, option.Logger)
if err != nil {
return SharedConfig{}, err
}
- err = mergeSections(configSections, credentialsSections)
+ err = mergeSections(&configSections, credentialsSections)
if err != nil {
return SharedConfig{}, err
}
cfg := SharedConfig{}
profiles := map[string]struct{}{}
+
if err = cfg.setFromIniSections(profiles, profile, configSections, option.Logger); err != nil {
return SharedConfig{}, err
}
@@ -516,53 +695,74 @@ func LoadSharedConfigProfile(ctx context.Context, profile string, optFns ...func
return cfg, nil
}
-func processConfigSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error {
+func processConfigSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
+ skipSections := map[string]struct{}{}
+
for _, section := range sections.List() {
- // drop profiles without prefix for config files
- if !strings.HasPrefix(section, profilePrefix) && !strings.EqualFold(section, "default") {
+ if _, ok := skipSections[section]; ok {
+ continue
+ }
+
+ // drop sections from config file that do not have expected prefixes.
+ switch {
+ case strings.HasPrefix(section, profilePrefix):
+ // Rename sections to remove "profile " prefixing to match with
+ // credentials file. If default is already present, it will be
+ // dropped.
+ newName, err := renameProfileSection(section, sections, logger)
+ if err != nil {
+ return fmt.Errorf("failed to rename profile section, %w", err)
+ }
+ skipSections[newName] = struct{}{}
+
+ case strings.HasPrefix(section, ssoSectionPrefix):
+ case strings.HasPrefix(section, servicesPrefix):
+ case strings.EqualFold(section, "default"):
+ default:
// drop this section, as invalid profile name
sections.DeleteSection(section)
if logger != nil {
- logger.Logf(logging.Debug,
- "A profile defined with name `%v` is ignored. For use within a shared configuration file, "+
- "a non-default profile must have `profile ` prefixed to the profile name.\n",
+ logger.Logf(logging.Debug, "A profile defined with name `%v` is ignored. "+
+ "For use within a shared configuration file, "+
+ "a non-default profile must have `profile ` "+
+ "prefixed to the profile name.",
section,
)
}
}
}
+ return nil
+}
- // rename sections to remove `profile ` prefixing to match with credentials file.
- // if default is already present, it will be dropped.
- for _, section := range sections.List() {
- if strings.HasPrefix(section, profilePrefix) {
- v, ok := sections.GetSection(section)
- if !ok {
- return fmt.Errorf("error processing profiles within the shared configuration files")
- }
-
- // delete section with profile as prefix
- sections.DeleteSection(section)
+func renameProfileSection(section string, sections *ini.Sections, logger logging.Logger) (string, error) {
+ v, ok := sections.GetSection(section)
+ if !ok {
+ return "", fmt.Errorf("error processing profiles within the shared configuration files")
+ }
- // set the value to non-prefixed name in sections.
- section = strings.TrimPrefix(section, profilePrefix)
- if sections.HasSection(section) {
- oldSection, _ := sections.GetSection(section)
- v.Logs = append(v.Logs,
- fmt.Sprintf("A default profile prefixed with `profile ` found in %s, "+
- "overrided non-prefixed default profile from %s", v.SourceFile, oldSection.SourceFile))
- }
+ // delete section with profile as prefix
+ sections.DeleteSection(section)
- // assign non-prefixed name to section
- v.Name = section
- sections.SetSection(section, v)
- }
+ // set the value to non-prefixed name in sections.
+ section = strings.TrimPrefix(section, profilePrefix)
+ if sections.HasSection(section) {
+ oldSection, _ := sections.GetSection(section)
+ v.Logs = append(v.Logs,
+ fmt.Sprintf("A non-default profile not prefixed with `profile ` found in %s, "+
+ "overriding non-default profile from %s",
+ v.SourceFile, oldSection.SourceFile))
+ sections.DeleteSection(section)
}
- return nil
+
+ // assign non-prefixed name to section
+ v.Name = section
+ sections.SetSection(section, v)
+
+ return section, nil
}
-func processCredentialsSections(ctx context.Context, sections ini.Sections, logger logging.Logger) error {
+func processCredentialsSections(ctx context.Context, sections *ini.Sections, logger logging.Logger) error {
for _, section := range sections.List() {
// drop profiles with prefix for credential files
if strings.HasPrefix(section, profilePrefix) {
@@ -596,7 +796,7 @@ func loadIniFiles(filenames []string) (ini.Sections, error) {
}
// mergeSections into mergedSections
- err = mergeSections(mergedSections, sections)
+ err = mergeSections(&mergedSections, sections)
if err != nil {
return ini.Sections{}, SharedConfigLoadError{Filename: filename, Err: err}
}
@@ -606,7 +806,7 @@ func loadIniFiles(filenames []string) (ini.Sections, error) {
}
// mergeSections merges source section properties into destination section properties
-func mergeSections(dst, src ini.Sections) error {
+func mergeSections(dst *ini.Sections, src ini.Sections) error {
for _, sectionName := range src.List() {
srcSection, _ := src.GetSection(sectionName)
@@ -676,29 +876,29 @@ func mergeSections(dst, src ini.Sections) error {
s3DisableMultiRegionAccessPointsKey,
ec2MetadataServiceEndpointModeKey,
ec2MetadataServiceEndpointKey,
+ ec2MetadataV1DisabledKey,
useDualStackEndpoint,
useFIPSEndpointKey,
defaultsModeKey,
retryModeKey,
- }
- for i := range stringKeys {
- if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
- return err
- }
- }
-
- intKeys := []string{
+ caBundleKey,
roleDurationSecondsKey,
retryMaxAttemptsKey,
+
+ ssoSessionNameKey,
+ ssoAccountIDKey,
+ ssoRegionKey,
+ ssoRoleNameKey,
+ ssoStartURLKey,
}
- for i := range intKeys {
- if err := mergeIntKey(&srcSection, &dstSection, sectionName, intKeys[i]); err != nil {
+ for i := range stringKeys {
+ if err := mergeStringKey(&srcSection, &dstSection, sectionName, stringKeys[i]); err != nil {
return err
}
}
// set srcSection on dst srcSection
- dst = dst.SetSection(sectionName, dstSection)
+ *dst = dst.SetSection(sectionName, dstSection)
}
return nil
@@ -723,26 +923,6 @@ func mergeStringKey(srcSection *ini.Section, dstSection *ini.Section, sectionNam
return nil
}
-func mergeIntKey(srcSection *ini.Section, dstSection *ini.Section, sectionName, key string) error {
- if srcSection.Has(key) {
- srcValue := srcSection.Int(key)
- v, err := ini.NewIntValue(srcValue)
- if err != nil {
- return fmt.Errorf("error merging %s, %w", key, err)
- }
-
- if dstSection.Has(key) {
- dstSection.Logs = append(dstSection.Logs, newMergeKeyLogMessage(sectionName, key,
- dstSection.SourceFile[key], srcSection.SourceFile[key]))
-
- }
-
- dstSection.UpdateValue(key, v)
- dstSection.UpdateSourceFile(key, srcSection.SourceFile[key])
- }
- return nil
-}
-
func newMergeKeyLogMessage(sectionName, key, dstSourceFile, srcSourceFile string) string {
return fmt.Sprintf("For profile: %v, overriding %v value, defined in %v "+
"with a %v value found in a duplicate profile defined at file %v. \n",
@@ -769,7 +949,7 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile
}
}
- // set config from the provided ini section
+ // set config from the provided INI section
err := c.setFromIniSection(profile, section)
if err != nil {
return fmt.Errorf("error fetching config from profile, %v, %w", profile, err)
@@ -782,9 +962,8 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile
// profile only have credential provider options.
c.clearAssumeRoleOptions()
} else {
- // First time a profile has been seen, It must either be a assume role
- // credentials, or SSO. Assert if the credential type requires a role ARN,
- // the ARN is also set, or validate that the SSO configuration is complete.
+ // First time a profile has been seen. Assert if the credential type
+ // requires a role ARN, the ARN is also set
if err := c.validateCredentialsConfig(profile); err != nil {
return err
}
@@ -832,11 +1011,34 @@ func (c *SharedConfig) setFromIniSections(profiles map[string]struct{}, profile
c.Source = srcCfg
}
+ // If the profile contains an SSO session parameter, the session MUST exist
+ // as a section in the config file. Load the SSO session using the name
+ // provided. If the session section is not found or incomplete an error
+ // will be returned.
+ if c.hasSSOTokenProviderConfiguration() {
+ section, ok := sections.GetSection(ssoSectionPrefix + strings.TrimSpace(c.SSOSessionName))
+ if !ok {
+ return fmt.Errorf("failed to find SSO session section, %v", c.SSOSessionName)
+ }
+ var ssoSession SSOSession
+ ssoSession.setFromIniSection(section)
+ ssoSession.Name = c.SSOSessionName
+ c.SSOSession = &ssoSession
+ }
+
+ if len(c.ServicesSectionName) > 0 {
+ if section, ok := sections.GetSection(servicesPrefix + c.ServicesSectionName); ok {
+ var svcs Services
+ svcs.setFromIniSection(section)
+ c.Services = svcs
+ }
+ }
+
return nil
}
// setFromIniSection loads the configuration from the profile section defined in
-// the provided ini file. A SharedConfig pointer type value is used so that
+// the provided INI file. A SharedConfig pointer type value is used so that
// multiple config file loadings can be chained.
//
// Only loads complete logically grouped values, and will not set fields in cfg
@@ -871,14 +1073,27 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateString(&c.Region, section, regionKey)
// AWS Single Sign-On (AWS SSO)
- updateString(&c.SSOAccountID, section, ssoAccountIDKey)
+ // SSO session options
+ updateString(&c.SSOSessionName, section, ssoSessionNameKey)
+
+ // Legacy SSO session options
updateString(&c.SSORegion, section, ssoRegionKey)
+ updateString(&c.SSOStartURL, section, ssoStartURLKey)
+
+ // SSO fields not used
+ updateString(&c.SSOAccountID, section, ssoAccountIDKey)
updateString(&c.SSORoleName, section, ssoRoleNameKey)
- updateString(&c.SSOStartURL, section, ssoStartURL)
+ // we're retaining a behavioral quirk with this field that existed before
+ // the removal of literal parsing for #2276:
+ // - if the key is missing, the config field will not be set
+ // - if the key is set to a non-numeric, the config field will be set to 0
if section.Has(roleDurationSecondsKey) {
- d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second
- c.RoleDurationSeconds = &d
+ if v, ok := section.Int(roleDurationSecondsKey); ok {
+ c.RoleDurationSeconds = aws.Duration(time.Duration(v) * time.Second)
+ } else {
+ c.RoleDurationSeconds = aws.Duration(time.Duration(0))
+ }
}
updateString(&c.CredentialProcess, section, credentialProcessKey)
@@ -887,11 +1102,13 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateEndpointDiscoveryType(&c.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
updateBoolPtr(&c.S3UseARNRegion, section, s3UseARNRegionKey)
updateBoolPtr(&c.S3DisableMultiRegionAccessPoints, section, s3DisableMultiRegionAccessPointsKey)
+ updateBoolPtr(&c.S3DisableExpressAuth, section, s3DisableExpressSessionAuthKey)
if err := updateEC2MetadataServiceEndpointMode(&c.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
return fmt.Errorf("failed to load %s from shared config, %v", ec2MetadataServiceEndpointModeKey, err)
}
updateString(&c.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
+ updateBoolPtr(&c.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
updateUseDualStackEndpoint(&c.UseDualStackEndpoint, section, useDualStackEndpoint)
updateUseFIPSEndpoint(&c.UseFIPSEndpoint, section, useFIPSEndpointKey)
@@ -909,21 +1126,167 @@ func (c *SharedConfig) setFromIniSection(profile string, section ini.Section) er
updateString(&c.CustomCABundle, section, caBundleKey)
+ // user agent app ID added to request User-Agent header
+ updateString(&c.AppID, section, sdkAppID)
+
+ updateBoolPtr(&c.IgnoreConfiguredEndpoints, section, ignoreConfiguredEndpoints)
+
+ updateString(&c.BaseEndpoint, section, endpointURL)
+
+ if err := updateDisableRequestCompression(&c.DisableRequestCompression, section, disableRequestCompression); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", disableRequestCompression, err)
+ }
+ if err := updateRequestMinCompressSizeBytes(&c.RequestMinCompressSizeBytes, section, requestMinCompressionSizeBytes); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", requestMinCompressionSizeBytes, err)
+ }
+
+ if err := updateAIDEndpointMode(&c.AccountIDEndpointMode, section, accountIDEndpointMode); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", accountIDEndpointMode, err)
+ }
+
+ if err := updateRequestChecksumCalculation(&c.RequestChecksumCalculation, section, requestChecksumCalculationKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", requestChecksumCalculationKey, err)
+ }
+ if err := updateResponseChecksumValidation(&c.ResponseChecksumValidation, section, responseChecksumValidationKey); err != nil {
+ return fmt.Errorf("failed to load %s from shared config, %w", responseChecksumValidationKey, err)
+ }
+
// Shared Credentials
creds := aws.Credentials{
AccessKeyID: section.String(accessKeyIDKey),
SecretAccessKey: section.String(secretAccessKey),
SessionToken: section.String(sessionTokenKey),
Source: fmt.Sprintf("SharedConfigCredentials: %s", section.SourceFile[accessKeyIDKey]),
+ AccountID: section.String(accountIDKey),
}
if creds.HasKeys() {
c.Credentials = creds
}
+ updateString(&c.ServicesSectionName, section, servicesSectionKey)
+
+ return nil
+}
+
+func updateRequestMinCompressSizeBytes(bytes **int64, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v, ok := sec.Int(key)
+ if !ok {
+ return fmt.Errorf("invalid value for min request compression size bytes %s, need int64", sec.String(key))
+ }
+ if v < 0 || v > smithyrequestcompression.MaxRequestMinCompressSizeBytes {
+ return fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", v)
+ }
+ *bytes = new(int64)
+ **bytes = v
+ return nil
+}
+
+func updateDisableRequestCompression(disable **bool, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch {
+ case v == "true":
+ *disable = new(bool)
+ **disable = true
+ case v == "false":
+ *disable = new(bool)
+ **disable = false
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, need true or false", key, v)
+ }
+ return nil
+}
+
+func updateAIDEndpointMode(m *aws.AccountIDEndpointMode, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch v {
+ case "preferred":
+ *m = aws.AccountIDEndpointModePreferred
+ case "required":
+ *m = aws.AccountIDEndpointModeRequired
+ case "disabled":
+ *m = aws.AccountIDEndpointModeDisabled
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be preferred/required/disabled", key, v)
+ }
+
+ return nil
+}
+
+func updateRequestChecksumCalculation(m *aws.RequestChecksumCalculation, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch strings.ToLower(v) {
+ case checksumWhenSupported:
+ *m = aws.RequestChecksumCalculationWhenSupported
+ case checksumWhenRequired:
+ *m = aws.RequestChecksumCalculationWhenRequired
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v)
+ }
+
+ return nil
+}
+
+func updateResponseChecksumValidation(m *aws.ResponseChecksumValidation, sec ini.Section, key string) error {
+ if !sec.Has(key) {
+ return nil
+ }
+
+ v := sec.String(key)
+ switch strings.ToLower(v) {
+ case checksumWhenSupported:
+ *m = aws.ResponseChecksumValidationWhenSupported
+ case checksumWhenRequired:
+ *m = aws.ResponseChecksumValidationWhenRequired
+ default:
+ return fmt.Errorf("invalid value for shared config profile field, %s=%s, must be when_supported/when_required", key, v)
+ }
+
return nil
}
+func (c SharedConfig) getRequestMinCompressSizeBytes(ctx context.Context) (int64, bool, error) {
+ if c.RequestMinCompressSizeBytes == nil {
+ return 0, false, nil
+ }
+ return *c.RequestMinCompressSizeBytes, true, nil
+}
+
+func (c SharedConfig) getDisableRequestCompression(ctx context.Context) (bool, bool, error) {
+ if c.DisableRequestCompression == nil {
+ return false, false, nil
+ }
+ return *c.DisableRequestCompression, true, nil
+}
+
+func (c SharedConfig) getAccountIDEndpointMode(ctx context.Context) (aws.AccountIDEndpointMode, bool, error) {
+ return c.AccountIDEndpointMode, len(c.AccountIDEndpointMode) > 0, nil
+}
+
+func (c SharedConfig) getRequestChecksumCalculation(ctx context.Context) (aws.RequestChecksumCalculation, bool, error) {
+ return c.RequestChecksumCalculation, c.RequestChecksumCalculation > 0, nil
+}
+
+func (c SharedConfig) getResponseChecksumValidation(ctx context.Context) (aws.ResponseChecksumValidation, bool, error) {
+ return c.ResponseChecksumValidation, c.ResponseChecksumValidation > 0, nil
+}
+
func updateDefaultsMode(mode *aws.DefaultsMode, section ini.Section, key string) error {
if !section.Has(key) {
return nil
@@ -992,39 +1355,88 @@ func (c *SharedConfig) validateCredentialType() error {
len(c.CredentialProcess) != 0,
len(c.WebIdentityTokenFile) != 0,
) {
- return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token, or sso")
+ return fmt.Errorf("only one credential type may be specified per profile: source profile, credential source, credential process, web identity token")
}
return nil
}
func (c *SharedConfig) validateSSOConfiguration() error {
- if !c.hasSSOConfiguration() {
+ if c.hasSSOTokenProviderConfiguration() {
+ err := c.validateSSOTokenProviderConfiguration()
+ if err != nil {
+ return err
+ }
return nil
}
+ if c.hasLegacySSOConfiguration() {
+ err := c.validateLegacySSOConfiguration()
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (c *SharedConfig) validateSSOTokenProviderConfiguration() error {
var missing []string
- if len(c.SSOAccountID) == 0 {
- missing = append(missing, ssoAccountIDKey)
+
+ if len(c.SSOSessionName) == 0 {
+ missing = append(missing, ssoSessionNameKey)
+ }
+
+ if c.SSOSession == nil {
+ missing = append(missing, ssoSectionPrefix)
+ } else {
+ if len(c.SSOSession.SSORegion) == 0 {
+ missing = append(missing, ssoRegionKey)
+ }
+
+ if len(c.SSOSession.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURLKey)
+ }
+ }
+
+ if len(missing) > 0 {
+ return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
+ c.Profile, strings.Join(missing, ", "))
+ }
+
+ if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion {
+ return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix)
+ }
+
+ if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL {
+ return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURLKey, c.Profile, ssoStartURLKey, ssoSectionPrefix)
}
+ return nil
+}
+
+func (c *SharedConfig) validateLegacySSOConfiguration() error {
+ var missing []string
+
if len(c.SSORegion) == 0 {
missing = append(missing, ssoRegionKey)
}
- if len(c.SSORoleName) == 0 {
- missing = append(missing, ssoRoleNameKey)
+ if len(c.SSOStartURL) == 0 {
+ missing = append(missing, ssoStartURLKey)
}
- if len(c.SSOStartURL) == 0 {
- missing = append(missing, ssoStartURL)
+ if len(c.SSOAccountID) == 0 {
+ missing = append(missing, ssoAccountIDKey)
+ }
+
+ if len(c.SSORoleName) == 0 {
+ missing = append(missing, ssoRoleNameKey)
}
if len(missing) > 0 {
return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
c.Profile, strings.Join(missing, ", "))
}
-
return nil
}
@@ -1044,15 +1456,15 @@ func (c *SharedConfig) hasCredentials() bool {
}
func (c *SharedConfig) hasSSOConfiguration() bool {
- switch {
- case len(c.SSOAccountID) != 0:
- case len(c.SSORegion) != 0:
- case len(c.SSORoleName) != 0:
- case len(c.SSOStartURL) != 0:
- default:
- return false
- }
- return true
+ return c.hasSSOTokenProviderConfiguration() || c.hasLegacySSOConfiguration()
+}
+
+func (c *SharedConfig) hasSSOTokenProviderConfiguration() bool {
+ return len(c.SSOSessionName) > 0
+}
+
+func (c *SharedConfig) hasLegacySSOConfiguration() bool {
+ return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0
}
func (c *SharedConfig) clearAssumeRoleOptions() {
@@ -1143,12 +1555,6 @@ func (e CredentialRequiresARNError) Error() string {
)
}
-func userHomeDir() string {
- // Ignore errors since we only care about Windows and *nix.
- homedir, _ := os.UserHomeDir()
- return homedir
-}
-
func oneOrNone(bs ...bool) bool {
var count int
@@ -1182,12 +1588,13 @@ func updateInt(dst *int, section ini.Section, key string) error {
if !section.Has(key) {
return nil
}
- if vt, _ := section.ValueType(key); vt != ini.IntegerType {
- return fmt.Errorf("invalid value %s=%s, expect integer",
- key, section.String(key))
+ v, ok := section.Int(key)
+ if !ok {
+ return fmt.Errorf("invalid value %s=%s, expect integer", key, section.String(key))
}
- *dst = int(section.Int(key))
+
+ *dst = int(v)
return nil
}
@@ -1197,7 +1604,10 @@ func updateBool(dst *bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
- *dst = section.Bool(key)
+
+ // retains pre-#2276 behavior where non-bool value would resolve to false
+ v, _ := section.Bool(key)
+ *dst = v
}
// updateBoolPtr will only update the dst with the value in the section key,
@@ -1206,8 +1616,11 @@ func updateBoolPtr(dst **bool, section ini.Section, key string) {
if !section.Has(key) {
return
}
+
+ // retains pre-#2276 behavior where non-bool value would resolve to false
+ v, _ := section.Bool(key)
*dst = new(bool)
- **dst = section.Bool(key)
+ **dst = v
}
// updateEndpointDiscoveryType will only update the dst with the value in the section, if
@@ -1239,7 +1652,8 @@ func updateUseDualStackEndpoint(dst *aws.DualStackEndpointState, section ini.Sec
return
}
- if section.Bool(key) {
+ // retains pre-#2276 behavior where non-bool value would resolve to false
+ if v, _ := section.Bool(key); v {
*dst = aws.DualStackEndpointStateEnabled
} else {
*dst = aws.DualStackEndpointStateDisabled
@@ -1255,7 +1669,8 @@ func updateUseFIPSEndpoint(dst *aws.FIPSEndpointState, section ini.Section, key
return
}
- if section.Bool(key) {
+ // retains pre-#2276 behavior where non-bool value would resolve to false
+ if v, _ := section.Bool(key); v {
*dst = aws.FIPSEndpointStateEnabled
} else {
*dst = aws.FIPSEndpointStateDisabled
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
index e16d30c6f..708472171 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/CHANGELOG.md
@@ -1,3 +1,566 @@
+# v1.17.68 (2025-06-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.67 (2025-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.66 (2025-04-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.65 (2025-03-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.64 (2025-03-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.63 (2025-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.62 (2025-03-04.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.61 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.60 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.59 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.58 (2025-02-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.57 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.56 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.55 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.17.54 (2025-01-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.53 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.52 (2025-01-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.51 (2025-01-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.50 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.49 (2025-01-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.48 (2024-12-19)
+
+* **Bug Fix**: Fix improper use of printf-style functions.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.47 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.46 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.45 (2024-11-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.44 (2024-11-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.43 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.42 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.41 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.40 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.39 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.38 (2024-10-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.37 (2024-09-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.36 (2024-09-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.35 (2024-09-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.34 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.33 (2024-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.32 (2024-09-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.31 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.30 (2024-08-26)
+
+* **Bug Fix**: Save SSO cached token expiry in UTC to ensure cross-SDK compatibility.
+
+# v1.17.29 (2024-08-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.28 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.27 (2024-07-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.26 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.25 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.24 (2024-07-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.23 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.22 (2024-06-26)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.21 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.20 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.19 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.18 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.17 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.16 (2024-05-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.15 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.14 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.13 (2024-05-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.12 (2024-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.11 (2024-04-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.10 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.9 (2024-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2024-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2024-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2024-01-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2024-01-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.14 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2023-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2023-12-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2023-12-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2023-11-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2023-11-21)
+
+* **Bug Fix**: Don't expect error responses to have a JSON payload in the endpointcreds provider.
+
+# v1.16.3 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2023-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2023-11-14)
+
+* **Feature**: Add support for dynamic auth token from file and EKS container host in absolute/relative URIs in the HTTP credential provider.
+
+# v1.15.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-11-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.43 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.42 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.41 (2023-10-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.40 (2023-09-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.39 (2023-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.38 (2023-09-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.37 (2023-09-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.36 (2023-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.31 (2023-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.30 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.29 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.28 (2023-07-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.27 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.26 (2023-06-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.25 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.24 (2023-05-09)
+
+* No change notes available for this release.
+
+# v1.13.23 (2023-05-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.22 (2023-05-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.21 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.20 (2023-04-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.19 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.18 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.17 (2023-03-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.16 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.15 (2023-02-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.14 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.13 (2023-02-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.12 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2023-02-01)
+
+* No change notes available for this release.
+
+# v1.13.10 (2023-01-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2023-01-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2023-01-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2022-12-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2022-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2022-12-15)
+
+* **Bug Fix**: Unify logic between shared config and in finding home directory
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2022-11-22)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2022-11-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2022-11-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-11-11)
+
+* **Announcement**: When using the SSOTokenProvider, a previous implementation incorrectly compensated for invalid SSOTokenProvider configurations in the shared profile. This has been fixed via PR #1903 and tracked in issue #1846
+* **Feature**: Adds token refresh support (via SSOTokenProvider) when using the SSOCredentialProvider
+
+# v1.12.24 (2022-11-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-09-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.12.20 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
index 72214bf40..6ed71b42b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/doc.go
@@ -11,7 +11,7 @@
// # Loading credentials with the SDK's AWS Config
//
// The EC2 Instance role credentials provider will automatically be the resolved
-// credential provider int he credential chain if no other credential provider is
+// credential provider in the credential chain if no other credential provider is
// resolved first.
//
// To explicitly instruct the SDK's credentials resolving to use the EC2 Instance
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
index 5c699f166..a95e6c8bd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ec2rolecreds/provider.go
@@ -47,6 +47,10 @@ type Options struct {
//
// If nil, the provider will default to the EC2 IMDS client.
Client GetMetadataAPIClient
+
+ // The chain of providers that was used to create this provider
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
}
// New returns an initialized Provider value configured to retrieve
@@ -227,3 +231,11 @@ func requestCred(ctx context.Context, client GetMetadataAPIClient, credsName str
return respCreds, nil
}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *Provider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceIMDS}
+ } // If no source has been set, assume this is used directly which means just call to assume role
+ return p.options.CredentialSources
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
new file mode 100644
index 000000000..c3f5dadce
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/auth.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
index 60b8298f8..dc291c97c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/client.go
@@ -62,7 +62,16 @@ func New(options Options, optFns ...func(*Options)) *Client {
}
if options.Retryer == nil {
- options.Retryer = retry.NewStandard()
+ // Amazon-owned implementations of this endpoint are known to sometimes
+ // return plaintext responses (i.e. no Code) like normal, add a few
+ // additional status codes
+ options.Retryer = retry.NewStandard(func(o *retry.StandardOptions) {
+ o.Retryables = append(o.Retryables, retry.RetryableHTTPStatusCode{
+ Codes: map[int]struct{}{
+ http.StatusTooManyRequests: {},
+ },
+ })
+ })
}
for _, fn := range optFns {
@@ -92,6 +101,7 @@ func (c *Client) GetCredentials(ctx context.Context, params *GetCredentialsInput
stack.Serialize.Add(&serializeOpGetCredential{}, smithymiddleware.After)
stack.Build.Add(&buildEndpoint{Endpoint: options.Endpoint}, smithymiddleware.After)
stack.Deserialize.Add(&deserializeOpGetCredential{}, smithymiddleware.After)
+ addProtocolFinalizerMiddlewares(stack, options, "GetCredentials")
retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{Retryer: options.Retryer})
middleware.AddSDKAgentKey(middleware.FeatureMetadata, ServiceID)
smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
@@ -118,13 +128,15 @@ type GetCredentialsOutput struct {
AccessKeyID string
SecretAccessKey string
Token string
+ AccountID string
}
// EndpointError is an error returned from the endpoint service
type EndpointError struct {
- Code string `json:"code"`
- Message string `json:"message"`
- Fault smithy.ErrorFault `json:"-"`
+ Code string `json:"code"`
+ Message string `json:"message"`
+ Fault smithy.ErrorFault `json:"-"`
+ statusCode int `json:"-"`
}
// Error is the error mesage string
@@ -146,3 +158,8 @@ func (e *EndpointError) ErrorMessage() string {
func (e *EndpointError) ErrorFault() smithy.ErrorFault {
return e.Fault
}
+
+// HTTPStatusCode implements retry.HTTPStatusCode.
+func (e *EndpointError) HTTPStatusCode() int {
+ return e.statusCode
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
new file mode 100644
index 000000000..748ee6724
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/endpoints.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
index 40747a53c..f2820d20e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client/middleware.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "io"
"net/url"
"github.com/aws/smithy-go"
@@ -104,17 +105,60 @@ func (d *deserializeOpGetCredential) HandleDeserialize(ctx context.Context, in s
}
func deserializeError(response *smithyhttp.Response) error {
- var errShape *EndpointError
- err := json.NewDecoder(response.Body).Decode(&errShape)
+ // we could be talking to anything, json isn't guaranteed
+ // see https://github.com/aws/aws-sdk-go-v2/issues/2316
+ if response.Header.Get("Content-Type") == "application/json" {
+ return deserializeJSONError(response)
+ }
+
+ msg, err := io.ReadAll(response.Body)
if err != nil {
- return &smithy.DeserializationError{Err: fmt.Errorf("failed to decode error message, %w", err)}
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("read response, %w", err),
+ }
+ }
+
+ return &EndpointError{
+ // no sensible value for Code
+ Message: string(msg),
+ Fault: stof(response.StatusCode),
+ statusCode: response.StatusCode,
}
+}
- if response.StatusCode >= 500 {
- errShape.Fault = smithy.FaultServer
- } else {
- errShape.Fault = smithy.FaultClient
+func deserializeJSONError(response *smithyhttp.Response) error {
+ var errShape *EndpointError
+ if err := json.NewDecoder(response.Body).Decode(&errShape); err != nil {
+ return &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode error message, %w", err),
+ }
}
+ errShape.Fault = stof(response.StatusCode)
+ errShape.statusCode = response.StatusCode
return errShape
}
+
+// maps HTTP status code to smithy ErrorFault
+func stof(code int) smithy.ErrorFault {
+ if code >= 500 {
+ return smithy.FaultServer
+ }
+ return smithy.FaultClient
+}
+
+func addProtocolFinalizerMiddlewares(stack *smithymiddleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, smithymiddleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %w", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %w", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", smithymiddleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
index adc7fc6b0..c8ac6d9ff 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/provider.go
@@ -36,6 +36,7 @@ import (
"context"
"fmt"
"net/http"
+ "strings"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/credentials/endpointcreds/internal/client"
@@ -81,7 +82,41 @@ type Options struct {
// Optional authorization token value if set will be used as the value of
// the Authorization header of the endpoint credential request.
+ //
+ // When constructed from environment, the provider will use the value of
+ // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
+ //
+ // Will be overridden if AuthorizationTokenProvider is configured
AuthorizationToken string
+
+ // Optional auth provider func to dynamically load the auth token from a file
+ // everytime a credential is retrieved
+ //
+ // When constructed from environment, the provider will read and use the content
+ // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable
+ // as the auth token everytime credentials are retrieved
+ //
+ // Will override AuthorizationToken if configured
+ AuthorizationTokenProvider AuthTokenProvider
+
+ // The chain of providers that was used to create this provider
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
+}
+
+// AuthTokenProvider defines an interface to dynamically load a value to be passed
+// for the Authorization header of a credentials request.
+type AuthTokenProvider interface {
+ GetToken() (string, error)
+}
+
+// TokenProviderFunc is a func type implementing AuthTokenProvider interface
+// and enables customizing token provider behavior
+type TokenProviderFunc func() (string, error)
+
+// GetToken func retrieves auth token according to TokenProviderFunc implementation
+func (p TokenProviderFunc) GetToken() (string, error) {
+ return p()
}
// New returns a credentials Provider for retrieving AWS credentials
@@ -121,6 +156,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.Token,
Source: ProviderName,
+ AccountID: resp.AccountID,
}
if resp.Expiration != nil {
@@ -132,5 +168,40 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
}
func (p *Provider) getCredentials(ctx context.Context) (*client.GetCredentialsOutput, error) {
- return p.client.GetCredentials(ctx, &client.GetCredentialsInput{AuthorizationToken: p.options.AuthorizationToken})
+ authToken, err := p.resolveAuthToken()
+ if err != nil {
+ return nil, fmt.Errorf("resolve auth token: %v", err)
+ }
+
+ return p.client.GetCredentials(ctx, &client.GetCredentialsInput{
+ AuthorizationToken: authToken,
+ })
+}
+
+func (p *Provider) resolveAuthToken() (string, error) {
+ authToken := p.options.AuthorizationToken
+
+ var err error
+ if p.options.AuthorizationTokenProvider != nil {
+ authToken, err = p.options.AuthorizationTokenProvider.GetToken()
+ if err != nil {
+ return "", err
+ }
+ }
+
+ if strings.ContainsAny(authToken, "\r\n") {
+ return "", fmt.Errorf("authorization token contains invalid newline sequence")
+ }
+
+ return authToken, nil
+}
+
+var _ aws.CredentialProviderSource = (*Provider)(nil)
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *Provider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceHTTP}
+ }
+ return p.options.CredentialSources
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
index db136bfc4..9fa372702 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/go_module_metadata.go
@@ -3,4 +3,4 @@
package credentials
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.12.20"
+const goModuleVersion = "1.17.68"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
index 3921da34c..dfc6b2548 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/processcreds/provider.go
@@ -57,6 +57,9 @@ type Provider struct {
type Options struct {
// Timeout limits the time a process can run.
Timeout time.Duration
+ // The chain of providers that was used to create this provider
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
}
// NewCommandBuilder provides the interface for specifying how command will be
@@ -149,12 +152,27 @@ func NewProviderCommand(builder NewCommandBuilder, options ...func(*Options)) *P
return p
}
-type credentialProcessResponse struct {
- Version int
- AccessKeyID string `json:"AccessKeyId"`
+// A CredentialProcessResponse is the AWS credentials format that must be
+// returned when executing an external credential_process.
+type CredentialProcessResponse struct {
+ // As of this writing, the Version key must be set to 1. This might
+ // increment over time as the structure evolves.
+ Version int
+
+ // The access key ID that identifies the temporary security credentials.
+ AccessKeyID string `json:"AccessKeyId"`
+
+ // The secret access key that can be used to sign requests.
SecretAccessKey string
- SessionToken string
- Expiration *time.Time
+
+ // The token that users must pass to the service API to use the temporary credentials.
+ SessionToken string
+
+ // The date on which the current credentials expire.
+ Expiration *time.Time
+
+ // The ID of the account for credentials
+ AccountID string `json:"AccountId"`
}
// Retrieve executes the credential process command and returns the
@@ -166,7 +184,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
}
// Serialize and validate response
- resp := &credentialProcessResponse{}
+ resp := &CredentialProcessResponse{}
if err = json.Unmarshal(out, resp); err != nil {
return aws.Credentials{Source: ProviderName}, &ProviderError{
Err: fmt.Errorf("parse failed of process output: %s, error: %w", out, err),
@@ -196,6 +214,7 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
AccessKeyID: resp.AccessKeyID,
SecretAccessKey: resp.SecretAccessKey,
SessionToken: resp.SessionToken,
+ AccountID: resp.AccountID,
}
// Handle expiration
@@ -258,6 +277,14 @@ func (p *Provider) executeCredentialProcess(ctx context.Context) ([]byte, error)
return out, nil
}
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *Provider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceProcess}
+ }
+ return p.options.CredentialSources
+}
+
func executeCommand(cmd *exec.Cmd, exec chan error) {
// Start the command
err := cmd.Start()
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
index 43e5676d3..ece1e65f7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/doc.go
@@ -11,12 +11,11 @@
// # Loading AWS SSO credentials with the AWS shared configuration file
//
// You can use configure AWS SSO credentials from the AWS shared configuration file by
-// providing the specifying the required keys in the profile:
+// specifying the required keys in the profile and referencing an sso-session:
//
+// sso_session
// sso_account_id
-// sso_region
// sso_role_name
-// sso_start_url
//
// For example, the following defines a profile "devsso" and specifies the AWS
// SSO parameters that defines the target account, role, sign-on portal, and
@@ -24,11 +23,15 @@
// provided, or an error will be returned.
//
// [profile devsso]
-// sso_start_url = https://my-sso-portal.awsapps.com/start
+// sso_session = dev-session
// sso_role_name = SSOReadOnlyRole
-// sso_region = us-east-1
// sso_account_id = 123456789012
//
+// [sso-session dev-session]
+// sso_start_url = https://my-sso-portal.awsapps.com/start
+// sso_region = us-east-1
+// sso_registration_scopes = sso:account:access
+//
// Using the config module, you can load the AWS SDK shared configuration, and
// specify that this profile be used to retrieve credentials. For example:
//
@@ -43,10 +46,17 @@
// and provide the necessary information to load and retrieve temporary
// credentials using an access token from ~/.aws/sso/cache.
//
-// client := sso.NewFromConfig(cfg)
+// ssoClient := sso.NewFromConfig(cfg)
+// ssoOidcClient := ssooidc.NewFromConfig(cfg)
+// tokenPath, err := ssocreds.StandardCachedTokenFilepath("dev-session")
+// if err != nil {
+// return err
+// }
//
// var provider aws.CredentialsProvider
-// provider = ssocreds.New(client, "123456789012", "SSOReadOnlyRole", "us-east-1", "https://my-sso-portal.awsapps.com/start")
+// provider = ssocreds.New(ssoClient, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start", func(options *ssocreds.Options) {
+// options.SSOTokenProvider = ssocreds.NewSSOTokenProvider(ssoOidcClient, tokenPath)
+// })
//
// // Wrap the provider with aws.CredentialsCache to cache the credentials until their expire time
// provider = aws.NewCredentialsCache(provider)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
index 40743f0d7..46ae2f923 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_cached_token.go
@@ -13,9 +13,10 @@ import (
"time"
"github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/aws-sdk-go-v2/internal/shareddefaults"
)
-var osUserHomeDur = os.UserHomeDir
+var osUserHomeDur = shareddefaults.UserHomeDir
// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or
// error if unable get derive the path. Key that will be used to compute a SHA1
@@ -25,13 +26,12 @@ var osUserHomeDur = os.UserHomeDir
//
// ~/.aws/sso/cache/.json
func StandardCachedTokenFilepath(key string) (string, error) {
- homeDir, err := osUserHomeDur()
- if err != nil {
- return "", fmt.Errorf("unable to get USER's home directory for cached token, %w", err)
+ homeDir := osUserHomeDur()
+ if len(homeDir) == 0 {
+ return "", fmt.Errorf("unable to get USER's home directory for cached token")
}
-
hash := sha1.New()
- if _, err = hash.Write([]byte(key)); err != nil {
+ if _, err := hash.Write([]byte(key)); err != nil {
return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %w", err)
}
@@ -225,7 +225,7 @@ func (r *rfc3339) UnmarshalJSON(bytes []byte) (err error) {
}
func (r *rfc3339) MarshalJSON() ([]byte, error) {
- value := time.Time(*r).Format(time.RFC3339)
+ value := time.Time(*r).UTC().Format(time.RFC3339)
// Use JSON unmarshal to unescape the quoted value making use of JSON's
// quoting rules.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
index bd7603bbc..3ed9cbb3e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/ssocreds/sso_credentials_provider.go
@@ -45,6 +45,14 @@ type Options struct {
// If custom cached token filepath is used, the Provider's startUrl
// parameter will be ignored.
CachedTokenFilepath string
+
+ // Used by the SSOCredentialProvider if a token configuration
+ // profile is used in the shared config
+ SSOTokenProvider *SSOTokenProvider
+
+ // The chain of providers that was used to create this provider.
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
}
// Provider is an AWS credential provider that retrieves temporary AWS
@@ -78,27 +86,39 @@ func New(client GetRoleCredentialsAPIClient, accountID, roleName, startURL strin
// Retrieve retrieves temporary AWS credentials from the configured Amazon
// Single Sign-On (AWS SSO) user portal by exchanging the accessToken present
-// in ~/.aws/sso/cache.
+// in ~/.aws/sso/cache. However, if a token provider configuration exists
+// in the shared config, then we ought to use the token provider rather then
+// direct access on the cached token.
func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
- if p.cachedTokenFilepath == "" {
- cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL)
+ var accessToken *string
+ if p.options.SSOTokenProvider != nil {
+ token, err := p.options.SSOTokenProvider.RetrieveBearerToken(ctx)
if err != nil {
- return aws.Credentials{}, &InvalidTokenError{Err: err}
+ return aws.Credentials{}, err
+ }
+ accessToken = &token.Value
+ } else {
+ if p.cachedTokenFilepath == "" {
+ cachedTokenFilepath, err := StandardCachedTokenFilepath(p.options.StartURL)
+ if err != nil {
+ return aws.Credentials{}, &InvalidTokenError{Err: err}
+ }
+ p.cachedTokenFilepath = cachedTokenFilepath
}
- p.cachedTokenFilepath = cachedTokenFilepath
- }
- tokenFile, err := loadCachedToken(p.cachedTokenFilepath)
- if err != nil {
- return aws.Credentials{}, &InvalidTokenError{Err: err}
- }
+ tokenFile, err := loadCachedToken(p.cachedTokenFilepath)
+ if err != nil {
+ return aws.Credentials{}, &InvalidTokenError{Err: err}
+ }
- if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) {
- return aws.Credentials{}, &InvalidTokenError{}
+ if tokenFile.ExpiresAt == nil || sdk.NowTime().After(time.Time(*tokenFile.ExpiresAt)) {
+ return aws.Credentials{}, &InvalidTokenError{}
+ }
+ accessToken = &tokenFile.AccessToken
}
output, err := p.options.Client.GetRoleCredentials(ctx, &sso.GetRoleCredentialsInput{
- AccessToken: &tokenFile.AccessToken,
+ AccessToken: accessToken,
AccountId: &p.options.AccountID,
RoleName: &p.options.RoleName,
})
@@ -113,9 +133,18 @@ func (p *Provider) Retrieve(ctx context.Context) (aws.Credentials, error) {
CanExpire: true,
Expires: time.Unix(0, output.RoleCredentials.Expiration*int64(time.Millisecond)).UTC(),
Source: ProviderName,
+ AccountID: p.options.AccountID,
}, nil
}
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *Provider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceSSO}
+ }
+ return p.options.CredentialSources
+}
+
// InvalidTokenError is the error type that is returned if loaded token has
// expired or is otherwise invalid. To refresh the SSO session run AWS SSO
// login with the corresponding profile.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
index d525cac09..a469abdb7 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/static_provider.go
@@ -22,6 +22,16 @@ func (*StaticCredentialsEmptyError) Error() string {
// never expire.
type StaticCredentialsProvider struct {
Value aws.Credentials
+ // These values are for reporting purposes and are not meant to be set up directly
+ Source []aws.CredentialSource
+}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (s StaticCredentialsProvider) ProviderSources() []aws.CredentialSource {
+ if s.Source == nil {
+ return []aws.CredentialSource{aws.CredentialSourceCode} // If no source has been set, assume this is used directly which means hardcoded creds
+ }
+ return s.Source
}
// NewStaticCredentialsProvider return a StaticCredentialsProvider initialized with the AWS
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
index 289707b6d..1ccf71e77 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/assume_role_provider.go
@@ -247,6 +247,10 @@ type AssumeRoleOptions struct {
// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
// in the IAM User Guide. This parameter is optional.
TransitiveTagKeys []string
+
+ // The chain of providers that was used to create this provider
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
}
// NewAssumeRoleProvider constructs and returns a credentials provider that
@@ -308,6 +312,11 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err
return aws.Credentials{Source: ProviderName}, err
}
+ var accountID string
+ if resp.AssumedRoleUser != nil {
+ accountID = getAccountID(resp.AssumedRoleUser)
+ }
+
return aws.Credentials{
AccessKeyID: *resp.Credentials.AccessKeyId,
SecretAccessKey: *resp.Credentials.SecretAccessKey,
@@ -316,5 +325,14 @@ func (p *AssumeRoleProvider) Retrieve(ctx context.Context) (aws.Credentials, err
CanExpire: true,
Expires: *resp.Credentials.Expiration,
+ AccountID: accountID,
}, nil
}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *AssumeRoleProvider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRole}
+ } // If no source has been set, assume this is used directly which means just call to assume role
+ return append(p.options.CredentialSources, aws.CredentialSourceSTSAssumeRole)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
index ddaf6df6c..5f4286dda 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/credentials/stscreds/web_identity_provider.go
@@ -5,6 +5,7 @@ import (
"fmt"
"io/ioutil"
"strconv"
+ "strings"
"time"
"github.com/aws/aws-sdk-go-v2/aws"
@@ -63,6 +64,10 @@ type WebIdentityRoleOptions struct {
// want to use as managed session policies. The policies must exist in the
// same account as the role.
PolicyARNs []types.PolicyDescriptorType
+
+ // The chain of providers that was used to create this provider
+ // These values are for reporting purposes and are not meant to be set up directly
+ CredentialSources []aws.CredentialSource
}
// IdentityTokenRetriever is an interface for retrieving a JWT
@@ -135,6 +140,11 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials
return aws.Credentials{}, fmt.Errorf("failed to retrieve credentials, %w", err)
}
+ var accountID string
+ if resp.AssumedRoleUser != nil {
+ accountID = getAccountID(resp.AssumedRoleUser)
+ }
+
// InvalidIdentityToken error is a temporary error that can occur
// when assuming an Role with a JWT web identity token.
@@ -145,6 +155,27 @@ func (p *WebIdentityRoleProvider) Retrieve(ctx context.Context) (aws.Credentials
Source: WebIdentityProviderName,
CanExpire: true,
Expires: *resp.Credentials.Expiration,
+ AccountID: accountID,
}
return value, nil
}
+
+// extract accountID from arn with format "arn:partition:service:region:account-id:[resource-section]"
+func getAccountID(u *types.AssumedRoleUser) string {
+ if u.Arn == nil {
+ return ""
+ }
+ parts := strings.Split(*u.Arn, ":")
+ if len(parts) < 5 {
+ return ""
+ }
+ return parts[4]
+}
+
+// ProviderSources returns the credential chain that was used to construct this provider
+func (p *WebIdentityRoleProvider) ProviderSources() []aws.CredentialSource {
+ if p.options.CredentialSources == nil {
+ return []aws.CredentialSource{aws.CredentialSourceSTSAssumeRoleWebID}
+ }
+ return p.options.CredentialSources
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/doc.go
deleted file mode 100644
index 944feac55..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/doc.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Package sdk is the official AWS SDK v2 for the Go programming language.
-//
-// aws-sdk-go-v2 is the the v2 of the AWS SDK for the Go programming language.
-//
-// # Getting started
-//
-// The best way to get started working with the SDK is to use `go get` to add the
-// SDK and desired service clients to your Go dependencies explicitly.
-//
-// go get github.com/aws/aws-sdk-go-v2
-// go get github.com/aws/aws-sdk-go-v2/config
-// go get github.com/aws/aws-sdk-go-v2/service/dynamodb
-//
-// # Hello AWS
-//
-// This example shows how you can use the v2 SDK to make an API request using the
-// SDK's Amazon DynamoDB client.
-//
-// package main
-//
-// import (
-// "context"
-// "fmt"
-// "log"
-//
-// "github.com/aws/aws-sdk-go-v2/aws"
-// "github.com/aws/aws-sdk-go-v2/config"
-// "github.com/aws/aws-sdk-go-v2/service/dynamodb"
-// )
-//
-// func main() {
-// // Using the SDK's default configuration, loading additional config
-// // and credentials values from the environment variables, shared
-// // credentials, and shared configuration files
-// cfg, err := config.LoadDefaultConfig(context.TODO(),
-// config.WithRegion("us-west-2"),
-// )
-// if err != nil {
-// log.Fatalf("unable to load SDK config, %v", err)
-// }
-//
-// // Using the Config value, create the DynamoDB client
-// svc := dynamodb.NewFromConfig(cfg)
-//
-// // Build the request with its input parameters
-// resp, err := svc.ListTables(context.TODO(), &dynamodb.ListTablesInput{
-// Limit: aws.Int32(5),
-// })
-// if err != nil {
-// log.Fatalf("failed to list tables, %v", err)
-// }
-//
-// fmt.Println("Tables:")
-// for _, tableName := range resp.TableNames {
-// fmt.Println(tableName)
-// }
-// }
-package sdk
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
index e8d4e2a5a..1f69e820e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/CHANGELOG.md
@@ -1,3 +1,287 @@
+# v1.16.30 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.29 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.28 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.27 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.26 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.25 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.16.24 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.23 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.22 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.21 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.20 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.19 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.18 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.17 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.16 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.15 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.14 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.13 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.12 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.11 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.10 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.9 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.8 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.7 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.6 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.5 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.1 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2024-03-21)
+
+* **Feature**: Add config switch `DisableDefaultTimeout` that allows you to disable the default operation timeout (5 seconds) for IMDS calls.
+
+# v1.15.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.11 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.10 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.9 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.8 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.7 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.5 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.4 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.3 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.2 (2023-11-02)
+
+* No change notes available for this release.
+
+# v1.14.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.13 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.12 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.11 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.10 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.5 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2023-03-14)
+
+* **Feature**: Add flag to disable IMDSv1 fallback
+
+# v1.12.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.19 (2022-10-24)
+
+* **Bug Fix**: Fixes an issue that prevented logging of the API request or responses when the respective log modes were enabled.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.12.17 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
index 53f3d3c78..3f4a10e2c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_client.go
@@ -106,8 +106,10 @@ func New(options Options, optFns ...func(*Options)) *Client {
// or adding custom middleware behavior.
func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
opts := Options{
- APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
- HTTPClient: cfg.HTTPClient,
+ APIOptions: append([]func(*middleware.Stack) error{}, cfg.APIOptions...),
+ HTTPClient: cfg.HTTPClient,
+ ClientLogMode: cfg.ClientLogMode,
+ Logger: cfg.Logger,
}
if cfg.Retryer != nil {
@@ -117,6 +119,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
resolveClientEnableState(cfg, &opts)
resolveEndpointConfig(cfg, &opts)
resolveEndpointModeConfig(cfg, &opts)
+ resolveEnableFallback(cfg, &opts)
return New(opts, optFns...)
}
@@ -172,6 +175,20 @@ type Options struct {
// The logger writer interface to write logging messages to.
Logger logging.Logger
+ // Configure IMDSv1 fallback behavior. By default, the client will attempt
+ // to fall back to IMDSv1 as needed for backwards compatibility. When set to [aws.FalseTernary]
+ // the client will return any errors encountered from attempting to fetch a token
+ // instead of silently using the insecure data flow of IMDSv1.
+ //
+ // See [configuring IMDS] for more information.
+ //
+ // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+ EnableFallback aws.Ternary
+
+ // By default, all IMDS client operations enforce a 5-second timeout. You
+ // can disable that behavior with this setting.
+ DisableDefaultTimeout bool
+
// provides the caching of API tokens used for operation calls. If unset,
// the API token will not be retrieved for the operation.
tokenProvider *tokenProvider
@@ -316,3 +333,20 @@ func resolveEndpointConfig(cfg aws.Config, options *Options) error {
options.Endpoint = value
return nil
}
+
+func resolveEnableFallback(cfg aws.Config, options *Options) {
+ if options.EnableFallback != aws.UnknownTernary {
+ return
+ }
+
+ disabled, ok := internalconfig.ResolveV1FallbackDisabled(cfg.ConfigSources)
+ if !ok {
+ return
+ }
+
+ if disabled {
+ options.EnableFallback = aws.FalseTernary
+ } else {
+ options.EnableFallback = aws.TrueTernary
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
index 9e3bdb0e6..af58b6bb1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetDynamicData.go
@@ -56,6 +56,7 @@ type GetDynamicDataOutput struct {
func addGetDynamicDataMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetDynamicData",
buildGetDynamicDataPath,
buildGetDynamicDataOutput)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
index 24845dccd..5111cc90c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetIAMInfo.go
@@ -53,6 +53,7 @@ type GetIAMInfoOutput struct {
func addGetIAMInfoMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetIAMInfo",
buildGetIAMInfoPath,
buildGetIAMInfoOutput,
)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
index a87758ed3..dc8c09edf 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetInstanceIdentityDocument.go
@@ -54,6 +54,7 @@ type GetInstanceIdentityDocumentOutput struct {
func addGetInstanceIdentityDocumentMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetInstanceIdentityDocument",
buildGetInstanceIdentityDocumentPath,
buildGetInstanceIdentityDocumentOutput,
)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
index cb0ce4c00..869bfc9fe 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetMetadata.go
@@ -56,6 +56,7 @@ type GetMetadataOutput struct {
func addGetMetadataMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetMetadata",
buildGetMetadataPath,
buildGetMetadataOutput)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
index 7b9b48912..8c0572bb5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetRegion.go
@@ -45,6 +45,7 @@ type GetRegionOutput struct {
func addGetRegionMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetRegion",
buildGetInstanceIdentityDocumentPath,
buildGetRegionOutput,
)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
index 841f802c1..1f9ee97a5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetToken.go
@@ -49,6 +49,7 @@ func addGetTokenMiddleware(stack *middleware.Stack, options Options) error {
err := addRequestMiddleware(stack,
options,
"PUT",
+ "GetToken",
buildGetTokenPath,
buildGetTokenOutput)
if err != nil {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
index 88aa61e9a..890369724 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/api_op_GetUserData.go
@@ -45,6 +45,7 @@ type GetUserDataOutput struct {
func addGetUserDataMiddleware(stack *middleware.Stack, options Options) error {
return addAPIRequestMiddleware(stack,
options,
+ "GetUserData",
buildGetUserDataPath,
buildGetUserDataOutput)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
new file mode 100644
index 000000000..ad283cf82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/auth.go
@@ -0,0 +1,48 @@
+package imds
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type signRequestMiddleware struct {
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
index bacdb5d21..d5765c36b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/doc.go
@@ -3,8 +3,9 @@
//
// All Client operation calls have a default timeout. If the operation is not
// completed before this timeout expires, the operation will be canceled. This
-// timeout can be overridden by providing Context with a timeout or deadline
-// with calling the client's operations.
+// timeout can be overridden through the following:
+// - Set the options flag DisableDefaultTimeout
+// - Provide a Context with a timeout or deadline with calling the client's operations.
//
// See the EC2 IMDS user guide for more information on using the API.
// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
new file mode 100644
index 000000000..d7540da34
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/endpoints.go
@@ -0,0 +1,20 @@
+package imds
+
+import (
+ "context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
index caf7671ee..dba9ef600 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/go_module_metadata.go
@@ -3,4 +3,4 @@
package imds
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.12.17"
+const goModuleVersion = "1.16.30"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
index d72fcb562..ce7745589 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/internal/config/resolvers.go
@@ -58,6 +58,10 @@ type EndpointResolver interface {
GetEC2IMDSEndpoint() (string, bool, error)
}
+type v1FallbackDisabledResolver interface {
+ GetEC2IMDSV1FallbackDisabled() (bool, bool)
+}
+
// ResolveClientEnableState resolves the ClientEnableState from a list of configuration sources.
func ResolveClientEnableState(sources []interface{}) (value ClientEnableState, found bool, err error) {
for _, source := range sources {
@@ -96,3 +100,15 @@ func ResolveEndpointConfig(sources []interface{}) (value string, found bool, err
}
return value, found, err
}
+
+// ResolveV1FallbackDisabled ...
+func ResolveV1FallbackDisabled(sources []interface{}) (bool, bool) {
+ for _, source := range sources {
+ if resolver, ok := source.(v1FallbackDisabledResolver); ok {
+ if v, found := resolver.GetEC2IMDSV1FallbackDisabled(); found {
+ return v, true
+ }
+ }
+ }
+ return false, false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
index 605cbd131..90cf4aeb3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/request_middleware.go
@@ -17,10 +17,11 @@ import (
func addAPIRequestMiddleware(stack *middleware.Stack,
options Options,
+ operation string,
getPath func(interface{}) (string, error),
getOutput func(*smithyhttp.Response) (interface{}, error),
) (err error) {
- err = addRequestMiddleware(stack, options, "GET", getPath, getOutput)
+ err = addRequestMiddleware(stack, options, "GET", operation, getPath, getOutput)
if err != nil {
return err
}
@@ -44,6 +45,7 @@ func addAPIRequestMiddleware(stack *middleware.Stack,
func addRequestMiddleware(stack *middleware.Stack,
options Options,
method string,
+ operation string,
getPath func(interface{}) (string, error),
getOutput func(*smithyhttp.Response) (interface{}, error),
) (err error) {
@@ -54,6 +56,7 @@ func addRequestMiddleware(stack *middleware.Stack,
// Operation timeout
err = stack.Initialize.Add(&operationTimeout{
+ Disabled: options.DisableDefaultTimeout,
DefaultTimeout: defaultOperationTimeout,
}, middleware.Before)
if err != nil {
@@ -86,6 +89,25 @@ func addRequestMiddleware(stack *middleware.Stack,
return err
}
+ err = stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: options.ClientLogMode.IsRequest(),
+ LogRequestWithBody: options.ClientLogMode.IsRequestWithBody(),
+ LogResponse: options.ClientLogMode.IsResponse(),
+ LogResponseWithBody: options.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+ if err != nil {
+ return err
+ }
+
+ err = addSetLoggerMiddleware(stack, options)
+ if err != nil {
+ return err
+ }
+
+ if err := addProtocolFinalizerMiddlewares(stack, options, operation); err != nil {
+ return fmt.Errorf("add protocol finalizers: %w", err)
+ }
+
// Retry support
return retry.AddRetryMiddlewares(stack, retry.AddRetryMiddlewaresOptions{
Retryer: options.Retryer,
@@ -93,6 +115,10 @@ func addRequestMiddleware(stack *middleware.Stack,
})
}
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
type serializeRequest struct {
GetPath func(interface{}) (string, error)
Method string
@@ -235,6 +261,7 @@ const (
// Otherwise the timeout cleanup will race the resource being consumed
// upstream.
type operationTimeout struct {
+ Disabled bool
DefaultTimeout time.Duration
}
@@ -245,6 +272,10 @@ func (m *operationTimeout) HandleInitialize(
) (
output middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
+ if m.Disabled {
+ return next.HandleInitialize(ctx, input)
+ }
+
if _, ok := ctx.Deadline(); !ok && m.DefaultTimeout != 0 {
var cancelFn func()
ctx, cancelFn = context.WithTimeout(ctx, m.DefaultTimeout)
@@ -264,3 +295,19 @@ func appendURIPath(base, add string) string {
}
return reqPath
}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %w", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %w", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
index 275fade48..5703c6e16 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/feature/ec2/imds/token_provider.go
@@ -4,12 +4,14 @@ import (
"context"
"errors"
"fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/logging"
"net/http"
"sync"
"sync/atomic"
"time"
- smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -68,7 +70,7 @@ func (t *tokenProvider) HandleFinalize(
) (
out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
) {
- if !t.enabled() {
+ if t.fallbackEnabled() && !t.enabled() {
// short-circuits to insecure data flow if token provider is disabled.
return next.HandleFinalize(ctx, input)
}
@@ -115,23 +117,15 @@ func (t *tokenProvider) HandleDeserialize(
}
if resp.StatusCode == http.StatusUnauthorized { // unauthorized
- err = &retryableError{Err: err}
t.enable()
+ err = &retryableError{Err: err, isRetryable: true}
}
return out, metadata, err
}
-type retryableError struct {
- Err error
-}
-
-func (*retryableError) RetryableError() bool { return true }
-
-func (e *retryableError) Error() string { return e.Err.Error() }
-
func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error) {
- if !t.enabled() {
+ if t.fallbackEnabled() && !t.enabled() {
return nil, &bypassTokenRetrievalError{
Err: fmt.Errorf("cannot get API token, provider disabled"),
}
@@ -147,7 +141,7 @@ func (t *tokenProvider) getToken(ctx context.Context) (tok *apiToken, err error)
tok, err = t.updateToken(ctx)
if err != nil {
- return nil, fmt.Errorf("cannot get API token, %w", err)
+ return nil, err
}
return tok, nil
@@ -167,17 +161,19 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
TokenTTL: t.tokenTTL,
})
if err != nil {
- // change the disabled flag on token provider to true, when error is request timeout error.
var statusErr interface{ HTTPStatusCode() int }
if errors.As(err, &statusErr) {
switch statusErr.HTTPStatusCode() {
-
- // Disable get token if failed because of 403, 404, or 405
+ // Disable future get token if failed because of 403, 404, or 405
case http.StatusForbidden,
http.StatusNotFound,
http.StatusMethodNotAllowed:
- t.disable()
+ if t.fallbackEnabled() {
+ logger := middleware.GetLogger(ctx)
+ logger.Logf(logging.Warn, "falling back to IMDSv1: %v", err)
+ t.disable()
+ }
// 400 errors are terminal, and need to be upstreamed
case http.StatusBadRequest:
@@ -192,8 +188,17 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
atomic.StoreUint32(&t.disabled, 1)
}
- // Token couldn't be retrieved, but bypass this, and allow the
- // request to continue.
+ if !t.fallbackEnabled() {
+ // NOTE: getToken() is an implementation detail of some outer operation
+ // (e.g. GetMetadata). It has its own retries that have already been exhausted.
+ // Mark the underlying error as a terminal error.
+ err = &retryableError{Err: err, isRetryable: false}
+ return nil, err
+ }
+
+ // Token couldn't be retrieved, fallback to IMDSv1 insecure flow for this request
+ // and allow the request to proceed. Future requests _may_ re-attempt fetching a
+ // token if not disabled.
return nil, &bypassTokenRetrievalError{Err: err}
}
@@ -206,21 +211,21 @@ func (t *tokenProvider) updateToken(ctx context.Context) (*apiToken, error) {
return tok, nil
}
-type bypassTokenRetrievalError struct {
- Err error
-}
-
-func (e *bypassTokenRetrievalError) Error() string {
- return fmt.Sprintf("bypass token retrieval, %v", e.Err)
-}
-
-func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
-
// enabled returns if the token provider is current enabled or not.
func (t *tokenProvider) enabled() bool {
return atomic.LoadUint32(&t.disabled) == 0
}
+// fallbackEnabled returns false if EnableFallback is [aws.FalseTernary], true otherwise
+func (t *tokenProvider) fallbackEnabled() bool {
+ switch t.client.options.EnableFallback {
+ case aws.FalseTernary:
+ return false
+ default:
+ return true
+ }
+}
+
// disable disables the token provider and it will no longer attempt to inject
// the token, nor request updates.
func (t *tokenProvider) disable() {
@@ -235,3 +240,22 @@ func (t *tokenProvider) enable() {
t.tokenMux.Unlock()
atomic.StoreUint32(&t.disabled, 0)
}
+
+type bypassTokenRetrievalError struct {
+ Err error
+}
+
+func (e *bypassTokenRetrievalError) Error() string {
+ return fmt.Sprintf("bypass token retrieval, %v", e.Err)
+}
+
+func (e *bypassTokenRetrievalError) Unwrap() error { return e.Err }
+
+type retryableError struct {
+ Err error
+ isRetryable bool
+}
+
+func (e *retryableError) RetryableError() bool { return e.isRetryable }
+
+func (e *retryableError) Error() string { return e.Err.Error() }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go
new file mode 100644
index 000000000..0b81db548
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/auth.go
@@ -0,0 +1,45 @@
+package auth
+
+import (
+ "github.com/aws/smithy-go/auth"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// HTTPAuthScheme is the SDK's internal implementation of smithyhttp.AuthScheme
+// for pre-existing implementations where the signer was added to client
+// config. SDK clients will key off of this type and ensure per-operation
+// updates to those signers persist on the scheme itself.
+type HTTPAuthScheme struct {
+ schemeID string
+ signer smithyhttp.Signer
+}
+
+var _ smithyhttp.AuthScheme = (*HTTPAuthScheme)(nil)
+
+// NewHTTPAuthScheme returns an auth scheme instance with the given config.
+func NewHTTPAuthScheme(schemeID string, signer smithyhttp.Signer) *HTTPAuthScheme {
+ return &HTTPAuthScheme{
+ schemeID: schemeID,
+ signer: signer,
+ }
+}
+
+// SchemeID identifies the auth scheme.
+func (s *HTTPAuthScheme) SchemeID() string {
+ return s.schemeID
+}
+
+// IdentityResolver gets the identity resolver for the auth scheme.
+func (s *HTTPAuthScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver {
+ return o.GetIdentityResolver(s.schemeID)
+}
+
+// Signer gets the signer for the auth scheme.
+func (s *HTTPAuthScheme) Signer() smithyhttp.Signer {
+ return s.signer
+}
+
+// WithSigner returns a new instance of the auth scheme with the updated signer.
+func (s *HTTPAuthScheme) WithSigner(signer smithyhttp.Signer) *HTTPAuthScheme {
+ return NewHTTPAuthScheme(s.schemeID, signer)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
new file mode 100644
index 000000000..bbc2ec06e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/scheme.go
@@ -0,0 +1,191 @@
+package auth
+
+import (
+ "context"
+ "fmt"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// SigV4 is a constant representing
+// Authentication Scheme Signature Version 4
+const SigV4 = "sigv4"
+
+// SigV4A is a constant representing
+// Authentication Scheme Signature Version 4A
+const SigV4A = "sigv4a"
+
+// SigV4S3Express identifies the S3 S3Express auth scheme.
+const SigV4S3Express = "sigv4-s3express"
+
+// None is a constant representing the
+// None Authentication Scheme
+const None = "none"
+
+// SupportedSchemes is a data structure
+// that indicates the list of supported AWS
+// authentication schemes
+var SupportedSchemes = map[string]bool{
+ SigV4: true,
+ SigV4A: true,
+ SigV4S3Express: true,
+ None: true,
+}
+
+// AuthenticationScheme is a representation of
+// AWS authentication schemes
+type AuthenticationScheme interface {
+ isAuthenticationScheme()
+}
+
+// AuthenticationSchemeV4 is a AWS SigV4 representation
+type AuthenticationSchemeV4 struct {
+ Name string
+ SigningName *string
+ SigningRegion *string
+ DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4) isAuthenticationScheme() {}
+
+// AuthenticationSchemeV4A is a AWS SigV4A representation
+type AuthenticationSchemeV4A struct {
+ Name string
+ SigningName *string
+ SigningRegionSet []string
+ DisableDoubleEncoding *bool
+}
+
+func (a *AuthenticationSchemeV4A) isAuthenticationScheme() {}
+
+// AuthenticationSchemeNone is a representation for the none auth scheme
+type AuthenticationSchemeNone struct{}
+
+func (a *AuthenticationSchemeNone) isAuthenticationScheme() {}
+
+// NoAuthenticationSchemesFoundError is used in signaling
+// that no authentication schemes have been specified.
+type NoAuthenticationSchemesFoundError struct{}
+
+func (e *NoAuthenticationSchemesFoundError) Error() string {
+ return fmt.Sprint("No authentication schemes specified.")
+}
+
+// UnSupportedAuthenticationSchemeSpecifiedError is used in
+// signaling that only unsupported authentication schemes
+// were specified.
+type UnSupportedAuthenticationSchemeSpecifiedError struct {
+ UnsupportedSchemes []string
+}
+
+func (e *UnSupportedAuthenticationSchemeSpecifiedError) Error() string {
+ return fmt.Sprint("Unsupported authentication scheme specified.")
+}
+
+// GetAuthenticationSchemes extracts the relevant authentication scheme data
+// into a custom strongly typed Go data structure.
+func GetAuthenticationSchemes(p *smithy.Properties) ([]AuthenticationScheme, error) {
+ var result []AuthenticationScheme
+ if !p.Has("authSchemes") {
+ return nil, &NoAuthenticationSchemesFoundError{}
+ }
+
+ authSchemes, _ := p.Get("authSchemes").([]interface{})
+
+ var unsupportedSchemes []string
+ for _, scheme := range authSchemes {
+ authScheme, _ := scheme.(map[string]interface{})
+
+ version := authScheme["name"].(string)
+ switch version {
+ case SigV4, SigV4S3Express:
+ v4Scheme := AuthenticationSchemeV4{
+ Name: version,
+ SigningName: getSigningName(authScheme),
+ SigningRegion: getSigningRegion(authScheme),
+ DisableDoubleEncoding: getDisableDoubleEncoding(authScheme),
+ }
+ result = append(result, AuthenticationScheme(&v4Scheme))
+ case SigV4A:
+ v4aScheme := AuthenticationSchemeV4A{
+ Name: SigV4A,
+ SigningName: getSigningName(authScheme),
+ SigningRegionSet: getSigningRegionSet(authScheme),
+ DisableDoubleEncoding: getDisableDoubleEncoding(authScheme),
+ }
+ result = append(result, AuthenticationScheme(&v4aScheme))
+ case None:
+ noneScheme := AuthenticationSchemeNone{}
+ result = append(result, AuthenticationScheme(&noneScheme))
+ default:
+ unsupportedSchemes = append(unsupportedSchemes, authScheme["name"].(string))
+ continue
+ }
+ }
+
+ if len(result) == 0 {
+ return nil, &UnSupportedAuthenticationSchemeSpecifiedError{
+ UnsupportedSchemes: unsupportedSchemes,
+ }
+ }
+
+ return result, nil
+}
+
+type disableDoubleEncoding struct{}
+
+// SetDisableDoubleEncoding sets or modifies the disable double encoding option
+// on the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func SetDisableDoubleEncoding(ctx context.Context, value bool) context.Context {
+ return middleware.WithStackValue(ctx, disableDoubleEncoding{}, value)
+}
+
+// GetDisableDoubleEncoding retrieves the disable double encoding option
+// from the context.
+//
+// Scoped to stack values. Use github.com/aws/smithy-go/middleware#ClearStackValues
+// to clear all stack values.
+func GetDisableDoubleEncoding(ctx context.Context) (value bool, ok bool) {
+ value, ok = middleware.GetStackValue(ctx, disableDoubleEncoding{}).(bool)
+ return value, ok
+}
+
+func getSigningName(authScheme map[string]interface{}) *string {
+ signingName, ok := authScheme["signingName"].(string)
+ if !ok || signingName == "" {
+ return nil
+ }
+ return &signingName
+}
+
+func getSigningRegionSet(authScheme map[string]interface{}) []string {
+ untypedSigningRegionSet, ok := authScheme["signingRegionSet"].([]interface{})
+ if !ok {
+ return nil
+ }
+ signingRegionSet := []string{}
+ for _, item := range untypedSigningRegionSet {
+ signingRegionSet = append(signingRegionSet, item.(string))
+ }
+ return signingRegionSet
+}
+
+func getSigningRegion(authScheme map[string]interface{}) *string {
+ signingRegion, ok := authScheme["signingRegion"].(string)
+ if !ok || signingRegion == "" {
+ return nil
+ }
+ return &signingRegion
+}
+
+func getDisableDoubleEncoding(authScheme map[string]interface{}) *bool {
+ disableDoubleEncoding, ok := authScheme["disableDoubleEncoding"].(bool)
+ if !ok {
+ return nil
+ }
+ return &disableDoubleEncoding
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go
new file mode 100644
index 000000000..f059b5d39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_adapter.go
@@ -0,0 +1,43 @@
+package smithy
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/auth/bearer"
+)
+
+// BearerTokenAdapter adapts smithy bearer.Token to smithy auth.Identity.
+type BearerTokenAdapter struct {
+ Token bearer.Token
+}
+
+var _ auth.Identity = (*BearerTokenAdapter)(nil)
+
+// Expiration returns the time of expiration for the token.
+func (v *BearerTokenAdapter) Expiration() time.Time {
+ return v.Token.Expires
+}
+
+// BearerTokenProviderAdapter adapts smithy bearer.TokenProvider to smithy
+// auth.IdentityResolver.
+type BearerTokenProviderAdapter struct {
+ Provider bearer.TokenProvider
+}
+
+var _ (auth.IdentityResolver) = (*BearerTokenProviderAdapter)(nil)
+
+// GetIdentity retrieves a bearer token using the underlying provider.
+func (v *BearerTokenProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) (
+ auth.Identity, error,
+) {
+ token, err := v.Provider.RetrieveBearerToken(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get token: %w", err)
+ }
+
+ return &BearerTokenAdapter{Token: token}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go
new file mode 100644
index 000000000..a88281527
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/bearer_token_signer_adapter.go
@@ -0,0 +1,35 @@
+package smithy
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/auth/bearer"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// BearerTokenSignerAdapter adapts smithy bearer.Signer to smithy http
+// auth.Signer.
+type BearerTokenSignerAdapter struct {
+ Signer bearer.Signer
+}
+
+var _ (smithyhttp.Signer) = (*BearerTokenSignerAdapter)(nil)
+
+// SignRequest signs the request with the provided bearer token.
+func (v *BearerTokenSignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, _ smithy.Properties) error {
+ ca, ok := identity.(*BearerTokenAdapter)
+ if !ok {
+ return fmt.Errorf("unexpected identity type: %T", identity)
+ }
+
+ signed, err := v.Signer.SignWithBearerToken(ctx, ca.Token, r)
+ if err != nil {
+ return fmt.Errorf("sign request: %w", err)
+ }
+
+ *r = *signed.(*smithyhttp.Request)
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go
new file mode 100644
index 000000000..f926c4aaa
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/credentials_adapter.go
@@ -0,0 +1,46 @@
+package smithy
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+)
+
+// CredentialsAdapter adapts aws.Credentials to auth.Identity.
+type CredentialsAdapter struct {
+ Credentials aws.Credentials
+}
+
+var _ auth.Identity = (*CredentialsAdapter)(nil)
+
+// Expiration returns the time of expiration for the credentials.
+func (v *CredentialsAdapter) Expiration() time.Time {
+ return v.Credentials.Expires
+}
+
+// CredentialsProviderAdapter adapts aws.CredentialsProvider to auth.IdentityResolver.
+type CredentialsProviderAdapter struct {
+ Provider aws.CredentialsProvider
+}
+
+var _ (auth.IdentityResolver) = (*CredentialsProviderAdapter)(nil)
+
+// GetIdentity retrieves AWS credentials using the underlying provider.
+func (v *CredentialsProviderAdapter) GetIdentity(ctx context.Context, _ smithy.Properties) (
+ auth.Identity, error,
+) {
+ if v.Provider == nil {
+ return &CredentialsAdapter{Credentials: aws.Credentials{}}, nil
+ }
+
+ creds, err := v.Provider.Retrieve(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("get credentials: %w", err)
+ }
+
+ return &CredentialsAdapter{Credentials: creds}, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go
new file mode 100644
index 000000000..42b458673
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/smithy.go
@@ -0,0 +1,2 @@
+// Package smithy adapts concrete AWS auth and signing types to the generic smithy versions.
+package smithy
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go
new file mode 100644
index 000000000..24db8e144
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/auth/smithy/v4signer_adapter.go
@@ -0,0 +1,57 @@
+package smithy
+
+import (
+ "context"
+ "fmt"
+
+ v4 "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/aws-sdk-go-v2/internal/sdk"
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// V4SignerAdapter adapts v4.HTTPSigner to smithy http.Signer.
+type V4SignerAdapter struct {
+ Signer v4.HTTPSigner
+ Logger logging.Logger
+ LogSigning bool
+}
+
+var _ (smithyhttp.Signer) = (*V4SignerAdapter)(nil)
+
+// SignRequest signs the request with the provided identity.
+func (v *V4SignerAdapter) SignRequest(ctx context.Context, r *smithyhttp.Request, identity auth.Identity, props smithy.Properties) error {
+ ca, ok := identity.(*CredentialsAdapter)
+ if !ok {
+ return fmt.Errorf("unexpected identity type: %T", identity)
+ }
+
+ name, ok := smithyhttp.GetSigV4SigningName(&props)
+ if !ok {
+ return fmt.Errorf("sigv4 signing name is required")
+ }
+
+ region, ok := smithyhttp.GetSigV4SigningRegion(&props)
+ if !ok {
+ return fmt.Errorf("sigv4 signing region is required")
+ }
+
+ hash := v4.GetPayloadHash(ctx)
+ signingTime := sdk.NowTime()
+ skew := internalcontext.GetAttemptSkewContext(ctx)
+ signingTime = signingTime.Add(skew)
+ err := v.Signer.SignHTTP(ctx, ca.Credentials, r.Request, hash, name, region, signingTime, func(o *v4.SignerOptions) {
+ o.DisableURIPathEscaping, _ = smithyhttp.GetDisableDoubleEncoding(&props)
+
+ o.Logger = v.Logger
+ o.LogSigning = v.LogSigning
+ })
+ if err != nil {
+ return fmt.Errorf("sign http: %w", err)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
similarity index 93%
rename from vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
rename to vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
index 1a3d106d5..938cd14c1 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/copy.go
@@ -51,7 +51,11 @@ func rcopy(dst, src reflect.Value, root bool) {
e := src.Type().Elem()
if dst.CanSet() && !src.IsNil() {
if _, ok := src.Interface().(*time.Time); !ok {
- dst.Set(reflect.New(e))
+ if dst.Kind() == reflect.String {
+ dst.SetString(e.String())
+ } else {
+ dst.Set(reflect.New(e))
+ }
} else {
tempValue := reflect.New(e)
tempValue.Elem().Set(src.Elem())
@@ -59,7 +63,7 @@ func rcopy(dst, src reflect.Value, root bool) {
dst.Set(tempValue)
}
}
- if src.Elem().IsValid() {
+ if dst.Kind() != reflect.String && src.Elem().IsValid() {
// Keep the current root state since the depth hasn't changed
rcopy(dst.Elem(), src.Elem(), root)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
new file mode 100644
index 000000000..bcfe51a2b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/equal.go
@@ -0,0 +1,33 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil, and of the same type the are equal
+ // If they are of different types they are not equal
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ // Special casing for strings as typed enumerations are string aliases
+ // but are not deep equal.
+ if ra.Kind() == reflect.String && rb.Kind() == reflect.String {
+ return ra.String() == rb.String()
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
similarity index 83%
rename from vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
rename to vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
index 11d4240d6..1adecae6b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/prettify.go
@@ -18,7 +18,9 @@ func Prettify(i interface{}) string {
// prettify will recursively walk value v to build a textual
// representation of the value.
func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ isPtr := false
for v.Kind() == reflect.Ptr {
+ isPtr = true
v = v.Elem()
}
@@ -33,6 +35,9 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
break
}
+ if isPtr {
+ buf.WriteRune('&')
+ }
buf.WriteString("{\n")
names := []string{}
@@ -50,19 +55,9 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
for i, n := range names {
val := v.FieldByName(n)
- ft, ok := v.Type().FieldByName(n)
- if !ok {
- panic(fmt.Sprintf("expected to find field %v on type %v, but was not found", n, v.Type()))
- }
-
buf.WriteString(strings.Repeat(" ", indent+2))
buf.WriteString(n + ": ")
-
- if tag := ft.Tag.Get("sensitive"); tag == "true" {
- buf.WriteString("")
- } else {
- prettify(val, indent+2, buf)
- }
+ prettify(val, indent+2, buf)
if i < len(names)-1 {
buf.WriteString(",\n")
@@ -77,9 +72,9 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
break
}
- nl, id, id2 := "", "", ""
- if v.Len() > 3 {
- nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ nl, id, id2 := "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ if isPtr {
+ buf.WriteRune('&')
}
buf.WriteString("[" + nl)
for i := 0; i < v.Len(); i++ {
@@ -93,6 +88,9 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
buf.WriteString(nl + id + "]")
case reflect.Map:
+ if isPtr {
+ buf.WriteRune('&')
+ }
buf.WriteString("{\n")
for i, k := range v.MapKeys() {
@@ -111,6 +109,16 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
fmt.Fprint(buf, "")
return
}
+
+ for v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+
+ if v.Kind() == reflect.Ptr || v.Kind() == reflect.Struct || v.Kind() == reflect.Map || v.Kind() == reflect.Slice {
+ prettify(v, indent, buf)
+ return
+ }
+
format := "%v"
switch v.Interface().(type) {
case string:
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
similarity index 97%
rename from vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
rename to vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
index 3f7cffd95..645df2450 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/awsutil/string_value.go
@@ -8,8 +8,6 @@ import (
)
// StringValue returns the string representation of a value.
-//
-// Deprecated: Use Prettify instead.
func StringValue(i interface{}) string {
var buf bytes.Buffer
stringValue(reflect.ValueOf(i), 0, &buf)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
index 56750f889..e0ebf3903 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/CHANGELOG.md
@@ -1,3 +1,316 @@
+# v1.4.6 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.5 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.4 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.3 (2025-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.37 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.36 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.35 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.34 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.33 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.32 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.31 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.30 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.29 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.3.28 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.27 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.26 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.25 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.24 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.23 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.22 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.21 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.20 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.19 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.18 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.17 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.16 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.15 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.14 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.13 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.12 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.11 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.10 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.9 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.8 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2024-03-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.43 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.42 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.41 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.40 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.39 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.38 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.37 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.36 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.35 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.34 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.33 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.32 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.31 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.30 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.29 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.28 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.27 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.25 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.24 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.1.23 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
new file mode 100644
index 000000000..e7835f852
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/endpoints.go
@@ -0,0 +1,57 @@
+package configsources
+
+import (
+ "context"
+)
+
+// ServiceBaseEndpointProvider is needed to search for all providers
+// that provide a configured service endpoint
+type ServiceBaseEndpointProvider interface {
+ GetServiceBaseEndpoint(ctx context.Context, sdkID string) (string, bool, error)
+}
+
+// IgnoreConfiguredEndpointsProvider is needed to search for all providers
+// that provide a flag to disable configured endpoints.
+//
+// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because
+// service packages cannot import github.com/aws/aws-sdk-go-v2/config
+// due to result import cycle error.
+type IgnoreConfiguredEndpointsProvider interface {
+ GetIgnoreConfiguredEndpoints(ctx context.Context) (bool, bool, error)
+}
+
+// GetIgnoreConfiguredEndpoints is used in knowing when to disable configured
+// endpoints feature.
+//
+// Currently duplicated from github.com/aws/aws-sdk-go-v2/config because
+// service packages cannot import github.com/aws/aws-sdk-go-v2/config
+// due to result import cycle error.
+func GetIgnoreConfiguredEndpoints(ctx context.Context, configs []interface{}) (value bool, found bool, err error) {
+ for _, cfg := range configs {
+ if p, ok := cfg.(IgnoreConfiguredEndpointsProvider); ok {
+ value, found, err = p.GetIgnoreConfiguredEndpoints(ctx)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
+
+// ResolveServiceBaseEndpoint is used to retrieve service endpoints from configured sources
+// while allowing for configured endpoints to be disabled
+func ResolveServiceBaseEndpoint(ctx context.Context, sdkID string, configs []interface{}) (value string, found bool, err error) {
+ if val, found, _ := GetIgnoreConfiguredEndpoints(ctx, configs); found && val {
+ return "", false, nil
+ }
+
+ for _, cs := range configs {
+ if p, ok := cs.(ServiceBaseEndpointProvider); ok {
+ value, found, err = p.GetServiceBaseEndpoint(context.Background(), sdkID)
+ if err != nil || found {
+ break
+ }
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
index 99eaea1b8..3479c11c4 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/configsources/go_module_metadata.go
@@ -3,4 +3,4 @@
package configsources
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.1.23"
+const goModuleVersion = "1.4.6"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go
new file mode 100644
index 000000000..f0c283d39
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/context/context.go
@@ -0,0 +1,52 @@
+package context
+
+import (
+ "context"
+ "time"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type s3BackendKey struct{}
+type checksumInputAlgorithmKey struct{}
+type clockSkew struct{}
+
+const (
+ // S3BackendS3Express identifies the S3Express backend
+ S3BackendS3Express = "S3Express"
+)
+
+// SetS3Backend stores the resolved endpoint backend within the request
+// context, which is required for a variety of custom S3 behaviors.
+func SetS3Backend(ctx context.Context, typ string) context.Context {
+ return middleware.WithStackValue(ctx, s3BackendKey{}, typ)
+}
+
+// GetS3Backend retrieves the stored endpoint backend within the context.
+func GetS3Backend(ctx context.Context) string {
+ v, _ := middleware.GetStackValue(ctx, s3BackendKey{}).(string)
+ return v
+}
+
+// SetChecksumInputAlgorithm sets the request checksum algorithm on the
+// context.
+func SetChecksumInputAlgorithm(ctx context.Context, value string) context.Context {
+ return middleware.WithStackValue(ctx, checksumInputAlgorithmKey{}, value)
+}
+
+// GetChecksumInputAlgorithm returns the checksum algorithm from the context.
+func GetChecksumInputAlgorithm(ctx context.Context) string {
+ v, _ := middleware.GetStackValue(ctx, checksumInputAlgorithmKey{}).(string)
+ return v
+}
+
+// SetAttemptSkewContext sets the clock skew value on the context
+func SetAttemptSkewContext(ctx context.Context, v time.Duration) context.Context {
+ return middleware.WithStackValue(ctx, clockSkew{}, v)
+}
+
+// GetAttemptSkewContext gets the clock skew value from the context
+func GetAttemptSkewContext(ctx context.Context) time.Duration {
+ x, _ := middleware.GetStackValue(ctx, clockSkew{}).(time.Duration)
+ return x
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
new file mode 100644
index 000000000..e6223dd3b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/arn.go
@@ -0,0 +1,94 @@
+package awsrulesfn
+
+import (
+ "strings"
+)
+
+// ARN provides AWS ARN components broken out into a data structure.
+type ARN struct {
+ Partition string
+ Service string
+ Region string
+ AccountId string
+ ResourceId OptionalStringSlice
+}
+
+const (
+ arnDelimiters = ":"
+ resourceDelimiters = "/:"
+ arnSections = 6
+ arnPrefix = "arn:"
+
+ // zero-indexed
+ sectionPartition = 1
+ sectionService = 2
+ sectionRegion = 3
+ sectionAccountID = 4
+ sectionResource = 5
+)
+
+// ParseARN returns an [ARN] value parsed from the input string provided. If
+// the ARN cannot be parsed nil will be returned, and error added to
+// [ErrorCollector].
+func ParseARN(input string) *ARN {
+ if !strings.HasPrefix(input, arnPrefix) {
+ return nil
+ }
+
+ sections := strings.SplitN(input, arnDelimiters, arnSections)
+ if numSections := len(sections); numSections != arnSections {
+ return nil
+ }
+
+ if sections[sectionPartition] == "" {
+ return nil
+ }
+ if sections[sectionService] == "" {
+ return nil
+ }
+ if sections[sectionResource] == "" {
+ return nil
+ }
+
+ return &ARN{
+ Partition: sections[sectionPartition],
+ Service: sections[sectionService],
+ Region: sections[sectionRegion],
+ AccountId: sections[sectionAccountID],
+ ResourceId: splitResource(sections[sectionResource]),
+ }
+}
+
+// splitResource splits the resource components by the ARN resource delimiters.
+func splitResource(v string) []string {
+ var parts []string
+ var offset int
+
+ for offset <= len(v) {
+ idx := strings.IndexAny(v[offset:], "/:")
+ if idx < 0 {
+ parts = append(parts, v[offset:])
+ break
+ }
+ parts = append(parts, v[offset:idx+offset])
+ offset += idx + 1
+ }
+
+ return parts
+}
+
+// OptionalStringSlice provides a helper to safely get the index of a string
+// slice that may be out of bounds. Returns pointer to string if index is
+// valid. Otherwise returns nil.
+type OptionalStringSlice []string
+
+// Get returns a string pointer of the string at index i if the index is valid.
+// Otherwise returns nil.
+func (s OptionalStringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
new file mode 100644
index 000000000..d5a365853
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/doc.go
@@ -0,0 +1,3 @@
+// Package awsrulesfn provides AWS focused endpoint rule functions for
+// evaluating endpoint resolution rules.
+package awsrulesfn
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
new file mode 100644
index 000000000..df72da97c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/generate.go
@@ -0,0 +1,7 @@
+//go:build codegen
+// +build codegen
+
+package awsrulesfn
+
+//go:generate go run -tags codegen ./internal/partition/codegen.go -model partitions.json -output partitions.go
+//go:generate gofmt -w -s .
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
new file mode 100644
index 000000000..637e5fc18
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/host.go
@@ -0,0 +1,51 @@
+package awsrulesfn
+
+import (
+ "net"
+ "strings"
+
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// IsVirtualHostableS3Bucket returns if the input is a DNS compatible bucket
+// name and can be used with Amazon S3 virtual hosted style addressing. Similar
+// to [rulesfn.IsValidHostLabel] with the added restriction that the length of label
+// must be [3:63] characters long, all lowercase, and not formatted as an IP
+// address.
+func IsVirtualHostableS3Bucket(input string, allowSubDomains bool) bool {
+ // input should not be formatted as an IP address
+ // NOTE: this will technically trip up on IPv6 hosts with zone IDs, but
+ // validation further down will catch that anyway (it's guaranteed to have
+ // unfriendly characters % and : if that's the case)
+ if net.ParseIP(input) != nil {
+ return false
+ }
+
+ var labels []string
+ if allowSubDomains {
+ labels = strings.Split(input, ".")
+ } else {
+ labels = []string{input}
+ }
+
+ for _, label := range labels {
+ // validate special length constraints
+ if l := len(label); l < 3 || l > 63 {
+ return false
+ }
+
+ // Validate no capital letters
+ for _, r := range label {
+ if r >= 'A' && r <= 'Z' {
+ return false
+ }
+ }
+
+ // Validate valid host label
+ if !smithyhttp.ValidHostLabel(label) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go
new file mode 100644
index 000000000..91414afe8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partition.go
@@ -0,0 +1,76 @@
+package awsrulesfn
+
+import "regexp"
+
+// Partition provides the metadata describing an AWS partition.
+type Partition struct {
+ ID string `json:"id"`
+ Regions map[string]RegionOverrides `json:"regions"`
+ RegionRegex string `json:"regionRegex"`
+ DefaultConfig PartitionConfig `json:"outputs"`
+}
+
+// PartitionConfig provides the endpoint metadata for an AWS region or partition.
+type PartitionConfig struct {
+ Name string `json:"name"`
+ DnsSuffix string `json:"dnsSuffix"`
+ DualStackDnsSuffix string `json:"dualStackDnsSuffix"`
+ SupportsFIPS bool `json:"supportsFIPS"`
+ SupportsDualStack bool `json:"supportsDualStack"`
+ ImplicitGlobalRegion string `json:"implicitGlobalRegion"`
+}
+
+type RegionOverrides struct {
+ Name *string `json:"name"`
+ DnsSuffix *string `json:"dnsSuffix"`
+ DualStackDnsSuffix *string `json:"dualStackDnsSuffix"`
+ SupportsFIPS *bool `json:"supportsFIPS"`
+ SupportsDualStack *bool `json:"supportsDualStack"`
+}
+
+const defaultPartition = "aws"
+
+func getPartition(partitions []Partition, region string) *PartitionConfig {
+ for _, partition := range partitions {
+ if v, ok := partition.Regions[region]; ok {
+ p := mergeOverrides(partition.DefaultConfig, v)
+ return &p
+ }
+ }
+
+ for _, partition := range partitions {
+ regionRegex := regexp.MustCompile(partition.RegionRegex)
+ if regionRegex.MatchString(region) {
+ v := partition.DefaultConfig
+ return &v
+ }
+ }
+
+ for _, partition := range partitions {
+ if partition.ID == defaultPartition {
+ v := partition.DefaultConfig
+ return &v
+ }
+ }
+
+ return nil
+}
+
+func mergeOverrides(into PartitionConfig, from RegionOverrides) PartitionConfig {
+ if from.Name != nil {
+ into.Name = *from.Name
+ }
+ if from.DnsSuffix != nil {
+ into.DnsSuffix = *from.DnsSuffix
+ }
+ if from.DualStackDnsSuffix != nil {
+ into.DualStackDnsSuffix = *from.DualStackDnsSuffix
+ }
+ if from.SupportsFIPS != nil {
+ into.SupportsFIPS = *from.SupportsFIPS
+ }
+ if from.SupportsDualStack != nil {
+ into.SupportsDualStack = *from.SupportsDualStack
+ }
+ return into
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
new file mode 100644
index 000000000..d4e6611f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.go
@@ -0,0 +1,488 @@
+// Code generated by endpoint/awsrulesfn/internal/partition. DO NOT EDIT.
+
+package awsrulesfn
+
+// GetPartition returns an AWS [Partition] for the region provided. If the
+// partition cannot be determined nil will be returned.
+func GetPartition(region string) *PartitionConfig {
+ return getPartition(partitions, region)
+}
+
+var partitions = []Partition{
+ {
+ ID: "aws",
+ RegionRegex: "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws",
+ DnsSuffix: "amazonaws.com",
+ DualStackDnsSuffix: "api.aws",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "us-east-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "af-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-east-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-northeast-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-northeast-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-northeast-3": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-south-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-3": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-4": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-5": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-6": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ap-southeast-7": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "aws-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ca-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "ca-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-central-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-north-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-south-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-west-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-west-3": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "il-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "me-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "me-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "mx-central-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "sa-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-east-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-west-2": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ RegionRegex: "^cn\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-cn",
+ DnsSuffix: "amazonaws.com.cn",
+ DualStackDnsSuffix: "api.amazonwebservices.com.cn",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "cn-northwest-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-cn-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "cn-north-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "cn-northwest-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-eusc",
+ RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-eusc",
+ DnsSuffix: "amazonaws.eu",
+ DualStackDnsSuffix: "api.amazonwebservices.eu",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "eusc-de-east-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "eusc-de-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-iso",
+ RegionRegex: "^us\\-iso\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso",
+ DnsSuffix: "c2s.ic.gov",
+ DualStackDnsSuffix: "api.aws.ic.gov",
+ SupportsFIPS: true,
+ SupportsDualStack: false,
+ ImplicitGlobalRegion: "us-iso-east-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-iso-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-iso-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-iso-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-iso-b",
+ RegionRegex: "^us\\-isob\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso-b",
+ DnsSuffix: "sc2s.sgov.gov",
+ DualStackDnsSuffix: "api.aws.scloud",
+ SupportsFIPS: true,
+ SupportsDualStack: false,
+ ImplicitGlobalRegion: "us-isob-east-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-iso-b-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-isob-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-iso-e",
+ RegionRegex: "^eu\\-isoe\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso-e",
+ DnsSuffix: "cloud.adc-e.uk",
+ DualStackDnsSuffix: "api.cloud-aws.adc-e.uk",
+ SupportsFIPS: true,
+ SupportsDualStack: false,
+ ImplicitGlobalRegion: "eu-isoe-west-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-iso-e-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "eu-isoe-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-iso-f",
+ RegionRegex: "^us\\-isof\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-iso-f",
+ DnsSuffix: "csp.hci.ic.gov",
+ DualStackDnsSuffix: "api.aws.hci.ic.gov",
+ SupportsFIPS: true,
+ SupportsDualStack: false,
+ ImplicitGlobalRegion: "us-isof-south-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-iso-f-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-isof-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-isof-south-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+ {
+ ID: "aws-us-gov",
+ RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$",
+ DefaultConfig: PartitionConfig{
+ Name: "aws-us-gov",
+ DnsSuffix: "amazonaws.com",
+ DualStackDnsSuffix: "api.aws",
+ SupportsFIPS: true,
+ SupportsDualStack: true,
+ ImplicitGlobalRegion: "us-gov-west-1",
+ },
+ Regions: map[string]RegionOverrides{
+ "aws-us-gov-global": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-gov-east-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ "us-gov-west-1": {
+ Name: nil,
+ DnsSuffix: nil,
+ DualStackDnsSuffix: nil,
+ SupportsFIPS: nil,
+ SupportsDualStack: nil,
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
new file mode 100644
index 000000000..c6582c9c6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn/partitions.json
@@ -0,0 +1,264 @@
+{
+ "partitions" : [ {
+ "id" : "aws",
+ "outputs" : {
+ "dnsSuffix" : "amazonaws.com",
+ "dualStackDnsSuffix" : "api.aws",
+ "implicitGlobalRegion" : "us-east-1",
+ "name" : "aws",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$",
+ "regions" : {
+ "af-south-1" : {
+ "description" : "Africa (Cape Town)"
+ },
+ "ap-east-1" : {
+ "description" : "Asia Pacific (Hong Kong)"
+ },
+ "ap-east-2" : {
+ "description" : "Asia Pacific (Taipei)"
+ },
+ "ap-northeast-1" : {
+ "description" : "Asia Pacific (Tokyo)"
+ },
+ "ap-northeast-2" : {
+ "description" : "Asia Pacific (Seoul)"
+ },
+ "ap-northeast-3" : {
+ "description" : "Asia Pacific (Osaka)"
+ },
+ "ap-south-1" : {
+ "description" : "Asia Pacific (Mumbai)"
+ },
+ "ap-south-2" : {
+ "description" : "Asia Pacific (Hyderabad)"
+ },
+ "ap-southeast-1" : {
+ "description" : "Asia Pacific (Singapore)"
+ },
+ "ap-southeast-2" : {
+ "description" : "Asia Pacific (Sydney)"
+ },
+ "ap-southeast-3" : {
+ "description" : "Asia Pacific (Jakarta)"
+ },
+ "ap-southeast-4" : {
+ "description" : "Asia Pacific (Melbourne)"
+ },
+ "ap-southeast-5" : {
+ "description" : "Asia Pacific (Malaysia)"
+ },
+ "ap-southeast-6" : {
+ "description" : "Asia Pacific (New Zealand)"
+ },
+ "ap-southeast-7" : {
+ "description" : "Asia Pacific (Thailand)"
+ },
+ "aws-global" : {
+ "description" : "aws global region"
+ },
+ "ca-central-1" : {
+ "description" : "Canada (Central)"
+ },
+ "ca-west-1" : {
+ "description" : "Canada West (Calgary)"
+ },
+ "eu-central-1" : {
+ "description" : "Europe (Frankfurt)"
+ },
+ "eu-central-2" : {
+ "description" : "Europe (Zurich)"
+ },
+ "eu-north-1" : {
+ "description" : "Europe (Stockholm)"
+ },
+ "eu-south-1" : {
+ "description" : "Europe (Milan)"
+ },
+ "eu-south-2" : {
+ "description" : "Europe (Spain)"
+ },
+ "eu-west-1" : {
+ "description" : "Europe (Ireland)"
+ },
+ "eu-west-2" : {
+ "description" : "Europe (London)"
+ },
+ "eu-west-3" : {
+ "description" : "Europe (Paris)"
+ },
+ "il-central-1" : {
+ "description" : "Israel (Tel Aviv)"
+ },
+ "me-central-1" : {
+ "description" : "Middle East (UAE)"
+ },
+ "me-south-1" : {
+ "description" : "Middle East (Bahrain)"
+ },
+ "mx-central-1" : {
+ "description" : "Mexico (Central)"
+ },
+ "sa-east-1" : {
+ "description" : "South America (Sao Paulo)"
+ },
+ "us-east-1" : {
+ "description" : "US East (N. Virginia)"
+ },
+ "us-east-2" : {
+ "description" : "US East (Ohio)"
+ },
+ "us-west-1" : {
+ "description" : "US West (N. California)"
+ },
+ "us-west-2" : {
+ "description" : "US West (Oregon)"
+ }
+ }
+ }, {
+ "id" : "aws-cn",
+ "outputs" : {
+ "dnsSuffix" : "amazonaws.com.cn",
+ "dualStackDnsSuffix" : "api.amazonwebservices.com.cn",
+ "implicitGlobalRegion" : "cn-northwest-1",
+ "name" : "aws-cn",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^cn\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-cn-global" : {
+ "description" : "aws-cn global region"
+ },
+ "cn-north-1" : {
+ "description" : "China (Beijing)"
+ },
+ "cn-northwest-1" : {
+ "description" : "China (Ningxia)"
+ }
+ }
+ }, {
+ "id" : "aws-eusc",
+ "outputs" : {
+ "dnsSuffix" : "amazonaws.eu",
+ "dualStackDnsSuffix" : "api.amazonwebservices.eu",
+ "implicitGlobalRegion" : "eusc-de-east-1",
+ "name" : "aws-eusc",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^eusc\\-(de)\\-\\w+\\-\\d+$",
+ "regions" : {
+ "eusc-de-east-1" : {
+ "description" : "EU (Germany)"
+ }
+ }
+ }, {
+ "id" : "aws-iso",
+ "outputs" : {
+ "dnsSuffix" : "c2s.ic.gov",
+ "dualStackDnsSuffix" : "api.aws.ic.gov",
+ "implicitGlobalRegion" : "us-iso-east-1",
+ "name" : "aws-iso",
+ "supportsDualStack" : false,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^us\\-iso\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-iso-global" : {
+ "description" : "aws-iso global region"
+ },
+ "us-iso-east-1" : {
+ "description" : "US ISO East"
+ },
+ "us-iso-west-1" : {
+ "description" : "US ISO WEST"
+ }
+ }
+ }, {
+ "id" : "aws-iso-b",
+ "outputs" : {
+ "dnsSuffix" : "sc2s.sgov.gov",
+ "dualStackDnsSuffix" : "api.aws.scloud",
+ "implicitGlobalRegion" : "us-isob-east-1",
+ "name" : "aws-iso-b",
+ "supportsDualStack" : false,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^us\\-isob\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-iso-b-global" : {
+ "description" : "aws-iso-b global region"
+ },
+ "us-isob-east-1" : {
+ "description" : "US ISOB East (Ohio)"
+ }
+ }
+ }, {
+ "id" : "aws-iso-e",
+ "outputs" : {
+ "dnsSuffix" : "cloud.adc-e.uk",
+ "dualStackDnsSuffix" : "api.cloud-aws.adc-e.uk",
+ "implicitGlobalRegion" : "eu-isoe-west-1",
+ "name" : "aws-iso-e",
+ "supportsDualStack" : false,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^eu\\-isoe\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-iso-e-global" : {
+ "description" : "aws-iso-e global region"
+ },
+ "eu-isoe-west-1" : {
+ "description" : "EU ISOE West"
+ }
+ }
+ }, {
+ "id" : "aws-iso-f",
+ "outputs" : {
+ "dnsSuffix" : "csp.hci.ic.gov",
+ "dualStackDnsSuffix" : "api.aws.hci.ic.gov",
+ "implicitGlobalRegion" : "us-isof-south-1",
+ "name" : "aws-iso-f",
+ "supportsDualStack" : false,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^us\\-isof\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-iso-f-global" : {
+ "description" : "aws-iso-f global region"
+ },
+ "us-isof-east-1" : {
+ "description" : "US ISOF EAST"
+ },
+ "us-isof-south-1" : {
+ "description" : "US ISOF SOUTH"
+ }
+ }
+ }, {
+ "id" : "aws-us-gov",
+ "outputs" : {
+ "dnsSuffix" : "amazonaws.com",
+ "dualStackDnsSuffix" : "api.aws",
+ "implicitGlobalRegion" : "us-gov-west-1",
+ "name" : "aws-us-gov",
+ "supportsDualStack" : true,
+ "supportsFIPS" : true
+ },
+ "regionRegex" : "^us\\-gov\\-\\w+\\-\\d+$",
+ "regions" : {
+ "aws-us-gov-global" : {
+ "description" : "aws-us-gov global region"
+ },
+ "us-gov-east-1" : {
+ "description" : "AWS GovCloud (US-East)"
+ },
+ "us-gov-west-1" : {
+ "description" : "AWS GovCloud (US-West)"
+ }
+ }
+ } ],
+ "version" : "1.1"
+}
\ No newline at end of file
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..67950ca36
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/endpoints.go
@@ -0,0 +1,201 @@
+package endpoints
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+)
+
+const (
+ defaultProtocol = "https"
+ defaultSigner = "v4"
+)
+
+var (
+ protocolPriority = []string{"https", "http"}
+ signerPriority = []string{"v4"}
+)
+
+// Options provide configuration needed to direct how endpoints are resolved.
+type Options struct {
+ // Disable usage of HTTPS (TLS / SSL)
+ DisableHTTPS bool
+}
+
+// Partitions is a slice of partition
+type Partitions []Partition
+
+// ResolveEndpoint resolves a service endpoint for the given region and options.
+func (ps Partitions) ResolveEndpoint(region string, opts Options) (aws.Endpoint, error) {
+ if len(ps) == 0 {
+ return aws.Endpoint{}, fmt.Errorf("no partitions found")
+ }
+
+ for i := 0; i < len(ps); i++ {
+ if !ps[i].canResolveEndpoint(region) {
+ continue
+ }
+
+ return ps[i].ResolveEndpoint(region, opts)
+ }
+
+ // fallback to first partition format to use when resolving the endpoint.
+ return ps[0].ResolveEndpoint(region, opts)
+}
+
+// Partition is an AWS partition description for a service and its' region endpoints.
+type Partition struct {
+ ID string
+ RegionRegex *regexp.Regexp
+ PartitionEndpoint string
+ IsRegionalized bool
+ Defaults Endpoint
+ Endpoints Endpoints
+}
+
+func (p Partition) canResolveEndpoint(region string) bool {
+ _, ok := p.Endpoints[region]
+ return ok || p.RegionRegex.MatchString(region)
+}
+
+// ResolveEndpoint resolves and service endpoint for the given region and options.
+func (p Partition) ResolveEndpoint(region string, options Options) (resolved aws.Endpoint, err error) {
+ if len(region) == 0 && len(p.PartitionEndpoint) != 0 {
+ region = p.PartitionEndpoint
+ }
+
+ e, _ := p.endpointForRegion(region)
+
+ return e.resolve(p.ID, region, p.Defaults, options), nil
+}
+
+func (p Partition) endpointForRegion(region string) (Endpoint, bool) {
+ if e, ok := p.Endpoints[region]; ok {
+ return e, true
+ }
+
+ if !p.IsRegionalized {
+ return p.Endpoints[p.PartitionEndpoint], region == p.PartitionEndpoint
+ }
+
+ // Unable to find any matching endpoint, return
+ // blank that will be used for generic endpoint creation.
+ return Endpoint{}, false
+}
+
+// Endpoints is a map of service config regions to endpoints
+type Endpoints map[string]Endpoint
+
+// CredentialScope is the credential scope of a region and service
+type CredentialScope struct {
+ Region string
+ Service string
+}
+
+// Endpoint is a service endpoint description
+type Endpoint struct {
+ // True if the endpoint cannot be resolved for this partition/region/service
+ Unresolveable aws.Ternary
+
+ Hostname string
+ Protocols []string
+
+ CredentialScope CredentialScope
+
+ SignatureVersions []string `json:"signatureVersions"`
+}
+
+func (e Endpoint) resolve(partition, region string, def Endpoint, options Options) aws.Endpoint {
+ var merged Endpoint
+ merged.mergeIn(def)
+ merged.mergeIn(e)
+ e = merged
+
+ var u string
+ if e.Unresolveable != aws.TrueTernary {
+ // Only attempt to resolve the endpoint if it can be resolved.
+ hostname := strings.Replace(e.Hostname, "{region}", region, 1)
+
+ scheme := getEndpointScheme(e.Protocols, options.DisableHTTPS)
+ u = scheme + "://" + hostname
+ }
+
+ signingRegion := e.CredentialScope.Region
+ if len(signingRegion) == 0 {
+ signingRegion = region
+ }
+ signingName := e.CredentialScope.Service
+
+ return aws.Endpoint{
+ URL: u,
+ PartitionID: partition,
+ SigningRegion: signingRegion,
+ SigningName: signingName,
+ SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
+ }
+}
+
+func (e *Endpoint) mergeIn(other Endpoint) {
+ if other.Unresolveable != aws.UnknownTernary {
+ e.Unresolveable = other.Unresolveable
+ }
+ if len(other.Hostname) > 0 {
+ e.Hostname = other.Hostname
+ }
+ if len(other.Protocols) > 0 {
+ e.Protocols = other.Protocols
+ }
+ if len(other.CredentialScope.Region) > 0 {
+ e.CredentialScope.Region = other.CredentialScope.Region
+ }
+ if len(other.CredentialScope.Service) > 0 {
+ e.CredentialScope.Service = other.CredentialScope.Service
+ }
+ if len(other.SignatureVersions) > 0 {
+ e.SignatureVersions = other.SignatureVersions
+ }
+}
+
+func getEndpointScheme(protocols []string, disableHTTPS bool) string {
+ if disableHTTPS {
+ return "http"
+ }
+
+ return getByPriority(protocols, protocolPriority, defaultProtocol)
+}
+
+func getByPriority(s []string, p []string, def string) string {
+ if len(s) == 0 {
+ return def
+ }
+
+ for i := 0; i < len(p); i++ {
+ for j := 0; j < len(s); j++ {
+ if s[j] == p[i] {
+ return s[j]
+ }
+ }
+ }
+
+ return s[0]
+}
+
+// MapFIPSRegion extracts the intrinsic AWS region from one that may have an
+// embedded FIPS microformat.
+func MapFIPSRegion(region string) string {
+ const fipsInfix = "-fips-"
+ const fipsPrefix = "fips-"
+ const fipsSuffix = "-fips"
+
+ if strings.Contains(region, fipsInfix) ||
+ strings.Contains(region, fipsPrefix) ||
+ strings.Contains(region, fipsSuffix) {
+ region = strings.ReplaceAll(region, fipsInfix, "-")
+ region = strings.ReplaceAll(region, fipsPrefix, "")
+ region = strings.ReplaceAll(region, fipsSuffix, "")
+ }
+
+ return region
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
index f715bf166..7ccb39033 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/CHANGELOG.md
@@ -1,3 +1,318 @@
+# v2.7.6 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.5 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.4 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.3 (2025-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.2 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.7.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.37 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.36 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.35 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.34 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.33 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.32 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.31 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.30 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.29 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v2.6.28 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.27 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.26 (2024-12-19)
+
+* **Bug Fix**: Fix improper use of printf-style functions.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.25 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.24 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.23 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.22 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.21 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.20 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.19 (2024-10-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.18 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.17 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.16 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.15 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.14 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.13 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.12 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.11 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.10 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.9 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.8 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.7 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.6 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.5 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.4 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.3 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.6.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.5.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.37 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.36 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.29 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.28 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.27 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.26 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.25 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.19 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v2.4.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v2.4.17 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
index 4c105f81e..2d36cac95 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/endpoints/v2/go_module_metadata.go
@@ -3,4 +3,4 @@
package endpoints
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "2.4.17"
+const goModuleVersion = "2.7.6"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
index a2daa01f6..f729db535 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/CHANGELOG.md
@@ -1,3 +1,207 @@
+# v1.8.3 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+
+# v1.8.2 (2025-01-24)
+
+* **Bug Fix**: Refactor filepath.Walk to filepath.WalkDir
+
+# v1.8.1 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+
+# v1.8.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.7.3 (2024-01-22)
+
+* **Bug Fix**: Remove invalid escaping of shared config values. All values in the shared config file will now be interpreted literally, save for fully-quoted strings which are unwrapped for legacy reasons.
+
+# v1.7.2 (2023-12-08)
+
+* **Bug Fix**: Correct loading of [services *] sections into shared config.
+
+# v1.7.1 (2023-11-16)
+
+* **Bug Fix**: Fix recognition of trailing comments in shared config properties. # or ; separators that aren't preceded by whitespace at the end of a property value should be considered part of it.
+
+# v1.7.0 (2023-11-13)
+
+* **Feature**: Replace the legacy config parser with a modern, less-strict implementation. Parsing failures within a section will now simply ignore the invalid line rather than silently drop the entire section.
+
+# v1.6.0 (2023-11-09.2)
+
+* **Feature**: BREAKFIX: In order to support subproperty parsing, invalid property definitions must not be ignored
+
+# v1.5.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2023-11-07)
+
+* **Bug Fix**: Fix subproperty performance regression
+
+# v1.5.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.45 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.44 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.43 (2023-09-22)
+
+* **Bug Fix**: Fixed a bug where merging `max_attempts` or `duration_seconds` fields across shared config files with invalid values would silently default them to 0.
+* **Bug Fix**: Move type assertion of config values out of the parsing stage, which resolves an issue where the contents of a profile would silently be dropped with certain numeric formats.
+
+# v1.3.42 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.41 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.40 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.39 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.38 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.37 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.36 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.35 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.34 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.33 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.32 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.31 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.30 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.29 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.28 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.27 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.26 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.25 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.24 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.23 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.22 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.21 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.20 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.19 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.18 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.17 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.16 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.15 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.14 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.13 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.12 (2022-05-17)
+
+* **Bug Fix**: Removes the fuzz testing files from the module, as they are invalid and not used.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.11 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.10 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.9 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.3.8 (2022-03-23)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go
deleted file mode 100644
index e83a99886..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ast.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package ini
-
-// ASTKind represents different states in the parse table
-// and the type of AST that is being constructed
-type ASTKind int
-
-// ASTKind* is used in the parse table to transition between
-// the different states
-const (
- ASTKindNone = ASTKind(iota)
- ASTKindStart
- ASTKindExpr
- ASTKindEqualExpr
- ASTKindStatement
- ASTKindSkipStatement
- ASTKindExprStatement
- ASTKindSectionStatement
- ASTKindNestedSectionStatement
- ASTKindCompletedNestedSectionStatement
- ASTKindCommentStatement
- ASTKindCompletedSectionStatement
-)
-
-func (k ASTKind) String() string {
- switch k {
- case ASTKindNone:
- return "none"
- case ASTKindStart:
- return "start"
- case ASTKindExpr:
- return "expr"
- case ASTKindStatement:
- return "stmt"
- case ASTKindSectionStatement:
- return "section_stmt"
- case ASTKindExprStatement:
- return "expr_stmt"
- case ASTKindCommentStatement:
- return "comment"
- case ASTKindNestedSectionStatement:
- return "nested_section_stmt"
- case ASTKindCompletedSectionStatement:
- return "completed_stmt"
- case ASTKindSkipStatement:
- return "skip"
- default:
- return ""
- }
-}
-
-// AST interface allows us to determine what kind of node we
-// are on and casting may not need to be necessary.
-//
-// The root is always the first node in Children
-type AST struct {
- Kind ASTKind
- Root Token
- RootToken bool
- Children []AST
-}
-
-func newAST(kind ASTKind, root AST, children ...AST) AST {
- return AST{
- Kind: kind,
- Children: append([]AST{root}, children...),
- }
-}
-
-func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
- return AST{
- Kind: kind,
- Root: root,
- RootToken: true,
- Children: children,
- }
-}
-
-// AppendChild will append to the list of children an AST has.
-func (a *AST) AppendChild(child AST) {
- a.Children = append(a.Children, child)
-}
-
-// GetRoot will return the root AST which can be the first entry
-// in the children list or a token.
-func (a *AST) GetRoot() AST {
- if a.RootToken {
- return *a
- }
-
- if len(a.Children) == 0 {
- return AST{}
- }
-
- return a.Children[0]
-}
-
-// GetChildren will return the current AST's list of children
-func (a *AST) GetChildren() []AST {
- if len(a.Children) == 0 {
- return []AST{}
- }
-
- if a.RootToken {
- return a.Children
- }
-
- return a.Children[1:]
-}
-
-// SetChildren will set and override all children of the AST.
-func (a *AST) SetChildren(children []AST) {
- if a.RootToken {
- a.Children = children
- } else {
- a.Children = append(a.Children[:1], children...)
- }
-}
-
-// Start is used to indicate the starting state of the parse table.
-var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go
deleted file mode 100644
index 0895d53cb..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comma_token.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package ini
-
-var commaRunes = []rune(",")
-
-func isComma(b rune) bool {
- return b == ','
-}
-
-func newCommaToken() Token {
- return newToken(TokenComma, commaRunes, NoneType)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go
deleted file mode 100644
index 0b76999ba..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/comment_token.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package ini
-
-// isComment will return whether or not the next byte(s) is a
-// comment.
-func isComment(b []rune) bool {
- if len(b) == 0 {
- return false
- }
-
- switch b[0] {
- case ';':
- return true
- case '#':
- return true
- }
-
- return false
-}
-
-// newCommentToken will create a comment token and
-// return how many bytes were read.
-func newCommentToken(b []rune) (Token, int, error) {
- i := 0
- for ; i < len(b); i++ {
- if b[i] == '\n' {
- break
- }
-
- if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
- break
- }
- }
-
- return newToken(TokenComment, b[:i], NoneType), i, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go
deleted file mode 100644
index f5ebe52e1..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/dependency.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package ini
-
-import (
- // internal/ini module was carved out of this module
- _ "github.com/aws/aws-sdk-go-v2"
-)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go
deleted file mode 100644
index 1e55bbd07..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/doc.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Package ini is an LL(1) parser for configuration files.
-//
-// Example:
-// sections, err := ini.OpenFile("/path/to/file")
-// if err != nil {
-// panic(err)
-// }
-//
-// profile := "foo"
-// section, ok := sections.GetSection(profile)
-// if !ok {
-// fmt.Printf("section %q could not be found", profile)
-// }
-//
-// Below is the BNF that describes this parser
-// Grammar:
-// stmt -> section | stmt'
-// stmt' -> epsilon | expr
-// expr -> value (stmt)* | equal_expr (stmt)*
-// equal_expr -> value ( ':' | '=' ) equal_expr'
-// equal_expr' -> number | string | quoted_string
-// quoted_string -> " quoted_string'
-// quoted_string' -> string quoted_string_end
-// quoted_string_end -> "
-//
-// section -> [ section'
-// section' -> section_value section_close
-// section_value -> number | string_subset | boolean | quoted_string_subset
-// quoted_string_subset -> " quoted_string_subset'
-// quoted_string_subset' -> string_subset quoted_string_end
-// quoted_string_subset -> "
-// section_close -> ]
-//
-// value -> number | string_subset | boolean
-// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ?
-// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ?
-//
-// SkipState will skip (NL WS)+
-//
-// comment -> # comment' | ; comment'
-// comment' -> epsilon | value
-package ini
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go
deleted file mode 100644
index 04345a54c..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/empty_token.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package ini
-
-// emptyToken is used to satisfy the Token interface
-var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go
deleted file mode 100644
index 91ba2a59d..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/expression.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package ini
-
-// newExpression will return an expression AST.
-// Expr represents an expression
-//
-// grammar:
-// expr -> string | number
-func newExpression(tok Token) AST {
- return newASTWithRootToken(ASTKindExpr, tok)
-}
-
-func newEqualExpr(left AST, tok Token) AST {
- return newASTWithRootToken(ASTKindEqualExpr, tok, left)
-}
-
-// EqualExprKey will return a LHS value in the equal expr
-func EqualExprKey(ast AST) string {
- children := ast.GetChildren()
- if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
- return ""
- }
-
- return string(children[0].Root.Raw())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go
deleted file mode 100644
index 6e545b63b..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/fuzz.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build gofuzz
-// +build gofuzz
-
-package ini
-
-import (
- "bytes"
-)
-
-func Fuzz(data []byte) int {
- b := bytes.NewReader(data)
-
- if _, err := Parse(b); err != nil {
- return 0
- }
-
- return 1
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
index 001dad4e8..00df0e3cb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/go_module_metadata.go
@@ -3,4 +3,4 @@
package ini
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.3.8"
+const goModuleVersion = "1.8.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
index f74062313..cefcce91e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini.go
@@ -1,13 +1,26 @@
+// Package ini implements parsing of the AWS shared config file.
+//
+// Example:
+// sections, err := ini.OpenFile("/path/to/file")
+// if err != nil {
+// panic(err)
+// }
+//
+// profile := "foo"
+// section, ok := sections.GetSection(profile)
+// if !ok {
+// fmt.Printf("section %q could not be found", profile)
+// }
package ini
import (
"fmt"
"io"
"os"
+ "strings"
)
-// OpenFile takes a path to a given file, and will open and parse
-// that file.
+// OpenFile parses shared config from the given file path.
func OpenFile(path string) (sections Sections, err error) {
f, oerr := os.Open(path)
if oerr != nil {
@@ -26,33 +39,18 @@ func OpenFile(path string) (sections Sections, err error) {
return Parse(f, path)
}
-// Parse will parse the given file using the shared config
-// visitor.
-func Parse(f io.Reader, path string) (Sections, error) {
- tree, err := ParseAST(f)
+// Parse parses shared config from the given reader.
+func Parse(r io.Reader, path string) (Sections, error) {
+ contents, err := io.ReadAll(r)
if err != nil {
- return Sections{}, err
+ return Sections{}, fmt.Errorf("read all: %v", err)
}
- v := NewDefaultVisitor(path)
- if err = Walk(tree, v); err != nil {
- return Sections{}, err
- }
-
- return v.Sections, nil
-}
-
-// ParseBytes will parse the given bytes and return the parsed sections.
-func ParseBytes(b []byte) (Sections, error) {
- tree, err := ParseASTBytes(b)
+ lines := strings.Split(string(contents), "\n")
+ tokens, err := tokenize(lines)
if err != nil {
- return Sections{}, err
- }
-
- v := NewDefaultVisitor("")
- if err = Walk(tree, v); err != nil {
- return Sections{}, err
+ return Sections{}, fmt.Errorf("tokenize: %v", err)
}
- return v.Sections, nil
+ return parse(tokens, path), nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go
deleted file mode 100644
index abf1fb036..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_lexer.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package ini
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-)
-
-// TokenType represents the various different tokens types
-type TokenType int
-
-func (t TokenType) String() string {
- switch t {
- case TokenNone:
- return "none"
- case TokenLit:
- return "literal"
- case TokenSep:
- return "sep"
- case TokenOp:
- return "op"
- case TokenWS:
- return "ws"
- case TokenNL:
- return "newline"
- case TokenComment:
- return "comment"
- case TokenComma:
- return "comma"
- default:
- return ""
- }
-}
-
-// TokenType enums
-const (
- TokenNone = TokenType(iota)
- TokenLit
- TokenSep
- TokenComma
- TokenOp
- TokenWS
- TokenNL
- TokenComment
-)
-
-type iniLexer struct{}
-
-// Tokenize will return a list of tokens during lexical analysis of the
-// io.Reader.
-func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, &UnableToReadFile{Err: err}
- }
-
- return l.tokenize(b)
-}
-
-func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
- runes := bytes.Runes(b)
- var err error
- n := 0
- tokenAmount := countTokens(runes)
- tokens := make([]Token, tokenAmount)
- count := 0
-
- for len(runes) > 0 && count < tokenAmount {
- switch {
- case isWhitespace(runes[0]):
- tokens[count], n, err = newWSToken(runes)
- case isComma(runes[0]):
- tokens[count], n = newCommaToken(), 1
- case isComment(runes):
- tokens[count], n, err = newCommentToken(runes)
- case isNewline(runes):
- tokens[count], n, err = newNewlineToken(runes)
- case isSep(runes):
- tokens[count], n, err = newSepToken(runes)
- case isOp(runes):
- tokens[count], n, err = newOpToken(runes)
- default:
- tokens[count], n, err = newLitToken(runes)
- }
-
- if err != nil {
- return nil, err
- }
-
- count++
-
- runes = runes[n:]
- }
-
- return tokens[:count], nil
-}
-
-func countTokens(runes []rune) int {
- count, n := 0, 0
- var err error
-
- for len(runes) > 0 {
- switch {
- case isWhitespace(runes[0]):
- _, n, err = newWSToken(runes)
- case isComma(runes[0]):
- _, n = newCommaToken(), 1
- case isComment(runes):
- _, n, err = newCommentToken(runes)
- case isNewline(runes):
- _, n, err = newNewlineToken(runes)
- case isSep(runes):
- _, n, err = newSepToken(runes)
- case isOp(runes):
- _, n, err = newOpToken(runes)
- default:
- _, n, err = newLitToken(runes)
- }
-
- if err != nil {
- return 0
- }
-
- count++
- runes = runes[n:]
- }
-
- return count + 1
-}
-
-// Token indicates a metadata about a given value.
-type Token struct {
- t TokenType
- ValueType ValueType
- base int
- raw []rune
-}
-
-var emptyValue = Value{}
-
-func newToken(t TokenType, raw []rune, v ValueType) Token {
- return Token{
- t: t,
- raw: raw,
- ValueType: v,
- }
-}
-
-// Raw return the raw runes that were consumed
-func (tok Token) Raw() []rune {
- return tok.raw
-}
-
-// Type returns the token type
-func (tok Token) Type() TokenType {
- return tok.t
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go
deleted file mode 100644
index 12fc7d5aa..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ini_parser.go
+++ /dev/null
@@ -1,349 +0,0 @@
-package ini
-
-import (
- "fmt"
- "io"
-)
-
-// ParseState represents the current state of the parser.
-type ParseState uint
-
-// State enums for the parse table
-const (
- InvalidState ParseState = iota
- // stmt -> value stmt'
- StatementState
- // stmt' -> MarkComplete | op stmt
- StatementPrimeState
- // value -> number | string | boolean | quoted_string
- ValueState
- // section -> [ section'
- OpenScopeState
- // section' -> value section_close
- SectionState
- // section_close -> ]
- CloseScopeState
- // SkipState will skip (NL WS)+
- SkipState
- // SkipTokenState will skip any token and push the previous
- // state onto the stack.
- SkipTokenState
- // comment -> # comment' | ; comment'
- // comment' -> MarkComplete | value
- CommentState
- // MarkComplete state will complete statements and move that
- // to the completed AST list
- MarkCompleteState
- // TerminalState signifies that the tokens have been fully parsed
- TerminalState
-)
-
-// parseTable is a state machine to dictate the grammar above.
-var parseTable = map[ASTKind]map[TokenType]ParseState{
- ASTKindStart: {
- TokenLit: StatementState,
- TokenSep: OpenScopeState,
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenComment: CommentState,
- TokenNone: TerminalState,
- },
- ASTKindCommentStatement: {
- TokenLit: StatementState,
- TokenSep: OpenScopeState,
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenComment: CommentState,
- TokenNone: MarkCompleteState,
- },
- ASTKindExpr: {
- TokenOp: StatementPrimeState,
- TokenLit: ValueState,
- TokenSep: OpenScopeState,
- TokenWS: ValueState,
- TokenNL: SkipState,
- TokenComment: CommentState,
- TokenNone: MarkCompleteState,
- },
- ASTKindEqualExpr: {
- TokenLit: ValueState,
- TokenSep: ValueState,
- TokenOp: ValueState,
- TokenWS: SkipTokenState,
- TokenNL: SkipState,
- },
- ASTKindStatement: {
- TokenLit: SectionState,
- TokenSep: CloseScopeState,
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenComment: CommentState,
- TokenNone: MarkCompleteState,
- },
- ASTKindExprStatement: {
- TokenLit: ValueState,
- TokenSep: ValueState,
- TokenOp: ValueState,
- TokenWS: ValueState,
- TokenNL: MarkCompleteState,
- TokenComment: CommentState,
- TokenNone: TerminalState,
- TokenComma: SkipState,
- },
- ASTKindSectionStatement: {
- TokenLit: SectionState,
- TokenOp: SectionState,
- TokenSep: CloseScopeState,
- TokenWS: SectionState,
- TokenNL: SkipTokenState,
- },
- ASTKindCompletedSectionStatement: {
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenLit: StatementState,
- TokenSep: OpenScopeState,
- TokenComment: CommentState,
- TokenNone: MarkCompleteState,
- },
- ASTKindSkipStatement: {
- TokenLit: StatementState,
- TokenSep: OpenScopeState,
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenComment: CommentState,
- TokenNone: TerminalState,
- },
-}
-
-// ParseAST will parse input from an io.Reader using
-// an LL(1) parser.
-func ParseAST(r io.Reader) ([]AST, error) {
- lexer := iniLexer{}
- tokens, err := lexer.Tokenize(r)
- if err != nil {
- return []AST{}, err
- }
-
- return parse(tokens)
-}
-
-// ParseASTBytes will parse input from a byte slice using
-// an LL(1) parser.
-func ParseASTBytes(b []byte) ([]AST, error) {
- lexer := iniLexer{}
- tokens, err := lexer.tokenize(b)
- if err != nil {
- return []AST{}, err
- }
-
- return parse(tokens)
-}
-
-func parse(tokens []Token) ([]AST, error) {
- start := Start
- stack := newParseStack(3, len(tokens))
-
- stack.Push(start)
- s := newSkipper()
-
-loop:
- for stack.Len() > 0 {
- k := stack.Pop()
-
- var tok Token
- if len(tokens) == 0 {
- // this occurs when all the tokens have been processed
- // but reduction of what's left on the stack needs to
- // occur.
- tok = emptyToken
- } else {
- tok = tokens[0]
- }
-
- step := parseTable[k.Kind][tok.Type()]
- if s.ShouldSkip(tok) {
- // being in a skip state with no tokens will break out of
- // the parse loop since there is nothing left to process.
- if len(tokens) == 0 {
- break loop
- }
- // if should skip is true, we skip the tokens until should skip is set to false.
- step = SkipTokenState
- }
-
- switch step {
- case TerminalState:
- // Finished parsing. Push what should be the last
- // statement to the stack. If there is anything left
- // on the stack, an error in parsing has occurred.
- if k.Kind != ASTKindStart {
- stack.MarkComplete(k)
- }
- break loop
- case SkipTokenState:
- // When skipping a token, the previous state was popped off the stack.
- // To maintain the correct state, the previous state will be pushed
- // onto the stack.
- stack.Push(k)
- case StatementState:
- if k.Kind != ASTKindStart {
- stack.MarkComplete(k)
- }
- expr := newExpression(tok)
- stack.Push(expr)
- case StatementPrimeState:
- if tok.Type() != TokenOp {
- stack.MarkComplete(k)
- continue
- }
-
- if k.Kind != ASTKindExpr {
- return nil, NewParseError(
- fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
- )
- }
-
- k = trimSpaces(k)
- expr := newEqualExpr(k, tok)
- stack.Push(expr)
- case ValueState:
- // ValueState requires the previous state to either be an equal expression
- // or an expression statement.
- switch k.Kind {
- case ASTKindEqualExpr:
- // assigning a value to some key
- k.AppendChild(newExpression(tok))
- stack.Push(newExprStatement(k))
- case ASTKindExpr:
- k.Root.raw = append(k.Root.raw, tok.Raw()...)
- stack.Push(k)
- case ASTKindExprStatement:
- root := k.GetRoot()
- children := root.GetChildren()
- if len(children) == 0 {
- return nil, NewParseError(
- fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
- )
- }
-
- rhs := children[len(children)-1]
-
- if rhs.Root.ValueType != QuotedStringType {
- rhs.Root.ValueType = StringType
- rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
-
- }
-
- children[len(children)-1] = rhs
- root.SetChildren(children)
-
- stack.Push(k)
- }
- case OpenScopeState:
- if !runeCompare(tok.Raw(), openBrace) {
- return nil, NewParseError("expected '['")
- }
- // If OpenScopeState is not at the start, we must mark the previous ast as complete
- //
- // for example: if previous ast was a skip statement;
- // we should mark it as complete before we create a new statement
- if k.Kind != ASTKindStart {
- stack.MarkComplete(k)
- }
-
- stmt := newStatement()
- stack.Push(stmt)
- case CloseScopeState:
- if !runeCompare(tok.Raw(), closeBrace) {
- return nil, NewParseError("expected ']'")
- }
-
- k = trimSpaces(k)
- stack.Push(newCompletedSectionStatement(k))
- case SectionState:
- var stmt AST
-
- switch k.Kind {
- case ASTKindStatement:
- // If there are multiple literals inside of a scope declaration,
- // then the current token's raw value will be appended to the Name.
- //
- // This handles cases like [ profile default ]
- //
- // k will represent a SectionStatement with the children representing
- // the label of the section
- stmt = newSectionStatement(tok)
- case ASTKindSectionStatement:
- k.Root.raw = append(k.Root.raw, tok.Raw()...)
- stmt = k
- default:
- return nil, NewParseError(
- fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
- )
- }
-
- stack.Push(stmt)
- case MarkCompleteState:
- if k.Kind != ASTKindStart {
- stack.MarkComplete(k)
- }
-
- if stack.Len() == 0 {
- stack.Push(start)
- }
- case SkipState:
- stack.Push(newSkipStatement(k))
- s.Skip()
- case CommentState:
- if k.Kind == ASTKindStart {
- stack.Push(k)
- } else {
- stack.MarkComplete(k)
- }
-
- stmt := newCommentStatement(tok)
- stack.Push(stmt)
- default:
- return nil, NewParseError(
- fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
- k.Kind, tok.Type()))
- }
-
- if len(tokens) > 0 {
- tokens = tokens[1:]
- }
- }
-
- // this occurs when a statement has not been completed
- if stack.top > 1 {
- return nil, NewParseError(fmt.Sprintf("incomplete ini expression"))
- }
-
- // returns a sublist which exludes the start symbol
- return stack.List(), nil
-}
-
-// trimSpaces will trim spaces on the left and right hand side of
-// the literal.
-func trimSpaces(k AST) AST {
- // trim left hand side of spaces
- for i := 0; i < len(k.Root.raw); i++ {
- if !isWhitespace(k.Root.raw[i]) {
- break
- }
-
- k.Root.raw = k.Root.raw[1:]
- i--
- }
-
- // trim right hand side of spaces
- for i := len(k.Root.raw) - 1; i >= 0; i-- {
- if !isWhitespace(k.Root.raw[i]) {
- break
- }
-
- k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
- }
-
- return k
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go
deleted file mode 100644
index eca42d1b2..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/literal_tokens.go
+++ /dev/null
@@ -1,336 +0,0 @@
-package ini
-
-import (
- "fmt"
- "strconv"
- "strings"
- "unicode"
-)
-
-var (
- runesTrue = []rune("true")
- runesFalse = []rune("false")
-)
-
-var literalValues = [][]rune{
- runesTrue,
- runesFalse,
-}
-
-func isBoolValue(b []rune) bool {
- for _, lv := range literalValues {
- if isCaselessLitValue(lv, b) {
- return true
- }
- }
- return false
-}
-
-func isLitValue(want, have []rune) bool {
- if len(have) < len(want) {
- return false
- }
-
- for i := 0; i < len(want); i++ {
- if want[i] != have[i] {
- return false
- }
- }
-
- return true
-}
-
-// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
-func isCaselessLitValue(want, have []rune) bool {
- if len(have) < len(want) {
- return false
- }
-
- for i := 0; i < len(want); i++ {
- if want[i] != unicode.ToLower(have[i]) {
- return false
- }
- }
-
- return true
-}
-
-// isNumberValue will return whether not the leading characters in
-// a byte slice is a number. A number is delimited by whitespace or
-// the newline token.
-//
-// A number is defined to be in a binary, octal, decimal (int | float), hex format,
-// or in scientific notation.
-func isNumberValue(b []rune) bool {
- negativeIndex := 0
- helper := numberHelper{}
- needDigit := false
-
- for i := 0; i < len(b); i++ {
- negativeIndex++
-
- switch b[i] {
- case '-':
- if helper.IsNegative() || negativeIndex != 1 {
- return false
- }
- helper.Determine(b[i])
- needDigit = true
- continue
- case 'e', 'E':
- if err := helper.Determine(b[i]); err != nil {
- return false
- }
- negativeIndex = 0
- needDigit = true
- continue
- case 'b':
- if helper.numberFormat == hex {
- break
- }
- fallthrough
- case 'o', 'x':
- needDigit = true
- if i == 0 {
- return false
- }
-
- fallthrough
- case '.':
- if err := helper.Determine(b[i]); err != nil {
- return false
- }
- needDigit = true
- continue
- }
-
- if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
- return !needDigit
- }
-
- if !helper.CorrectByte(b[i]) {
- return false
- }
- needDigit = false
- }
-
- return !needDigit
-}
-
-func isValid(b []rune) (bool, int, error) {
- if len(b) == 0 {
- // TODO: should probably return an error
- return false, 0, nil
- }
-
- return isValidRune(b[0]), 1, nil
-}
-
-func isValidRune(r rune) bool {
- return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
-}
-
-// ValueType is an enum that will signify what type
-// the Value is
-type ValueType int
-
-func (v ValueType) String() string {
- switch v {
- case NoneType:
- return "NONE"
- case DecimalType:
- return "FLOAT"
- case IntegerType:
- return "INT"
- case StringType:
- return "STRING"
- case BoolType:
- return "BOOL"
- }
-
- return ""
-}
-
-// ValueType enums
-const (
- NoneType = ValueType(iota)
- DecimalType
- IntegerType
- StringType
- QuotedStringType
- BoolType
-)
-
-// Value is a union container
-type Value struct {
- Type ValueType
- raw []rune
-
- integer int64
- decimal float64
- boolean bool
- str string
-}
-
-func newValue(t ValueType, base int, raw []rune) (Value, error) {
- v := Value{
- Type: t,
- raw: raw,
- }
- var err error
-
- switch t {
- case DecimalType:
- v.decimal, err = strconv.ParseFloat(string(raw), 64)
- case IntegerType:
- if base != 10 {
- raw = raw[2:]
- }
-
- v.integer, err = strconv.ParseInt(string(raw), base, 64)
- case StringType:
- v.str = string(raw)
- case QuotedStringType:
- v.str = string(raw[1 : len(raw)-1])
- case BoolType:
- v.boolean = isCaselessLitValue(runesTrue, v.raw)
- }
-
- // issue 2253
- //
- // if the value trying to be parsed is too large, then we will use
- // the 'StringType' and raw value instead.
- if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
- v.Type = StringType
- v.str = string(raw)
- err = nil
- }
-
- return v, err
-}
-
-// NewStringValue returns a Value type generated using a string input.
-func NewStringValue(str string) (Value, error) {
- return newValue(StringType, 10, []rune(str))
-}
-
-// NewIntValue returns a Value type generated using an int64 input.
-func NewIntValue(i int64) (Value, error) {
- v := strconv.FormatInt(i, 10)
- return newValue(IntegerType, 10, []rune(v))
-}
-
-func (v Value) String() string {
- switch v.Type {
- case DecimalType:
- return fmt.Sprintf("decimal: %f", v.decimal)
- case IntegerType:
- return fmt.Sprintf("integer: %d", v.integer)
- case StringType:
- return fmt.Sprintf("string: %s", string(v.raw))
- case QuotedStringType:
- return fmt.Sprintf("quoted string: %s", string(v.raw))
- case BoolType:
- return fmt.Sprintf("bool: %t", v.boolean)
- default:
- return "union not set"
- }
-}
-
-func newLitToken(b []rune) (Token, int, error) {
- n := 0
- var err error
-
- token := Token{}
- if b[0] == '"' {
- n, err = getStringValue(b)
- if err != nil {
- return token, n, err
- }
-
- token = newToken(TokenLit, b[:n], QuotedStringType)
- } else if isNumberValue(b) {
- var base int
- base, n, err = getNumericalValue(b)
- if err != nil {
- return token, 0, err
- }
-
- value := b[:n]
- vType := IntegerType
- if contains(value, '.') || hasExponent(value) {
- vType = DecimalType
- }
- token = newToken(TokenLit, value, vType)
- token.base = base
- } else if isBoolValue(b) {
- n, err = getBoolValue(b)
-
- token = newToken(TokenLit, b[:n], BoolType)
- } else {
- n, err = getValue(b)
- token = newToken(TokenLit, b[:n], StringType)
- }
-
- return token, n, err
-}
-
-// IntValue returns an integer value
-func (v Value) IntValue() int64 {
- return v.integer
-}
-
-// FloatValue returns a float value
-func (v Value) FloatValue() float64 {
- return v.decimal
-}
-
-// BoolValue returns a bool value
-func (v Value) BoolValue() bool {
- return v.boolean
-}
-
-func isTrimmable(r rune) bool {
- switch r {
- case '\n', ' ':
- return true
- }
- return false
-}
-
-// StringValue returns the string value
-func (v Value) StringValue() string {
- switch v.Type {
- case StringType:
- return strings.TrimFunc(string(v.raw), isTrimmable)
- case QuotedStringType:
- // preserve all characters in the quotes
- return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
- default:
- return strings.TrimFunc(string(v.raw), isTrimmable)
- }
-}
-
-func contains(runes []rune, c rune) bool {
- for i := 0; i < len(runes); i++ {
- if runes[i] == c {
- return true
- }
- }
-
- return false
-}
-
-func runeCompare(v1 []rune, v2 []rune) bool {
- if len(v1) != len(v2) {
- return false
- }
-
- for i := 0; i < len(v1); i++ {
- if v1[i] != v2[i] {
- return false
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go
deleted file mode 100644
index e52ac399f..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/newline_token.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package ini
-
-func isNewline(b []rune) bool {
- if len(b) == 0 {
- return false
- }
-
- if b[0] == '\n' {
- return true
- }
-
- if len(b) < 2 {
- return false
- }
-
- return b[0] == '\r' && b[1] == '\n'
-}
-
-func newNewlineToken(b []rune) (Token, int, error) {
- i := 1
- if b[0] == '\r' && isNewline(b[1:]) {
- i++
- }
-
- if !isNewline([]rune(b[:i])) {
- return emptyToken, 0, NewParseError("invalid new line token")
- }
-
- return newToken(TokenNL, b[:i], NoneType), i, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go
deleted file mode 100644
index a45c0bc56..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/number_helper.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package ini
-
-import (
- "bytes"
- "fmt"
- "strconv"
-)
-
-const (
- none = numberFormat(iota)
- binary
- octal
- decimal
- hex
- exponent
-)
-
-type numberFormat int
-
-// numberHelper is used to dictate what format a number is in
-// and what to do for negative values. Since -1e-4 is a valid
-// number, we cannot just simply check for duplicate negatives.
-type numberHelper struct {
- numberFormat numberFormat
-
- negative bool
- negativeExponent bool
-}
-
-func (b numberHelper) Exists() bool {
- return b.numberFormat != none
-}
-
-func (b numberHelper) IsNegative() bool {
- return b.negative || b.negativeExponent
-}
-
-func (b *numberHelper) Determine(c rune) error {
- if b.Exists() {
- return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
- }
-
- switch c {
- case 'b':
- b.numberFormat = binary
- case 'o':
- b.numberFormat = octal
- case 'x':
- b.numberFormat = hex
- case 'e', 'E':
- b.numberFormat = exponent
- case '-':
- if b.numberFormat != exponent {
- b.negative = true
- } else {
- b.negativeExponent = true
- }
- case '.':
- b.numberFormat = decimal
- default:
- return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
- }
-
- return nil
-}
-
-func (b numberHelper) CorrectByte(c rune) bool {
- switch {
- case b.numberFormat == binary:
- if !isBinaryByte(c) {
- return false
- }
- case b.numberFormat == octal:
- if !isOctalByte(c) {
- return false
- }
- case b.numberFormat == hex:
- if !isHexByte(c) {
- return false
- }
- case b.numberFormat == decimal:
- if !isDigit(c) {
- return false
- }
- case b.numberFormat == exponent:
- if !isDigit(c) {
- return false
- }
- case b.negativeExponent:
- if !isDigit(c) {
- return false
- }
- case b.negative:
- if !isDigit(c) {
- return false
- }
- default:
- if !isDigit(c) {
- return false
- }
- }
-
- return true
-}
-
-func (b numberHelper) Base() int {
- switch b.numberFormat {
- case binary:
- return 2
- case octal:
- return 8
- case hex:
- return 16
- default:
- return 10
- }
-}
-
-func (b numberHelper) String() string {
- buf := bytes.Buffer{}
- i := 0
-
- switch b.numberFormat {
- case binary:
- i++
- buf.WriteString(strconv.Itoa(i) + ": binary format\n")
- case octal:
- i++
- buf.WriteString(strconv.Itoa(i) + ": octal format\n")
- case hex:
- i++
- buf.WriteString(strconv.Itoa(i) + ": hex format\n")
- case exponent:
- i++
- buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
- default:
- i++
- buf.WriteString(strconv.Itoa(i) + ": integer format\n")
- }
-
- if b.negative {
- i++
- buf.WriteString(strconv.Itoa(i) + ": negative format\n")
- }
-
- if b.negativeExponent {
- i++
- buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
- }
-
- return buf.String()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go
deleted file mode 100644
index 8a84c7cbe..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/op_tokens.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package ini
-
-import (
- "fmt"
-)
-
-var (
- equalOp = []rune("=")
- equalColonOp = []rune(":")
-)
-
-func isOp(b []rune) bool {
- if len(b) == 0 {
- return false
- }
-
- switch b[0] {
- case '=':
- return true
- case ':':
- return true
- default:
- return false
- }
-}
-
-func newOpToken(b []rune) (Token, int, error) {
- tok := Token{}
-
- switch b[0] {
- case '=':
- tok = newToken(TokenOp, equalOp, NoneType)
- case ':':
- tok = newToken(TokenOp, equalColonOp, NoneType)
- default:
- return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
- }
- return tok, 1, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go
new file mode 100644
index 000000000..2422d9046
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse.go
@@ -0,0 +1,109 @@
+package ini
+
+import (
+ "fmt"
+ "strings"
+)
+
+func parse(tokens []lineToken, path string) Sections {
+ parser := &parser{
+ path: path,
+ sections: NewSections(),
+ }
+ parser.parse(tokens)
+ return parser.sections
+}
+
+type parser struct {
+ csection, ckey string // current state
+ path string // source file path
+ sections Sections // parse result
+}
+
+func (p *parser) parse(tokens []lineToken) {
+ for _, otok := range tokens {
+ switch tok := otok.(type) {
+ case *lineTokenProfile:
+ p.handleProfile(tok)
+ case *lineTokenProperty:
+ p.handleProperty(tok)
+ case *lineTokenSubProperty:
+ p.handleSubProperty(tok)
+ case *lineTokenContinuation:
+ p.handleContinuation(tok)
+ }
+ }
+}
+
+func (p *parser) handleProfile(tok *lineTokenProfile) {
+ name := tok.Name
+ if tok.Type != "" {
+ name = fmt.Sprintf("%s %s", tok.Type, tok.Name)
+ }
+ p.ckey = ""
+ p.csection = name
+ if _, ok := p.sections.container[name]; !ok {
+ p.sections.container[name] = NewSection(name)
+ }
+}
+
+func (p *parser) handleProperty(tok *lineTokenProperty) {
+ if p.csection == "" {
+ return // LEGACY: don't error on "global" properties
+ }
+
+ p.ckey = tok.Key
+ if _, ok := p.sections.container[p.csection].values[tok.Key]; ok {
+ section := p.sections.container[p.csection]
+ section.Logs = append(p.sections.container[p.csection].Logs,
+ fmt.Sprintf(
+ "For profile: %v, overriding %v value, with a %v value found in a duplicate profile defined later in the same file %v. \n",
+ p.csection, tok.Key, tok.Key, p.path,
+ ),
+ )
+ p.sections.container[p.csection] = section
+ }
+
+ p.sections.container[p.csection].values[tok.Key] = Value{
+ str: tok.Value,
+ }
+ p.sections.container[p.csection].SourceFile[tok.Key] = p.path
+}
+
+func (p *parser) handleSubProperty(tok *lineTokenSubProperty) {
+ if p.csection == "" {
+ return // LEGACY: don't error on "global" properties
+ }
+
+ if p.ckey == "" || p.sections.container[p.csection].values[p.ckey].str != "" {
+ // This is an "orphaned" subproperty, either because it's at
+ // the beginning of a section or because the last property's
+ // value isn't empty. Either way we're lenient here and
+ // "promote" this to a normal property.
+ p.handleProperty(&lineTokenProperty{
+ Key: tok.Key,
+ Value: strings.TrimSpace(trimPropertyComment(tok.Value)),
+ })
+ return
+ }
+
+ if p.sections.container[p.csection].values[p.ckey].mp == nil {
+ p.sections.container[p.csection].values[p.ckey] = Value{
+ mp: map[string]string{},
+ }
+ }
+ p.sections.container[p.csection].values[p.ckey].mp[tok.Key] = tok.Value
+}
+
+func (p *parser) handleContinuation(tok *lineTokenContinuation) {
+ if p.ckey == "" {
+ return
+ }
+
+ value, _ := p.sections.container[p.csection].values[p.ckey]
+ if value.str != "" && value.mp == nil {
+ value.str = fmt.Sprintf("%s\n%s", value.str, tok.Value)
+ }
+
+ p.sections.container[p.csection].values[p.ckey] = value
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go
deleted file mode 100644
index 30ae0b8f2..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_error.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package ini
-
-// ParseError is an error which is returned during any part of
-// the parsing process.
-type ParseError struct {
- msg string
-}
-
-// NewParseError will return a new ParseError where message
-// is the description of the error.
-func NewParseError(message string) *ParseError {
- return &ParseError{
- msg: message,
- }
-}
-
-func (err *ParseError) Error() string {
- return err.msg
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go
deleted file mode 100644
index 7f01cf7c7..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/parse_stack.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package ini
-
-import (
- "bytes"
- "fmt"
-)
-
-// ParseStack is a stack that contains a container, the stack portion,
-// and the list which is the list of ASTs that have been successfully
-// parsed.
-type ParseStack struct {
- top int
- container []AST
- list []AST
- index int
-}
-
-func newParseStack(sizeContainer, sizeList int) ParseStack {
- return ParseStack{
- container: make([]AST, sizeContainer),
- list: make([]AST, sizeList),
- }
-}
-
-// Pop will return and truncate the last container element.
-func (s *ParseStack) Pop() AST {
- s.top--
- return s.container[s.top]
-}
-
-// Push will add the new AST to the container
-func (s *ParseStack) Push(ast AST) {
- s.container[s.top] = ast
- s.top++
-}
-
-// MarkComplete will append the AST to the list of completed statements
-func (s *ParseStack) MarkComplete(ast AST) {
- s.list[s.index] = ast
- s.index++
-}
-
-// List will return the completed statements
-func (s ParseStack) List() []AST {
- return s.list[:s.index]
-}
-
-// Len will return the length of the container
-func (s *ParseStack) Len() int {
- return s.top
-}
-
-func (s ParseStack) String() string {
- buf := bytes.Buffer{}
- for i, node := range s.list {
- buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
- }
-
- return buf.String()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go
new file mode 100644
index 000000000..dd89848e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sections.go
@@ -0,0 +1,157 @@
+package ini
+
+import (
+ "sort"
+)
+
+// Sections is a map of Section structures that represent
+// a configuration.
+type Sections struct {
+ container map[string]Section
+}
+
+// NewSections returns empty ini Sections
+func NewSections() Sections {
+ return Sections{
+ container: make(map[string]Section, 0),
+ }
+}
+
+// GetSection will return section p. If section p does not exist,
+// false will be returned in the second parameter.
+func (t Sections) GetSection(p string) (Section, bool) {
+ v, ok := t.container[p]
+ return v, ok
+}
+
+// HasSection denotes if Sections consist of a section with
+// provided name.
+func (t Sections) HasSection(p string) bool {
+ _, ok := t.container[p]
+ return ok
+}
+
+// SetSection sets a section value for provided section name.
+func (t Sections) SetSection(p string, v Section) Sections {
+ t.container[p] = v
+ return t
+}
+
+// DeleteSection deletes a section entry/value for provided section name./
+func (t Sections) DeleteSection(p string) {
+ delete(t.container, p)
+}
+
+// values represents a map of union values.
+type values map[string]Value
+
+// List will return a list of all sections that were successfully
+// parsed.
+func (t Sections) List() []string {
+ keys := make([]string, len(t.container))
+ i := 0
+ for k := range t.container {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// Section contains a name and values. This represent
+// a sectioned entry in a configuration file.
+type Section struct {
+ // Name is the Section profile name
+ Name string
+
+ // values are the values within parsed profile
+ values values
+
+ // Errors is the list of errors
+ Errors []error
+
+ // Logs is the list of logs
+ Logs []string
+
+ // SourceFile is the INI Source file from where this section
+ // was retrieved. They key is the property, value is the
+ // source file the property was retrieved from.
+ SourceFile map[string]string
+}
+
+// NewSection returns an initialize section for the name
+func NewSection(name string) Section {
+ return Section{
+ Name: name,
+ values: values{},
+ SourceFile: map[string]string{},
+ }
+}
+
+// List will return a list of all
+// services in values
+func (t Section) List() []string {
+ keys := make([]string, len(t.values))
+ i := 0
+ for k := range t.values {
+ keys[i] = k
+ i++
+ }
+
+ sort.Strings(keys)
+ return keys
+}
+
+// UpdateSourceFile updates source file for a property to provided filepath.
+func (t Section) UpdateSourceFile(property string, filepath string) {
+ t.SourceFile[property] = filepath
+}
+
+// UpdateValue updates value for a provided key with provided value
+func (t Section) UpdateValue(k string, v Value) error {
+ t.values[k] = v
+ return nil
+}
+
+// Has will return whether or not an entry exists in a given section
+func (t Section) Has(k string) bool {
+ _, ok := t.values[k]
+ return ok
+}
+
+// ValueType will returned what type the union is set to. If
+// k was not found, the NoneType will be returned.
+func (t Section) ValueType(k string) (ValueType, bool) {
+ v, ok := t.values[k]
+ return v.Type, ok
+}
+
+// Bool returns a bool value at k
+func (t Section) Bool(k string) (bool, bool) {
+ return t.values[k].BoolValue()
+}
+
+// Int returns an integer value at k
+func (t Section) Int(k string) (int64, bool) {
+ return t.values[k].IntValue()
+}
+
+// Map returns a map value at k
+func (t Section) Map(k string) map[string]string {
+ return t.values[k].MapValue()
+}
+
+// Float64 returns a float value at k
+func (t Section) Float64(k string) (float64, bool) {
+ return t.values[k].FloatValue()
+}
+
+// String returns the string value at k
+func (t Section) String(k string) string {
+ _, ok := t.values[k]
+ if !ok {
+ return ""
+ }
+ return t.values[k].StringValue()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go
deleted file mode 100644
index f82095ba2..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/sep_tokens.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package ini
-
-import (
- "fmt"
-)
-
-var (
- emptyRunes = []rune{}
-)
-
-func isSep(b []rune) bool {
- if len(b) == 0 {
- return false
- }
-
- switch b[0] {
- case '[', ']':
- return true
- default:
- return false
- }
-}
-
-var (
- openBrace = []rune("[")
- closeBrace = []rune("]")
-)
-
-func newSepToken(b []rune) (Token, int, error) {
- tok := Token{}
-
- switch b[0] {
- case '[':
- tok = newToken(TokenSep, openBrace, NoneType)
- case ']':
- tok = newToken(TokenSep, closeBrace, NoneType)
- default:
- return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
- }
- return tok, 1, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go
deleted file mode 100644
index 07e90876a..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/skipper.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package ini
-
-// skipper is used to skip certain blocks of an ini file.
-// Currently skipper is used to skip nested blocks of ini
-// files. See example below
-//
-// [ foo ]
-// nested = ; this section will be skipped
-// a=b
-// c=d
-// bar=baz ; this will be included
-type skipper struct {
- shouldSkip bool
- TokenSet bool
- prevTok Token
-}
-
-func newSkipper() skipper {
- return skipper{
- prevTok: emptyToken,
- }
-}
-
-func (s *skipper) ShouldSkip(tok Token) bool {
- // should skip state will be modified only if previous token was new line (NL);
- // and the current token is not WhiteSpace (WS).
- if s.shouldSkip &&
- s.prevTok.Type() == TokenNL &&
- tok.Type() != TokenWS {
- s.Continue()
- return false
- }
-
- s.prevTok = tok
- return s.shouldSkip
-}
-
-func (s *skipper) Skip() {
- s.shouldSkip = true
-}
-
-func (s *skipper) Continue() {
- s.shouldSkip = false
- s.prevTok = emptyToken
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go
deleted file mode 100644
index ba0af01b5..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/statement.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package ini
-
-// Statement is an empty AST mostly used for transitioning states.
-func newStatement() AST {
- return newAST(ASTKindStatement, AST{})
-}
-
-// SectionStatement represents a section AST
-func newSectionStatement(tok Token) AST {
- return newASTWithRootToken(ASTKindSectionStatement, tok)
-}
-
-// ExprStatement represents a completed expression AST
-func newExprStatement(ast AST) AST {
- return newAST(ASTKindExprStatement, ast)
-}
-
-// CommentStatement represents a comment in the ini defintion.
-//
-// grammar:
-// comment -> #comment' | ;comment'
-// comment' -> epsilon | value
-func newCommentStatement(tok Token) AST {
- return newAST(ASTKindCommentStatement, newExpression(tok))
-}
-
-// CompletedSectionStatement represents a completed section
-func newCompletedSectionStatement(ast AST) AST {
- return newAST(ASTKindCompletedSectionStatement, ast)
-}
-
-// SkipStatement is used to skip whole statements
-func newSkipStatement(ast AST) AST {
- return newAST(ASTKindSkipStatement, ast)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
new file mode 100644
index 000000000..ed77d0835
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/strings.go
@@ -0,0 +1,89 @@
+package ini
+
+import (
+ "strings"
+)
+
+func trimProfileComment(s string) string {
+ r, _, _ := strings.Cut(s, "#")
+ r, _, _ = strings.Cut(r, ";")
+ return r
+}
+
+func trimPropertyComment(s string) string {
+ r, _, _ := strings.Cut(s, " #")
+ r, _, _ = strings.Cut(r, " ;")
+ r, _, _ = strings.Cut(r, "\t#")
+ r, _, _ = strings.Cut(r, "\t;")
+ return r
+}
+
+// assumes no surrounding comment
+func splitProperty(s string) (string, string, bool) {
+ equalsi := strings.Index(s, "=")
+ coloni := strings.Index(s, ":") // LEGACY: also supported for property assignment
+ sep := "="
+ if equalsi == -1 || coloni != -1 && coloni < equalsi {
+ sep = ":"
+ }
+
+ k, v, ok := strings.Cut(s, sep)
+ if !ok {
+ return "", "", false
+ }
+ return strings.TrimSpace(k), strings.TrimSpace(v), true
+}
+
+// assumes no surrounding comment, whitespace, or profile brackets
+func splitProfile(s string) (string, string) {
+ var first int
+ for i, r := range s {
+ if isLineSpace(r) {
+ if first == 0 {
+ first = i
+ }
+ } else {
+ if first != 0 {
+ return s[:first], s[i:]
+ }
+ }
+ }
+ if first == 0 {
+ return "", s // type component is effectively blank
+ }
+ return "", ""
+}
+
+func isLineSpace(r rune) bool {
+ return r == ' ' || r == '\t'
+}
+
+func unquote(s string) string {
+ if isSingleQuoted(s) || isDoubleQuoted(s) {
+ return s[1 : len(s)-1]
+ }
+ return s
+}
+
+// applies various legacy conversions to property values:
+// - remote wrapping single/doublequotes
+func legacyStrconv(s string) string {
+ s = unquote(s)
+ return s
+}
+
+func isSingleQuoted(s string) bool {
+ return hasAffixes(s, "'", "'")
+}
+
+func isDoubleQuoted(s string) bool {
+ return hasAffixes(s, `"`, `"`)
+}
+
+func isBracketed(s string) bool {
+ return hasAffixes(s, "[", "]")
+}
+
+func hasAffixes(s, left, right string) bool {
+ return strings.HasPrefix(s, left) && strings.HasSuffix(s, right)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go
new file mode 100644
index 000000000..6e9a03744
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/token.go
@@ -0,0 +1,32 @@
+package ini
+
+type lineToken interface {
+ isLineToken()
+}
+
+type lineTokenProfile struct {
+ Type string
+ Name string
+}
+
+func (*lineTokenProfile) isLineToken() {}
+
+type lineTokenProperty struct {
+ Key string
+ Value string
+}
+
+func (*lineTokenProperty) isLineToken() {}
+
+type lineTokenContinuation struct {
+ Value string
+}
+
+func (*lineTokenContinuation) isLineToken() {}
+
+type lineTokenSubProperty struct {
+ Key string
+ Value string
+}
+
+func (*lineTokenSubProperty) isLineToken() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go
new file mode 100644
index 000000000..89a773684
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/tokenize.go
@@ -0,0 +1,92 @@
+package ini
+
+import (
+ "strings"
+)
+
+func tokenize(lines []string) ([]lineToken, error) {
+ tokens := make([]lineToken, 0, len(lines))
+ for _, line := range lines {
+ if len(strings.TrimSpace(line)) == 0 || isLineComment(line) {
+ continue
+ }
+
+ if tok := asProfile(line); tok != nil {
+ tokens = append(tokens, tok)
+ } else if tok := asProperty(line); tok != nil {
+ tokens = append(tokens, tok)
+ } else if tok := asSubProperty(line); tok != nil {
+ tokens = append(tokens, tok)
+ } else if tok := asContinuation(line); tok != nil {
+ tokens = append(tokens, tok)
+ } // unrecognized tokens are effectively ignored
+ }
+ return tokens, nil
+}
+
+func isLineComment(line string) bool {
+ trimmed := strings.TrimLeft(line, " \t")
+ return strings.HasPrefix(trimmed, "#") || strings.HasPrefix(trimmed, ";")
+}
+
+func asProfile(line string) *lineTokenProfile { // " [ type name ] ; comment"
+ trimmed := strings.TrimSpace(trimProfileComment(line)) // "[ type name ]"
+ if !isBracketed(trimmed) {
+ return nil
+ }
+ trimmed = trimmed[1 : len(trimmed)-1] // " type name " (or just " name ")
+ trimmed = strings.TrimSpace(trimmed) // "type name" / "name"
+ typ, name := splitProfile(trimmed)
+ return &lineTokenProfile{
+ Type: typ,
+ Name: name,
+ }
+}
+
+func asProperty(line string) *lineTokenProperty {
+ if isLineSpace(rune(line[0])) {
+ return nil
+ }
+
+ trimmed := trimPropertyComment(line)
+ trimmed = strings.TrimRight(trimmed, " \t")
+ k, v, ok := splitProperty(trimmed)
+ if !ok {
+ return nil
+ }
+
+ return &lineTokenProperty{
+ Key: strings.ToLower(k), // LEGACY: normalize key case
+ Value: legacyStrconv(v), // LEGACY: see func docs
+ }
+}
+
+func asSubProperty(line string) *lineTokenSubProperty {
+ if !isLineSpace(rune(line[0])) {
+ return nil
+ }
+
+ // comments on sub-properties are included in the value
+ trimmed := strings.TrimLeft(line, " \t")
+ k, v, ok := splitProperty(trimmed)
+ if !ok {
+ return nil
+ }
+
+ return &lineTokenSubProperty{ // same LEGACY constraints as in normal property
+ Key: strings.ToLower(k),
+ Value: legacyStrconv(v),
+ }
+}
+
+func asContinuation(line string) *lineTokenContinuation {
+ if !isLineSpace(rune(line[0])) {
+ return nil
+ }
+
+ // includes comments like sub-properties
+ trimmed := strings.TrimLeft(line, " \t")
+ return &lineTokenContinuation{
+ Value: trimmed,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
new file mode 100644
index 000000000..e3706b3c3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value.go
@@ -0,0 +1,93 @@
+package ini
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// ValueType is an enum that will signify what type
+// the Value is
+type ValueType int
+
+func (v ValueType) String() string {
+ switch v {
+ case NoneType:
+ return "NONE"
+ case StringType:
+ return "STRING"
+ }
+
+ return ""
+}
+
+// ValueType enums
+const (
+ NoneType = ValueType(iota)
+ StringType
+ QuotedStringType
+)
+
+// Value is a union container
+type Value struct {
+ Type ValueType
+
+ str string
+ mp map[string]string
+}
+
+// NewStringValue returns a Value type generated using a string input.
+func NewStringValue(str string) (Value, error) {
+ return Value{str: str}, nil
+}
+
+func (v Value) String() string {
+ switch v.Type {
+ case StringType:
+ return fmt.Sprintf("string: %s", string(v.str))
+ case QuotedStringType:
+ return fmt.Sprintf("quoted string: %s", string(v.str))
+ default:
+ return "union not set"
+ }
+}
+
+// MapValue returns a map value for sub properties
+func (v Value) MapValue() map[string]string {
+ return v.mp
+}
+
+// IntValue returns an integer value
+func (v Value) IntValue() (int64, bool) {
+ i, err := strconv.ParseInt(string(v.str), 0, 64)
+ if err != nil {
+ return 0, false
+ }
+ return i, true
+}
+
+// FloatValue returns a float value
+func (v Value) FloatValue() (float64, bool) {
+ f, err := strconv.ParseFloat(string(v.str), 64)
+ if err != nil {
+ return 0, false
+ }
+ return f, true
+}
+
+// BoolValue returns a bool value
+func (v Value) BoolValue() (bool, bool) {
+ // we don't use ParseBool as it recognizes more than what we've
+ // historically supported
+ if strings.EqualFold(v.str, "true") {
+ return true, true
+ } else if strings.EqualFold(v.str, "false") {
+ return false, true
+ }
+ return false, false
+}
+
+// StringValue returns the string value
+func (v Value) StringValue() string {
+ return v.str
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go
deleted file mode 100644
index b5480fdeb..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/value_util.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package ini
-
-import (
- "fmt"
-)
-
-// getStringValue will return a quoted string and the amount
-// of bytes read
-//
-// an error will be returned if the string is not properly formatted
-func getStringValue(b []rune) (int, error) {
- if b[0] != '"' {
- return 0, NewParseError("strings must start with '\"'")
- }
-
- endQuote := false
- i := 1
-
- for ; i < len(b) && !endQuote; i++ {
- if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
- endQuote = true
- break
- } else if escaped {
- /*c, err := getEscapedByte(b[i])
- if err != nil {
- return 0, err
- }
-
- b[i-1] = c
- b = append(b[:i], b[i+1:]...)
- i--*/
-
- continue
- }
- }
-
- if !endQuote {
- return 0, NewParseError("missing '\"' in string value")
- }
-
- return i + 1, nil
-}
-
-// getBoolValue will return a boolean and the amount
-// of bytes read
-//
-// an error will be returned if the boolean is not of a correct
-// value
-func getBoolValue(b []rune) (int, error) {
- if len(b) < 4 {
- return 0, NewParseError("invalid boolean value")
- }
-
- n := 0
- for _, lv := range literalValues {
- if len(lv) > len(b) {
- continue
- }
-
- if isCaselessLitValue(lv, b) {
- n = len(lv)
- }
- }
-
- if n == 0 {
- return 0, NewParseError("invalid boolean value")
- }
-
- return n, nil
-}
-
-// getNumericalValue will return a numerical string, the amount
-// of bytes read, and the base of the number
-//
-// an error will be returned if the number is not of a correct
-// value
-func getNumericalValue(b []rune) (int, int, error) {
- if !isDigit(b[0]) {
- return 0, 0, NewParseError("invalid digit value")
- }
-
- i := 0
- helper := numberHelper{}
-
-loop:
- for negativeIndex := 0; i < len(b); i++ {
- negativeIndex++
-
- if !isDigit(b[i]) {
- switch b[i] {
- case '-':
- if helper.IsNegative() || negativeIndex != 1 {
- return 0, 0, NewParseError("parse error '-'")
- }
-
- n := getNegativeNumber(b[i:])
- i += (n - 1)
- helper.Determine(b[i])
- continue
- case '.':
- if err := helper.Determine(b[i]); err != nil {
- return 0, 0, err
- }
- case 'e', 'E':
- if err := helper.Determine(b[i]); err != nil {
- return 0, 0, err
- }
-
- negativeIndex = 0
- case 'b':
- if helper.numberFormat == hex {
- break
- }
- fallthrough
- case 'o', 'x':
- if i == 0 && b[i] != '0' {
- return 0, 0, NewParseError("incorrect base format, expected leading '0'")
- }
-
- if i != 1 {
- return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
- }
-
- if err := helper.Determine(b[i]); err != nil {
- return 0, 0, err
- }
- default:
- if isWhitespace(b[i]) {
- break loop
- }
-
- if isNewline(b[i:]) {
- break loop
- }
-
- if !(helper.numberFormat == hex && isHexByte(b[i])) {
- if i+2 < len(b) && !isNewline(b[i:i+2]) {
- return 0, 0, NewParseError("invalid numerical character")
- } else if !isNewline([]rune{b[i]}) {
- return 0, 0, NewParseError("invalid numerical character")
- }
-
- break loop
- }
- }
- }
- }
-
- return helper.Base(), i, nil
-}
-
-// isDigit will return whether or not something is an integer
-func isDigit(b rune) bool {
- return b >= '0' && b <= '9'
-}
-
-func hasExponent(v []rune) bool {
- return contains(v, 'e') || contains(v, 'E')
-}
-
-func isBinaryByte(b rune) bool {
- switch b {
- case '0', '1':
- return true
- default:
- return false
- }
-}
-
-func isOctalByte(b rune) bool {
- switch b {
- case '0', '1', '2', '3', '4', '5', '6', '7':
- return true
- default:
- return false
- }
-}
-
-func isHexByte(b rune) bool {
- if isDigit(b) {
- return true
- }
- return (b >= 'A' && b <= 'F') ||
- (b >= 'a' && b <= 'f')
-}
-
-func getValue(b []rune) (int, error) {
- i := 0
-
- for i < len(b) {
- if isNewline(b[i:]) {
- break
- }
-
- if isOp(b[i:]) {
- break
- }
-
- valid, n, err := isValid(b[i:])
- if err != nil {
- return 0, err
- }
-
- if !valid {
- break
- }
-
- i += n
- }
-
- return i, nil
-}
-
-// getNegativeNumber will return a negative number from a
-// byte slice. This will iterate through all characters until
-// a non-digit has been found.
-func getNegativeNumber(b []rune) int {
- if b[0] != '-' {
- return 0
- }
-
- i := 1
- for ; i < len(b); i++ {
- if !isDigit(b[i]) {
- return i
- }
- }
-
- return i
-}
-
-// isEscaped will return whether or not the character is an escaped
-// character.
-func isEscaped(value []rune, b rune) bool {
- if len(value) == 0 {
- return false
- }
-
- switch b {
- case '\'': // single quote
- case '"': // quote
- case 'n': // newline
- case 't': // tab
- case '\\': // backslash
- default:
- return false
- }
-
- return value[len(value)-1] == '\\'
-}
-
-func getEscapedByte(b rune) (rune, error) {
- switch b {
- case '\'': // single quote
- return '\'', nil
- case '"': // quote
- return '"', nil
- case 'n': // newline
- return '\n', nil
- case 't': // table
- return '\t', nil
- case '\\': // backslash
- return '\\', nil
- default:
- return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
- }
-}
-
-func removeEscapedCharacters(b []rune) []rune {
- for i := 0; i < len(b); i++ {
- if isEscaped(b[:i], b[i]) {
- c, err := getEscapedByte(b[i])
- if err != nil {
- return b
- }
-
- b[i-1] = c
- b = append(b[:i], b[i+1:]...)
- i--
- }
- }
-
- return b
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go
deleted file mode 100644
index a07a63738..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/visitor.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package ini
-
-import (
- "fmt"
- "sort"
- "strings"
-)
-
-// Visitor is an interface used by walkers that will
-// traverse an array of ASTs.
-type Visitor interface {
- VisitExpr(AST) error
- VisitStatement(AST) error
-}
-
-// DefaultVisitor is used to visit statements and expressions
-// and ensure that they are both of the correct format.
-// In addition, upon visiting this will build sections and populate
-// the Sections field which can be used to retrieve profile
-// configuration.
-type DefaultVisitor struct {
-
- // scope is the profile which is being visited
- scope string
-
- // path is the file path which the visitor is visiting
- path string
-
- // Sections defines list of the profile section
- Sections Sections
-}
-
-// NewDefaultVisitor returns a DefaultVisitor. It takes in a filepath
-// which points to the file it is visiting.
-func NewDefaultVisitor(filepath string) *DefaultVisitor {
- return &DefaultVisitor{
- Sections: Sections{
- container: map[string]Section{},
- },
- path: filepath,
- }
-}
-
-// VisitExpr visits expressions...
-func (v *DefaultVisitor) VisitExpr(expr AST) error {
- t := v.Sections.container[v.scope]
- if t.values == nil {
- t.values = values{}
- }
- if t.SourceFile == nil {
- t.SourceFile = make(map[string]string, 0)
- }
-
- switch expr.Kind {
- case ASTKindExprStatement:
- opExpr := expr.GetRoot()
- switch opExpr.Kind {
- case ASTKindEqualExpr:
- children := opExpr.GetChildren()
- if len(children) <= 1 {
- return NewParseError("unexpected token type")
- }
-
- rhs := children[1]
-
- // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values.
- // If the token is not either a literal or one of the token types that identifies those four additional
- // tokens then error.
- if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) {
- return NewParseError("unexpected token type")
- }
-
- key := EqualExprKey(opExpr)
- val, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
- if err != nil {
- return err
- }
-
- // lower case key to standardize
- k := strings.ToLower(key)
-
- // identify if the section already had this key, append log on section
- if t.Has(k) {
- t.Logs = append(t.Logs,
- fmt.Sprintf("For profile: %v, overriding %v value, "+
- "with a %v value found in a duplicate profile defined later in the same file %v. \n",
- t.Name, k, k, v.path))
- }
-
- // assign the value
- t.values[k] = val
- // update the source file path for region
- t.SourceFile[k] = v.path
- default:
- return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
- }
- default:
- return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
- }
-
- v.Sections.container[v.scope] = t
- return nil
-}
-
-// VisitStatement visits statements...
-func (v *DefaultVisitor) VisitStatement(stmt AST) error {
- switch stmt.Kind {
- case ASTKindCompletedSectionStatement:
- child := stmt.GetRoot()
- if child.Kind != ASTKindSectionStatement {
- return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
- }
-
- name := string(child.Root.Raw())
-
- // trim start and end space
- name = strings.TrimSpace(name)
-
- // if has prefix "profile " + [ws+] + "profile-name",
- // we standardize by removing the [ws+] between prefix and profile-name.
- if strings.HasPrefix(name, "profile ") {
- names := strings.SplitN(name, " ", 2)
- name = names[0] + " " + strings.TrimLeft(names[1], " ")
- }
-
- // attach profile name on section
- if !v.Sections.HasSection(name) {
- v.Sections.container[name] = NewSection(name)
- }
- v.scope = name
- default:
- return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
- }
-
- return nil
-}
-
-// Sections is a map of Section structures that represent
-// a configuration.
-type Sections struct {
- container map[string]Section
-}
-
-// NewSections returns empty ini Sections
-func NewSections() Sections {
- return Sections{
- container: make(map[string]Section, 0),
- }
-}
-
-// GetSection will return section p. If section p does not exist,
-// false will be returned in the second parameter.
-func (t Sections) GetSection(p string) (Section, bool) {
- v, ok := t.container[p]
- return v, ok
-}
-
-// HasSection denotes if Sections consist of a section with
-// provided name.
-func (t Sections) HasSection(p string) bool {
- _, ok := t.container[p]
- return ok
-}
-
-// SetSection sets a section value for provided section name.
-func (t Sections) SetSection(p string, v Section) Sections {
- t.container[p] = v
- return t
-}
-
-// DeleteSection deletes a section entry/value for provided section name./
-func (t Sections) DeleteSection(p string) {
- delete(t.container, p)
-}
-
-// values represents a map of union values.
-type values map[string]Value
-
-// List will return a list of all sections that were successfully
-// parsed.
-func (t Sections) List() []string {
- keys := make([]string, len(t.container))
- i := 0
- for k := range t.container {
- keys[i] = k
- i++
- }
-
- sort.Strings(keys)
- return keys
-}
-
-// Section contains a name and values. This represent
-// a sectioned entry in a configuration file.
-type Section struct {
- // Name is the Section profile name
- Name string
-
- // values are the values within parsed profile
- values values
-
- // Errors is the list of errors
- Errors []error
-
- // Logs is the list of logs
- Logs []string
-
- // SourceFile is the INI Source file from where this section
- // was retrieved. They key is the property, value is the
- // source file the property was retrieved from.
- SourceFile map[string]string
-}
-
-// NewSection returns an initialize section for the name
-func NewSection(name string) Section {
- return Section{
- Name: name,
- values: values{},
- SourceFile: map[string]string{},
- }
-}
-
-// UpdateSourceFile updates source file for a property to provided filepath.
-func (t Section) UpdateSourceFile(property string, filepath string) {
- t.SourceFile[property] = filepath
-}
-
-// UpdateValue updates value for a provided key with provided value
-func (t Section) UpdateValue(k string, v Value) error {
- t.values[k] = v
- return nil
-}
-
-// Has will return whether or not an entry exists in a given section
-func (t Section) Has(k string) bool {
- _, ok := t.values[k]
- return ok
-}
-
-// ValueType will returned what type the union is set to. If
-// k was not found, the NoneType will be returned.
-func (t Section) ValueType(k string) (ValueType, bool) {
- v, ok := t.values[k]
- return v.Type, ok
-}
-
-// Bool returns a bool value at k
-func (t Section) Bool(k string) bool {
- return t.values[k].BoolValue()
-}
-
-// Int returns an integer value at k
-func (t Section) Int(k string) int64 {
- return t.values[k].IntValue()
-}
-
-// Float64 returns a float value at k
-func (t Section) Float64(k string) float64 {
- return t.values[k].FloatValue()
-}
-
-// String returns the string value at k
-func (t Section) String(k string) string {
- _, ok := t.values[k]
- if !ok {
- return ""
- }
- return t.values[k].StringValue()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go
deleted file mode 100644
index 99915f7f7..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/walker.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ini
-
-// Walk will traverse the AST using the v, the Visitor.
-func Walk(tree []AST, v Visitor) error {
- for _, node := range tree {
- switch node.Kind {
- case ASTKindExpr,
- ASTKindExprStatement:
-
- if err := v.VisitExpr(node); err != nil {
- return err
- }
- case ASTKindStatement,
- ASTKindCompletedSectionStatement,
- ASTKindNestedSectionStatement,
- ASTKindCompletedNestedSectionStatement:
-
- if err := v.VisitStatement(node); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go
deleted file mode 100644
index 7ffb4ae06..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/internal/ini/ws_token.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package ini
-
-import (
- "unicode"
-)
-
-// isWhitespace will return whether or not the character is
-// a whitespace character.
-//
-// Whitespace is defined as a space or tab.
-func isWhitespace(c rune) bool {
- return unicode.IsSpace(c) && c != '\n' && c != '\r'
-}
-
-func newWSToken(b []rune) (Token, int, error) {
- i := 0
- for ; i < len(b); i++ {
- if !isWhitespace(b[i]) {
- break
- }
- }
-
- return newToken(TokenWS, b[:i], NoneType), i, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go
new file mode 100644
index 000000000..8e24a3f0a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/middleware/middleware.go
@@ -0,0 +1,42 @@
+package middleware
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ internalcontext "github.com/aws/aws-sdk-go-v2/internal/context"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// AddTimeOffsetMiddleware sets a value representing clock skew on the request context.
+// This can be read by other operations (such as signing) to correct the date value they send
+// on the request
+type AddTimeOffsetMiddleware struct {
+ Offset *atomic.Int64
+}
+
+// ID the identifier for AddTimeOffsetMiddleware
+func (m *AddTimeOffsetMiddleware) ID() string { return "AddTimeOffsetMiddleware" }
+
+// HandleBuild sets a value for attemptSkew on the request context if one is set on the client.
+func (m AddTimeOffsetMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ if m.Offset != nil {
+ offset := time.Duration(m.Offset.Load())
+ ctx = internalcontext.SetAttemptSkewContext(ctx, offset)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+// HandleDeserialize gets the clock skew context from the context, and if set, sets it on the pointer
+// held by AddTimeOffsetMiddleware
+func (m *AddTimeOffsetMiddleware) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ if v := internalcontext.GetAttemptSkewContext(ctx); v != 0 {
+ m.Offset.Store(v.Nanoseconds())
+ }
+ return next.HandleDeserialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go
new file mode 100644
index 000000000..c96b717e0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/internal/shareddefaults/shared_config.go
@@ -0,0 +1,47 @@
+package shareddefaults
+
+import (
+ "os"
+ "os/user"
+ "path/filepath"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ // Ignore errors since we only care about Windows and *nix.
+ home, _ := os.UserHomeDir()
+
+ if len(home) > 0 {
+ return home
+ }
+
+ currUser, _ := user.Current()
+ if currUser != nil {
+ home = currUser.HomeDir
+ }
+
+ return home
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh b/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh
deleted file mode 100644
index 81a836127..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/local-mod-replace.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env bash
-
-PROJECT_DIR=""
-SDK_SOURCE_DIR=$(cd `dirname $0` && pwd)
-
-usage() {
- echo "Usage: $0 [-s SDK_SOURCE_DIR] [-d PROJECT_DIR]" 1>&2
- exit 1
-}
-
-while getopts "hs:d:" options; do
- case "${options}" in
- s)
- SDK_SOURCE_DIR=${OPTARG}
- if [ "$SDK_SOURCE_DIR" == "" ]; then
- echo "path to SDK source directory is required" || exit
- usage
- fi
- ;;
- d)
- PROJECT_DIR=${OPTARG}
- ;;
- h)
- usage
- ;;
- *)
- usage
- ;;
- esac
-done
-
-if [ "$PROJECT_DIR" != "" ]; then
- cd "$PROJECT_DIR" || exit
-fi
-
-go mod graph | awk '{print $1}' | cut -d '@' -f 1 | sort | uniq | grep "github.com/aws/aws-sdk-go-v2" | while read x; do
- repPath=${x/github.com\/aws\/aws-sdk-go-v2/${SDK_SOURCE_DIR}}
- echo -replace $x=$repPath
-done | xargs go mod edit
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml b/vendor/github.com/aws/aws-sdk-go-v2/modman.toml
deleted file mode 100644
index 969f0e467..000000000
--- a/vendor/github.com/aws/aws-sdk-go-v2/modman.toml
+++ /dev/null
@@ -1,78 +0,0 @@
-
-[dependencies]
- "github.com/aws/aws-sdk-go" = "v1.44.28"
- "github.com/aws/smithy-go" = "v1.13.3"
- "github.com/google/go-cmp" = "v0.5.8"
- "github.com/jmespath/go-jmespath" = "v0.4.0"
- "golang.org/x/net" = "v0.0.0-20220127200216-cd36cc0744dd"
-
-[modules]
-
- [modules."."]
- metadata_package = "aws"
-
- [modules.codegen]
- no_tag = true
-
- [modules."example/service/dynamodb/createTable"]
- no_tag = true
-
- [modules."example/service/dynamodb/scanItems"]
- no_tag = true
-
- [modules."example/service/s3/listObjects"]
- no_tag = true
-
- [modules."example/service/s3/usingPrivateLink"]
- no_tag = true
-
- [modules."feature/ec2/imds/internal/configtesting"]
- no_tag = true
-
- [modules."internal/codegen"]
- no_tag = true
-
- [modules."internal/configsources/configtesting"]
- no_tag = true
-
- [modules."internal/protocoltest/awsrestjson"]
- no_tag = true
-
- [modules."internal/protocoltest/ec2query"]
- no_tag = true
-
- [modules."internal/protocoltest/jsonrpc"]
- no_tag = true
-
- [modules."internal/protocoltest/jsonrpc10"]
- no_tag = true
-
- [modules."internal/protocoltest/query"]
- no_tag = true
-
- [modules."internal/protocoltest/restxml"]
- no_tag = true
-
- [modules."internal/protocoltest/restxmlwithnamespace"]
- no_tag = true
-
- [modules."internal/repotools"]
- no_tag = true
-
- [modules."internal/repotools/changes"]
- no_tag = true
-
- [modules."service/internal/benchmark"]
- no_tag = true
-
- [modules."service/internal/integrationtest"]
- no_tag = true
-
- [modules."service/kinesis/internal/testing"]
- no_tag = true
-
- [modules."service/s3/internal/configtesting"]
- no_tag = true
-
- [modules."service/transcribestreaming/internal/testing"]
- no_tag = true
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md
new file mode 100644
index 000000000..815e1331b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/CHANGELOG.md
@@ -0,0 +1,818 @@
+# v1.50.1 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.50.0 (2025-08-28)
+
+* **Feature**: Remove incorrect endpoint tests
+
+# v1.49.2 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.49.1 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.49.0 (2025-08-20)
+
+* **Feature**: Remove incorrect endpoint tests
+* **Bug Fix**: Remove unused deserialization code.
+
+# v1.48.0 (2025-08-14)
+
+* **Feature**: This release 1/ Adds support for throttled keys mode for CloudWatch Contributor Insights, 2/ Adds throttling reasons to exceptions across dataplane APIs. 3/ Explicitly models ThrottlingException as a class in statically typed languages. Refer to the launch day blog post for more details.
+
+# v1.47.0 (2025-08-11)
+
+* **Feature**: Add support for configuring per-service Options via callback on global config.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.46.0 (2025-08-04)
+
+* **Feature**: Support configurable auth scheme preferences in service clients via AWS_AUTH_SCHEME_PREFERENCE in the environment, auth_scheme_preference in the config file, and through in-code settings on LoadDefaultConfig and client constructor methods.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.45.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.45.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.44.1 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.44.0 (2025-06-30)
+
+* **Feature**: This change adds support for witnesses in global tables. It also adds a new table status, REPLICATION_NOT_AUTHORIZED. This status will indicate scenarios where global replicas table can't be utilized for data plane operations.
+
+# v1.43.4 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.43.3 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.43.2 (2025-06-06)
+
+* No change notes available for this release.
+
+# v1.43.1 (2025-04-28)
+
+* **Documentation**: Doc only update for GSI descriptions.
+
+# v1.43.0 (2025-04-24)
+
+* **Feature**: Add support for ARN-sourced account endpoint generation for TransactWriteItems. This will generate account endpoints for DynamoDB TransactWriteItems requests using ARN-sourced account ID when available.
+
+# v1.42.4 (2025-04-11)
+
+* **Documentation**: Doc only update for API descriptions.
+
+# v1.42.3 (2025-04-10)
+
+* No change notes available for this release.
+
+# v1.42.2 (2025-04-09)
+
+* **Documentation**: Documentation update for secondary indexes and Create_Table.
+
+# v1.42.1 (2025-04-03)
+
+* No change notes available for this release.
+
+# v1.42.0 (2025-03-13)
+
+* **Feature**: Generate account endpoints for DynamoDB requests using ARN-sourced account ID when available
+
+# v1.41.1 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.41.0 (2025-02-27)
+
+* **Feature**: Track credential providers via User-Agent Feature ids
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.40.2 (2025-02-18)
+
+* **Bug Fix**: Add missing AccountIDEndpointMode binding to endpoint resolution.
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.40.1 (2025-02-11)
+
+* No change notes available for this release.
+
+# v1.40.0 (2025-02-05)
+
+* **Feature**: Track AccountID endpoint mode in user-agent.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.39.9 (2025-02-04)
+
+* No change notes available for this release.
+
+# v1.39.8 (2025-01-31)
+
+* **Dependency Update**: Switch to code-generated waiter matchers, removing the dependency on go-jmespath.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.39.7 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.39.6 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.39.5 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+
+# v1.39.4 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.39.3 (2025-01-14)
+
+* **Bug Fix**: Fix issue where waiters were not failing on unmatched errors as they should. This may have breaking behavioral changes for users in fringe cases. See [this announcement](https://github.com/aws/aws-sdk-go-v2/discussions/2954) for more information.
+
+# v1.39.2 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.39.1 (2025-01-08)
+
+* No change notes available for this release.
+
+# v1.39.0 (2025-01-07)
+
+* **Feature**: This release makes Amazon DynamoDB point-in-time-recovery (PITR) to be configurable. You can set PITR recovery period for each table individually to between 1 and 35 days.
+
+# v1.38.1 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.38.0 (2024-12-03.2)
+
+* **Feature**: This change adds support for global tables with multi-Region strong consistency (in preview). The UpdateTable API now supports a new attribute MultiRegionConsistency to set consistency when creating global tables. The DescribeTable output now optionally includes the MultiRegionConsistency attribute.
+
+# v1.37.2 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.37.1 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.37.0 (2024-11-13)
+
+* **Feature**: This release includes supports the new WarmThroughput feature for DynamoDB. You can now provide an optional WarmThroughput attribute for CreateTable or UpdateTable APIs to pre-warm your table or global secondary index. You can also use DescribeTable to see the latest WarmThroughput value.
+
+# v1.36.5 (2024-11-07)
+
+* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses
+
+# v1.36.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.36.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.35.4 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.35.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.35.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.35.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.35.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Feature**: Generate and use AWS-account-based endpoints for DynamoDB requests when the account ID is available. The new endpoint URL pattern will be https://.ddb..amazonaws.com. See the documentation for details: https://docs.aws.amazon.com/sdkref/latest/guide/feature-account-endpoints.html.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.10 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.34.9 (2024-09-09)
+
+* **Documentation**: Doc-only update for DynamoDB. Added information about async behavior for TagResource and UntagResource APIs and updated the description of ResourceInUseException.
+
+# v1.34.8 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.34.7 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.6 (2024-08-22)
+
+* No change notes available for this release.
+
+# v1.34.5 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.4 (2024-07-24)
+
+* **Documentation**: DynamoDB doc only update for July
+
+# v1.34.3 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.2 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.34.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.33.2 (2024-06-20)
+
+* **Documentation**: Doc-only update for DynamoDB. Fixed Important note in 6 Global table APIs - CreateGlobalTable, DescribeGlobalTable, DescribeGlobalTableSettings, ListGlobalTables, UpdateGlobalTable, and UpdateGlobalTableSettings.
+
+# v1.33.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.9 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.8 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.7 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.6 (2024-05-28)
+
+* **Documentation**: Doc-only update for DynamoDB. Specified the IAM actions needed to authorize a user to create a table with a resource-based policy.
+
+# v1.32.5 (2024-05-24)
+
+* **Documentation**: Documentation only updates for DynamoDB.
+
+# v1.32.4 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.32.3 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.2 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.1 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.32.0 (2024-05-02)
+
+* **Feature**: This release adds support to specify an optional, maximum OnDemandThroughput for DynamoDB tables and global secondary indexes in the CreateTable or UpdateTable APIs. You can also override the OnDemandThroughput settings by calling the ImportTable, RestoreFromPointInTime, or RestoreFromBackup APIs.
+
+# v1.31.1 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.0 (2024-03-20)
+
+* **Feature**: This release introduces 3 new APIs ('GetResourcePolicy', 'PutResourcePolicy' and 'DeleteResourcePolicy') and modifies the existing 'CreateTable' API for the resource-based policy support. It also modifies several APIs to accept a 'TableArn' for the 'TableName' parameter.
+
+# v1.30.5 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.4 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.3 (2024-03-06)
+
+* **Documentation**: Doc only updates for DynamoDB documentation
+
+# v1.30.2 (2024-03-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.29.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+* **Documentation**: Publishing quick fix for doc only update.
+
+# v1.29.0 (2024-02-16)
+
+* **Feature**: Add new ClientOptions field to waiter config which allows you to extend the config for operation calls made by waiters.
+
+# v1.28.1 (2024-02-15)
+
+* **Bug Fix**: Correct failure to determine the error type in awsJson services that could occur when errors were modeled with a non-string `code` field.
+
+# v1.28.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-02)
+
+* **Documentation**: Any number of users can execute up to 50 concurrent restores (any type of restore) in a given account.
+
+# v1.27.0 (2024-01-19)
+
+* **Feature**: This release adds support for including ApproximateCreationDateTimePrecision configurations in EnableKinesisStreamingDestination API, adds the same as an optional field in the response of DescribeKinesisStreamingDestination, and adds support for a new UpdateKinesisStreamingDestination API.
+
+# v1.26.9 (2024-01-17)
+
+* **Documentation**: Updating note for enabling streams for UpdateTable.
+
+# v1.26.8 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.7 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.26.6 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.26.5 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.4 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.26.3 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2023-11-30.2)
+
+* **Bug Fix**: Respect caller region overrides in endpoint discovery.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.4 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.25.3 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2023-10-18)
+
+* **Feature**: Add handwritten paginators that were present in some services in the v1 SDK.
+* **Documentation**: Updating descriptions for several APIs.
+
+# v1.22.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.0 (2023-09-26)
+
+* **Feature**: Amazon DynamoDB now supports Incremental Export as an enhancement to the existing Export Table
+
+# v1.21.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.21.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.3 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.2 (2023-07-25)
+
+* **Documentation**: Documentation updates for DynamoDB
+
+# v1.20.1 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-06-29)
+
+* **Feature**: This release adds ReturnValuesOnConditionCheckFailure parameter to PutItem, UpdateItem, DeleteItem, ExecuteStatement, BatchExecuteStatement and ExecuteTransaction APIs. When set to ALL_OLD, API returns a copy of the item as it was when a conditional write failed
+
+# v1.19.11 (2023-06-21)
+
+* **Documentation**: Documentation updates for DynamoDB
+
+# v1.19.10 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.19.9 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.8 (2023-06-12)
+
+* **Documentation**: Documentation updates for DynamoDB
+
+# v1.19.7 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.19.6 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.5 (2023-04-17)
+
+* **Documentation**: Documentation updates for DynamoDB API
+
+# v1.19.4 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.19.3 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.2 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2023-03-08)
+
+* **Feature**: Adds deletion protection support to DynamoDB tables. Tables with deletion protection enabled cannot be deleted. Deletion protection is disabled by default, can be enabled via the CreateTable or UpdateTable APIs, and is visible in TableDescription. This setting is not replicated for Global Tables.
+
+# v1.18.6 (2023-03-03)
+
+* **Documentation**: Documentation updates for DynamoDB.
+
+# v1.18.5 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.18.4 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2023-02-15)
+
+* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+* **Bug Fix**: Correct error type parsing for restJson services.
+
+# v1.18.2 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2023-01-23)
+
+* No change notes available for this release.
+
+# v1.18.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.17.9 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.8 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.7 (2022-11-22)
+
+* No change notes available for this release.
+
+# v1.17.6 (2022-11-18)
+
+* **Documentation**: Updated minor fixes for DynamoDB documentation.
+
+# v1.17.5 (2022-11-16)
+
+* No change notes available for this release.
+
+# v1.17.4 (2022-11-10)
+
+* No change notes available for this release.
+
+# v1.17.3 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-09-15)
+
+* **Feature**: Increased DynamoDB transaction limit from 25 to 100.
+
+# v1.16.5 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.4 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.3 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.2 (2022-08-30)
+
+* No change notes available for this release.
+
+# v1.16.1 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2022-08-18)
+
+* **Feature**: This release adds support for importing data from S3 into a new DynamoDB table
+
+# v1.15.13 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.12 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.11 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.10 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.9 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.8 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.7 (2022-06-17)
+
+* **Documentation**: Doc only update for DynamoDB service
+
+# v1.15.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2022-02-24)
+
+* **Feature**: API client updated
+* **Feature**: Adds RetryMaxAttempts and RetryMod to API client Options. This allows the API clients' default Retryer to be configured from the shared configuration files or environment variables. Adding a new Retry mode of `Adaptive`. `Adaptive` retry mode is an experimental mode, adding client rate limiting when throttles reponses are received from an API. See [retry.AdaptiveMode](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/aws/retry#AdaptiveMode) for more details, and configuration options.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2021-12-21)
+
+* **Feature**: API Paginators now support specifying the initial starting token, and support stopping on empty string tokens.
+* **Feature**: Updated to latest service endpoints
+
+# v1.10.0 (2021-12-02)
+
+* **Feature**: API client updated
+* **Bug Fix**: Fixes a bug that prevented aws.EndpointResolverWithOptions from being used by the service client. ([#1514](https://github.com/aws/aws-sdk-go-v2/pull/1514))
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2021-11-30)
+
+* **Feature**: API client updated
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2021-11-12)
+
+* **Feature**: Service clients now support custom endpoints that have an initial URI path defined.
+* **Feature**: Waiters now have a `WaitForOutput` method, which can be used to retrieve the output of the successful wait operation. Thank you to [Andrew Haines](https://github.com/haines) for contributing this feature.
+* **Documentation**: Updated service to latest API model.
+
+# v1.7.0 (2021-11-06)
+
+* **Feature**: The SDK now supports configuration of FIPS and DualStack endpoints using environment variables, shared configuration, or programmatically.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2021-10-21)
+
+* **Feature**: API client updated
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2021-06-25)
+
+* **Feature**: Adds support for endpoint discovery.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.1 (2021-05-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go
new file mode 100644
index 000000000..487c8200a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_client.go
@@ -0,0 +1,1175 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ cryptorand "crypto/rand"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/aws/defaults"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/aws/retry"
+ "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+ awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+ ddbcust "github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations"
+ acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithydocument "github.com/aws/smithy-go/document"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ smithyrand "github.com/aws/smithy-go/rand"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync/atomic"
+ "time"
+)
+
+const ServiceID = "DynamoDB"
+const ServiceAPIVersion = "2012-08-10"
+
+type operationMetrics struct {
+ Duration metrics.Float64Histogram
+ SerializeDuration metrics.Float64Histogram
+ ResolveIdentityDuration metrics.Float64Histogram
+ ResolveEndpointDuration metrics.Float64Histogram
+ SignRequestDuration metrics.Float64Histogram
+ DeserializeDuration metrics.Float64Histogram
+}
+
+func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram {
+ switch name {
+ case "client.call.duration":
+ return m.Duration
+ case "client.call.serialization_duration":
+ return m.SerializeDuration
+ case "client.call.resolve_identity_duration":
+ return m.ResolveIdentityDuration
+ case "client.call.resolve_endpoint_duration":
+ return m.ResolveEndpointDuration
+ case "client.call.signing_duration":
+ return m.SignRequestDuration
+ case "client.call.deserialization_duration":
+ return m.DeserializeDuration
+ default:
+ panic("unrecognized operation metric")
+ }
+}
+
+func timeOperationMetric[T any](
+ ctx context.Context, metric string, fn func() (T, error),
+ opts ...metrics.RecordMetricOption,
+) (T, error) {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ start := time.Now()
+ v, err := fn()
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ return v, err
+}
+
+func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
+
+ var ended bool
+ start := time.Now()
+ return func() {
+ if ended {
+ return
+ }
+ ended = true
+
+ end := time.Now()
+
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ }
+}
+
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
+ }
+}
+
+type operationMetricsKey struct{}
+
+func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) {
+ meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/dynamodb")
+ om := &operationMetrics{}
+
+ var err error
+
+ om.Duration, err = operationMetricTimer(meter, "client.call.duration",
+ "Overall call duration (including retries and time to send or receive request and response body)")
+ if err != nil {
+ return nil, err
+ }
+ om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration",
+ "The time it takes to serialize a message body")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration",
+ "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration",
+ "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request")
+ if err != nil {
+ return nil, err
+ }
+ om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration",
+ "The time it takes to sign a request")
+ if err != nil {
+ return nil, err
+ }
+ om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration",
+ "The time it takes to deserialize a message body")
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(parent, operationMetricsKey{}, om), nil
+}
+
+func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) {
+ return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = desc
+ })
+}
+
+func getOperationMetrics(ctx context.Context) *operationMetrics {
+ return ctx.Value(operationMetricsKey{}).(*operationMetrics)
+}
+
+func operationTracer(p tracing.TracerProvider) tracing.Tracer {
+ return p.Tracer("github.com/aws/aws-sdk-go-v2/service/dynamodb")
+}
+
+// Client provides the API client to make operations call for Amazon DynamoDB.
+type Client struct {
+ options Options
+
+ // cache used to store discovered endpoints
+ endpointCache *internalEndpointDiscovery.EndpointCache
+
+ // Difference between the time reported by the server and the client
+ timeOffset *atomic.Int64
+}
+
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
+
+ resolveDefaultLogger(&options)
+
+ setResolvedDefaultsMode(&options)
+
+ resolveRetryer(&options)
+
+ resolveHTTPClient(&options)
+
+ resolveHTTPSignerV4(&options)
+
+ resolveIdempotencyTokenProvider(&options)
+
+ resolveEnableEndpointDiscovery(&options)
+
+ resolveEndpointResolverV2(&options)
+
+ resolveTracerProvider(&options)
+
+ resolveMeterProvider(&options)
+
+ resolveAuthSchemeResolver(&options)
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeRetryMaxAttempts(&options)
+
+ ignoreAnonymousAuth(&options)
+
+ wrapWithAnonymousAuth(&options)
+
+ resolveAuthSchemes(&options)
+
+ client := &Client{
+ options: options,
+ }
+
+ resolveEndpointCache(client)
+
+ initializeTimeOffsetResolver(client)
+
+ return client
+}
+
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+ return c.options.Copy()
+}
+
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
+ ctx = middleware.ClearStackValues(ctx)
+ ctx = middleware.WithServiceID(ctx, ServiceID)
+ ctx = middleware.WithOperationName(ctx, opID)
+
+ stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
+ options := c.options.Copy()
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ finalizeOperationRetryMaxAttempts(&options, *c)
+
+ finalizeClientEndpointResolverOptions(&options)
+
+ for _, fn := range stackFns {
+ if err := fn(stack, options); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ for _, fn := range options.APIOptions {
+ if err := fn(stack); err != nil {
+ return nil, metadata, err
+ }
+ }
+
+ ctx, err = withOperationMetrics(ctx, options.MeterProvider)
+ if err != nil {
+ return nil, metadata, err
+ }
+
+ tracer := operationTracer(options.TracerProvider)
+ spanName := fmt.Sprintf("%s.%s", ServiceID, opID)
+
+ ctx = tracing.WithOperationTracer(ctx, tracer)
+
+ ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) {
+ o.Kind = tracing.SpanKindClient
+ o.Properties.Set("rpc.system", "aws-api")
+ o.Properties.Set("rpc.method", opID)
+ o.Properties.Set("rpc.service", ServiceID)
+ })
+ endTimer := startMetricTimer(ctx, "client.call.duration")
+ defer endTimer()
+ defer span.End()
+
+ handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) {
+ o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/dynamodb")
+ })
+ decorated := middleware.DecorateHandler(handler, stack)
+ result, metadata, err = decorated.Handle(ctx, params)
+ if err != nil {
+ span.SetProperty("exception.type", fmt.Sprintf("%T", err))
+ span.SetProperty("exception.message", err.Error())
+
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ span.SetProperty("api.error_code", aerr.ErrorCode())
+ span.SetProperty("api.error_message", aerr.ErrorMessage())
+ span.SetProperty("api.error_fault", aerr.ErrorFault().String())
+ }
+
+ err = &smithy.OperationError{
+ ServiceID: ServiceID,
+ OperationName: opID,
+ Err: err,
+ }
+ }
+
+ span.SetProperty("error", err != nil)
+ if err == nil {
+ span.SetStatus(tracing.SpanStatusOK)
+ } else {
+ span.SetStatus(tracing.SpanStatusError)
+ }
+
+ return result, metadata, err
+}
+
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+ return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+ return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+ return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = setOperationInput(ctx, in.Parameters)
+ return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %v", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %v", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+ if options.AuthSchemeResolver == nil {
+ options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+ }
+}
+
+func resolveAuthSchemes(options *Options) {
+ if options.AuthSchemes == nil {
+ options.AuthSchemes = []smithyhttp.AuthScheme{
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ }
+ }
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+type legacyEndpointContextSetter struct {
+ LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+ return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.LegacyResolver != nil {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+ }
+
+ return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+ return stack.Initialize.Add(&legacyEndpointContextSetter{
+ LegacyResolver: o.EndpointResolver,
+ }, middleware.Before)
+}
+
+func resolveDefaultLogger(o *Options) {
+ if o.Logger != nil {
+ return
+ }
+ o.Logger = logging.Nop{}
+}
+
+func addSetLoggerMiddleware(stack *middleware.Stack, o Options) error {
+ return middleware.AddSetLoggerMiddleware(stack, o.Logger)
+}
+
+func setResolvedDefaultsMode(o *Options) {
+ if len(o.resolvedDefaultsMode) > 0 {
+ return
+ }
+
+ var mode aws.DefaultsMode
+ mode.SetFromString(string(o.DefaultsMode))
+
+ if mode == aws.DefaultsModeAuto {
+ mode = defaults.ResolveDefaultsModeAuto(o.Region, o.RuntimeEnvironment)
+ }
+
+ o.resolvedDefaultsMode = mode
+}
+
+// NewFromConfig returns a new client from the provided config.
+func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
+ opts := Options{
+ Region: cfg.Region,
+ DefaultsMode: cfg.DefaultsMode,
+ RuntimeEnvironment: cfg.RuntimeEnvironment,
+ HTTPClient: cfg.HTTPClient,
+ Credentials: cfg.Credentials,
+ APIOptions: cfg.APIOptions,
+ Logger: cfg.Logger,
+ ClientLogMode: cfg.ClientLogMode,
+ AppID: cfg.AppID,
+ AccountIDEndpointMode: cfg.AccountIDEndpointMode,
+ AuthSchemePreference: cfg.AuthSchemePreference,
+ }
+ resolveAWSRetryerProvider(cfg, &opts)
+ resolveAWSRetryMaxAttempts(cfg, &opts)
+ resolveAWSRetryMode(cfg, &opts)
+ resolveAWSEndpointResolver(cfg, &opts)
+ resolveInterceptors(cfg, &opts)
+ resolveEnableEndpointDiscoveryFromConfigSources(cfg, &opts)
+ resolveUseDualStackEndpoint(cfg, &opts)
+ resolveUseFIPSEndpoint(cfg, &opts)
+ resolveBaseEndpoint(cfg, &opts)
+ return New(opts, func(o *Options) {
+ for _, opt := range cfg.ServiceOptions {
+ opt(ServiceID, o)
+ }
+ for _, opt := range optFns {
+ opt(o)
+ }
+ })
+}
+
+func resolveHTTPClient(o *Options) {
+ var buildable *awshttp.BuildableClient
+
+ if o.HTTPClient != nil {
+ var ok bool
+ buildable, ok = o.HTTPClient.(*awshttp.BuildableClient)
+ if !ok {
+ return
+ }
+ } else {
+ buildable = awshttp.NewBuildableClient()
+ }
+
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ buildable = buildable.WithDialerOptions(func(dialer *net.Dialer) {
+ if dialerTimeout, ok := modeConfig.GetConnectTimeout(); ok {
+ dialer.Timeout = dialerTimeout
+ }
+ })
+
+ buildable = buildable.WithTransportOptions(func(transport *http.Transport) {
+ if tlsHandshakeTimeout, ok := modeConfig.GetTLSNegotiationTimeout(); ok {
+ transport.TLSHandshakeTimeout = tlsHandshakeTimeout
+ }
+ })
+ }
+
+ o.HTTPClient = buildable
+}
+
+func resolveRetryer(o *Options) {
+ if o.Retryer != nil {
+ return
+ }
+
+ if len(o.RetryMode) == 0 {
+ modeConfig, err := defaults.GetModeConfiguration(o.resolvedDefaultsMode)
+ if err == nil {
+ o.RetryMode = modeConfig.RetryMode
+ }
+ }
+ if len(o.RetryMode) == 0 {
+ o.RetryMode = aws.RetryModeStandard
+ }
+
+ var standardOptions []func(*retry.StandardOptions)
+ if v := o.RetryMaxAttempts; v != 0 {
+ standardOptions = append(standardOptions, func(so *retry.StandardOptions) {
+ so.MaxAttempts = v
+ })
+ }
+
+ switch o.RetryMode {
+ case aws.RetryModeAdaptive:
+ var adaptiveOptions []func(*retry.AdaptiveModeOptions)
+ if len(standardOptions) != 0 {
+ adaptiveOptions = append(adaptiveOptions, func(ao *retry.AdaptiveModeOptions) {
+ ao.StandardOptions = append(ao.StandardOptions, standardOptions...)
+ })
+ }
+ o.Retryer = retry.NewAdaptiveMode(adaptiveOptions...)
+
+ default:
+ o.Retryer = retry.NewStandard(standardOptions...)
+ }
+}
+
+func resolveAWSRetryerProvider(cfg aws.Config, o *Options) {
+ if cfg.Retryer == nil {
+ return
+ }
+ o.Retryer = cfg.Retryer()
+}
+
+func resolveAWSRetryMode(cfg aws.Config, o *Options) {
+ if len(cfg.RetryMode) == 0 {
+ return
+ }
+ o.RetryMode = cfg.RetryMode
+}
+func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
+ if cfg.RetryMaxAttempts == 0 {
+ return
+ }
+ o.RetryMaxAttempts = cfg.RetryMaxAttempts
+}
+
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
+ if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
+ if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
+ return
+ }
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
+}
+
+func resolveInterceptors(cfg aws.Config, o *Options) {
+ o.Interceptors = cfg.Interceptors.Copy()
+}
+
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "dynamodb", goModuleVersion)
+ if len(options.AppID) > 0 {
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+ }
+
+ return nil
+}
+
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
+}
+
+type HTTPSignerV4 interface {
+ SignHTTP(ctx context.Context, credentials aws.Credentials, r *http.Request, payloadHash string, service string, region string, signingTime time.Time, optFns ...func(*v4.SignerOptions)) error
+}
+
+func resolveHTTPSignerV4(o *Options) {
+ if o.HTTPSignerV4 != nil {
+ return
+ }
+ o.HTTPSignerV4 = newDefaultV4Signer(*o)
+}
+
+func newDefaultV4Signer(o Options) *v4.Signer {
+ return v4.NewSigner(func(so *v4.SignerOptions) {
+ so.Logger = o.Logger
+ so.LogSigning = o.ClientLogMode.IsSigning()
+ })
+}
+
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+
+func addSpanRetryLoop(stack *middleware.Stack, options Options) error {
+ return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before)
+}
+
+type spanRetryLoop struct {
+ options Options
+}
+
+func (*spanRetryLoop) ID() string {
+ return "spanRetryLoop"
+}
+
+func (m *spanRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ middleware.FinalizeOutput, middleware.Metadata, error,
+) {
+ tracer := operationTracer(m.options.TracerProvider)
+ ctx, span := tracer.StartSpan(ctx, "RetryLoop")
+ defer span.End()
+
+ return next.HandleFinalize(ctx, in)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+ return nil
+ })
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+ return nil
+ })
+}
+
+func resolveIdempotencyTokenProvider(o *Options) {
+ if o.IdempotencyTokenProvider != nil {
+ return
+ }
+ o.IdempotencyTokenProvider = smithyrand.NewUUIDIdempotencyToken(cryptorand.Reader)
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/dynamodb")
+ })
+ if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
+ return err
+ }
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
+ }
+ return nil
+}
+
+// resolves EnableEndpointDiscovery configuration
+func resolveEnableEndpointDiscoveryFromConfigSources(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveEnableEndpointDiscovery(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointDiscovery.EnableEndpointDiscovery = value
+ }
+ return nil
+}
+
+// resolves dual-stack endpoint configuration
+func resolveUseDualStackEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseDualStackEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseDualStackEndpoint = value
+ }
+ return nil
+}
+
+// resolves FIPS endpoint configuration
+func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
+ if len(cfg.ConfigSources) == 0 {
+ return nil
+ }
+ value, found, err := internalConfig.ResolveUseFIPSEndpoint(context.Background(), cfg.ConfigSources)
+ if err != nil {
+ return err
+ }
+ if found {
+ o.EndpointOptions.UseFIPSEndpoint = value
+ }
+ return nil
+}
+
+// resolves endpoint cache on client
+func resolveEndpointCache(c *Client) {
+ c.endpointCache = internalEndpointDiscovery.NewEndpointCache(10)
+}
+
+// EndpointDiscoveryOptions used to configure endpoint discovery
+type EndpointDiscoveryOptions struct {
+ // Enables endpoint discovery
+ EnableEndpointDiscovery aws.EndpointDiscoveryEnableState
+}
+
+func resolveEnableEndpointDiscovery(o *Options) {
+ if o.EndpointDiscovery.EnableEndpointDiscovery != aws.EndpointDiscoveryUnset {
+ return
+ }
+ o.EndpointDiscovery.EnableEndpointDiscovery = aws.EndpointDiscoveryAuto
+}
+
+func (c *Client) handleEndpointDiscoveryFromService(ctx context.Context, input *DescribeEndpointsInput, region, key string, opt internalEndpointDiscovery.DiscoverEndpointOptions) (internalEndpointDiscovery.Endpoint, error) {
+ output, err := c.DescribeEndpoints(ctx, input, func(o *Options) {
+ o.Region = region
+
+ o.EndpointOptions.DisableHTTPS = opt.DisableHTTPS
+ o.Logger = opt.Logger
+ })
+ if err != nil {
+ return internalEndpointDiscovery.Endpoint{}, err
+ }
+
+ endpoint := internalEndpointDiscovery.Endpoint{}
+ endpoint.Key = key
+
+ for _, e := range output.Endpoints {
+ if e.Address == nil {
+ continue
+ }
+ address := *e.Address
+
+ var scheme string
+ if idx := strings.Index(address, "://"); idx != -1 {
+ scheme = address[:idx]
+ }
+ if len(scheme) == 0 {
+ scheme = "https"
+ if opt.DisableHTTPS {
+ scheme = "http"
+ }
+ address = fmt.Sprintf("%s://%s", scheme, address)
+ }
+
+ cachedInMinutes := e.CachePeriodInMinutes
+ u, err := url.Parse(address)
+ if err != nil {
+ continue
+ }
+
+ addr := internalEndpointDiscovery.WeightedAddress{
+ URL: u,
+ Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute).Round(0),
+ }
+ endpoint.Add(addr)
+ }
+
+ c.endpointCache.Add(endpoint)
+ return endpoint, nil
+}
+
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+ if mode == aws.AccountIDEndpointModeDisabled {
+ return nil
+ }
+
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+ return aws.String(ca.Credentials.AccountID)
+ }
+
+ return nil
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+ mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+ if err := stack.Build.Add(&mw, middleware.After); err != nil {
+ return err
+ }
+ return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+ c.timeOffset = new(atomic.Int64)
+}
+
+func checkAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) error {
+ switch mode {
+ case aws.AccountIDEndpointModeUnset:
+ case aws.AccountIDEndpointModePreferred:
+ case aws.AccountIDEndpointModeDisabled:
+ case aws.AccountIDEndpointModeRequired:
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); !ok {
+ return fmt.Errorf("accountID is required but not set")
+ } else if ca.Credentials.AccountID == "" {
+ return fmt.Errorf("accountID is required but not set")
+ }
+ // default check in case invalid mode is configured through request config
+ default:
+ return fmt.Errorf("invalid accountID endpoint mode %s, must be preferred/required/disabled", mode)
+ }
+
+ return nil
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.Retryer.(type) {
+ case *retry.Standard:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+ case *retry.AdaptiveMode:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+ }
+ return nil
+}
+
+func addUserAgentAccountIDEndpointMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.AccountIDEndpointMode {
+ case aws.AccountIDEndpointModePreferred:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureAccountIDModePreferred)
+ case aws.AccountIDEndpointModeRequired:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureAccountIDModeRequired)
+ case aws.AccountIDEndpointModeDisabled:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureAccountIDModeDisabled)
+ }
+ return nil
+}
+
+type setCredentialSourceMiddleware struct {
+ ua *awsmiddleware.RequestUserAgent
+ options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+ if !ok {
+ return next.HandleBuild(ctx, in)
+ }
+ providerSources := asProviderSource.ProviderSources()
+ for _, source := range providerSources {
+ m.ua.AddCredentialsSource(source)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ mw := setCredentialSourceMiddleware{ua: ua, options: options}
+ return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
+func resolveTracerProvider(options *Options) {
+ if options.TracerProvider == nil {
+ options.TracerProvider = &tracing.NopTracerProvider{}
+ }
+}
+
+func resolveMeterProvider(options *Options) {
+ if options.MeterProvider == nil {
+ options.MeterProvider = metrics.NopMeterProvider{}
+ }
+}
+
+// IdempotencyTokenProvider interface for providing idempotency token
+type IdempotencyTokenProvider interface {
+ GetIdempotencyToken() (string, error)
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
+func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
+}
+
+func addResponseErrorMiddleware(stack *middleware.Stack) error {
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
+}
+
+func addValidateResponseChecksum(stack *middleware.Stack, options Options) error {
+ return ddbcust.AddValidateResponseChecksum(stack, ddbcust.AddValidateResponseChecksumOptions{Disable: options.DisableValidateResponseChecksum})
+}
+
+func addAcceptEncodingGzip(stack *middleware.Stack, options Options) error {
+ return acceptencodingcust.AddAcceptEncodingGzip(stack, acceptencodingcust.AddAcceptEncodingGzipOptions{Enable: options.EnableAcceptEncodingGzip})
+}
+
+func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
+ return stack.Deserialize.Add(&smithyhttp.RequestResponseLogger{
+ LogRequest: o.ClientLogMode.IsRequest(),
+ LogRequestWithBody: o.ClientLogMode.IsRequestWithBody(),
+ LogResponse: o.ClientLogMode.IsResponse(),
+ LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
+ }, middleware.After)
+}
+
+type disableHTTPSMiddleware struct {
+ DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+ return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+ req.URL.Scheme = "http"
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Finalize.Insert(&disableHTTPSMiddleware{
+ DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func addInterceptBeforeRetryLoop(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptBeforeRetryLoop{
+ Interceptors: opts.Interceptors.BeforeRetryLoop,
+ }, "Retry", middleware.Before)
+}
+
+func addInterceptAttempt(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptAttempt{
+ BeforeAttempt: opts.Interceptors.BeforeAttempt,
+ AfterAttempt: opts.Interceptors.AfterAttempt,
+ }, "Retry", middleware.After)
+}
+
+func addInterceptExecution(stack *middleware.Stack, opts Options) error {
+ return stack.Initialize.Add(&smithyhttp.InterceptExecution{
+ BeforeExecution: opts.Interceptors.BeforeExecution,
+ AfterExecution: opts.Interceptors.AfterExecution,
+ }, middleware.Before)
+}
+
+func addInterceptBeforeSerialization(stack *middleware.Stack, opts Options) error {
+ return stack.Serialize.Insert(&smithyhttp.InterceptBeforeSerialization{
+ Interceptors: opts.Interceptors.BeforeSerialization,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func addInterceptAfterSerialization(stack *middleware.Stack, opts Options) error {
+ return stack.Serialize.Insert(&smithyhttp.InterceptAfterSerialization{
+ Interceptors: opts.Interceptors.AfterSerialization,
+ }, "OperationSerializer", middleware.After)
+}
+
+func addInterceptBeforeSigning(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptBeforeSigning{
+ Interceptors: opts.Interceptors.BeforeSigning,
+ }, "Signing", middleware.Before)
+}
+
+func addInterceptAfterSigning(stack *middleware.Stack, opts Options) error {
+ return stack.Finalize.Insert(&smithyhttp.InterceptAfterSigning{
+ Interceptors: opts.Interceptors.AfterSigning,
+ }, "Signing", middleware.After)
+}
+
+func addInterceptTransmit(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Add(&smithyhttp.InterceptTransmit{
+ BeforeTransmit: opts.Interceptors.BeforeTransmit,
+ AfterTransmit: opts.Interceptors.AfterTransmit,
+ }, middleware.After)
+}
+
+func addInterceptBeforeDeserialization(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Insert(&smithyhttp.InterceptBeforeDeserialization{
+ Interceptors: opts.Interceptors.BeforeDeserialization,
+ }, "OperationDeserializer", middleware.After) // (deserialize stack is called in reverse)
+}
+
+func addInterceptAfterDeserialization(stack *middleware.Stack, opts Options) error {
+ return stack.Deserialize.Insert(&smithyhttp.InterceptAfterDeserialization{
+ Interceptors: opts.Interceptors.AfterDeserialization,
+ }, "OperationDeserializer", middleware.Before)
+}
+
+type spanInitializeStart struct {
+}
+
+func (*spanInitializeStart) ID() string {
+ return "spanInitializeStart"
+}
+
+func (m *spanInitializeStart) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "Initialize")
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanInitializeEnd struct {
+}
+
+func (*spanInitializeEnd) ID() string {
+ return "spanInitializeEnd"
+}
+
+func (m *spanInitializeEnd) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanBuildRequestStart struct {
+}
+
+func (*spanBuildRequestStart) ID() string {
+ return "spanBuildRequestStart"
+}
+
+func (m *spanBuildRequestStart) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ middleware.SerializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "BuildRequest")
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type spanBuildRequestEnd struct {
+}
+
+func (*spanBuildRequestEnd) ID() string {
+ return "spanBuildRequestEnd"
+}
+
+func (m *spanBuildRequestEnd) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ middleware.BuildOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleBuild(ctx, in)
+}
+
+func addSpanInitializeStart(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before)
+}
+
+func addSpanInitializeEnd(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After)
+}
+
+func addSpanBuildRequestStart(stack *middleware.Stack) error {
+ return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before)
+}
+
+func addSpanBuildRequestEnd(stack *middleware.Stack) error {
+ return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go
new file mode 100644
index 000000000..0b4d0edaf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchExecuteStatement.go
@@ -0,0 +1,235 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation allows you to perform batch reads or writes on data stored in
+// DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement must
+// specify an equality condition on all key attributes. This enforces that each
+// SELECT statement in a batch returns at most a single item. For more information,
+// see [Running batch operations with PartiQL for DynamoDB].
+//
+// The entire batch must consist of either read statements or write statements,
+// you cannot mix both in one batch.
+//
+// An HTTP 200 response does not mean that all statements in the
+// BatchExecuteStatement succeeded. Error details for individual statements can be
+// found under the [Error] field of the BatchStatementResponse for each statement.
+//
+// [Error]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error
+// [Running batch operations with PartiQL for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ql-reference.multiplestatements.batching.html
+func (c *Client) BatchExecuteStatement(ctx context.Context, params *BatchExecuteStatementInput, optFns ...func(*Options)) (*BatchExecuteStatementOutput, error) {
+ if params == nil {
+ params = &BatchExecuteStatementInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "BatchExecuteStatement", params, optFns, c.addOperationBatchExecuteStatementMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*BatchExecuteStatementOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type BatchExecuteStatementInput struct {
+
+ // The list of PartiQL statements representing the batch to run.
+ //
+ // This member is required.
+ Statements []types.BatchStatementRequest
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ noSmithyDocumentSerde
+}
+
+type BatchExecuteStatementOutput struct {
+
+ // The capacity units consumed by the entire operation. The values of the list are
+ // ordered according to the ordering of the statements.
+ ConsumedCapacity []types.ConsumedCapacity
+
+ // The response to each PartiQL statement in the batch. The values of the list are
+ // ordered according to the ordering of the request statements.
+ Responses []types.BatchStatementResponse
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationBatchExecuteStatementMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpBatchExecuteStatement{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpBatchExecuteStatement{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "BatchExecuteStatement"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpBatchExecuteStatementValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchExecuteStatement(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opBatchExecuteStatement(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "BatchExecuteStatement",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go
new file mode 100644
index 000000000..5e2394a85
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchGetItem.go
@@ -0,0 +1,428 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// The BatchGetItem operation returns the attributes of one or more items from one
+// or more tables. You identify requested items by primary key.
+//
+// A single operation can retrieve up to 16 MB of data, which can contain as many
+// as 100 items. BatchGetItem returns a partial result if the response size limit
+// is exceeded, the table's provisioned throughput is exceeded, more than 1MB per
+// partition is requested, or an internal processing failure occurs. If a partial
+// result is returned, the operation returns a value for UnprocessedKeys . You can
+// use this value to retry the operation starting with the next item to get.
+//
+// If you request more than 100 items, BatchGetItem returns a ValidationException
+// with the message "Too many items requested for the BatchGetItem call."
+//
+// For example, if you ask to retrieve 100 items, but each individual item is 300
+// KB in size, the system returns 52 items (so as not to exceed the 16 MB limit).
+// It also returns an appropriate UnprocessedKeys value so you can get the next
+// page of results. If desired, your application can include its own logic to
+// assemble the pages of results into one dataset.
+//
+// If none of the items can be processed due to insufficient provisioned
+// throughput on all of the tables in the request, then BatchGetItem returns a
+// ProvisionedThroughputExceededException . If at least one of the items is
+// successfully processed, then BatchGetItem completes successfully, while
+// returning the keys of the unread items in UnprocessedKeys .
+//
+// If DynamoDB returns any unprocessed items, you should retry the batch operation
+// on those items. However, we strongly recommend that you use an exponential
+// backoff algorithm. If you retry the batch operation immediately, the underlying
+// read or write requests can still fail due to throttling on the individual
+// tables. If you delay the batch operation using exponential backoff, the
+// individual requests in the batch are much more likely to succeed.
+//
+// For more information, see [Batch Operations and Error Handling] in the Amazon DynamoDB Developer Guide.
+//
+// By default, BatchGetItem performs eventually consistent reads on every table in
+// the request. If you want strongly consistent reads instead, you can set
+// ConsistentRead to true for any or all tables.
+//
+// In order to minimize response latency, BatchGetItem may retrieve items in
+// parallel.
+//
+// When designing your application, keep in mind that DynamoDB does not return
+// items in any particular order. To help parse the response by item, include the
+// primary key values for the items in your request in the ProjectionExpression
+// parameter.
+//
+// If a requested item does not exist, it is not returned in the result. Requests
+// for nonexistent items consume the minimum read capacity units according to the
+// type of read. For more information, see [Working with Tables]in the Amazon DynamoDB Developer Guide.
+//
+// BatchGetItem will result in a ValidationException if the same key is specified
+// multiple times.
+//
+// [Batch Operations and Error Handling]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations
+// [Working with Tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations
+func (c *Client) BatchGetItem(ctx context.Context, params *BatchGetItemInput, optFns ...func(*Options)) (*BatchGetItemOutput, error) {
+ if params == nil {
+ params = &BatchGetItemInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "BatchGetItem", params, optFns, c.addOperationBatchGetItemMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*BatchGetItemOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a BatchGetItem operation.
+type BatchGetItemInput struct {
+
+ // A map of one or more table names or table ARNs and, for each table, a map that
+ // describes one or more items to retrieve from that table. Each table name or ARN
+ // can be used only once per BatchGetItem request.
+ //
+ // Each element in the map of items to retrieve consists of the following:
+ //
+ // - ConsistentRead - If true , a strongly consistent read is used; if false (the
+ // default), an eventually consistent read is used.
+ //
+ // - ExpressionAttributeNames - One or more substitution tokens for attribute
+ // names in the ProjectionExpression parameter. The following are some use cases
+ // for using ExpressionAttributeNames :
+ //
+ // - To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // - To create a placeholder for repeating occurrences of an attribute name in
+ // an expression.
+ //
+ // - To prevent special characters in an attribute name from being
+ // misinterpreted in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // - Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be used
+ // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in
+ // the Amazon DynamoDB Developer Guide). To work around this, you could specify the
+ // following for ExpressionAttributeNames :
+ //
+ // - {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // - #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information about expression attribute names, see [Accessing Item Attributes]in the Amazon
+ // DynamoDB Developer Guide.
+ //
+ // - Keys - An array of primary key attribute values that define specific items
+ // in the table. For each primary key, you must provide all of the key attributes.
+ // For example, with a simple primary key, you only need to provide the partition
+ // key value. For a composite key, you must provide both the partition key value
+ // and the sort key value.
+ //
+ // - ProjectionExpression - A string that identifies one or more attributes to
+ // retrieve from the table. These attributes can include scalars, sets, or elements
+ // of a JSON document. The attributes in the expression must be separated by
+ // commas.
+ //
+ // If no attribute names are specified, then all attributes are returned. If any
+ // of the requested attributes are not found, they do not appear in the result.
+ //
+ // For more information, see [Accessing Item Attributes]in the Amazon DynamoDB Developer Guide.
+ //
+ // - AttributesToGet - This is a legacy parameter. Use ProjectionExpression
+ // instead. For more information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html
+ //
+ // This member is required.
+ RequestItems map[string]types.KeysAndAttributes
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ noSmithyDocumentSerde
+}
+
+func (in *BatchGetItemInput) bindEndpointParams(p *EndpointParameters) {
+ func() {
+ v1 := in.RequestItems
+ var v2 []string
+ for k := range v1 {
+ v2 = append(v2, k)
+ }
+ p.ResourceArnList = v2
+ }()
+
+}
+
+// Represents the output of a BatchGetItem operation.
+type BatchGetItemOutput struct {
+
+ // The read capacity units consumed by the entire BatchGetItem operation.
+ //
+ // Each element consists of:
+ //
+ // - TableName - The table that consumed the provisioned throughput.
+ //
+ // - CapacityUnits - The total number of capacity units consumed.
+ ConsumedCapacity []types.ConsumedCapacity
+
+ // A map of table name or table ARN to a list of items. Each object in Responses
+ // consists of a table name or ARN, along with a map of attribute data consisting
+ // of the data type and attribute value.
+ Responses map[string][]map[string]types.AttributeValue
+
+ // A map of tables and their respective keys that were not processed with the
+ // current response. The UnprocessedKeys value is in the same form as RequestItems
+ // , so the value can be provided directly to a subsequent BatchGetItem operation.
+ // For more information, see RequestItems in the Request Parameters section.
+ //
+ // Each element consists of:
+ //
+ // - Keys - An array of primary key attribute values that define specific items
+ // in the table.
+ //
+ // - ProjectionExpression - One or more attributes to be retrieved from the table
+ // or index. By default, all attributes are returned. If a requested attribute is
+ // not found, it does not appear in the result.
+ //
+ // - ConsistentRead - The consistency of a read operation. If set to true , then
+ // a strongly consistent read is used; otherwise, an eventually consistent read is
+ // used.
+ //
+ // If there are no unprocessed keys remaining, the response contains an empty
+ // UnprocessedKeys map.
+ UnprocessedKeys map[string]types.KeysAndAttributes
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationBatchGetItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpBatchGetItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpBatchGetItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "BatchGetItem"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpBatchGetItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpBatchGetItemValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchGetItem(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpBatchGetItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpBatchGetItemDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpBatchGetItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*BatchGetItemInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opBatchGetItem(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "BatchGetItem",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go
new file mode 100644
index 000000000..2056cdd92
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_BatchWriteItem.go
@@ -0,0 +1,448 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// The BatchWriteItem operation puts or deletes multiple items in one or more
+// tables. A single call to BatchWriteItem can transmit up to 16MB of data over
+// the network, consisting of up to 25 item put or delete operations. While
+// individual items can be up to 400 KB once stored, it's important to note that an
+// item's representation might be greater than 400KB while being sent in DynamoDB's
+// JSON format for the API call. For more details on this distinction, see [Naming Rules and Data Types].
+//
+// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation
+// on an existing item, that item's values will be overwritten by the operation and
+// it will appear like it was updated. To update items, we recommend you use the
+// UpdateItem action.
+//
+// The individual PutItem and DeleteItem operations specified in BatchWriteItem
+// are atomic; however BatchWriteItem as a whole is not. If any requested
+// operations fail because the table's provisioned throughput is exceeded or an
+// internal processing failure occurs, the failed operations are returned in the
+// UnprocessedItems response parameter. You can investigate and optionally resend
+// the requests. Typically, you would call BatchWriteItem in a loop. Each
+// iteration would check for unprocessed items and submit a new BatchWriteItem
+// request with those unprocessed items until all items have been processed.
+//
+// For tables and indexes with provisioned capacity, if none of the items can be
+// processed due to insufficient provisioned throughput on all of the tables in the
+// request, then BatchWriteItem returns a ProvisionedThroughputExceededException .
+// For all tables and indexes, if none of the items can be processed due to other
+// throttling scenarios (such as exceeding partition level limits), then
+// BatchWriteItem returns a ThrottlingException .
+//
+// If DynamoDB returns any unprocessed items, you should retry the batch operation
+// on those items. However, we strongly recommend that you use an exponential
+// backoff algorithm. If you retry the batch operation immediately, the underlying
+// read or write requests can still fail due to throttling on the individual
+// tables. If you delay the batch operation using exponential backoff, the
+// individual requests in the batch are much more likely to succeed.
+//
+// For more information, see [Batch Operations and Error Handling] in the Amazon DynamoDB Developer Guide.
+//
+// With BatchWriteItem , you can efficiently write or delete large amounts of data,
+// such as from Amazon EMR, or copy data from another database into DynamoDB. In
+// order to improve performance with these large-scale operations, BatchWriteItem
+// does not behave in the same way as individual PutItem and DeleteItem calls
+// would. For example, you cannot specify conditions on individual put and delete
+// requests, and BatchWriteItem does not return deleted items in the response.
+//
+// If you use a programming language that supports concurrency, you can use
+// threads to write items in parallel. Your application must include the necessary
+// logic to manage the threads. With languages that don't support threading, you
+// must update or delete the specified items one at a time. In both situations,
+// BatchWriteItem performs the specified put and delete operations in parallel,
+// giving you the power of the thread pool approach without having to introduce
+// complexity into your application.
+//
+// Parallel processing reduces latency, but each specified put and delete request
+// consumes the same number of write capacity units whether it is processed in
+// parallel or not. Delete operations on nonexistent items consume one write
+// capacity unit.
+//
+// If one or more of the following is true, DynamoDB rejects the entire batch
+// write operation:
+//
+// - One or more tables specified in the BatchWriteItem request does not exist.
+//
+// - Primary key attributes specified on an item in the request do not match
+// those in the corresponding table's primary key schema.
+//
+// - You try to perform multiple operations on the same item in the same
+// BatchWriteItem request. For example, you cannot put and delete the same item
+// in the same BatchWriteItem request.
+//
+// - Your request contains at least two items with identical hash and range keys
+// (which essentially is two put operations).
+//
+// - There are more than 25 requests in the batch.
+//
+// - Any individual item in a batch exceeds 400 KB.
+//
+// - The total request size exceeds 16 MB.
+//
+// - Any individual items with keys exceeding the key length limits. For a
+// partition key, the limit is 2048 bytes and for a sort key, the limit is 1024
+// bytes.
+//
+// [Batch Operations and Error Handling]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations
+// [Naming Rules and Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html
+func (c *Client) BatchWriteItem(ctx context.Context, params *BatchWriteItemInput, optFns ...func(*Options)) (*BatchWriteItemOutput, error) {
+ if params == nil {
+ params = &BatchWriteItemInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "BatchWriteItem", params, optFns, c.addOperationBatchWriteItemMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*BatchWriteItemOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a BatchWriteItem operation.
+type BatchWriteItemInput struct {
+
+ // A map of one or more table names or table ARNs and, for each table, a list of
+ // operations to be performed ( DeleteRequest or PutRequest ). Each element in the
+ // map consists of the following:
+ //
+ // - DeleteRequest - Perform a DeleteItem operation on the specified item. The
+ // item to be deleted is identified by a Key subelement:
+ //
+ // - Key - A map of primary key attribute values that uniquely identify the item.
+ // Each entry in this map consists of an attribute name and an attribute value. For
+ // each primary key, you must provide all of the key attributes. For example, with
+ // a simple primary key, you only need to provide a value for the partition key.
+ // For a composite primary key, you must provide values for both the partition key
+ // and the sort key.
+ //
+ // - PutRequest - Perform a PutItem operation on the specified item. The item to
+ // be put is identified by an Item subelement:
+ //
+ // - Item - A map of attributes and their values. Each entry in this map consists
+ // of an attribute name and an attribute value. Attribute values must not be null;
+ // string and binary type attributes must have lengths greater than zero; and set
+ // type attributes must not be empty. Requests that contain empty values are
+ // rejected with a ValidationException exception.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // This member is required.
+ RequestItems map[string][]types.WriteRequest
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ // Determines whether item collection metrics are returned. If set to SIZE , the
+ // response includes statistics about item collections, if any, that were modified
+ // during the operation are returned in the response. If set to NONE (the
+ // default), no statistics are returned.
+ ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics
+
+ noSmithyDocumentSerde
+}
+
+func (in *BatchWriteItemInput) bindEndpointParams(p *EndpointParameters) {
+ func() {
+ v1 := in.RequestItems
+ var v2 []string
+ for k := range v1 {
+ v2 = append(v2, k)
+ }
+ p.ResourceArnList = v2
+ }()
+
+}
+
+// Represents the output of a BatchWriteItem operation.
+type BatchWriteItemOutput struct {
+
+ // The capacity units consumed by the entire BatchWriteItem operation.
+ //
+ // Each element consists of:
+ //
+ // - TableName - The table that consumed the provisioned throughput.
+ //
+ // - CapacityUnits - The total number of capacity units consumed.
+ ConsumedCapacity []types.ConsumedCapacity
+
+ // A list of tables that were processed by BatchWriteItem and, for each table,
+ // information about any item collections that were affected by individual
+ // DeleteItem or PutItem operations.
+ //
+ // Each entry consists of the following subelements:
+ //
+ // - ItemCollectionKey - The partition key value of the item collection. This is
+ // the same as the partition key value of the item.
+ //
+ // - SizeEstimateRangeGB - An estimate of item collection size, expressed in GB.
+ // This is a two-element array containing a lower bound and an upper bound for the
+ // estimate. The estimate includes the size of all the items in the table, plus the
+ // size of all attributes projected into all of the local secondary indexes on the
+ // table. Use this estimate to measure whether a local secondary index is
+ // approaching its size limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ ItemCollectionMetrics map[string][]types.ItemCollectionMetrics
+
+ // A map of tables and requests against those tables that were not processed. The
+ // UnprocessedItems value is in the same form as RequestItems , so you can provide
+ // this value directly to a subsequent BatchWriteItem operation. For more
+ // information, see RequestItems in the Request Parameters section.
+ //
+ // Each UnprocessedItems entry consists of a table name or table ARN and, for that
+ // table, a list of operations to perform ( DeleteRequest or PutRequest ).
+ //
+ // - DeleteRequest - Perform a DeleteItem operation on the specified item. The
+ // item to be deleted is identified by a Key subelement:
+ //
+ // - Key - A map of primary key attribute values that uniquely identify the item.
+ // Each entry in this map consists of an attribute name and an attribute value.
+ //
+ // - PutRequest - Perform a PutItem operation on the specified item. The item to
+ // be put is identified by an Item subelement:
+ //
+ // - Item - A map of attributes and their values. Each entry in this map consists
+ // of an attribute name and an attribute value. Attribute values must not be null;
+ // string and binary type attributes must have lengths greater than zero; and set
+ // type attributes must not be empty. Requests that contain empty values will be
+ // rejected with a ValidationException exception.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // If there are no unprocessed items remaining, the response contains an empty
+ // UnprocessedItems map.
+ UnprocessedItems map[string][]types.WriteRequest
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationBatchWriteItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpBatchWriteItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpBatchWriteItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "BatchWriteItem"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpBatchWriteItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpBatchWriteItemValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opBatchWriteItem(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpBatchWriteItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpBatchWriteItemDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpBatchWriteItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*BatchWriteItemInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opBatchWriteItem(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "BatchWriteItem",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go
new file mode 100644
index 000000000..db7e07761
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateBackup.go
@@ -0,0 +1,287 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a backup for an existing table.
+//
+// Each time you create an on-demand backup, the entire table data is backed up.
+// There is no limit to the number of on-demand backups that can be taken.
+//
+// When you create an on-demand backup, a time marker of the request is cataloged,
+// and the backup is created asynchronously, by applying all changes until the time
+// of the request to the last full table snapshot. Backup requests are processed
+// instantaneously and become available for restore within minutes.
+//
+// You can call CreateBackup at a maximum rate of 50 times per second.
+//
+// All backups in DynamoDB work without consuming any provisioned throughput on
+// the table.
+//
+// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is
+// guaranteed to contain all data committed to the table up to 14:24:00, and data
+// committed after 14:26:00 will not be. The backup might contain data
+// modifications made between 14:24:00 and 14:26:00. On-demand backup does not
+// support causal consistency.
+//
+// Along with data, the following are also included on the backups:
+//
+// - Global secondary indexes (GSIs)
+//
+// - Local secondary indexes (LSIs)
+//
+// - Streams
+//
+// - Provisioned read and write capacity
+func (c *Client) CreateBackup(ctx context.Context, params *CreateBackupInput, optFns ...func(*Options)) (*CreateBackupOutput, error) {
+ if params == nil {
+ params = &CreateBackupInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateBackup", params, optFns, c.addOperationCreateBackupMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateBackupOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateBackupInput struct {
+
+ // Specified name for the backup.
+ //
+ // This member is required.
+ BackupName *string
+
+ // The name of the table. You can also provide the Amazon Resource Name (ARN) of
+ // the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *CreateBackupInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type CreateBackupOutput struct {
+
+ // Contains the details of the backup created for the table.
+ BackupDetails *types.BackupDetails
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateBackupMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateBackup{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateBackup{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateBackup"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpCreateBackupDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateBackupValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateBackup(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpCreateBackupDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpCreateBackupDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpCreateBackupDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*CreateBackupInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opCreateBackup(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateBackup",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go
new file mode 100644
index 000000000..163348c82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateGlobalTable.go
@@ -0,0 +1,309 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a global table from an existing table. A global table creates a
+// replication relationship between two or more DynamoDB tables with the same table
+// name in the provided Regions.
+//
+// This documentation is for version 2017.11.29 (Legacy) of global tables, which
+// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible,
+// because it provides greater flexibility, higher efficiency, and consumes less
+// write capacity than 2017.11.29 (Legacy).
+//
+// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables
+// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables].
+//
+// If you want to add a new replica table to a global table, each of the following
+// conditions must be true:
+//
+// - The table must have the same primary key as all of the other replicas.
+//
+// - The table must have the same name as all of the other replicas.
+//
+// - The table must have DynamoDB Streams enabled, with the stream containing
+// both the new and the old images of the item.
+//
+// - None of the replica tables in the global table can contain any data.
+//
+// If global secondary indexes are specified, then the following conditions must
+// also be met:
+//
+// - The global secondary indexes must have the same name.
+//
+// - The global secondary indexes must have the same hash key and sort key (if
+// present).
+//
+// If local secondary indexes are specified, then the following conditions must
+// also be met:
+//
+// - The local secondary indexes must have the same name.
+//
+// - The local secondary indexes must have the same hash key and sort key (if
+// present).
+//
+// Write capacity settings should be set consistently across your replica tables
+// and secondary indexes. DynamoDB strongly recommends enabling auto scaling to
+// manage the write capacity settings for all of your global tables replicas and
+// indexes.
+//
+// If you prefer to manage write capacity settings manually, you should provision
+// equal replicated write capacity units to your replica tables. You should also
+// provision equal replicated write capacity units to matching secondary indexes
+// across your global table.
+//
+// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html
+// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html
+func (c *Client) CreateGlobalTable(ctx context.Context, params *CreateGlobalTableInput, optFns ...func(*Options)) (*CreateGlobalTableOutput, error) {
+ if params == nil {
+ params = &CreateGlobalTableInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateGlobalTable", params, optFns, c.addOperationCreateGlobalTableMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateGlobalTableOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateGlobalTableInput struct {
+
+ // The global table name.
+ //
+ // This member is required.
+ GlobalTableName *string
+
+ // The Regions where the global table needs to be created.
+ //
+ // This member is required.
+ ReplicationGroup []types.Replica
+
+ noSmithyDocumentSerde
+}
+
+func (in *CreateGlobalTableInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.GlobalTableName
+
+}
+
+type CreateGlobalTableOutput struct {
+
+ // Contains the details of the global table.
+ GlobalTableDescription *types.GlobalTableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateGlobalTableMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateGlobalTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateGlobalTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateGlobalTable"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpCreateGlobalTableDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateGlobalTableValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateGlobalTable(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpCreateGlobalTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpCreateGlobalTableDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpCreateGlobalTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*CreateGlobalTableInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opCreateGlobalTable(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateGlobalTable",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go
new file mode 100644
index 000000000..7d4510adb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_CreateTable.go
@@ -0,0 +1,475 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// The CreateTable operation adds a new table to your account. In an Amazon Web
+// Services account, table names must be unique within each Region. That is, you
+// can have two tables with same name if you create the tables in different
+// Regions.
+//
+// CreateTable is an asynchronous operation. Upon receiving a CreateTable request,
+// DynamoDB immediately returns a response with a TableStatus of CREATING . After
+// the table is created, DynamoDB sets the TableStatus to ACTIVE . You can perform
+// read and write operations only on an ACTIVE table.
+//
+// You can optionally define secondary indexes on the new table, as part of the
+// CreateTable operation. If you want to create multiple tables with secondary
+// indexes on them, you must create the tables sequentially. Only one table with
+// secondary indexes can be in the CREATING state at any given time.
+//
+// You can use the DescribeTable action to check the table status.
+func (c *Client) CreateTable(ctx context.Context, params *CreateTableInput, optFns ...func(*Options)) (*CreateTableOutput, error) {
+ if params == nil {
+ params = &CreateTableInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateTable", params, optFns, c.addOperationCreateTableMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateTableOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a CreateTable operation.
+type CreateTableInput struct {
+
+ // An array of attributes that describe the key schema for the table and indexes.
+ //
+ // This member is required.
+ AttributeDefinitions []types.AttributeDefinition
+
+ // Specifies the attributes that make up the primary key for a table or an index.
+ // The attributes in KeySchema must also be defined in the AttributeDefinitions
+ // array. For more information, see [Data Model]in the Amazon DynamoDB Developer Guide.
+ //
+ // Each KeySchemaElement in the array is composed of:
+ //
+ // - AttributeName - The name of this key attribute.
+ //
+ // - KeyType - The role that the key attribute will assume:
+ //
+ // - HASH - partition key
+ //
+ // - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from the DynamoDB usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // For a simple primary key (partition key), you must provide exactly one element
+ // with a KeyType of HASH .
+ //
+ // For a composite primary key (partition key and sort key), you must provide
+ // exactly two elements, in this order: The first element must have a KeyType of
+ // HASH , and the second element must have a KeyType of RANGE .
+ //
+ // For more information, see [Working with Tables] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Data Model]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html
+ // [Working with Tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key
+ //
+ // This member is required.
+ KeySchema []types.KeySchemaElement
+
+ // The name of the table to create. You can also provide the Amazon Resource Name
+ // (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. This setting can be changed later.
+ //
+ // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for most DynamoDB
+ // workloads. PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode].
+ //
+ // - PROVISIONED - We recommend using PROVISIONED for steady workloads with
+ // predictable growth where capacity requirements can be reliably forecasted.
+ // PROVISIONED sets the billing mode to [Provisioned capacity mode].
+ //
+ // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+ // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html
+ BillingMode types.BillingMode
+
+ // Indicates whether deletion protection is to be enabled (true) or disabled
+ // (false) on the table.
+ DeletionProtectionEnabled *bool
+
+ // One or more global secondary indexes (the maximum is 20) to be created on the
+ // table. Each global secondary index in the array includes the following:
+ //
+ // - IndexName - The name of the global secondary index. Must be unique only for
+ // this table.
+ //
+ // - KeySchema - Specifies the key schema for the global secondary index.
+ //
+ // - Projection - Specifies attributes that are copied (projected) from the table
+ // into the index. These are in addition to the primary key attributes and index
+ // key attributes, which are automatically projected. Each attribute specification
+ // is composed of:
+ //
+ // - ProjectionType - One of the following:
+ //
+ // - KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // - INCLUDE - Only the specified table attributes are projected into the index.
+ // The list of projected attributes is in NonKeyAttributes .
+ //
+ // - ALL - All of the table attributes are projected into the index.
+ //
+ // - NonKeyAttributes - A list of one or more non-key attribute names that are
+ // projected into the secondary index. The total count of attributes provided in
+ // NonKeyAttributes , summed across all of the secondary indexes, must not exceed
+ // 100. If you project the same attribute into two different indexes, this counts
+ // as two distinct attributes when determining the total. This limit only applies
+ // when you specify the ProjectionType of INCLUDE . You still can specify the
+ // ProjectionType of ALL to project all attributes from the source table, even if
+ // the table has more than 100 attributes.
+ //
+ // - ProvisionedThroughput - The provisioned throughput settings for the global
+ // secondary index, consisting of read and write capacity units.
+ GlobalSecondaryIndexes []types.GlobalSecondaryIndex
+
+ // One or more local secondary indexes (the maximum is 5) to be created on the
+ // table. Each index is scoped to a given partition key value. There is a 10 GB
+ // size limit per partition key value; otherwise, the size of a local secondary
+ // index is unconstrained.
+ //
+ // Each local secondary index in the array includes the following:
+ //
+ // - IndexName - The name of the local secondary index. Must be unique only for
+ // this table.
+ //
+ // - KeySchema - Specifies the key schema for the local secondary index. The key
+ // schema must begin with the same partition key as the table.
+ //
+ // - Projection - Specifies attributes that are copied (projected) from the table
+ // into the index. These are in addition to the primary key attributes and index
+ // key attributes, which are automatically projected. Each attribute specification
+ // is composed of:
+ //
+ // - ProjectionType - One of the following:
+ //
+ // - KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // - INCLUDE - Only the specified table attributes are projected into the index.
+ // The list of projected attributes is in NonKeyAttributes .
+ //
+ // - ALL - All of the table attributes are projected into the index.
+ //
+ // - NonKeyAttributes - A list of one or more non-key attribute names that are
+ // projected into the secondary index. The total count of attributes provided in
+ // NonKeyAttributes , summed across all of the secondary indexes, must not exceed
+ // 100. If you project the same attribute into two different indexes, this counts
+ // as two distinct attributes when determining the total. This limit only applies
+ // when you specify the ProjectionType of INCLUDE . You still can specify the
+ // ProjectionType of ALL to project all attributes from the source table, even if
+ // the table has more than 100 attributes.
+ LocalSecondaryIndexes []types.LocalSecondaryIndex
+
+ // Sets the maximum number of read and write units for the specified table in
+ // on-demand capacity mode. If you use this parameter, you must specify
+ // MaxReadRequestUnits , MaxWriteRequestUnits , or both.
+ OnDemandThroughput *types.OnDemandThroughput
+
+ // Represents the provisioned throughput settings for a specified table or index.
+ // The settings can be modified using the UpdateTable operation.
+ //
+ // If you set BillingMode as PROVISIONED , you must specify this property. If you
+ // set BillingMode as PAY_PER_REQUEST , you cannot specify this property.
+ //
+ // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+ ProvisionedThroughput *types.ProvisionedThroughput
+
+ // An Amazon Web Services resource-based policy document in JSON format that will
+ // be attached to the table.
+ //
+ // When you attach a resource-based policy while creating a table, the policy
+ // application is strongly consistent.
+ //
+ // The maximum size supported for a resource-based policy document is 20 KB.
+ // DynamoDB counts whitespaces when calculating the size of a policy against this
+ // limit. For a full list of all considerations that apply for resource-based
+ // policies, see [Resource-based policy considerations].
+ //
+ // You need to specify the CreateTable and PutResourcePolicy IAM actions for
+ // authorizing a user to create a table with a resource-based policy.
+ //
+ // [Resource-based policy considerations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html
+ ResourcePolicy *string
+
+ // Represents the settings used to enable server-side encryption.
+ SSESpecification *types.SSESpecification
+
+ // The settings for DynamoDB Streams on the table. These settings consist of:
+ //
+ // - StreamEnabled - Indicates whether DynamoDB Streams is to be enabled (true)
+ // or disabled (false).
+ //
+ // - StreamViewType - When an item in the table is modified, StreamViewType
+ // determines what information is written to the table's stream. Valid values for
+ // StreamViewType are:
+ //
+ // - KEYS_ONLY - Only the key attributes of the modified item are written to the
+ // stream.
+ //
+ // - NEW_IMAGE - The entire item, as it appears after it was modified, is written
+ // to the stream.
+ //
+ // - OLD_IMAGE - The entire item, as it appeared before it was modified, is
+ // written to the stream.
+ //
+ // - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are
+ // written to the stream.
+ StreamSpecification *types.StreamSpecification
+
+ // The table class of the new table. Valid values are STANDARD and
+ // STANDARD_INFREQUENT_ACCESS .
+ TableClass types.TableClass
+
+ // A list of key-value pairs to label the table. For more information, see [Tagging for DynamoDB].
+ //
+ // [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html
+ Tags []types.Tag
+
+ // Represents the warm throughput (in read units per second and write units per
+ // second) for creating a table.
+ WarmThroughput *types.WarmThroughput
+
+ noSmithyDocumentSerde
+}
+
+func (in *CreateTableInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a CreateTable operation.
+type CreateTableOutput struct {
+
+ // Represents the properties of the table.
+ TableDescription *types.TableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateTableMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTable"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpCreateTableDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateTableValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTable(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpCreateTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpCreateTableDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpCreateTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*CreateTableInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opCreateTable(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateTable",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go
new file mode 100644
index 000000000..799be66b6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteBackup.go
@@ -0,0 +1,254 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes an existing backup of a table.
+//
+// You can call DeleteBackup at a maximum rate of 10 times per second.
+func (c *Client) DeleteBackup(ctx context.Context, params *DeleteBackupInput, optFns ...func(*Options)) (*DeleteBackupOutput, error) {
+ if params == nil {
+ params = &DeleteBackupInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteBackup", params, optFns, c.addOperationDeleteBackupMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteBackupOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteBackupInput struct {
+
+ // The ARN associated with the backup.
+ //
+ // This member is required.
+ BackupArn *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteBackupInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.BackupArn
+
+}
+
+type DeleteBackupOutput struct {
+
+ // Contains the description of the backup created for the table.
+ BackupDescription *types.BackupDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteBackupMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteBackup{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteBackup{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteBackup"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteBackupDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteBackupValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteBackup(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDeleteBackupDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDeleteBackupDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDeleteBackupDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DeleteBackupInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDeleteBackup(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteBackup",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go
new file mode 100644
index 000000000..386e2ecf1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteItem.go
@@ -0,0 +1,450 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes a single item in a table by primary key. You can perform a conditional
+// delete operation that deletes the item if it exists, or if it has an expected
+// attribute value.
+//
+// In addition to deleting an item, you can also return the item's attribute
+// values in the same operation, using the ReturnValues parameter.
+//
+// Unless you specify conditions, the DeleteItem is an idempotent operation;
+// running it multiple times on the same item or attribute does not result in an
+// error response.
+//
+// Conditional deletes are useful for deleting items only if specific conditions
+// are met. If those conditions are met, DynamoDB performs the delete. Otherwise,
+// the item is not deleted.
+func (c *Client) DeleteItem(ctx context.Context, params *DeleteItemInput, optFns ...func(*Options)) (*DeleteItemOutput, error) {
+ if params == nil {
+ params = &DeleteItemInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteItem", params, optFns, c.addOperationDeleteItemMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteItemOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a DeleteItem operation.
+type DeleteItemInput struct {
+
+ // A map of attribute names to AttributeValue objects, representing the primary
+ // key of the item to delete.
+ //
+ // For the primary key, you must provide all of the key attributes. For example,
+ // with a simple primary key, you only need to provide a value for the partition
+ // key. For a composite primary key, you must provide values for both the partition
+ // key and the sort key.
+ //
+ // This member is required.
+ Key map[string]types.AttributeValue
+
+ // The name of the table from which to delete the item. You can also provide the
+ // Amazon Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // A condition that must be satisfied in order for a conditional DeleteItem to
+ // succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // - Functions: attribute_exists | attribute_not_exists | attribute_type |
+ // contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // - Logical operators: AND | OR | NOT
+ //
+ // For more information about condition expressions, see [Condition Expressions] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+ ConditionExpression *string
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more
+ // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide.
+ //
+ // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html
+ ConditionalOperator types.ConditionalOperator
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more
+ // information, see [Expected]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html
+ Expected map[string]types.ExpectedAttributeValue
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames :
+ //
+ // - To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // - To create a placeholder for repeating occurrences of an attribute name in
+ // an expression.
+ //
+ // - To prevent special characters in an attribute name from being
+ // misinterpreted in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // - Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be used
+ // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the
+ // Amazon DynamoDB Developer Guide). To work around this, you could specify the
+ // following for ExpressionAttributeNames :
+ //
+ // - {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // - #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute value.
+ // For example, suppose that you wanted to check whether the value of the
+ // ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ // ":disc":{"S":"Discontinued"} }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see [Condition Expressions] in the Amazon
+ // DynamoDB Developer Guide.
+ //
+ // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+ ExpressionAttributeValues map[string]types.AttributeValue
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ // Determines whether item collection metrics are returned. If set to SIZE , the
+ // response includes statistics about item collections, if any, that were modified
+ // during the operation are returned in the response. If set to NONE (the
+ // default), no statistics are returned.
+ ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics
+
+ // Use ReturnValues if you want to get the item attributes as they appeared before
+ // they were deleted. For DeleteItem , the valid values are:
+ //
+ // - NONE - If ReturnValues is not specified, or if its value is NONE , then
+ // nothing is returned. (This setting is the default for ReturnValues .)
+ //
+ // - ALL_OLD - The content of the old item is returned.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // DeleteItem does not recognize any values other than NONE or ALL_OLD .
+ ReturnValues types.ReturnValue
+
+ // An optional parameter that returns the item attributes for a DeleteItem
+ // operation that failed a condition check.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteItemInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a DeleteItem operation.
+type DeleteItemOutput struct {
+
+ // A map of attribute names to AttributeValue objects, representing the item as it
+ // appeared before the DeleteItem operation. This map appears in the response only
+ // if ReturnValues was specified as ALL_OLD in the request.
+ Attributes map[string]types.AttributeValue
+
+ // The capacity units consumed by the DeleteItem operation. The data returned
+ // includes the total provisioned throughput consumed, along with statistics for
+ // the table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see [Provisioned capacity mode]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+ ConsumedCapacity *types.ConsumedCapacity
+
+ // Information about item collections, if any, that were affected by the DeleteItem
+ // operation. ItemCollectionMetrics is only returned if the
+ // ReturnItemCollectionMetrics parameter was specified. If the table does not have
+ // any local secondary indexes, this information is not returned in the response.
+ //
+ // Each ItemCollectionMetrics element consists of:
+ //
+ // - ItemCollectionKey - The partition key value of the item collection. This is
+ // the same as the partition key value of the item itself.
+ //
+ // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
+ // This value is a two-element array containing a lower bound and an upper bound
+ // for the estimate. The estimate includes the size of all the items in the table,
+ // plus the size of all attributes projected into all of the local secondary
+ // indexes on that table. Use this estimate to measure whether a local secondary
+ // index is approaching its size limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ ItemCollectionMetrics *types.ItemCollectionMetrics
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteItem"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteItemValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteItem(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDeleteItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDeleteItemDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDeleteItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DeleteItemInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDeleteItem(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteItem",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go
new file mode 100644
index 000000000..768fb574d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteResourcePolicy.go
@@ -0,0 +1,281 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Deletes the resource-based policy attached to the resource, which can be a
+// table or stream.
+//
+// DeleteResourcePolicy is an idempotent operation; running it multiple times on
+// the same resource doesn't result in an error response, unless you specify an
+// ExpectedRevisionId , which will then return a PolicyNotFoundException .
+//
+// To make sure that you don't inadvertently lock yourself out of your own
+// resources, the root principal in your Amazon Web Services account can perform
+// DeleteResourcePolicy requests, even if your resource-based policy explicitly
+// denies the root principal's access.
+//
+// DeleteResourcePolicy is an asynchronous operation. If you issue a
+// GetResourcePolicy request immediately after running the DeleteResourcePolicy
+// request, DynamoDB might still return the deleted policy. This is because the
+// policy for your resource might not have been deleted yet. Wait for a few
+// seconds, and then try the GetResourcePolicy request again.
+func (c *Client) DeleteResourcePolicy(ctx context.Context, params *DeleteResourcePolicyInput, optFns ...func(*Options)) (*DeleteResourcePolicyOutput, error) {
+ if params == nil {
+ params = &DeleteResourcePolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteResourcePolicy", params, optFns, c.addOperationDeleteResourcePolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteResourcePolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DeleteResourcePolicyInput struct {
+
+ // The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy
+ // will be removed. The resources you can specify include tables and streams. If
+ // you remove the policy of a table, it will also remove the permissions for the
+ // table's indexes defined in that policy document. This is because index
+ // permissions are defined in the table's policy.
+ //
+ // This member is required.
+ ResourceArn *string
+
+ // A string value that you can use to conditionally delete your policy. When you
+ // provide an expected revision ID, if the revision ID of the existing policy on
+ // the resource doesn't match or if there's no policy attached to the resource, the
+ // request will fail and return a PolicyNotFoundException .
+ ExpectedRevisionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteResourcePolicyInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.ResourceArn
+
+}
+
+type DeleteResourcePolicyOutput struct {
+
+ // A unique string that represents the revision ID of the policy. If you're
+ // comparing revision IDs, make sure to always use string comparison logic.
+ //
+ // This value will be empty if you make a request against a resource without a
+ // policy.
+ RevisionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteResourcePolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteResourcePolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteResourcePolicy"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteResourcePolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteResourcePolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDeleteResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDeleteResourcePolicyDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDeleteResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DeleteResourcePolicyInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDeleteResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteResourcePolicy",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go
new file mode 100644
index 000000000..a9dbd3077
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DeleteTable.go
@@ -0,0 +1,275 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// The DeleteTable operation deletes a table and all of its items. After a
+// DeleteTable request, the specified table is in the DELETING state until
+// DynamoDB completes the deletion. If the table is in the ACTIVE state, you can
+// delete it. If a table is in CREATING or UPDATING states, then DynamoDB returns
+// a ResourceInUseException . If the specified table does not exist, DynamoDB
+// returns a ResourceNotFoundException . If table is already in the DELETING
+// state, no error is returned.
+//
+// DynamoDB might continue to accept data read and write operations, such as
+// GetItem and PutItem , on a table in the DELETING state until the table deletion
+// is complete. For the full list of table states, see [TableStatus].
+//
+// When you delete a table, any indexes on that table are also deleted.
+//
+// If you have DynamoDB Streams enabled on the table, then the corresponding
+// stream on that table goes into the DISABLED state, and the stream is
+// automatically deleted after 24 hours.
+//
+// Use the DescribeTable action to check the status of the table.
+//
+// [TableStatus]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TableDescription.html#DDB-Type-TableDescription-TableStatus
+func (c *Client) DeleteTable(ctx context.Context, params *DeleteTableInput, optFns ...func(*Options)) (*DeleteTableOutput, error) {
+ if params == nil {
+ params = &DeleteTableInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DeleteTable", params, optFns, c.addOperationDeleteTableMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DeleteTableOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a DeleteTable operation.
+type DeleteTableInput struct {
+
+ // The name of the table to delete. You can also provide the Amazon Resource Name
+ // (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DeleteTableInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a DeleteTable operation.
+type DeleteTableOutput struct {
+
+ // Represents the properties of a table.
+ TableDescription *types.TableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDeleteTableMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDeleteTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDeleteTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DeleteTable"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDeleteTableDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDeleteTableValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDeleteTable(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDeleteTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDeleteTableDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDeleteTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DeleteTableInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDeleteTable(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DeleteTable",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go
new file mode 100644
index 000000000..be01e6407
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeBackup.go
@@ -0,0 +1,254 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Describes an existing backup of a table.
+//
+// You can call DescribeBackup at a maximum rate of 10 times per second.
+func (c *Client) DescribeBackup(ctx context.Context, params *DescribeBackupInput, optFns ...func(*Options)) (*DescribeBackupOutput, error) {
+ if params == nil {
+ params = &DescribeBackupInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeBackup", params, optFns, c.addOperationDescribeBackupMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeBackupOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeBackupInput struct {
+
+ // The Amazon Resource Name (ARN) associated with the backup.
+ //
+ // This member is required.
+ BackupArn *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeBackupInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.BackupArn
+
+}
+
+type DescribeBackupOutput struct {
+
+ // Contains the description of the backup created for the table.
+ BackupDescription *types.BackupDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeBackupMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeBackup{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeBackup{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeBackup"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDescribeBackupDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeBackupValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeBackup(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDescribeBackupDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDescribeBackupDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDescribeBackupDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DescribeBackupInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeBackup(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeBackup",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go
new file mode 100644
index 000000000..b814e0517
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContinuousBackups.go
@@ -0,0 +1,270 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Checks the status of continuous backups and point in time recovery on the
+// specified table. Continuous backups are ENABLED on all tables at table
+// creation. If point in time recovery is enabled, PointInTimeRecoveryStatus will
+// be set to ENABLED.
+//
+// After continuous backups and point in time recovery are enabled, you can
+// restore to any point in time within EarliestRestorableDateTime and
+// LatestRestorableDateTime .
+//
+// LatestRestorableDateTime is typically 5 minutes before the current time. You
+// can restore your table to any point in time in the last 35 days. You can set the
+// recovery period to any value between 1 and 35 days.
+//
+// You can call DescribeContinuousBackups at a maximum rate of 10 times per second.
+func (c *Client) DescribeContinuousBackups(ctx context.Context, params *DescribeContinuousBackupsInput, optFns ...func(*Options)) (*DescribeContinuousBackupsOutput, error) {
+ if params == nil {
+ params = &DescribeContinuousBackupsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeContinuousBackups", params, optFns, c.addOperationDescribeContinuousBackupsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeContinuousBackupsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeContinuousBackupsInput struct {
+
+ // Name of the table for which the customer wants to check the continuous backups
+ // and point in time recovery settings.
+ //
+ // You can also provide the Amazon Resource Name (ARN) of the table in this
+ // parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeContinuousBackupsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type DescribeContinuousBackupsOutput struct {
+
+ // Represents the continuous backups and point in time recovery settings on the
+ // table.
+ ContinuousBackupsDescription *types.ContinuousBackupsDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeContinuousBackupsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeContinuousBackups{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeContinuousBackups{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeContinuousBackups"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDescribeContinuousBackupsDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeContinuousBackupsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeContinuousBackups(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDescribeContinuousBackupsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDescribeContinuousBackupsDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDescribeContinuousBackupsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DescribeContinuousBackupsInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeContinuousBackups(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeContinuousBackups",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go
new file mode 100644
index 000000000..338ff4df1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeContributorInsights.go
@@ -0,0 +1,246 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Returns information about contributor insights for a given table or global
+// secondary index.
+func (c *Client) DescribeContributorInsights(ctx context.Context, params *DescribeContributorInsightsInput, optFns ...func(*Options)) (*DescribeContributorInsightsOutput, error) {
+ if params == nil {
+ params = &DescribeContributorInsightsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeContributorInsights", params, optFns, c.addOperationDescribeContributorInsightsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeContributorInsightsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeContributorInsightsInput struct {
+
+ // The name of the table to describe. You can also provide the Amazon Resource
+ // Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // The name of the global secondary index to describe, if applicable.
+ IndexName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeContributorInsightsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type DescribeContributorInsightsOutput struct {
+
+ // The mode of CloudWatch Contributor Insights for DynamoDB that determines which
+ // events are emitted. Can be set to track all access and throttled events or
+ // throttled events only.
+ ContributorInsightsMode types.ContributorInsightsMode
+
+ // List of names of the associated contributor insights rules.
+ ContributorInsightsRuleList []string
+
+ // Current status of contributor insights.
+ ContributorInsightsStatus types.ContributorInsightsStatus
+
+ // Returns information about the last failure that was encountered.
+ //
+ // The most common exceptions for a FAILED status are:
+ //
+ // - LimitExceededException - Per-account Amazon CloudWatch Contributor Insights
+ // rule limit reached. Please disable Contributor Insights for other tables/indexes
+ // OR disable Contributor Insights rules before retrying.
+ //
+ // - AccessDeniedException - Amazon CloudWatch Contributor Insights rules cannot
+ // be modified due to insufficient permissions.
+ //
+ // - AccessDeniedException - Failed to create service-linked role for
+ // Contributor Insights due to insufficient permissions.
+ //
+ // - InternalServerError - Failed to create Amazon CloudWatch Contributor
+ // Insights rules. Please retry request.
+ FailureException *types.FailureException
+
+ // The name of the global secondary index being described.
+ IndexName *string
+
+ // Timestamp of the last time the status was changed.
+ LastUpdateDateTime *time.Time
+
+ // The name of the table being described.
+ TableName *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeContributorInsightsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeContributorInsights{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeContributorInsights{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeContributorInsights"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeContributorInsightsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeContributorInsights(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDescribeContributorInsights(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeContributorInsights",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go
new file mode 100644
index 000000000..0e7c17b82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeEndpoints.go
@@ -0,0 +1,195 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the regional endpoint information. For more information on policy
+// permissions, please see [Internetwork traffic privacy].
+//
+// [Internetwork traffic privacy]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints
+func (c *Client) DescribeEndpoints(ctx context.Context, params *DescribeEndpointsInput, optFns ...func(*Options)) (*DescribeEndpointsOutput, error) {
+ if params == nil {
+ params = &DescribeEndpointsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeEndpoints", params, optFns, c.addOperationDescribeEndpointsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeEndpointsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeEndpointsInput struct {
+ noSmithyDocumentSerde
+}
+
+type DescribeEndpointsOutput struct {
+
+ // List of endpoints.
+ //
+ // This member is required.
+ Endpoints []types.Endpoint
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeEndpointsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeEndpoints{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeEndpoints{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeEndpoints"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeEndpoints(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDescribeEndpoints(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeEndpoints",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go
new file mode 100644
index 000000000..22eba3e4a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeExport.go
@@ -0,0 +1,205 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Describes an existing table export.
+func (c *Client) DescribeExport(ctx context.Context, params *DescribeExportInput, optFns ...func(*Options)) (*DescribeExportOutput, error) {
+ if params == nil {
+ params = &DescribeExportInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeExport", params, optFns, c.addOperationDescribeExportMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeExportOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeExportInput struct {
+
+ // The Amazon Resource Name (ARN) associated with the export.
+ //
+ // This member is required.
+ ExportArn *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeExportInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.ExportArn
+
+}
+
+type DescribeExportOutput struct {
+
+ // Represents the properties of the export.
+ ExportDescription *types.ExportDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeExportMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeExport{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeExport{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeExport"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeExportValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeExport(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDescribeExport(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeExport",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go
new file mode 100644
index 000000000..95b1fb4e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTable.go
@@ -0,0 +1,264 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns information about the specified global table.
+//
+// This documentation is for version 2017.11.29 (Legacy) of global tables, which
+// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible,
+// because it provides greater flexibility, higher efficiency, and consumes less
+// write capacity than 2017.11.29 (Legacy).
+//
+// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables
+// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables].
+//
+// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html
+// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html
+func (c *Client) DescribeGlobalTable(ctx context.Context, params *DescribeGlobalTableInput, optFns ...func(*Options)) (*DescribeGlobalTableOutput, error) {
+ if params == nil {
+ params = &DescribeGlobalTableInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeGlobalTable", params, optFns, c.addOperationDescribeGlobalTableMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeGlobalTableOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeGlobalTableInput struct {
+
+ // The name of the global table.
+ //
+ // This member is required.
+ GlobalTableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeGlobalTableInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.GlobalTableName
+
+}
+
+type DescribeGlobalTableOutput struct {
+
+ // Contains the details of the global table.
+ GlobalTableDescription *types.GlobalTableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeGlobalTableMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeGlobalTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeGlobalTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeGlobalTable"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDescribeGlobalTableDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeGlobalTableValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeGlobalTable(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDescribeGlobalTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDescribeGlobalTableDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDescribeGlobalTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DescribeGlobalTableInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeGlobalTable(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeGlobalTable",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go
new file mode 100644
index 000000000..78378b4c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeGlobalTableSettings.go
@@ -0,0 +1,267 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Describes Region-specific settings for a global table.
+//
+// This documentation is for version 2017.11.29 (Legacy) of global tables, which
+// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible,
+// because it provides greater flexibility, higher efficiency, and consumes less
+// write capacity than 2017.11.29 (Legacy).
+//
+// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables
+// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables].
+//
+// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html
+// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html
+func (c *Client) DescribeGlobalTableSettings(ctx context.Context, params *DescribeGlobalTableSettingsInput, optFns ...func(*Options)) (*DescribeGlobalTableSettingsOutput, error) {
+ if params == nil {
+ params = &DescribeGlobalTableSettingsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeGlobalTableSettings", params, optFns, c.addOperationDescribeGlobalTableSettingsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeGlobalTableSettingsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeGlobalTableSettingsInput struct {
+
+ // The name of the global table to describe.
+ //
+ // This member is required.
+ GlobalTableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeGlobalTableSettingsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.GlobalTableName
+
+}
+
+type DescribeGlobalTableSettingsOutput struct {
+
+ // The name of the global table.
+ GlobalTableName *string
+
+ // The Region-specific settings for the global table.
+ ReplicaSettings []types.ReplicaSettingsDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeGlobalTableSettingsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeGlobalTableSettings{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeGlobalTableSettings{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeGlobalTableSettings"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDescribeGlobalTableSettingsDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeGlobalTableSettingsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeGlobalTableSettings(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDescribeGlobalTableSettingsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDescribeGlobalTableSettingsDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDescribeGlobalTableSettingsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DescribeGlobalTableSettingsInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeGlobalTableSettings(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeGlobalTableSettings",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go
new file mode 100644
index 000000000..a4c70e5d5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeImport.go
@@ -0,0 +1,209 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Represents the properties of the import.
+func (c *Client) DescribeImport(ctx context.Context, params *DescribeImportInput, optFns ...func(*Options)) (*DescribeImportOutput, error) {
+ if params == nil {
+ params = &DescribeImportInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeImport", params, optFns, c.addOperationDescribeImportMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeImportOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeImportInput struct {
+
+ // The Amazon Resource Name (ARN) associated with the table you're importing to.
+ //
+ // This member is required.
+ ImportArn *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeImportInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.ImportArn
+
+}
+
+type DescribeImportOutput struct {
+
+ // Represents the properties of the table created for the import, and parameters
+ // of the import. The import parameters include import status, how many items were
+ // processed, and how many errors were encountered.
+ //
+ // This member is required.
+ ImportTableDescription *types.ImportTableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeImportMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeImport{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeImport{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeImport"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeImportValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeImport(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDescribeImport(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeImport",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go
new file mode 100644
index 000000000..dd882cfe7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeKinesisStreamingDestination.go
@@ -0,0 +1,256 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns information about the status of Kinesis streaming.
+func (c *Client) DescribeKinesisStreamingDestination(ctx context.Context, params *DescribeKinesisStreamingDestinationInput, optFns ...func(*Options)) (*DescribeKinesisStreamingDestinationOutput, error) {
+ if params == nil {
+ params = &DescribeKinesisStreamingDestinationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeKinesisStreamingDestination", params, optFns, c.addOperationDescribeKinesisStreamingDestinationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeKinesisStreamingDestinationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeKinesisStreamingDestinationInput struct {
+
+ // The name of the table being described. You can also provide the Amazon Resource
+ // Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeKinesisStreamingDestinationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type DescribeKinesisStreamingDestinationOutput struct {
+
+ // The list of replica structures for the table being described.
+ KinesisDataStreamDestinations []types.KinesisDataStreamDestination
+
+ // The name of the table being described.
+ TableName *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeKinesisStreamingDestination{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeKinesisStreamingDestination{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeKinesisStreamingDestination"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDescribeKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeKinesisStreamingDestinationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeKinesisStreamingDestination(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDescribeKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDescribeKinesisStreamingDestinationDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDescribeKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DescribeKinesisStreamingDestinationInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeKinesisStreamingDestination",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go
new file mode 100644
index 000000000..915a2a018
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeLimits.go
@@ -0,0 +1,312 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the current provisioned-capacity quotas for your Amazon Web Services
+// account in a Region, both for the Region as a whole and for any one DynamoDB
+// table that you create there.
+//
+// When you establish an Amazon Web Services account, the account has initial
+// quotas on the maximum read capacity units and write capacity units that you can
+// provision across all of your DynamoDB tables in a given Region. Also, there are
+// per-table quotas that apply when you create a table there. For more information,
+// see [Service, Account, and Table Quotas]page in the Amazon DynamoDB Developer Guide.
+//
+// Although you can increase these quotas by filing a case at [Amazon Web Services Support Center], obtaining the
+// increase is not instantaneous. The DescribeLimits action lets you write code to
+// compare the capacity you are currently using to those quotas imposed by your
+// account so that you have enough time to apply for an increase before you hit a
+// quota.
+//
+// For example, you could use one of the Amazon Web Services SDKs to do the
+// following:
+//
+// - Call DescribeLimits for a particular Region to obtain your current account
+// quotas on provisioned capacity there.
+//
+// - Create a variable to hold the aggregate read capacity units provisioned for
+// all your tables in that Region, and one to hold the aggregate write capacity
+// units. Zero them both.
+//
+// - Call ListTables to obtain a list of all your DynamoDB tables.
+//
+// - For each table name listed by ListTables , do the following:
+//
+// - Call DescribeTable with the table name.
+//
+// - Use the data returned by DescribeTable to add the read capacity units and
+// write capacity units provisioned for the table itself to your variables.
+//
+// - If the table has one or more global secondary indexes (GSIs), loop over
+// these GSIs and add their provisioned capacity values to your variables as well.
+//
+// - Report the account quotas for that Region returned by DescribeLimits , along
+// with the total current provisioned capacity levels you have calculated.
+//
+// This will let you see whether you are getting close to your account-level
+// quotas.
+//
+// The per-table quotas apply only when you are creating a new table. They
+// restrict the sum of the provisioned capacity of the new table itself and all its
+// global secondary indexes.
+//
+// For existing tables and their GSIs, DynamoDB doesn't let you increase
+// provisioned capacity extremely rapidly, but the only quota that applies is that
+// the aggregate provisioned capacity over all your tables and GSIs cannot exceed
+// either of the per-account quotas.
+//
+// DescribeLimits should only be called periodically. You can expect throttling
+// errors if you call it more than once in a minute.
+//
+// The DescribeLimits Request element has no content.
+//
+// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+// [Amazon Web Services Support Center]: https://console.aws.amazon.com/support/home#/
+func (c *Client) DescribeLimits(ctx context.Context, params *DescribeLimitsInput, optFns ...func(*Options)) (*DescribeLimitsOutput, error) {
+ if params == nil {
+ params = &DescribeLimitsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeLimits", params, optFns, c.addOperationDescribeLimitsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeLimitsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a DescribeLimits operation. Has no content.
+type DescribeLimitsInput struct {
+ noSmithyDocumentSerde
+}
+
+// Represents the output of a DescribeLimits operation.
+type DescribeLimitsOutput struct {
+
+ // The maximum total read capacity units that your account allows you to provision
+ // across all of your tables in this Region.
+ AccountMaxReadCapacityUnits *int64
+
+ // The maximum total write capacity units that your account allows you to
+ // provision across all of your tables in this Region.
+ AccountMaxWriteCapacityUnits *int64
+
+ // The maximum read capacity units that your account allows you to provision for a
+ // new table that you are creating in this Region, including the read capacity
+ // units provisioned for its global secondary indexes (GSIs).
+ TableMaxReadCapacityUnits *int64
+
+ // The maximum write capacity units that your account allows you to provision for
+ // a new table that you are creating in this Region, including the write capacity
+ // units provisioned for its global secondary indexes (GSIs).
+ TableMaxWriteCapacityUnits *int64
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeLimitsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeLimits{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeLimits{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeLimits"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDescribeLimitsDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeLimits(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDescribeLimitsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDescribeLimitsDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDescribeLimitsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DescribeLimitsInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeLimits(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeLimits",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go
new file mode 100644
index 000000000..4a0cd9d29
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTable.go
@@ -0,0 +1,631 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithytime "github.com/aws/smithy-go/time"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ smithywaiter "github.com/aws/smithy-go/waiter"
+ "time"
+)
+
+// Returns information about the table, including the current status of the table,
+// when it was created, the primary key schema, and any indexes on the table.
+//
+// If you issue a DescribeTable request immediately after a CreateTable request,
+// DynamoDB might return a ResourceNotFoundException . This is because
+// DescribeTable uses an eventually consistent query, and the metadata for your
+// table might not be available at that moment. Wait for a few seconds, and then
+// try the DescribeTable request again.
+func (c *Client) DescribeTable(ctx context.Context, params *DescribeTableInput, optFns ...func(*Options)) (*DescribeTableOutput, error) {
+ if params == nil {
+ params = &DescribeTableInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeTable", params, optFns, c.addOperationDescribeTableMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeTableOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a DescribeTable operation.
+type DescribeTableInput struct {
+
+ // The name of the table to describe. You can also provide the Amazon Resource
+ // Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeTableInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a DescribeTable operation.
+type DescribeTableOutput struct {
+
+ // The properties of the table.
+ Table *types.TableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeTableMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTable"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDescribeTableDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeTableValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTable(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// TableExistsWaiterOptions are waiter options for TableExistsWaiter
+type TableExistsWaiterOptions struct {
+
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ //
+ // Passing options here is functionally equivalent to passing values to this
+ // config's ClientOptions field that extend the inner client's APIOptions directly.
+ APIOptions []func(*middleware.Stack) error
+
+ // Functional options to be passed to all operations invoked by this client.
+ //
+ // Function values that modify the inner APIOptions are applied after the waiter
+ // config's own APIOptions modifiers.
+ ClientOptions []func(*Options)
+
+ // MinDelay is the minimum amount of time to delay between retries. If unset,
+ // TableExistsWaiter will use default minimum delay of 20 seconds. Note that
+ // MinDelay must resolve to a value lesser than or equal to the MaxDelay.
+ MinDelay time.Duration
+
+ // MaxDelay is the maximum amount of time to delay between retries. If unset or
+ // set to zero, TableExistsWaiter will use default max delay of 120 seconds. Note
+ // that MaxDelay must resolve to value greater than or equal to the MinDelay.
+ MaxDelay time.Duration
+
+ // LogWaitAttempts is used to enable logging for waiter retry attempts
+ LogWaitAttempts bool
+
+ // Retryable is function that can be used to override the service defined
+ // waiter-behavior based on operation output, or returned error. This function is
+ // used by the waiter to decide if a state is retryable or a terminal state.
+ //
+ // By default service-modeled logic will populate this option. This option can
+ // thus be used to define a custom waiter state with fall-back to service-modeled
+ // waiter state mutators.The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
+ Retryable func(context.Context, *DescribeTableInput, *DescribeTableOutput, error) (bool, error)
+}
+
+// TableExistsWaiter defines the waiters for TableExists
+type TableExistsWaiter struct {
+ client DescribeTableAPIClient
+
+ options TableExistsWaiterOptions
+}
+
+// NewTableExistsWaiter constructs a TableExistsWaiter.
+func NewTableExistsWaiter(client DescribeTableAPIClient, optFns ...func(*TableExistsWaiterOptions)) *TableExistsWaiter {
+ options := TableExistsWaiterOptions{}
+ options.MinDelay = 20 * time.Second
+ options.MaxDelay = 120 * time.Second
+ options.Retryable = tableExistsStateRetryable
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ return &TableExistsWaiter{
+ client: client,
+ options: options,
+ }
+}
+
+// Wait calls the waiter function for TableExists waiter. The maxWaitDur is the
+// maximum wait duration the waiter will wait. The maxWaitDur is required and must
+// be greater than zero.
+func (w *TableExistsWaiter) Wait(ctx context.Context, params *DescribeTableInput, maxWaitDur time.Duration, optFns ...func(*TableExistsWaiterOptions)) error {
+ _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...)
+ return err
+}
+
+// WaitForOutput calls the waiter function for TableExists waiter and returns the
+// output of the successful operation. The maxWaitDur is the maximum wait duration
+// the waiter will wait. The maxWaitDur is required and must be greater than zero.
+func (w *TableExistsWaiter) WaitForOutput(ctx context.Context, params *DescribeTableInput, maxWaitDur time.Duration, optFns ...func(*TableExistsWaiterOptions)) (*DescribeTableOutput, error) {
+ if maxWaitDur <= 0 {
+ return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero")
+ }
+
+ options := w.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.MaxDelay <= 0 {
+ options.MaxDelay = 120 * time.Second
+ }
+
+ if options.MinDelay > options.MaxDelay {
+ return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
+ }
+
+ ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+ defer cancelFn()
+
+ logger := smithywaiter.Logger{}
+ remainingTime := maxWaitDur
+
+ var attempt int64
+ for {
+
+ attempt++
+ apiOptions := options.APIOptions
+ start := time.Now()
+
+ if options.LogWaitAttempts {
+ logger.Attempt = attempt
+ apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+ apiOptions = append(apiOptions, logger.AddLogger)
+ }
+
+ out, err := w.client.DescribeTable(ctx, params, func(o *Options) {
+ baseOpts := []func(*Options){
+ addIsWaiterUserAgent,
+ }
+ o.APIOptions = append(o.APIOptions, apiOptions...)
+ for _, opt := range baseOpts {
+ opt(o)
+ }
+ for _, opt := range options.ClientOptions {
+ opt(o)
+ }
+ })
+
+ retryable, err := options.Retryable(ctx, params, out, err)
+ if err != nil {
+ return nil, err
+ }
+ if !retryable {
+ return out, nil
+ }
+
+ remainingTime -= time.Since(start)
+ if remainingTime < options.MinDelay || remainingTime <= 0 {
+ break
+ }
+
+ // compute exponential backoff between waiter retries
+ delay, err := smithywaiter.ComputeDelay(
+ attempt, options.MinDelay, options.MaxDelay, remainingTime,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error computing waiter delay, %w", err)
+ }
+
+ remainingTime -= delay
+ // sleep for the delay amount before invoking a request
+ if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+ return nil, fmt.Errorf("request cancelled while waiting, %w", err)
+ }
+ }
+ return nil, fmt.Errorf("exceeded max wait time for TableExists waiter")
+}
+
+func tableExistsStateRetryable(ctx context.Context, input *DescribeTableInput, output *DescribeTableOutput, err error) (bool, error) {
+
+ if err == nil {
+ v1 := output.Table
+ var v2 types.TableStatus
+ if v1 != nil {
+ v3 := v1.TableStatus
+ v2 = v3
+ }
+ expectedValue := "ACTIVE"
+ var pathValue string
+ pathValue = string(v2)
+ if pathValue == expectedValue {
+ return false, nil
+ }
+ }
+
+ if err != nil {
+ var errorType *types.ResourceNotFoundException
+ if errors.As(err, &errorType) {
+ return true, nil
+ }
+ }
+
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+// TableNotExistsWaiterOptions are waiter options for TableNotExistsWaiter
+type TableNotExistsWaiterOptions struct {
+
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ //
+ // Passing options here is functionally equivalent to passing values to this
+ // config's ClientOptions field that extend the inner client's APIOptions directly.
+ APIOptions []func(*middleware.Stack) error
+
+ // Functional options to be passed to all operations invoked by this client.
+ //
+ // Function values that modify the inner APIOptions are applied after the waiter
+ // config's own APIOptions modifiers.
+ ClientOptions []func(*Options)
+
+ // MinDelay is the minimum amount of time to delay between retries. If unset,
+ // TableNotExistsWaiter will use default minimum delay of 20 seconds. Note that
+ // MinDelay must resolve to a value lesser than or equal to the MaxDelay.
+ MinDelay time.Duration
+
+ // MaxDelay is the maximum amount of time to delay between retries. If unset or
+ // set to zero, TableNotExistsWaiter will use default max delay of 120 seconds.
+ // Note that MaxDelay must resolve to value greater than or equal to the MinDelay.
+ MaxDelay time.Duration
+
+ // LogWaitAttempts is used to enable logging for waiter retry attempts
+ LogWaitAttempts bool
+
+ // Retryable is function that can be used to override the service defined
+ // waiter-behavior based on operation output, or returned error. This function is
+ // used by the waiter to decide if a state is retryable or a terminal state.
+ //
+ // By default service-modeled logic will populate this option. This option can
+ // thus be used to define a custom waiter state with fall-back to service-modeled
+ // waiter state mutators.The function returns an error in case of a failure state.
+ // In case of retry state, this function returns a bool value of true and nil
+ // error, while in case of success it returns a bool value of false and nil error.
+ Retryable func(context.Context, *DescribeTableInput, *DescribeTableOutput, error) (bool, error)
+}
+
+// TableNotExistsWaiter defines the waiters for TableNotExists
+type TableNotExistsWaiter struct {
+ client DescribeTableAPIClient
+
+ options TableNotExistsWaiterOptions
+}
+
+// NewTableNotExistsWaiter constructs a TableNotExistsWaiter.
+func NewTableNotExistsWaiter(client DescribeTableAPIClient, optFns ...func(*TableNotExistsWaiterOptions)) *TableNotExistsWaiter {
+ options := TableNotExistsWaiterOptions{}
+ options.MinDelay = 20 * time.Second
+ options.MaxDelay = 120 * time.Second
+ options.Retryable = tableNotExistsStateRetryable
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ return &TableNotExistsWaiter{
+ client: client,
+ options: options,
+ }
+}
+
+// Wait calls the waiter function for TableNotExists waiter. The maxWaitDur is the
+// maximum wait duration the waiter will wait. The maxWaitDur is required and must
+// be greater than zero.
+func (w *TableNotExistsWaiter) Wait(ctx context.Context, params *DescribeTableInput, maxWaitDur time.Duration, optFns ...func(*TableNotExistsWaiterOptions)) error {
+ _, err := w.WaitForOutput(ctx, params, maxWaitDur, optFns...)
+ return err
+}
+
+// WaitForOutput calls the waiter function for TableNotExists waiter and returns
+// the output of the successful operation. The maxWaitDur is the maximum wait
+// duration the waiter will wait. The maxWaitDur is required and must be greater
+// than zero.
+func (w *TableNotExistsWaiter) WaitForOutput(ctx context.Context, params *DescribeTableInput, maxWaitDur time.Duration, optFns ...func(*TableNotExistsWaiterOptions)) (*DescribeTableOutput, error) {
+ if maxWaitDur <= 0 {
+ return nil, fmt.Errorf("maximum wait time for waiter must be greater than zero")
+ }
+
+ options := w.options
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ if options.MaxDelay <= 0 {
+ options.MaxDelay = 120 * time.Second
+ }
+
+ if options.MinDelay > options.MaxDelay {
+ return nil, fmt.Errorf("minimum waiter delay %v must be lesser than or equal to maximum waiter delay of %v.", options.MinDelay, options.MaxDelay)
+ }
+
+ ctx, cancelFn := context.WithTimeout(ctx, maxWaitDur)
+ defer cancelFn()
+
+ logger := smithywaiter.Logger{}
+ remainingTime := maxWaitDur
+
+ var attempt int64
+ for {
+
+ attempt++
+ apiOptions := options.APIOptions
+ start := time.Now()
+
+ if options.LogWaitAttempts {
+ logger.Attempt = attempt
+ apiOptions = append([]func(*middleware.Stack) error{}, options.APIOptions...)
+ apiOptions = append(apiOptions, logger.AddLogger)
+ }
+
+ out, err := w.client.DescribeTable(ctx, params, func(o *Options) {
+ baseOpts := []func(*Options){
+ addIsWaiterUserAgent,
+ }
+ o.APIOptions = append(o.APIOptions, apiOptions...)
+ for _, opt := range baseOpts {
+ opt(o)
+ }
+ for _, opt := range options.ClientOptions {
+ opt(o)
+ }
+ })
+
+ retryable, err := options.Retryable(ctx, params, out, err)
+ if err != nil {
+ return nil, err
+ }
+ if !retryable {
+ return out, nil
+ }
+
+ remainingTime -= time.Since(start)
+ if remainingTime < options.MinDelay || remainingTime <= 0 {
+ break
+ }
+
+ // compute exponential backoff between waiter retries
+ delay, err := smithywaiter.ComputeDelay(
+ attempt, options.MinDelay, options.MaxDelay, remainingTime,
+ )
+ if err != nil {
+ return nil, fmt.Errorf("error computing waiter delay, %w", err)
+ }
+
+ remainingTime -= delay
+ // sleep for the delay amount before invoking a request
+ if err := smithytime.SleepWithContext(ctx, delay); err != nil {
+ return nil, fmt.Errorf("request cancelled while waiting, %w", err)
+ }
+ }
+ return nil, fmt.Errorf("exceeded max wait time for TableNotExists waiter")
+}
+
+func tableNotExistsStateRetryable(ctx context.Context, input *DescribeTableInput, output *DescribeTableOutput, err error) (bool, error) {
+
+ if err != nil {
+ var errorType *types.ResourceNotFoundException
+ if errors.As(err, &errorType) {
+ return false, nil
+ }
+ }
+
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
+
+func addOpDescribeTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDescribeTableDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDescribeTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DescribeTableInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+// DescribeTableAPIClient is a client that implements the DescribeTable operation.
+type DescribeTableAPIClient interface {
+ DescribeTable(context.Context, *DescribeTableInput, ...func(*Options)) (*DescribeTableOutput, error)
+}
+
+var _ DescribeTableAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opDescribeTable(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeTable",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go
new file mode 100644
index 000000000..f8e2f77b7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTableReplicaAutoScaling.go
@@ -0,0 +1,206 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Describes auto scaling settings across replicas of the global table at once.
+func (c *Client) DescribeTableReplicaAutoScaling(ctx context.Context, params *DescribeTableReplicaAutoScalingInput, optFns ...func(*Options)) (*DescribeTableReplicaAutoScalingOutput, error) {
+ if params == nil {
+ params = &DescribeTableReplicaAutoScalingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeTableReplicaAutoScaling", params, optFns, c.addOperationDescribeTableReplicaAutoScalingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeTableReplicaAutoScalingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeTableReplicaAutoScalingInput struct {
+
+ // The name of the table. You can also provide the Amazon Resource Name (ARN) of
+ // the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeTableReplicaAutoScalingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type DescribeTableReplicaAutoScalingOutput struct {
+
+ // Represents the auto scaling properties of the table.
+ TableAutoScalingDescription *types.TableAutoScalingDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeTableReplicaAutoScalingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeTableReplicaAutoScaling{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeTableReplicaAutoScaling{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTableReplicaAutoScaling"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeTableReplicaAutoScalingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTableReplicaAutoScaling(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opDescribeTableReplicaAutoScaling(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeTableReplicaAutoScaling",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go
new file mode 100644
index 000000000..5ea9ab1ee
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DescribeTimeToLive.go
@@ -0,0 +1,253 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Gives a description of the Time to Live (TTL) status on the specified table.
+func (c *Client) DescribeTimeToLive(ctx context.Context, params *DescribeTimeToLiveInput, optFns ...func(*Options)) (*DescribeTimeToLiveOutput, error) {
+ if params == nil {
+ params = &DescribeTimeToLiveInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DescribeTimeToLive", params, optFns, c.addOperationDescribeTimeToLiveMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DescribeTimeToLiveOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DescribeTimeToLiveInput struct {
+
+ // The name of the table to be described. You can also provide the Amazon Resource
+ // Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *DescribeTimeToLiveInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type DescribeTimeToLiveOutput struct {
+
+ //
+ TimeToLiveDescription *types.TimeToLiveDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDescribeTimeToLiveMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDescribeTimeToLive{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDescribeTimeToLive{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DescribeTimeToLive"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDescribeTimeToLiveDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDescribeTimeToLiveValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDescribeTimeToLive(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDescribeTimeToLiveDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDescribeTimeToLiveDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDescribeTimeToLiveDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DescribeTimeToLiveInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDescribeTimeToLive(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DescribeTimeToLive",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go
new file mode 100644
index 000000000..5df8f9ea0
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_DisableKinesisStreamingDestination.go
@@ -0,0 +1,271 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Stops replication from the DynamoDB table to the Kinesis data stream. This is
+// done without deleting either of the resources.
+func (c *Client) DisableKinesisStreamingDestination(ctx context.Context, params *DisableKinesisStreamingDestinationInput, optFns ...func(*Options)) (*DisableKinesisStreamingDestinationOutput, error) {
+ if params == nil {
+ params = &DisableKinesisStreamingDestinationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "DisableKinesisStreamingDestination", params, optFns, c.addOperationDisableKinesisStreamingDestinationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*DisableKinesisStreamingDestinationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type DisableKinesisStreamingDestinationInput struct {
+
+ // The ARN for a Kinesis data stream.
+ //
+ // This member is required.
+ StreamArn *string
+
+ // The name of the DynamoDB table. You can also provide the Amazon Resource Name
+ // (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // The source for the Kinesis streaming information that is being enabled.
+ EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration
+
+ noSmithyDocumentSerde
+}
+
+func (in *DisableKinesisStreamingDestinationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type DisableKinesisStreamingDestinationOutput struct {
+
+ // The current status of the replication.
+ DestinationStatus types.DestinationStatus
+
+ // The destination for the Kinesis streaming information that is being enabled.
+ EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration
+
+ // The ARN for the specific Kinesis data stream.
+ StreamArn *string
+
+ // The name of the table being modified.
+ TableName *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationDisableKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpDisableKinesisStreamingDestination{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpDisableKinesisStreamingDestination{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DisableKinesisStreamingDestination"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpDisableKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpDisableKinesisStreamingDestinationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDisableKinesisStreamingDestination(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpDisableKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpDisableKinesisStreamingDestinationDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpDisableKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*DisableKinesisStreamingDestinationInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opDisableKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "DisableKinesisStreamingDestination",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go
new file mode 100644
index 000000000..c7b047c7f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_EnableKinesisStreamingDestination.go
@@ -0,0 +1,273 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Starts table data replication to the specified Kinesis data stream at a
+// timestamp chosen during the enable workflow. If this operation doesn't return
+// results immediately, use DescribeKinesisStreamingDestination to check if
+// streaming to the Kinesis data stream is ACTIVE.
+func (c *Client) EnableKinesisStreamingDestination(ctx context.Context, params *EnableKinesisStreamingDestinationInput, optFns ...func(*Options)) (*EnableKinesisStreamingDestinationOutput, error) {
+ if params == nil {
+ params = &EnableKinesisStreamingDestinationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "EnableKinesisStreamingDestination", params, optFns, c.addOperationEnableKinesisStreamingDestinationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*EnableKinesisStreamingDestinationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type EnableKinesisStreamingDestinationInput struct {
+
+ // The ARN for a Kinesis data stream.
+ //
+ // This member is required.
+ StreamArn *string
+
+ // The name of the DynamoDB table. You can also provide the Amazon Resource Name
+ // (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // The source for the Kinesis streaming information that is being enabled.
+ EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration
+
+ noSmithyDocumentSerde
+}
+
+func (in *EnableKinesisStreamingDestinationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type EnableKinesisStreamingDestinationOutput struct {
+
+ // The current status of the replication.
+ DestinationStatus types.DestinationStatus
+
+ // The destination for the Kinesis streaming information that is being enabled.
+ EnableKinesisStreamingConfiguration *types.EnableKinesisStreamingConfiguration
+
+ // The ARN for the specific Kinesis data stream.
+ StreamArn *string
+
+ // The name of the table being modified.
+ TableName *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationEnableKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpEnableKinesisStreamingDestination{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpEnableKinesisStreamingDestination{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "EnableKinesisStreamingDestination"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpEnableKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpEnableKinesisStreamingDestinationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opEnableKinesisStreamingDestination(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpEnableKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpEnableKinesisStreamingDestinationDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpEnableKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*EnableKinesisStreamingDestinationInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opEnableKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "EnableKinesisStreamingDestination",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go
new file mode 100644
index 000000000..655372e14
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteStatement.go
@@ -0,0 +1,283 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation allows you to perform reads and singleton writes on data stored
+// in DynamoDB, using PartiQL.
+//
+// For PartiQL reads ( SELECT statement), if the total number of processed items
+// exceeds the maximum dataset size limit of 1 MB, the read stops and results are
+// returned to the user as a LastEvaluatedKey value to continue the read in a
+// subsequent operation. If the filter criteria in WHERE clause does not match any
+// data, the read will return an empty result set.
+//
+// A single SELECT statement response can return up to the maximum number of items
+// (if using the Limit parameter) or a maximum of 1 MB of data (and then apply any
+// filtering to the results using WHERE clause). If LastEvaluatedKey is present in
+// the response, you need to paginate the result set. If NextToken is present, you
+// need to paginate the result set and include NextToken .
+func (c *Client) ExecuteStatement(ctx context.Context, params *ExecuteStatementInput, optFns ...func(*Options)) (*ExecuteStatementOutput, error) {
+ if params == nil {
+ params = &ExecuteStatementInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ExecuteStatement", params, optFns, c.addOperationExecuteStatementMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ExecuteStatementOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ExecuteStatementInput struct {
+
+ // The PartiQL statement representing the operation to run.
+ //
+ // This member is required.
+ Statement *string
+
+ // The consistency of a read operation. If set to true , then a strongly consistent
+ // read is used; otherwise, an eventually consistent read is used.
+ ConsistentRead *bool
+
+ // The maximum number of items to evaluate (not necessarily the number of matching
+ // items). If DynamoDB processes the number of items up to the limit while
+ // processing the results, it stops the operation and returns the matching values
+ // up to that point, along with a key in LastEvaluatedKey to apply in a subsequent
+ // operation so you can pick up where you left off. Also, if the processed dataset
+ // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation and
+ // returns the matching values up to the limit, and a key in LastEvaluatedKey to
+ // apply in a subsequent operation to continue the operation.
+ Limit *int32
+
+ // Set this value to get remaining results, if NextToken was returned in the
+ // statement response.
+ NextToken *string
+
+ // The parameters for the PartiQL statement, if any.
+ Parameters []types.AttributeValue
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ // An optional parameter that returns the item attributes for an ExecuteStatement
+ // operation that failed a condition check.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+type ExecuteStatementOutput struct {
+
+ // The capacity units consumed by an operation. The data returned includes the
+ // total provisioned throughput consumed, along with statistics for the table and
+ // any indexes involved in the operation. ConsumedCapacity is only returned if the
+ // request asked for it. For more information, see [Provisioned capacity mode]in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+ ConsumedCapacity *types.ConsumedCapacity
+
+ // If a read operation was used, this property will contain the result of the read
+ // operation; a map of attribute names and their values. For the write operations
+ // this value will be empty.
+ Items []map[string]types.AttributeValue
+
+ // The primary key of the item where the operation stopped, inclusive of the
+ // previous result set. Use this value to start a new operation, excluding this
+ // value in the new request. If LastEvaluatedKey is empty, then the "last page" of
+ // results has been processed and there is no more data to be retrieved. If
+ // LastEvaluatedKey is not empty, it does not necessarily mean that there is more
+ // data in the result set. The only way to know when you have reached the end of
+ // the result set is when LastEvaluatedKey is empty.
+ LastEvaluatedKey map[string]types.AttributeValue
+
+ // If the response of a read request exceeds the response payload limit DynamoDB
+ // will set this value in the response. If set, you can use that this value in the
+ // subsequent request to get the remaining results.
+ NextToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationExecuteStatementMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpExecuteStatement{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpExecuteStatement{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ExecuteStatement"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpExecuteStatementValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opExecuteStatement(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opExecuteStatement(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ExecuteStatement",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go
new file mode 100644
index 000000000..85c587bc3
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExecuteTransaction.go
@@ -0,0 +1,258 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// This operation allows you to perform transactional reads or writes on data
+// stored in DynamoDB, using PartiQL.
+//
+// The entire transaction must consist of either read statements or write
+// statements, you cannot mix both in one transaction. The EXISTS function is an
+// exception and can be used to check the condition of specific attributes of the
+// item in a similar manner to ConditionCheck in the [TransactWriteItems] API.
+//
+// [TransactWriteItems]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems
+func (c *Client) ExecuteTransaction(ctx context.Context, params *ExecuteTransactionInput, optFns ...func(*Options)) (*ExecuteTransactionOutput, error) {
+ if params == nil {
+ params = &ExecuteTransactionInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ExecuteTransaction", params, optFns, c.addOperationExecuteTransactionMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ExecuteTransactionOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ExecuteTransactionInput struct {
+
+ // The list of PartiQL statements representing the transaction to run.
+ //
+ // This member is required.
+ TransactStatements []types.ParameterizedStatement
+
+ // Set this value to get remaining results, if NextToken was returned in the
+ // statement response.
+ ClientRequestToken *string
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response. For more information, see [TransactGetItems]and [TransactWriteItems].
+ //
+ // [TransactWriteItems]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html
+ // [TransactGetItems]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ noSmithyDocumentSerde
+}
+
+type ExecuteTransactionOutput struct {
+
+ // The capacity units consumed by the entire operation. The values of the list are
+ // ordered according to the ordering of the statements.
+ ConsumedCapacity []types.ConsumedCapacity
+
+ // The response to a PartiQL transaction.
+ Responses []types.ItemResponse
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationExecuteTransactionMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpExecuteTransaction{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpExecuteTransaction{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ExecuteTransaction"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addIdempotencyToken_opExecuteTransactionMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpExecuteTransactionValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opExecuteTransaction(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+type idempotencyToken_initializeOpExecuteTransaction struct {
+ tokenProvider IdempotencyTokenProvider
+}
+
+func (*idempotencyToken_initializeOpExecuteTransaction) ID() string {
+ return "OperationIdempotencyTokenAutoFill"
+}
+
+func (m *idempotencyToken_initializeOpExecuteTransaction) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.tokenProvider == nil {
+ return next.HandleInitialize(ctx, in)
+ }
+
+ input, ok := in.Parameters.(*ExecuteTransactionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("expected middleware input to be of type *ExecuteTransactionInput ")
+ }
+
+ if input.ClientRequestToken == nil {
+ t, err := m.tokenProvider.GetIdempotencyToken()
+ if err != nil {
+ return out, metadata, err
+ }
+ input.ClientRequestToken = &t
+ }
+ return next.HandleInitialize(ctx, in)
+}
+func addIdempotencyToken_opExecuteTransactionMiddleware(stack *middleware.Stack, cfg Options) error {
+ return stack.Initialize.Add(&idempotencyToken_initializeOpExecuteTransaction{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
+}
+
+func newServiceMetadataMiddleware_opExecuteTransaction(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ExecuteTransaction",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go
new file mode 100644
index 000000000..491a44823
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ExportTableToPointInTime.go
@@ -0,0 +1,304 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Exports table data to an S3 bucket. The table must have point in time recovery
+// enabled, and you can export data from any time within the point in time recovery
+// window.
+func (c *Client) ExportTableToPointInTime(ctx context.Context, params *ExportTableToPointInTimeInput, optFns ...func(*Options)) (*ExportTableToPointInTimeOutput, error) {
+ if params == nil {
+ params = &ExportTableToPointInTimeInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ExportTableToPointInTime", params, optFns, c.addOperationExportTableToPointInTimeMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ExportTableToPointInTimeOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ExportTableToPointInTimeInput struct {
+
+ // The name of the Amazon S3 bucket to export the snapshot to.
+ //
+ // This member is required.
+ S3Bucket *string
+
+ // The Amazon Resource Name (ARN) associated with the table to export.
+ //
+ // This member is required.
+ TableArn *string
+
+ // Providing a ClientToken makes the call to ExportTableToPointInTimeInput
+ // idempotent, meaning that multiple identical calls have the same effect as one
+ // single call.
+ //
+ // A client token is valid for 8 hours after the first request that uses it is
+ // completed. After 8 hours, any request with the same client token is treated as a
+ // new request. Do not resubmit the same request with the same client token for
+ // more than 8 hours, or the result might not be idempotent.
+ //
+ // If you submit a request with the same client token but a change in other
+ // parameters within the 8-hour idempotency window, DynamoDB returns an
+ // ImportConflictException .
+ ClientToken *string
+
+ // The format for the exported data. Valid values for ExportFormat are
+ // DYNAMODB_JSON or ION .
+ ExportFormat types.ExportFormat
+
+ // Time in the past from which to export table data, counted in seconds from the
+ // start of the Unix epoch. The table export will be a snapshot of the table's
+ // state at this point in time.
+ ExportTime *time.Time
+
+ // Choice of whether to execute as a full export or incremental export. Valid
+ // values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT.
+ // If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must also
+ // be used.
+ ExportType types.ExportType
+
+ // Optional object containing the parameters specific to an incremental export.
+ IncrementalExportSpecification *types.IncrementalExportSpecification
+
+ // The ID of the Amazon Web Services account that owns the bucket the export will
+ // be stored in.
+ //
+ // S3BucketOwner is a required parameter when exporting to a S3 bucket in another
+ // account.
+ S3BucketOwner *string
+
+ // The Amazon S3 bucket prefix to use as the file name and path of the exported
+ // snapshot.
+ S3Prefix *string
+
+ // Type of encryption used on the bucket where export data will be stored. Valid
+ // values for S3SseAlgorithm are:
+ //
+ // - AES256 - server-side encryption with Amazon S3 managed keys
+ //
+ // - KMS - server-side encryption with KMS managed keys
+ S3SseAlgorithm types.S3SseAlgorithm
+
+ // The ID of the KMS managed key used to encrypt the S3 bucket where export data
+ // will be stored (if applicable).
+ S3SseKmsKeyId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ExportTableToPointInTimeInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableArn
+
+}
+
+type ExportTableToPointInTimeOutput struct {
+
+ // Contains a description of the table export.
+ ExportDescription *types.ExportDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationExportTableToPointInTimeMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpExportTableToPointInTime{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpExportTableToPointInTime{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ExportTableToPointInTime"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addIdempotencyToken_opExportTableToPointInTimeMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpExportTableToPointInTimeValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opExportTableToPointInTime(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+type idempotencyToken_initializeOpExportTableToPointInTime struct {
+ tokenProvider IdempotencyTokenProvider
+}
+
+func (*idempotencyToken_initializeOpExportTableToPointInTime) ID() string {
+ return "OperationIdempotencyTokenAutoFill"
+}
+
+func (m *idempotencyToken_initializeOpExportTableToPointInTime) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.tokenProvider == nil {
+ return next.HandleInitialize(ctx, in)
+ }
+
+ input, ok := in.Parameters.(*ExportTableToPointInTimeInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("expected middleware input to be of type *ExportTableToPointInTimeInput ")
+ }
+
+ if input.ClientToken == nil {
+ t, err := m.tokenProvider.GetIdempotencyToken()
+ if err != nil {
+ return out, metadata, err
+ }
+ input.ClientToken = &t
+ }
+ return next.HandleInitialize(ctx, in)
+}
+func addIdempotencyToken_opExportTableToPointInTimeMiddleware(stack *middleware.Stack, cfg Options) error {
+ return stack.Initialize.Add(&idempotencyToken_initializeOpExportTableToPointInTime{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
+}
+
+func newServiceMetadataMiddleware_opExportTableToPointInTime(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ExportTableToPointInTime",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go
new file mode 100644
index 000000000..5a8e1f6be
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetItem.go
@@ -0,0 +1,360 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// The GetItem operation returns a set of attributes for the item with the given
+// primary key. If there is no matching item, GetItem does not return any data and
+// there will be no Item element in the response.
+//
+// GetItem provides an eventually consistent read by default. If your application
+// requires a strongly consistent read, set ConsistentRead to true . Although a
+// strongly consistent read might take more time than an eventually consistent
+// read, it always returns the last updated value.
+func (c *Client) GetItem(ctx context.Context, params *GetItemInput, optFns ...func(*Options)) (*GetItemOutput, error) {
+ if params == nil {
+ params = &GetItemInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetItem", params, optFns, c.addOperationGetItemMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetItemOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a GetItem operation.
+type GetItemInput struct {
+
+ // A map of attribute names to AttributeValue objects, representing the primary
+ // key of the item to retrieve.
+ //
+ // For the primary key, you must provide all of the attributes. For example, with
+ // a simple primary key, you only need to provide a value for the partition key.
+ // For a composite primary key, you must provide values for both the partition key
+ // and the sort key.
+ //
+ // This member is required.
+ Key map[string]types.AttributeValue
+
+ // The name of the table containing the requested item. You can also provide the
+ // Amazon Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more
+ // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide.
+ //
+ // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html
+ AttributesToGet []string
+
+ // Determines the read consistency model: If set to true , then the operation uses
+ // strongly consistent reads; otherwise, the operation uses eventually consistent
+ // reads.
+ ConsistentRead *bool
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames :
+ //
+ // - To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // - To create a placeholder for repeating occurrences of an attribute name in
+ // an expression.
+ //
+ // - To prevent special characters in an attribute name from being
+ // misinterpreted in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // - Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be used
+ // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the
+ // Amazon DynamoDB Developer Guide). To work around this, you could specify the
+ // following for ExpressionAttributeNames :
+ //
+ // - {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // - #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ExpressionAttributeNames map[string]string
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document. The
+ // attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes are returned. If any
+ // of the requested attributes are not found, they do not appear in the result.
+ //
+ // For more information, see [Specifying Item Attributes] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ProjectionExpression *string
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetItemInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a GetItem operation.
+type GetItemOutput struct {
+
+ // The capacity units consumed by the GetItem operation. The data returned
+ // includes the total provisioned throughput consumed, along with statistics for
+ // the table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see [Capacity unit consumption for read operations]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption
+ ConsumedCapacity *types.ConsumedCapacity
+
+ // A map of attribute names to AttributeValue objects, as specified by
+ // ProjectionExpression .
+ Item map[string]types.AttributeValue
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpGetItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpGetItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetItem"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetItemValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetItem(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpGetItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpGetItemDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpGetItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*GetItemInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opGetItem(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetItem",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go
new file mode 100644
index 000000000..31bae3f42
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_GetResourcePolicy.go
@@ -0,0 +1,286 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns the resource-based policy document attached to the resource, which can
+// be a table or stream, in JSON format.
+//
+// GetResourcePolicy follows an [eventually consistent] model. The following list describes the outcomes
+// when you issue the GetResourcePolicy request immediately after issuing another
+// request:
+//
+// - If you issue a GetResourcePolicy request immediately after a
+// PutResourcePolicy request, DynamoDB might return a PolicyNotFoundException .
+//
+// - If you issue a GetResourcePolicy request immediately after a
+// DeleteResourcePolicy request, DynamoDB might return the policy that was
+// present before the deletion request.
+//
+// - If you issue a GetResourcePolicy request immediately after a CreateTable
+// request, which includes a resource-based policy, DynamoDB might return a
+// ResourceNotFoundException or a PolicyNotFoundException .
+//
+// Because GetResourcePolicy uses an eventually consistent query, the metadata for
+// your policy or table might not be available at that moment. Wait for a few
+// seconds, and then retry the GetResourcePolicy request.
+//
+// After a GetResourcePolicy request returns a policy created using the
+// PutResourcePolicy request, the policy will be applied in the authorization of
+// requests to the resource. Because this process is eventually consistent, it will
+// take some time to apply the policy to all requests to a resource. Policies that
+// you attach while creating a table using the CreateTable request will always be
+// applied to all requests for that table.
+//
+// [eventually consistent]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
+func (c *Client) GetResourcePolicy(ctx context.Context, params *GetResourcePolicyInput, optFns ...func(*Options)) (*GetResourcePolicyOutput, error) {
+ if params == nil {
+ params = &GetResourcePolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "GetResourcePolicy", params, optFns, c.addOperationGetResourcePolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*GetResourcePolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type GetResourcePolicyInput struct {
+
+ // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy is
+ // attached. The resources you can specify include tables and streams.
+ //
+ // This member is required.
+ ResourceArn *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *GetResourcePolicyInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.ResourceArn
+
+}
+
+type GetResourcePolicyOutput struct {
+
+ // The resource-based policy document attached to the resource, which can be a
+ // table or stream, in JSON format.
+ Policy *string
+
+ // A unique string that represents the revision ID of the policy. If you're
+ // comparing revision IDs, make sure to always use string comparison logic.
+ RevisionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationGetResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpGetResourcePolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpGetResourcePolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetResourcePolicy"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpGetResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpGetResourcePolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetResourcePolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpGetResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpGetResourcePolicyDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpGetResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*GetResourcePolicyInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opGetResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "GetResourcePolicy",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go
new file mode 100644
index 000000000..ab97739f8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ImportTable.go
@@ -0,0 +1,282 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Imports table data from an S3 bucket.
+func (c *Client) ImportTable(ctx context.Context, params *ImportTableInput, optFns ...func(*Options)) (*ImportTableOutput, error) {
+ if params == nil {
+ params = &ImportTableInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ImportTable", params, optFns, c.addOperationImportTableMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ImportTableOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ImportTableInput struct {
+
+ // The format of the source data. Valid values for ImportFormat are CSV ,
+ // DYNAMODB_JSON or ION .
+ //
+ // This member is required.
+ InputFormat types.InputFormat
+
+ // The S3 bucket that provides the source for the import.
+ //
+ // This member is required.
+ S3BucketSource *types.S3BucketSource
+
+ // Parameters for the table to import the data into.
+ //
+ // This member is required.
+ TableCreationParameters *types.TableCreationParameters
+
+ // Providing a ClientToken makes the call to ImportTableInput idempotent, meaning
+ // that multiple identical calls have the same effect as one single call.
+ //
+ // A client token is valid for 8 hours after the first request that uses it is
+ // completed. After 8 hours, any request with the same client token is treated as a
+ // new request. Do not resubmit the same request with the same client token for
+ // more than 8 hours, or the result might not be idempotent.
+ //
+ // If you submit a request with the same client token but a change in other
+ // parameters within the 8-hour idempotency window, DynamoDB returns an
+ // IdempotentParameterMismatch exception.
+ ClientToken *string
+
+ // Type of compression to be used on the input coming from the imported table.
+ InputCompressionType types.InputCompressionType
+
+	// Additional properties that specify how the input is formatted.
+ InputFormatOptions *types.InputFormatOptions
+
+ noSmithyDocumentSerde
+}
+
+func (in *ImportTableInput) bindEndpointParams(p *EndpointParameters) {
+ func() {
+ v1 := in.TableCreationParameters
+ var v2 *string
+ if v1 != nil {
+ v3 := v1.TableName
+ v2 = v3
+ }
+ p.ResourceArn = v2
+ }()
+
+}
+
+type ImportTableOutput struct {
+
+ // Represents the properties of the table created for the import, and parameters
+ // of the import. The import parameters include import status, how many items were
+ // processed, and how many errors were encountered.
+ //
+ // This member is required.
+ ImportTableDescription *types.ImportTableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationImportTableMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpImportTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpImportTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ImportTable"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addIdempotencyToken_opImportTableMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpImportTableValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opImportTable(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+type idempotencyToken_initializeOpImportTable struct {
+ tokenProvider IdempotencyTokenProvider
+}
+
+func (*idempotencyToken_initializeOpImportTable) ID() string {
+ return "OperationIdempotencyTokenAutoFill"
+}
+
+func (m *idempotencyToken_initializeOpImportTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.tokenProvider == nil {
+ return next.HandleInitialize(ctx, in)
+ }
+
+ input, ok := in.Parameters.(*ImportTableInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("expected middleware input to be of type *ImportTableInput ")
+ }
+
+ if input.ClientToken == nil {
+ t, err := m.tokenProvider.GetIdempotencyToken()
+ if err != nil {
+ return out, metadata, err
+ }
+ input.ClientToken = &t
+ }
+ return next.HandleInitialize(ctx, in)
+}
+func addIdempotencyToken_opImportTableMiddleware(stack *middleware.Stack, cfg Options) error {
+ return stack.Initialize.Add(&idempotencyToken_initializeOpImportTable{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
+}
+
+func newServiceMetadataMiddleware_opImportTable(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ImportTable",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go
new file mode 100644
index 000000000..4d594fa5b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListBackups.go
@@ -0,0 +1,306 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// List DynamoDB backups that are associated with an Amazon Web Services account
+// and weren't made with Amazon Web Services Backup. To list these backups for a
+// given table, specify TableName . ListBackups returns a paginated list of
+// results with at most 1 MB worth of items in a page. You can also specify a
+// maximum number of entries to be returned in a page.
+//
+// In the request, start time is inclusive, but end time is exclusive. Note that
+// these boundaries are for the time at which the original backup was requested.
+//
+// You can call ListBackups a maximum of five times per second.
+//
+// If you want to retrieve the complete list of backups made with Amazon Web
+// Services Backup, use the [Amazon Web Services Backup list API.]
+//
+// [Amazon Web Services Backup list API.]: https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html
+func (c *Client) ListBackups(ctx context.Context, params *ListBackupsInput, optFns ...func(*Options)) (*ListBackupsOutput, error) {
+ if params == nil {
+ params = &ListBackupsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListBackups", params, optFns, c.addOperationListBackupsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListBackupsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListBackupsInput struct {
+
+ // The backups from the table specified by BackupType are listed.
+ //
+ // Where BackupType can be:
+ //
+ // - USER - On-demand backup created by you. (The default setting if no other
+ // backup types are specified.)
+ //
+ // - SYSTEM - On-demand backup automatically created by DynamoDB.
+ //
+ // - ALL - All types of on-demand backups (USER and SYSTEM).
+ BackupType types.BackupTypeFilter
+
+ // LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last
+ // evaluated when the current page of results was returned, inclusive of the
+ // current page of results. This value may be specified as the
+ // ExclusiveStartBackupArn of a new ListBackups operation in order to fetch the
+ // next page of results.
+ ExclusiveStartBackupArn *string
+
+ // Maximum number of backups to return at once.
+ Limit *int32
+
+ // Lists the backups from the table specified in TableName . You can also provide
+ // the Amazon Resource Name (ARN) of the table in this parameter.
+ TableName *string
+
+ // Only backups created after this time are listed. TimeRangeLowerBound is
+ // inclusive.
+ TimeRangeLowerBound *time.Time
+
+ // Only backups created before this time are listed. TimeRangeUpperBound is
+ // exclusive.
+ TimeRangeUpperBound *time.Time
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListBackupsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type ListBackupsOutput struct {
+
+ // List of BackupSummary objects.
+ BackupSummaries []types.BackupSummary
+
+ // The ARN of the backup last evaluated when the current page of results was
+ // returned, inclusive of the current page of results. This value may be specified
+ // as the ExclusiveStartBackupArn of a new ListBackups operation in order to fetch
+ // the next page of results.
+ //
+ // If LastEvaluatedBackupArn is empty, then the last page of results has been
+ // processed and there are no more results to be retrieved.
+ //
+ // If LastEvaluatedBackupArn is not empty, this may or may not indicate that there
+ // is more data to be returned. All results are guaranteed to have been returned if
+ // and only if no value for LastEvaluatedBackupArn is returned.
+ LastEvaluatedBackupArn *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListBackupsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpListBackups{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListBackups{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListBackups"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListBackupsDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListBackups(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpListBackupsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpListBackupsDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpListBackupsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*ListBackupsInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opListBackups(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListBackups",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go
new file mode 100644
index 000000000..d4870973e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListContributorInsights.go
@@ -0,0 +1,302 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a list of ContributorInsightsSummary for a table and all its global
+// secondary indexes.
+func (c *Client) ListContributorInsights(ctx context.Context, params *ListContributorInsightsInput, optFns ...func(*Options)) (*ListContributorInsightsOutput, error) {
+ if params == nil {
+ params = &ListContributorInsightsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListContributorInsights", params, optFns, c.addOperationListContributorInsightsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListContributorInsightsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListContributorInsightsInput struct {
+
+ // Maximum number of results to return per page.
+ MaxResults int32
+
+ // A token to for the desired page, if there is one.
+ NextToken *string
+
+ // The name of the table. You can also provide the Amazon Resource Name (ARN) of
+ // the table in this parameter.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListContributorInsightsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type ListContributorInsightsOutput struct {
+
+ // A list of ContributorInsightsSummary.
+ ContributorInsightsSummaries []types.ContributorInsightsSummary
+
+ // A token to go to the next page if there is one.
+ NextToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListContributorInsightsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpListContributorInsights{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListContributorInsights{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListContributorInsights"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListContributorInsights(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListContributorInsightsPaginatorOptions is the paginator options for
+// ListContributorInsights
+type ListContributorInsightsPaginatorOptions struct {
+ // Maximum number of results to return per page.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListContributorInsightsPaginator is a paginator for ListContributorInsights
+type ListContributorInsightsPaginator struct {
+ options ListContributorInsightsPaginatorOptions
+ client ListContributorInsightsAPIClient
+ params *ListContributorInsightsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListContributorInsightsPaginator returns a new
+// ListContributorInsightsPaginator
+func NewListContributorInsightsPaginator(client ListContributorInsightsAPIClient, params *ListContributorInsightsInput, optFns ...func(*ListContributorInsightsPaginatorOptions)) *ListContributorInsightsPaginator {
+ if params == nil {
+ params = &ListContributorInsightsInput{}
+ }
+
+ options := ListContributorInsightsPaginatorOptions{}
+ if params.MaxResults != 0 {
+ options.Limit = params.MaxResults
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListContributorInsightsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.NextToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListContributorInsightsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListContributorInsights page.
+func (p *ListContributorInsightsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListContributorInsightsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.NextToken = p.nextToken
+
+ params.MaxResults = p.options.Limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListContributorInsights(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.NextToken
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// ListContributorInsightsAPIClient is a client that implements the
+// ListContributorInsights operation.
+type ListContributorInsightsAPIClient interface {
+ ListContributorInsights(context.Context, *ListContributorInsightsInput, ...func(*Options)) (*ListContributorInsightsOutput, error)
+}
+
+var _ ListContributorInsightsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListContributorInsights(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListContributorInsights",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go
new file mode 100644
index 000000000..33c8aef46
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListExports.go
@@ -0,0 +1,304 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists completed exports within the past 90 days.
+func (c *Client) ListExports(ctx context.Context, params *ListExportsInput, optFns ...func(*Options)) (*ListExportsOutput, error) {
+ if params == nil {
+ params = &ListExportsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListExports", params, optFns, c.addOperationListExportsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListExportsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListExportsInput struct {
+
+ // Maximum number of results to return per page.
+ MaxResults *int32
+
+ // An optional string that, if supplied, must be copied from the output of a
+ // previous call to ListExports . When provided in this manner, the API fetches the
+ // next page of results.
+ NextToken *string
+
+ // The Amazon Resource Name (ARN) associated with the exported table.
+ TableArn *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListExportsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableArn
+
+}
+
+type ListExportsOutput struct {
+
+ // A list of ExportSummary objects.
+ ExportSummaries []types.ExportSummary
+
+ // If this value is returned, there are additional results to be displayed. To
+ // retrieve them, call ListExports again, with NextToken set to this value.
+ NextToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListExportsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpListExports{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListExports{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListExports"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListExports(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListExportsPaginatorOptions is the paginator options for ListExports
+type ListExportsPaginatorOptions struct {
+ // Maximum number of results to return per page.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListExportsPaginator is a paginator for ListExports
+type ListExportsPaginator struct {
+ options ListExportsPaginatorOptions
+ client ListExportsAPIClient
+ params *ListExportsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListExportsPaginator returns a new ListExportsPaginator
+func NewListExportsPaginator(client ListExportsAPIClient, params *ListExportsInput, optFns ...func(*ListExportsPaginatorOptions)) *ListExportsPaginator {
+ if params == nil {
+ params = &ListExportsInput{}
+ }
+
+ options := ListExportsPaginatorOptions{}
+ if params.MaxResults != nil {
+ options.Limit = *params.MaxResults
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListExportsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.NextToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListExportsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListExports page.
+func (p *ListExportsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListExportsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.NextToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.MaxResults = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListExports(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.NextToken
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// ListExportsAPIClient is a client that implements the ListExports operation.
+type ListExportsAPIClient interface {
+ ListExports(context.Context, *ListExportsInput, ...func(*Options)) (*ListExportsOutput, error)
+}
+
+var _ ListExportsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListExports(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListExports",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go
new file mode 100644
index 000000000..dcf73cdad
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListGlobalTables.go
@@ -0,0 +1,268 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists all global tables that have a replica in the specified Region.
+//
+// This documentation is for version 2017.11.29 (Legacy) of global tables, which
+// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible,
+// because it provides greater flexibility, higher efficiency, and consumes less
+// write capacity than 2017.11.29 (Legacy).
+//
+// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables
+// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables].
+//
+// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html
+// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html
+func (c *Client) ListGlobalTables(ctx context.Context, params *ListGlobalTablesInput, optFns ...func(*Options)) (*ListGlobalTablesOutput, error) {
+ if params == nil {
+ params = &ListGlobalTablesInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListGlobalTables", params, optFns, c.addOperationListGlobalTablesMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListGlobalTablesOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListGlobalTablesInput struct {
+
+ // The first global table name that this operation will evaluate.
+ ExclusiveStartGlobalTableName *string
+
+ // The maximum number of table names to return, if the parameter is not specified
+ // DynamoDB defaults to 100.
+ //
+ // If the number of global tables DynamoDB finds reaches this limit, it stops the
+ // operation and returns the table names collected up to that point, with a table
+ // name in the LastEvaluatedGlobalTableName to apply in a subsequent operation to
+ // the ExclusiveStartGlobalTableName parameter.
+ Limit *int32
+
+ // Lists the global tables in a specific Region.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+type ListGlobalTablesOutput struct {
+
+ // List of global table names.
+ GlobalTables []types.GlobalTable
+
+ // Last evaluated global table name.
+ LastEvaluatedGlobalTableName *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListGlobalTablesMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpListGlobalTables{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListGlobalTables{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListGlobalTables"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListGlobalTablesDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListGlobalTables(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpListGlobalTablesDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpListGlobalTablesDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpListGlobalTablesDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*ListGlobalTablesInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opListGlobalTables(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListGlobalTables",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go
new file mode 100644
index 000000000..5a02bf9c9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListImports.go
@@ -0,0 +1,304 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Lists completed imports within the past 90 days.
+func (c *Client) ListImports(ctx context.Context, params *ListImportsInput, optFns ...func(*Options)) (*ListImportsOutput, error) {
+ if params == nil {
+ params = &ListImportsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListImports", params, optFns, c.addOperationListImportsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListImportsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListImportsInput struct {
+
+ // An optional string that, if supplied, must be copied from the output of a
+ // previous call to ListImports . When provided in this manner, the API fetches the
+ // next page of results.
+ NextToken *string
+
+ // The number of ImportSummary objects returned in a single page.
+ PageSize *int32
+
+ // The Amazon Resource Name (ARN) associated with the table that was imported to.
+ TableArn *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListImportsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableArn
+
+}
+
+type ListImportsOutput struct {
+
+ // A list of ImportSummary objects.
+ ImportSummaryList []types.ImportSummary
+
+ // If this value is returned, there are additional results to be displayed. To
+ // retrieve them, call ListImports again, with NextToken set to this value.
+ NextToken *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListImportsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpListImports{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListImports{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListImports"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListImports(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListImportsPaginatorOptions is the paginator options for ListImports
+type ListImportsPaginatorOptions struct {
+ // The number of ImportSummary objects returned in a single page.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListImportsPaginator is a paginator for ListImports
+type ListImportsPaginator struct {
+ options ListImportsPaginatorOptions
+ client ListImportsAPIClient
+ params *ListImportsInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListImportsPaginator returns a new ListImportsPaginator
+func NewListImportsPaginator(client ListImportsAPIClient, params *ListImportsInput, optFns ...func(*ListImportsPaginatorOptions)) *ListImportsPaginator {
+ if params == nil {
+ params = &ListImportsInput{}
+ }
+
+ options := ListImportsPaginatorOptions{}
+ if params.PageSize != nil {
+ options.Limit = *params.PageSize
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListImportsPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.NextToken,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListImportsPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListImports page.
+func (p *ListImportsPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListImportsOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.NextToken = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.PageSize = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListImports(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.NextToken
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+// ListImportsAPIClient is a client that implements the ListImports operation.
+type ListImportsAPIClient interface {
+ ListImports(context.Context, *ListImportsInput, ...func(*Options)) (*ListImportsOutput, error)
+}
+
+var _ ListImportsAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListImports(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListImports",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go
new file mode 100644
index 000000000..ec575e967
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTables.go
@@ -0,0 +1,356 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns an array of table names associated with the current account and
+// endpoint. The output from ListTables is paginated, with each page returning a
+// maximum of 100 table names.
+func (c *Client) ListTables(ctx context.Context, params *ListTablesInput, optFns ...func(*Options)) (*ListTablesOutput, error) {
+ if params == nil {
+ params = &ListTablesInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListTables", params, optFns, c.addOperationListTablesMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListTablesOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a ListTables operation.
+type ListTablesInput struct {
+
+ // The first table name that this operation will evaluate. Use the value that was
+ // returned for LastEvaluatedTableName in a previous operation, so that you can
+ // obtain the next page of results.
+ ExclusiveStartTableName *string
+
+ // A maximum number of table names to return. If this parameter is not specified,
+ // the limit is 100.
+ Limit *int32
+
+ noSmithyDocumentSerde
+}
+
+// Represents the output of a ListTables operation.
+type ListTablesOutput struct {
+
+ // The name of the last table in the current page of results. Use this value as
+ // the ExclusiveStartTableName in a new request to obtain the next page of
+ // results, until all the table names are returned.
+ //
+ // If you do not receive a LastEvaluatedTableName value in the response, this
+ // means that there are no more table names to be retrieved.
+ LastEvaluatedTableName *string
+
+ // The names of the tables associated with the current account at the current
+ // endpoint. The maximum size of this array is 100.
+ //
+ // If LastEvaluatedTableName also appears in the output, you can use this value as
+ // the ExclusiveStartTableName parameter in a subsequent ListTables request and
+ // obtain the next page of results.
+ TableNames []string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListTablesMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpListTables{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListTables{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListTables"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListTablesDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTables(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// ListTablesPaginatorOptions is the paginator options for ListTables
+type ListTablesPaginatorOptions struct {
+ // A maximum number of table names to return. If this parameter is not specified,
+ // the limit is 100.
+ Limit int32
+
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// ListTablesPaginator is a paginator for ListTables
+type ListTablesPaginator struct {
+ options ListTablesPaginatorOptions
+ client ListTablesAPIClient
+ params *ListTablesInput
+ nextToken *string
+ firstPage bool
+}
+
+// NewListTablesPaginator returns a new ListTablesPaginator
+func NewListTablesPaginator(client ListTablesAPIClient, params *ListTablesInput, optFns ...func(*ListTablesPaginatorOptions)) *ListTablesPaginator {
+ if params == nil {
+ params = &ListTablesInput{}
+ }
+
+ options := ListTablesPaginatorOptions{}
+ if params.Limit != nil {
+ options.Limit = *params.Limit
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ListTablesPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.ExclusiveStartTableName,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ListTablesPaginator) HasMorePages() bool {
+ return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0)
+}
+
+// NextPage retrieves the next ListTables page.
+func (p *ListTablesPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ListTablesOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.ExclusiveStartTableName = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.Limit = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.ListTables(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.LastEvaluatedTableName
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.nextToken != nil &&
+ *prevToken == *p.nextToken {
+ p.nextToken = nil
+ }
+
+ return result, nil
+}
+
+func addOpListTablesDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpListTablesDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpListTablesDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*ListTablesInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+// ListTablesAPIClient is a client that implements the ListTables operation.
+type ListTablesAPIClient interface {
+ ListTables(context.Context, *ListTablesInput, ...func(*Options)) (*ListTablesOutput, error)
+}
+
+var _ ListTablesAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opListTables(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListTables",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go
new file mode 100644
index 000000000..6d9ed7392
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_ListTagsOfResource.go
@@ -0,0 +1,268 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource
+// up to 10 times per second, per account.
+//
+// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB
+// Developer Guide.
+//
+// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html
+func (c *Client) ListTagsOfResource(ctx context.Context, params *ListTagsOfResourceInput, optFns ...func(*Options)) (*ListTagsOfResourceOutput, error) {
+ if params == nil {
+ params = &ListTagsOfResourceInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "ListTagsOfResource", params, optFns, c.addOperationListTagsOfResourceMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ListTagsOfResourceOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type ListTagsOfResourceInput struct {
+
+ // The Amazon DynamoDB resource with tags to be listed. This value is an Amazon
+ // Resource Name (ARN).
+ //
+ // This member is required.
+ ResourceArn *string
+
+ // An optional string that, if supplied, must be copied from the output of a
+ // previous call to ListTagOfResource. When provided in this manner, this API
+ // fetches the next page of results.
+ NextToken *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *ListTagsOfResourceInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.ResourceArn
+
+}
+
+type ListTagsOfResourceOutput struct {
+
+ // If this value is returned, there are additional results to be displayed. To
+ // retrieve them, call ListTagsOfResource again, with NextToken set to this value.
+ NextToken *string
+
+ // The tags currently associated with the Amazon DynamoDB resource.
+ Tags []types.Tag
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationListTagsOfResourceMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpListTagsOfResource{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpListTagsOfResource{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListTagsOfResource"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpListTagsOfResourceDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpListTagsOfResourceValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListTagsOfResource(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpListTagsOfResourceDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpListTagsOfResourceDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpListTagsOfResourceDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*ListTagsOfResourceInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opListTagsOfResource(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "ListTagsOfResource",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go
new file mode 100644
index 000000000..43bd113d2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutItem.go
@@ -0,0 +1,483 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a new item, or replaces an old item with a new item. If an item that
+// has the same primary key as the new item already exists in the specified table,
+// the new item completely replaces the existing item. You can perform a
+// conditional put operation (add a new item if one with the specified primary key
+// doesn't exist), or replace an existing item if it has certain attribute values.
+// You can return the item's attribute values in the same operation, using the
+// ReturnValues parameter.
+//
+// When you add an item, the primary key attributes are the only required
+// attributes.
+//
+// Empty String and Binary attribute values are allowed. Attribute values of type
+// String and Binary must have a length greater than zero if the attribute is used
+// as a key attribute for a table or index. Set type attributes cannot be empty.
+//
+// Invalid Requests with empty values will be rejected with a ValidationException
+// exception.
+//
+// To prevent a new item from replacing an existing item, use a conditional
+// expression that contains the attribute_not_exists function with the name of the
+// attribute being used as the partition key for the table. Since every record must
+// contain that attribute, the attribute_not_exists function will only succeed if
+// no matching item exists.
+//
+// For more information about PutItem , see [Working with Items] in the Amazon DynamoDB Developer
+// Guide.
+//
+// [Working with Items]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html
+func (c *Client) PutItem(ctx context.Context, params *PutItemInput, optFns ...func(*Options)) (*PutItemOutput, error) {
+ if params == nil {
+ params = &PutItemInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutItem", params, optFns, c.addOperationPutItemMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutItemOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a PutItem operation.
+type PutItemInput struct {
+
+ // A map of attribute name/value pairs, one for each attribute. Only the primary
+ // key attributes are required; you can optionally provide other attribute
+ // name-value pairs for the item.
+ //
+ // You must provide all of the attributes for the primary key. For example, with a
+ // simple primary key, you only need to provide a value for the partition key. For
+ // a composite primary key, you must provide both values for both the partition key
+ // and the sort key.
+ //
+ // If you specify any attributes that are part of an index key, then the data
+ // types for those attributes must match those of the schema in the table's
+ // attribute definition.
+ //
+ // Empty String and Binary attribute values are allowed. Attribute values of type
+ // String and Binary must have a length greater than zero if the attribute is used
+ // as a key attribute for a table or index.
+ //
+ // For more information about primary keys, see [Primary Key] in the Amazon DynamoDB Developer
+ // Guide.
+ //
+ // Each element in the Item map is an AttributeValue object.
+ //
+ // [Primary Key]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey
+ //
+ // This member is required.
+ Item map[string]types.AttributeValue
+
+ // The name of the table to contain the item. You can also provide the Amazon
+ // Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // A condition that must be satisfied in order for a conditional PutItem operation
+ // to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // - Functions: attribute_exists | attribute_not_exists | attribute_type |
+ // contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // - Logical operators: AND | OR | NOT
+ //
+ // For more information on condition expressions, see [Condition Expressions] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+ ConditionExpression *string
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more
+ // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide.
+ //
+ // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html
+ ConditionalOperator types.ConditionalOperator
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more
+ // information, see [Expected]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html
+ Expected map[string]types.ExpectedAttributeValue
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames :
+ //
+ // - To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // - To create a placeholder for repeating occurrences of an attribute name in
+ // an expression.
+ //
+ // - To prevent special characters in an attribute name from being
+ // misinterpreted in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // - Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be used
+ // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the
+ // Amazon DynamoDB Developer Guide). To work around this, you could specify the
+ // following for ExpressionAttributeNames :
+ //
+ // - {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // - #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute value.
+ // For example, suppose that you wanted to check whether the value of the
+ // ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ // ":disc":{"S":"Discontinued"} }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see [Condition Expressions] in the Amazon
+ // DynamoDB Developer Guide.
+ //
+ // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+ ExpressionAttributeValues map[string]types.AttributeValue
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ // Determines whether item collection metrics are returned. If set to SIZE , the
+ // response includes statistics about item collections, if any, that were modified
+ // during the operation are returned in the response. If set to NONE (the
+ // default), no statistics are returned.
+ ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics
+
+ // Use ReturnValues if you want to get the item attributes as they appeared before
+ // they were updated with the PutItem request. For PutItem , the valid values are:
+ //
+ // - NONE - If ReturnValues is not specified, or if its value is NONE , then
+ // nothing is returned. (This setting is the default for ReturnValues .)
+ //
+ // - ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
+ // content of the old item is returned.
+ //
+ // The values returned are strongly consistent.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ //
+ // The ReturnValues parameter is used by several DynamoDB operations; however,
+ // PutItem does not recognize any values other than NONE or ALL_OLD .
+ ReturnValues types.ReturnValue
+
+ // An optional parameter that returns the item attributes for a PutItem operation
+ // that failed a condition check.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutItemInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a PutItem operation.
+type PutItemOutput struct {
+
+ // The attribute values as they appeared before the PutItem operation, but only if
+ // ReturnValues is specified as ALL_OLD in the request. Each element consists of
+ // an attribute name and an attribute value.
+ Attributes map[string]types.AttributeValue
+
+ // The capacity units consumed by the PutItem operation. The data returned
+ // includes the total provisioned throughput consumed, along with statistics for
+ // the table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see [Capacity unity consumption for write operations]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Capacity unity consumption for write operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption
+ ConsumedCapacity *types.ConsumedCapacity
+
+ // Information about item collections, if any, that were affected by the PutItem
+ // operation. ItemCollectionMetrics is only returned if the
+ // ReturnItemCollectionMetrics parameter was specified. If the table does not have
+ // any local secondary indexes, this information is not returned in the response.
+ //
+ // Each ItemCollectionMetrics element consists of:
+ //
+ // - ItemCollectionKey - The partition key value of the item collection. This is
+ // the same as the partition key value of the item itself.
+ //
+ // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
+ // This value is a two-element array containing a lower bound and an upper bound
+ // for the estimate. The estimate includes the size of all the items in the table,
+ // plus the size of all attributes projected into all of the local secondary
+ // indexes on that table. Use this estimate to measure whether a local secondary
+ // index is approaching its size limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ ItemCollectionMetrics *types.ItemCollectionMetrics
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpPutItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpPutItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutItem"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutItemValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutItem(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpPutItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpPutItemDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpPutItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*PutItemInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opPutItem(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutItem",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go
new file mode 100644
index 000000000..d14c3e1f5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_PutResourcePolicy.go
@@ -0,0 +1,310 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Attaches a resource-based policy document to the resource, which can be a table
+// or stream. When you attach a resource-based policy using this API, the policy
+// application is [eventually consistent].
+//
+// PutResourcePolicy is an idempotent operation; running it multiple times on the
+// same resource using the same policy document will return the same revision ID.
+// If you specify an ExpectedRevisionId that doesn't match the current policy's
+// RevisionId , the PolicyNotFoundException will be returned.
+//
+// PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy
+// request immediately after a PutResourcePolicy request, DynamoDB might return
+// your previous policy, if there was one, or return the PolicyNotFoundException .
+// This is because GetResourcePolicy uses an eventually consistent query, and the
+// metadata for your policy or table might not be available at that moment. Wait
+// for a few seconds, and then try the GetResourcePolicy request again.
+//
+// [eventually consistent]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html
+func (c *Client) PutResourcePolicy(ctx context.Context, params *PutResourcePolicyInput, optFns ...func(*Options)) (*PutResourcePolicyOutput, error) {
+ if params == nil {
+ params = &PutResourcePolicyInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "PutResourcePolicy", params, optFns, c.addOperationPutResourcePolicyMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*PutResourcePolicyOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type PutResourcePolicyInput struct {
+
+ // An Amazon Web Services resource-based policy document in JSON format.
+ //
+ // - The maximum size supported for a resource-based policy document is 20 KB.
+ // DynamoDB counts whitespaces when calculating the size of a policy against this
+ // limit.
+ //
+ // - Within a resource-based policy, if the action for a DynamoDB service-linked
+ // role (SLR) to replicate data for a global table is denied, adding or deleting a
+ // replica will fail with an error.
+ //
+ // For a full list of all considerations that apply while attaching a
+ // resource-based policy, see [Resource-based policy considerations].
+ //
+ // [Resource-based policy considerations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html
+ //
+ // This member is required.
+ Policy *string
+
+ // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy
+ // will be attached. The resources you can specify include tables and streams.
+ //
+ // You can control index permissions using the base table's policy. To specify the
+ // same permission level for your table and its indexes, you can provide both the
+ // table and index Amazon Resource Name (ARN)s in the Resource field of a given
+ // Statement in your policy document. Alternatively, to specify different
+ // permissions for your table, indexes, or both, you can define multiple Statement
+ // fields in your policy document.
+ //
+ // This member is required.
+ ResourceArn *string
+
+ // Set this parameter to true to confirm that you want to remove your permissions
+ // to change the policy of this resource in the future.
+ ConfirmRemoveSelfResourceAccess bool
+
+ // A string value that you can use to conditionally update your policy. You can
+ // provide the revision ID of your existing policy to make mutating requests
+ // against that policy.
+ //
+ // When you provide an expected revision ID, if the revision ID of the existing
+ // policy on the resource doesn't match or if there's no policy attached to the
+ // resource, your request will be rejected with a PolicyNotFoundException .
+ //
+ // To conditionally attach a policy when no policy exists for the resource,
+ // specify NO_POLICY for the revision ID.
+ ExpectedRevisionId *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *PutResourcePolicyInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.ResourceArn
+
+}
+
+type PutResourcePolicyOutput struct {
+
+ // A unique string that represents the revision ID of the policy. If you're
+ // comparing revision IDs, make sure to always use string comparison logic.
+ RevisionId *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationPutResourcePolicyMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpPutResourcePolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpPutResourcePolicy{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "PutResourcePolicy"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpPutResourcePolicyDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpPutResourcePolicyValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opPutResourcePolicy(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpPutResourcePolicyDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpPutResourcePolicyDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpPutResourcePolicyDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*PutResourcePolicyInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opPutResourcePolicy(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "PutResourcePolicy",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go
new file mode 100644
index 000000000..d0e6f1836
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Query.go
@@ -0,0 +1,745 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// You must provide the name of the partition key attribute and a single value for
+// that attribute. Query returns all items with that partition key value.
+// Optionally, you can provide a sort key attribute and use a comparison operator
+// to refine the search results.
+//
+// Use the KeyConditionExpression parameter to provide a specific value for the
+// partition key. The Query operation will return all of the items from the table
+// or index with that partition key value. You can optionally narrow the scope of
+// the Query operation by specifying a sort key value and a comparison operator in
+// KeyConditionExpression . To further refine the Query results, you can
+// optionally provide a FilterExpression . A FilterExpression determines which
+// items within the results should be returned to you. All of the other results are
+// discarded.
+//
+// A Query operation always returns a result set. If no matching items are found,
+// the result set will be empty. Queries that do not return results consume the
+// minimum number of read capacity units for that type of read operation.
+//
+// DynamoDB calculates the number of read capacity units consumed based on item
+// size, not on the amount of data that is returned to an application. The number
+// of capacity units consumed will be the same whether you request all of the
+// attributes (the default behavior) or just some of them (using a projection
+// expression). The number will also be the same whether or not you use a
+// FilterExpression .
+//
+// Query results are always sorted by the sort key value. If the data type of the
+// sort key is Number, the results are returned in numeric order; otherwise, the
+// results are returned in order of UTF-8 bytes. By default, the sort order is
+// ascending. To reverse the order, set the ScanIndexForward parameter to false.
+//
+// A single Query operation will read up to the maximum number of items set (if
+// using the Limit parameter) or a maximum of 1 MB of data and then apply any
+// filtering to the results using FilterExpression . If LastEvaluatedKey is
+// present in the response, you will need to paginate the result set. For more
+// information, see [Paginating the Results]in the Amazon DynamoDB Developer Guide.
+//
+// FilterExpression is applied after a Query finishes, but before the results are
+// returned. A FilterExpression cannot contain partition key or sort key
+// attributes. You need to specify those attributes in the KeyConditionExpression .
+//
+// A Query operation can return an empty result set and a LastEvaluatedKey if all
+// the items read for the page of results are filtered out.
+//
+// You can query a table, a local secondary index, or a global secondary index.
+// For a query on a table or on a local secondary index, you can set the
+// ConsistentRead parameter to true and obtain a strongly consistent result.
+// Global secondary indexes support eventually consistent reads only, so do not
+// specify ConsistentRead when querying a global secondary index.
+//
+// [Paginating the Results]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination
+func (c *Client) Query(ctx context.Context, params *QueryInput, optFns ...func(*Options)) (*QueryOutput, error) {
+ if params == nil {
+ params = &QueryInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "Query", params, optFns, c.addOperationQueryMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*QueryOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a Query operation.
+type QueryInput struct {
+
+ // The name of the table containing the requested items. You can also provide the
+ // Amazon Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more
+ // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide.
+ //
+ // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html
+ AttributesToGet []string
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see [ConditionalOperator]in the Amazon DynamoDB Developer Guide.
+ //
+ // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html
+ ConditionalOperator types.ConditionalOperator
+
+ // Determines the read consistency model: If set to true , then the operation uses
+ // strongly consistent reads; otherwise, the operation uses eventually consistent
+ // reads.
+ //
+ // Strongly consistent reads are not supported on global secondary indexes. If you
+ // query a global secondary index with ConsistentRead set to true , you will
+ // receive a ValidationException .
+ ConsistentRead *bool
+
+ // The primary key of the first item that this operation will evaluate. Use the
+ // value that was returned for LastEvaluatedKey in the previous operation.
+ //
+ // The data type for ExclusiveStartKey must be String, Number, or Binary. No set
+ // data types are allowed.
+ ExclusiveStartKey map[string]types.AttributeValue
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames :
+ //
+ // - To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // - To create a placeholder for repeating occurrences of an attribute name in
+ // an expression.
+ //
+ // - To prevent special characters in an attribute name from being
+ // misinterpreted in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // - Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be used
+ // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the
+ // Amazon DynamoDB Developer Guide). To work around this, you could specify the
+ // following for ExpressionAttributeNames :
+ //
+ // - {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // - #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute value.
+ // For example, suppose that you wanted to check whether the value of the
+ // ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ // ":disc":{"S":"Discontinued"} }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see [Specifying Conditions] in the Amazon
+ // DynamoDB Developer Guide.
+ //
+ // [Specifying Conditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+ ExpressionAttributeValues map[string]types.AttributeValue
+
+ // A string that contains conditions that DynamoDB applies after the Query
+ // operation, but before the data is returned to you. Items that do not satisfy the
+ // FilterExpression criteria are not returned.
+ //
+ // A FilterExpression does not allow key attributes. You cannot define a filter
+ // expression based on a partition key or a sort key.
+ //
+ // A FilterExpression is applied after the items have already been read; the
+ // process of filtering does not consume any additional read capacity units.
+ //
+ // For more information, see [Filter Expressions] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Filter Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html
+ FilterExpression *string
+
+ // The name of an index to query. This index can be any local secondary index or
+ // global secondary index on the table. Note that if you use the IndexName
+ // parameter, you must also provide TableName.
+ IndexName *string
+
+ // The condition that specifies the key values for items to be retrieved by the
+ // Query action.
+ //
+ // The condition must perform an equality test on a single partition key value.
+ //
+ // The condition can optionally perform one of several comparison tests on a
+ // single sort key value. This allows Query to retrieve one item with a given
+ // partition key value and sort key value, or several items that have the same
+ // partition key value but different sort key values.
+ //
+ // The partition key equality test is required, and must be specified in the
+ // following format:
+ //
+ // partitionKeyName = :partitionkeyval
+ //
+ // If you also want to provide a condition for the sort key, it must be combined
+ // using AND with the condition for the sort key. Following is an example, using
+ // the = comparison operator for the sort key:
+ //
+ // partitionKeyName
+ //
+ // =
+ //
+ // :partitionkeyval
+ //
+ // AND
+ //
+ // sortKeyName
+ //
+ // =
+ //
+ // :sortkeyval
+ //
+ // Valid comparisons for the sort key condition are as follows:
+ //
+ // - sortKeyName = :sortkeyval - true if the sort key value is equal to
+ // :sortkeyval .
+ //
+ // - sortKeyName < :sortkeyval - true if the sort key value is less than
+ // :sortkeyval .
+ //
+ // - sortKeyName <= :sortkeyval - true if the sort key value is less than or
+ // equal to :sortkeyval .
+ //
+ // - sortKeyName > :sortkeyval - true if the sort key value is greater than
+ // :sortkeyval .
+ //
+ // - sortKeyName >= :sortkeyval - true if the sort key value is greater than or
+ // equal to :sortkeyval .
+ //
+ // - sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key
+ // value is greater than or equal to :sortkeyval1 , and less than or equal to
+ // :sortkeyval2 .
+ //
+ // - begins_with ( sortKeyName , :sortkeyval ) - true if the sort key value
+ // begins with a particular operand. (You cannot use this function with a sort key
+ // that is of type Number.) Note that the function name begins_with is
+ // case-sensitive.
+ //
+ // Use the ExpressionAttributeValues parameter to replace tokens such as
+ // :partitionval and :sortval with actual values at runtime.
+ //
+ // You can optionally use the ExpressionAttributeNames parameter to replace the
+ // names of the partition key and sort key with placeholder tokens. This option
+ // might be necessary if an attribute name conflicts with a DynamoDB reserved word.
+ // For example, the following KeyConditionExpression parameter causes an error
+ // because Size is a reserved word:
+ //
+ // - Size = :myval
+ //
+ // To work around this, define a placeholder (such a #S ) to represent the
+ // attribute name Size. KeyConditionExpression then is as follows:
+ //
+ // - #S = :myval
+ //
+ // For a list of reserved words, see [Reserved Words] in the Amazon DynamoDB Developer Guide.
+ //
+ // For more information on ExpressionAttributeNames and ExpressionAttributeValues ,
+ // see [Using Placeholders for Attribute Names and Values]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Using Placeholders for Attribute Names and Values]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html
+ KeyConditionExpression *string
+
+ // This is a legacy parameter. Use KeyConditionExpression instead. For more
+ // information, see [KeyConditions]in the Amazon DynamoDB Developer Guide.
+ //
+ // [KeyConditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html
+ KeyConditions map[string]types.Condition
+
+ // The maximum number of items to evaluate (not necessarily the number of matching
+ // items). If DynamoDB processes the number of items up to the limit while
+ // processing the results, it stops the operation and returns the matching values
+ // up to that point, and a key in LastEvaluatedKey to apply in a subsequent
+ // operation, so that you can pick up where you left off. Also, if the processed
+ // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the
+ // operation and returns the matching values up to the limit, and a key in
+ // LastEvaluatedKey to apply in a subsequent operation to continue the operation.
+ // For more information, see [Query and Scan]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Query and Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html
+ Limit *int32
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document. The
+ // attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned. If
+ // any of the requested attributes are not found, they will not appear in the
+ // result.
+ //
+ // For more information, see [Accessing Item Attributes] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ProjectionExpression *string
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see [QueryFilter]in the Amazon DynamoDB Developer Guide.
+ //
+ // [QueryFilter]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html
+ QueryFilter map[string]types.Condition
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ // Specifies the order for index traversal: If true (default), the traversal is
+ // performed in ascending order; if false , the traversal is performed in
+ // descending order.
+ //
+ // Items with the same partition key value are stored in sorted order by sort key.
+ // If the sort key data type is Number, the results are stored in numeric order.
+ // For type String, the results are stored in order of UTF-8 bytes. For type
+ // Binary, DynamoDB treats each byte of the binary data as unsigned.
+ //
+ // If ScanIndexForward is true , DynamoDB returns the results in the order in which
+ // they are stored (by sort key value). This is the default behavior. If
+ // ScanIndexForward is false , DynamoDB reads the results in reverse order by sort
+ // key value, and then returns the results to the client.
+ ScanIndexForward *bool
+
+ // The attributes to be returned in the result. You can retrieve all item
+ // attributes, specific item attributes, the count of matching items, or in the
+ // case of an index, some or all of the attributes projected into the index.
+ //
+ // - ALL_ATTRIBUTES - Returns all of the item attributes from the specified table
+ // or index. If you query a local secondary index, then for each matching item in
+ // the index, DynamoDB fetches the entire item from the parent table. If the index
+ // is configured to project all item attributes, then all of the data can be
+ // obtained from the local secondary index, and no fetching is required.
+ //
+ // - ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
+ // all attributes that have been projected into the index. If the index is
+ // configured to project all attributes, this return value is equivalent to
+ // specifying ALL_ATTRIBUTES .
+ //
+ // - COUNT - Returns the number of matching items, rather than the matching items
+ // themselves. Note that this uses the same quantity of read capacity units as
+ // getting the items, and is subject to the same item size calculations.
+ //
+ // - SPECIFIC_ATTRIBUTES - Returns only the attributes listed in
+ // ProjectionExpression . This return value is equivalent to specifying
+ // ProjectionExpression without specifying any value for Select .
+ //
+ // If you query or scan a local secondary index and request only attributes that
+ // are projected into that index, the operation will read only the index and not
+ // the table. If any of the requested attributes are not projected into the local
+ // secondary index, DynamoDB fetches each of these attributes from the parent
+ // table. This extra fetching incurs additional throughput cost and latency.
+ //
+ // If you query or scan a global secondary index, you can only request attributes
+ // that are projected into the index. Global secondary index queries cannot fetch
+ // attributes from the parent table.
+ //
+ // If neither Select nor ProjectionExpression are specified, DynamoDB defaults to
+ // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when
+ // accessing an index. You cannot use both Select and ProjectionExpression
+ // together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES
+ // . (This usage is equivalent to specifying ProjectionExpression without any
+ // value for Select .)
+ //
+ // If you use the ProjectionExpression parameter, then the value for Select can
+ // only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error.
+ Select types.Select
+
+ noSmithyDocumentSerde
+}
+
+func (in *QueryInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a Query operation.
+type QueryOutput struct {
+
+ // The capacity units consumed by the Query operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the table
+ // and any indexes involved in the operation. ConsumedCapacity is only returned if
+ // the ReturnConsumedCapacity parameter was specified. For more information, see [Capacity unit consumption for read operations]
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption
+ ConsumedCapacity *types.ConsumedCapacity
+
+ // The number of items in the response.
+ //
+ // If you used a QueryFilter in the request, then Count is the number of items
+ // returned after the filter was applied, and ScannedCount is the number of
+ // matching items before the filter was applied.
+ //
+ // If you did not use a filter in the request, then Count and ScannedCount are the
+ // same.
+ Count int32
+
+ // An array of item attributes that match the query criteria. Each element in this
+ // array consists of an attribute name and the value for that attribute.
+ Items []map[string]types.AttributeValue
+
+ // The primary key of the item where the operation stopped, inclusive of the
+ // previous result set. Use this value to start a new operation, excluding this
+ // value in the new request.
+ //
+ // If LastEvaluatedKey is empty, then the "last page" of results has been
+ // processed and there is no more data to be retrieved.
+ //
+ // If LastEvaluatedKey is not empty, it does not necessarily mean that there is
+ // more data in the result set. The only way to know when you have reached the end
+ // of the result set is when LastEvaluatedKey is empty.
+ LastEvaluatedKey map[string]types.AttributeValue
+
+ // The number of items evaluated, before any QueryFilter is applied. A high
+ // ScannedCount value with few, or no, Count results indicates an inefficient Query
+ // operation. For more information, see [Count and ScannedCount]in the Amazon DynamoDB Developer Guide.
+ //
+ // If you did not use a filter in the request, then ScannedCount is the same as
+ // Count .
+ //
+ // [Count and ScannedCount]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Count
+ ScannedCount int32
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationQueryMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpQuery{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpQuery{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "Query"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpQueryDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpQueryValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opQuery(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+// QueryPaginatorOptions is the paginator options for Query
+type QueryPaginatorOptions struct {
+ // The maximum number of items to evaluate (not necessarily the number of matching
+ // items). If DynamoDB processes the number of items up to the limit while
+ // processing the results, it stops the operation and returns the matching values
+ // up to that point, and a key in LastEvaluatedKey to apply in a subsequent
+ // operation, so that you can pick up where you left off. Also, if the processed
+ // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the
+ // operation and returns the matching values up to the limit, and a key in
+ // LastEvaluatedKey to apply in a subsequent operation to continue the operation.
+ // For more information, see [Query and Scan]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Query and Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html
+ Limit int32
+}
+
+// QueryPaginator is a paginator for Query
+type QueryPaginator struct {
+ options QueryPaginatorOptions
+ client QueryAPIClient
+ params *QueryInput
+ nextToken map[string]types.AttributeValue
+ firstPage bool
+}
+
+// NewQueryPaginator returns a new QueryPaginator
+func NewQueryPaginator(client QueryAPIClient, params *QueryInput, optFns ...func(*QueryPaginatorOptions)) *QueryPaginator {
+ if params == nil {
+ params = &QueryInput{}
+ }
+
+ options := QueryPaginatorOptions{}
+ if params.Limit != nil {
+ options.Limit = *params.Limit
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &QueryPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.ExclusiveStartKey,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *QueryPaginator) HasMorePages() bool {
+ return p.firstPage || p.nextToken != nil
+}
+
+// NextPage retrieves the next Query page.
+func (p *QueryPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*QueryOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.ExclusiveStartKey = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.Limit = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.Query(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.LastEvaluatedKey
+
+ _ = prevToken
+
+ return result, nil
+}
+
+func addOpQueryDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpQueryDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpQueryDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*QueryInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+// QueryAPIClient is a client that implements the Query operation.
+type QueryAPIClient interface {
+ Query(context.Context, *QueryInput, ...func(*Options)) (*QueryOutput, error)
+}
+
+var _ QueryAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opQuery(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "Query",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go
new file mode 100644
index 000000000..250d96388
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableFromBackup.go
@@ -0,0 +1,298 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates a new table from an existing backup. Any number of users can execute up
+// to 50 concurrent restores (any type of restore) in a given account.
+//
+// You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
+//
+// You must manually set up the following on the restored table:
+//
+// - Auto scaling policies
+//
+// - IAM policies
+//
+// - Amazon CloudWatch metrics and alarms
+//
+// - Tags
+//
+// - Stream settings
+//
+// - Time to Live (TTL) settings
+func (c *Client) RestoreTableFromBackup(ctx context.Context, params *RestoreTableFromBackupInput, optFns ...func(*Options)) (*RestoreTableFromBackupOutput, error) {
+ if params == nil {
+ params = &RestoreTableFromBackupInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "RestoreTableFromBackup", params, optFns, c.addOperationRestoreTableFromBackupMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*RestoreTableFromBackupOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type RestoreTableFromBackupInput struct {
+
+ // The Amazon Resource Name (ARN) associated with the backup.
+ //
+ // This member is required.
+ BackupArn *string
+
+ // The name of the new table to which the backup must be restored.
+ //
+ // This member is required.
+ TargetTableName *string
+
+ // The billing mode of the restored table.
+ BillingModeOverride types.BillingMode
+
+ // List of global secondary indexes for the restored table. The indexes provided
+ // should match existing secondary indexes. You can choose to exclude some or all
+ // of the indexes at the time of restore.
+ GlobalSecondaryIndexOverride []types.GlobalSecondaryIndex
+
+ // List of local secondary indexes for the restored table. The indexes provided
+ // should match existing secondary indexes. You can choose to exclude some or all
+ // of the indexes at the time of restore.
+ LocalSecondaryIndexOverride []types.LocalSecondaryIndex
+
+ // Sets the maximum number of read and write units for the specified on-demand
+ // table. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both.
+ OnDemandThroughputOverride *types.OnDemandThroughput
+
+ // Provisioned throughput settings for the restored table.
+ ProvisionedThroughputOverride *types.ProvisionedThroughput
+
+ // The new server-side encryption settings for the restored table.
+ SSESpecificationOverride *types.SSESpecification
+
+ noSmithyDocumentSerde
+}
+
+func (in *RestoreTableFromBackupInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TargetTableName
+
+}
+
+type RestoreTableFromBackupOutput struct {
+
+ // The description of the table created from an existing backup.
+ TableDescription *types.TableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationRestoreTableFromBackupMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpRestoreTableFromBackup{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpRestoreTableFromBackup{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreTableFromBackup"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpRestoreTableFromBackupDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpRestoreTableFromBackupValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreTableFromBackup(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpRestoreTableFromBackupDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpRestoreTableFromBackupDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpRestoreTableFromBackupDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*RestoreTableFromBackupInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opRestoreTableFromBackup(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "RestoreTableFromBackup",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go
new file mode 100644
index 000000000..380fcb85e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_RestoreTableToPointInTime.go
@@ -0,0 +1,330 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "time"
+)
+
+// Restores the specified table to the specified point in time within
+// EarliestRestorableDateTime and LatestRestorableDateTime . You can restore your
+// table to any point in time in the last 35 days. You can set the recovery period
+// to any value between 1 and 35 days. Any number of users can execute up to 50
+// concurrent restores (any type of restore) in a given account.
+//
+// When you restore using point in time recovery, DynamoDB restores your table
+// data to the state based on the selected date and time (day:hour:minute:second)
+// to a new table.
+//
+// Along with data, the following are also included on the new restored table
+// using point in time recovery:
+//
+// - Global secondary indexes (GSIs)
+//
+// - Local secondary indexes (LSIs)
+//
+// - Provisioned read and write capacity
+//
+// - Encryption settings
+//
+// All these settings come from the current settings of the source table at the
+//
+// time of restore.
+//
+// You must manually set up the following on the restored table:
+//
+// - Auto scaling policies
+//
+// - IAM policies
+//
+// - Amazon CloudWatch metrics and alarms
+//
+// - Tags
+//
+// - Stream settings
+//
+// - Time to Live (TTL) settings
+//
+// - Point in time recovery settings
+func (c *Client) RestoreTableToPointInTime(ctx context.Context, params *RestoreTableToPointInTimeInput, optFns ...func(*Options)) (*RestoreTableToPointInTimeOutput, error) {
+ if params == nil {
+ params = &RestoreTableToPointInTimeInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "RestoreTableToPointInTime", params, optFns, c.addOperationRestoreTableToPointInTimeMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*RestoreTableToPointInTimeOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type RestoreTableToPointInTimeInput struct {
+
+ // The name of the new table to which it must be restored to.
+ //
+ // This member is required.
+ TargetTableName *string
+
+ // The billing mode of the restored table.
+ BillingModeOverride types.BillingMode
+
+ // List of global secondary indexes for the restored table. The indexes provided
+ // should match existing secondary indexes. You can choose to exclude some or all
+ // of the indexes at the time of restore.
+ GlobalSecondaryIndexOverride []types.GlobalSecondaryIndex
+
+ // List of local secondary indexes for the restored table. The indexes provided
+ // should match existing secondary indexes. You can choose to exclude some or all
+ // of the indexes at the time of restore.
+ LocalSecondaryIndexOverride []types.LocalSecondaryIndex
+
+ // Sets the maximum number of read and write units for the specified on-demand
+ // table. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both.
+ OnDemandThroughputOverride *types.OnDemandThroughput
+
+ // Provisioned throughput settings for the restored table.
+ ProvisionedThroughputOverride *types.ProvisionedThroughput
+
+ // Time in the past to restore the table to.
+ RestoreDateTime *time.Time
+
+ // The new server-side encryption settings for the restored table.
+ SSESpecificationOverride *types.SSESpecification
+
+ // The DynamoDB table that will be restored. This value is an Amazon Resource Name
+ // (ARN).
+ SourceTableArn *string
+
+ // Name of the source table that is being restored.
+ SourceTableName *string
+
+ // Restore the table to the latest possible time. LatestRestorableDateTime is
+ // typically 5 minutes before the current time.
+ UseLatestRestorableTime *bool
+
+ noSmithyDocumentSerde
+}
+
+func (in *RestoreTableToPointInTimeInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TargetTableName
+
+}
+
+type RestoreTableToPointInTimeOutput struct {
+
+ // Represents the properties of a table.
+ TableDescription *types.TableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationRestoreTableToPointInTimeMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpRestoreTableToPointInTime{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpRestoreTableToPointInTime{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "RestoreTableToPointInTime"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpRestoreTableToPointInTimeDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpRestoreTableToPointInTimeValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRestoreTableToPointInTime(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpRestoreTableToPointInTimeDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpRestoreTableToPointInTimeDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpRestoreTableToPointInTimeDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*RestoreTableToPointInTimeInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opRestoreTableToPointInTime(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "RestoreTableToPointInTime",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go
new file mode 100644
index 000000000..d3604bc75
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_Scan.go
@@ -0,0 +1,676 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// The Scan operation returns one or more items and item attributes by accessing
+// every item in a table or a secondary index. To have DynamoDB return fewer items,
+// you can provide a FilterExpression operation.
+//
+// If the total size of scanned items exceeds the maximum dataset size limit of 1
+// MB, the scan completes and results are returned to the user. The
+// LastEvaluatedKey value is also returned and the requestor can use the
+// LastEvaluatedKey to continue the scan in a subsequent operation. Each scan
+// response also includes number of items that were scanned (ScannedCount) as part
+// of the request. If using a FilterExpression , a scan result can result in no
+// items meeting the criteria and the Count will result in zero. If you did not
+// use a FilterExpression in the scan request, then Count is the same as
+// ScannedCount .
+//
+// Count and ScannedCount only return the count of items specific to a single scan
+// request and, unless the table is less than 1MB, do not represent the total
+// number of items in the table.
+//
+// A single Scan operation first reads up to the maximum number of items set (if
+// using the Limit parameter) or a maximum of 1 MB of data and then applies any
+// filtering to the results if a FilterExpression is provided. If LastEvaluatedKey
+// is present in the response, pagination is required to complete the full table
+// scan. For more information, see [Paginating the Results]in the Amazon DynamoDB Developer Guide.
+//
+// Scan operations proceed sequentially; however, for faster performance on a
+// large table or secondary index, applications can request a parallel Scan
+// operation by providing the Segment and TotalSegments parameters. For more
+// information, see [Parallel Scan]in the Amazon DynamoDB Developer Guide.
+//
+// By default, a Scan uses eventually consistent reads when accessing the items in
+// a table. Therefore, the results from an eventually consistent Scan may not
+// include the latest item changes at the time the scan iterates through each item
+// in the table. If you require a strongly consistent read of each item as the scan
+// iterates through the items in the table, you can set the ConsistentRead
+// parameter to true. Strong consistency only relates to the consistency of the
+// read at the item level.
+//
+// DynamoDB does not provide snapshot isolation for a scan operation when the
+// ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation does
+// not guarantee that all reads in a scan see a consistent snapshot of the table
+// when the scan operation was requested.
+//
+// [Paginating the Results]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination
+// [Parallel Scan]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan
+func (c *Client) Scan(ctx context.Context, params *ScanInput, optFns ...func(*Options)) (*ScanOutput, error) {
+ if params == nil {
+ params = &ScanInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "Scan", params, optFns, c.addOperationScanMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*ScanOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of a Scan operation.
+type ScanInput struct {
+
+ // The name of the table containing the requested items or if you provide IndexName
+ // , the name of the table to which that index belongs.
+ //
+ // You can also provide the Amazon Resource Name (ARN) of the table in this
+ // parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more
+ // information, see [AttributesToGet]in the Amazon DynamoDB Developer Guide.
+ //
+ // [AttributesToGet]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html
+ AttributesToGet []string
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see [ConditionalOperator]in the Amazon DynamoDB Developer Guide.
+ //
+ // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html
+ ConditionalOperator types.ConditionalOperator
+
+ // A Boolean value that determines the read consistency model during the scan:
+ //
+ // - If ConsistentRead is false , then the data returned from Scan might not
+ // contain the results from other recently completed write operations ( PutItem ,
+ // UpdateItem , or DeleteItem ).
+ //
+ // - If ConsistentRead is true , then all of the write operations that completed
+ // before the Scan began are guaranteed to be contained in the Scan response.
+ //
+ // The default setting for ConsistentRead is false .
+ //
+ // The ConsistentRead parameter is not supported on global secondary indexes. If
+ // you scan a global secondary index with ConsistentRead set to true, you will
+ // receive a ValidationException .
+ ConsistentRead *bool
+
+ // The primary key of the first item that this operation will evaluate. Use the
+ // value that was returned for LastEvaluatedKey in the previous operation.
+ //
+ // The data type for ExclusiveStartKey must be String, Number or Binary. No set
+ // data types are allowed.
+ //
+ // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify
+ // the same segment whose previous Scan returned the corresponding value of
+ // LastEvaluatedKey .
+ ExclusiveStartKey map[string]types.AttributeValue
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames :
+ //
+ // - To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // - To create a placeholder for repeating occurrences of an attribute name in
+ // an expression.
+ //
+ // - To prevent special characters in an attribute name from being
+ // misinterpreted in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // - Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be used
+ // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the
+ // Amazon DynamoDB Developer Guide). To work around this, you could specify the
+ // following for ExpressionAttributeNames :
+ //
+ // - {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // - #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see [Specifying Item Attributes] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute value.
+ // For example, suppose that you wanted to check whether the value of the
+ // ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ // ":disc":{"S":"Discontinued"} }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see [Condition Expressions] in the Amazon
+ // DynamoDB Developer Guide.
+ //
+ // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+ ExpressionAttributeValues map[string]types.AttributeValue
+
+ // A string that contains conditions that DynamoDB applies after the Scan
+ // operation, but before the data is returned to you. Items that do not satisfy the
+ // FilterExpression criteria are not returned.
+ //
+ // A FilterExpression is applied after the items have already been read; the
+ // process of filtering does not consume any additional read capacity units.
+ //
+ // For more information, see [Filter Expressions] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Filter Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression
+ FilterExpression *string
+
+ // The name of a secondary index to scan. This index can be any local secondary
+ // index or global secondary index. Note that if you use the IndexName parameter,
+ // you must also provide TableName .
+ IndexName *string
+
+ // The maximum number of items to evaluate (not necessarily the number of matching
+ // items). If DynamoDB processes the number of items up to the limit while
+ // processing the results, it stops the operation and returns the matching values
+ // up to that point, and a key in LastEvaluatedKey to apply in a subsequent
+ // operation, so that you can pick up where you left off. Also, if the processed
+ // dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the
+ // operation and returns the matching values up to the limit, and a key in
+ // LastEvaluatedKey to apply in a subsequent operation to continue the operation.
+ // For more information, see [Working with Queries]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Working with Queries]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html
+ Limit *int32
+
+ // A string that identifies one or more attributes to retrieve from the specified
+ // table or index. These attributes can include scalars, sets, or elements of a
+ // JSON document. The attributes in the expression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned. If
+ // any of the requested attributes are not found, they will not appear in the
+ // result.
+ //
+ // For more information, see [Specifying Item Attributes] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ProjectionExpression *string
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ // This is a legacy parameter. Use FilterExpression instead. For more information,
+ // see [ScanFilter]in the Amazon DynamoDB Developer Guide.
+ //
+ // [ScanFilter]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html
+ ScanFilter map[string]types.Condition
+
+ // For a parallel Scan request, Segment identifies an individual segment to be
+ // scanned by an application worker.
+ //
+ // Segment IDs are zero-based, so the first segment is always 0. For example, if
+ // you want to use four application threads to scan a table or an index, then the
+ // first thread specifies a Segment value of 0, the second thread specifies 1, and
+ // so on.
+ //
+ // The value of LastEvaluatedKey returned from a parallel Scan request must be
+ // used as ExclusiveStartKey with the same segment ID in a subsequent Scan
+ // operation.
+ //
+ // The value for Segment must be greater than or equal to 0, and less than the
+ // value provided for TotalSegments .
+ //
+ // If you provide Segment , you must also provide TotalSegments .
+ Segment *int32
+
+ // The attributes to be returned in the result. You can retrieve all item
+ // attributes, specific item attributes, the count of matching items, or in the
+ // case of an index, some or all of the attributes projected into the index.
+ //
+ // - ALL_ATTRIBUTES - Returns all of the item attributes from the specified table
+ // or index. If you query a local secondary index, then for each matching item in
+ // the index, DynamoDB fetches the entire item from the parent table. If the index
+ // is configured to project all item attributes, then all of the data can be
+ // obtained from the local secondary index, and no fetching is required.
+ //
+ // - ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
+ // all attributes that have been projected into the index. If the index is
+ // configured to project all attributes, this return value is equivalent to
+ // specifying ALL_ATTRIBUTES .
+ //
+ // - COUNT - Returns the number of matching items, rather than the matching items
+ // themselves. Note that this uses the same quantity of read capacity units as
+ // getting the items, and is subject to the same item size calculations.
+ //
+ // - SPECIFIC_ATTRIBUTES - Returns only the attributes listed in
+ // ProjectionExpression . This return value is equivalent to specifying
+ // ProjectionExpression without specifying any value for Select .
+ //
+ // If you query or scan a local secondary index and request only attributes that
+ // are projected into that index, the operation reads only the index and not the
+ // table. If any of the requested attributes are not projected into the local
+ // secondary index, DynamoDB fetches each of these attributes from the parent
+ // table. This extra fetching incurs additional throughput cost and latency.
+ //
+ // If you query or scan a global secondary index, you can only request attributes
+ // that are projected into the index. Global secondary index queries cannot fetch
+ // attributes from the parent table.
+ //
+ // If neither Select nor ProjectionExpression are specified, DynamoDB defaults to
+ // ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when
+ // accessing an index. You cannot use both Select and ProjectionExpression
+ // together in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES
+ // . (This usage is equivalent to specifying ProjectionExpression without any
+ // value for Select .)
+ //
+ // If you use the ProjectionExpression parameter, then the value for Select can
+ // only be SPECIFIC_ATTRIBUTES . Any other value for Select will return an error.
+ Select types.Select
+
+ // For a parallel Scan request, TotalSegments represents the total number of
+ // segments into which the Scan operation will be divided. The value of
+ // TotalSegments corresponds to the number of application workers that will perform
+ // the parallel scan. For example, if you want to use four application threads to
+ // scan a table or an index, specify a TotalSegments value of 4.
+ //
+ // The value for TotalSegments must be greater than or equal to 1, and less than
+ // or equal to 1000000. If you specify a TotalSegments value of 1, the Scan
+ // operation will be sequential rather than parallel.
+ //
+ // If you specify TotalSegments , you must also specify Segment .
+ TotalSegments *int32
+
+ noSmithyDocumentSerde
+}
+
+func (in *ScanInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of a Scan operation.
+type ScanOutput struct {
+
+ // The capacity units consumed by the Scan operation. The data returned includes
+ // the total provisioned throughput consumed, along with statistics for the table
+ // and any indexes involved in the operation. ConsumedCapacity is only returned if
+ // the ReturnConsumedCapacity parameter was specified. For more information, see [Capacity unit consumption for read operations]
+ // in the Amazon DynamoDB Developer Guide.
+ //
+ // [Capacity unit consumption for read operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption
+ ConsumedCapacity *types.ConsumedCapacity
+
+ // The number of items in the response.
+ //
+ // If you set ScanFilter in the request, then Count is the number of items
+ // returned after the filter was applied, and ScannedCount is the number of
+ // matching items before the filter was applied.
+ //
+ // If you did not use a filter in the request, then Count is the same as
+ // ScannedCount .
+ Count int32
+
+ // An array of item attributes that match the scan criteria. Each element in this
+ // array consists of an attribute name and the value for that attribute.
+ Items []map[string]types.AttributeValue
+
+ // The primary key of the item where the operation stopped, inclusive of the
+ // previous result set. Use this value to start a new operation, excluding this
+ // value in the new request.
+ //
+ // If LastEvaluatedKey is empty, then the "last page" of results has been
+ // processed and there is no more data to be retrieved.
+ //
+ // If LastEvaluatedKey is not empty, it does not necessarily mean that there is
+ // more data in the result set. The only way to know when you have reached the end
+ // of the result set is when LastEvaluatedKey is empty.
+ LastEvaluatedKey map[string]types.AttributeValue
+
+ // The number of items evaluated, before any ScanFilter is applied. A high
+ // ScannedCount value with few, or no, Count results indicates an inefficient Scan
+ // operation. For more information, see [Count and ScannedCount]in the Amazon DynamoDB Developer Guide.
+ //
+ // If you did not use a filter in the request, then ScannedCount is the same as
+ // Count .
+ //
+ // [Count and ScannedCount]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count
+ ScannedCount int32
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationScanMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpScan{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpScan{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "Scan"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpScanDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpScanValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opScan(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
// ScanPaginatorOptions is the paginator options for Scan
type ScanPaginatorOptions struct {
	// The maximum number of items to evaluate (not necessarily the number of matching
	// items). If DynamoDB processes the number of items up to the limit while
	// processing the results, it stops the operation and returns the matching values
	// up to that point, and a key in LastEvaluatedKey to apply in a subsequent
	// operation, so that you can pick up where you left off. Also, if the processed
	// dataset size exceeds 1 MB before DynamoDB reaches this limit, it stops the
	// operation and returns the matching values up to the limit, and a key in
	// LastEvaluatedKey to apply in a subsequent operation to continue the operation.
	// For more information, see [Working with Queries] in the Amazon DynamoDB Developer Guide.
	//
	// [Working with Queries]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html
	Limit int32
}
+
// ScanPaginator is a paginator for Scan
type ScanPaginator struct {
	options ScanPaginatorOptions
	client  ScanAPIClient
	params  *ScanInput // template input; copied on every NextPage call
	// nextToken holds the LastEvaluatedKey from the previous page; nil once
	// the final page has been returned.
	nextToken map[string]types.AttributeValue
	// firstPage is true until the first successful NextPage call, so that
	// HasMorePages reports true before any request has been made.
	firstPage bool
}
+
+// NewScanPaginator returns a new ScanPaginator
+func NewScanPaginator(client ScanAPIClient, params *ScanInput, optFns ...func(*ScanPaginatorOptions)) *ScanPaginator {
+ if params == nil {
+ params = &ScanInput{}
+ }
+
+ options := ScanPaginatorOptions{}
+ if params.Limit != nil {
+ options.Limit = *params.Limit
+ }
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &ScanPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ nextToken: params.ExclusiveStartKey,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *ScanPaginator) HasMorePages() bool {
+ return p.firstPage || p.nextToken != nil
+}
+
+// NextPage retrieves the next Scan page.
+func (p *ScanPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*ScanOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.ExclusiveStartKey = p.nextToken
+
+ var limit *int32
+ if p.options.Limit > 0 {
+ limit = &p.options.Limit
+ }
+ params.Limit = limit
+
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
+ result, err := p.client.Scan(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.nextToken
+ p.nextToken = result.LastEvaluatedKey
+
+ _ = prevToken
+
+ return result, nil
+}
+
+func addOpScanDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpScanDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpScanDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*ScanInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
// ScanAPIClient is a client that implements the Scan operation.
type ScanAPIClient interface {
	Scan(context.Context, *ScanInput, ...func(*Options)) (*ScanOutput, error)
}

// Compile-time check that *Client satisfies ScanAPIClient.
var _ ScanAPIClient = (*Client)(nil)
+
+func newServiceMetadataMiddleware_opScan(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "Scan",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go
new file mode 100644
index 000000000..f80afd79e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TagResource.go
@@ -0,0 +1,273 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Associate a set of tags with an Amazon DynamoDB resource. You can then activate
+// these user-defined tags so that they appear on the Billing and Cost Management
+// console for cost allocation tracking. You can call TagResource up to five times
+// per second, per account.
+//
+// - TagResource is an asynchronous operation. If you issue a ListTagsOfResourcerequest
+// immediately after a TagResource request, DynamoDB might return your previous
+// tag set, if there was one, or an empty tag set. This is because
+// ListTagsOfResource uses an eventually consistent query, and the metadata for
+// your tags or table might not be available at that moment. Wait for a few
+// seconds, and then try the ListTagsOfResource request again.
+//
+// - The application or removal of tags using TagResource and UntagResource APIs
+// is eventually consistent. ListTagsOfResource API will only reflect the changes
+// after a few seconds.
+//
+// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB
+// Developer Guide.
+//
+// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html
+func (c *Client) TagResource(ctx context.Context, params *TagResourceInput, optFns ...func(*Options)) (*TagResourceOutput, error) {
+ if params == nil {
+ params = &TagResourceInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "TagResource", params, optFns, c.addOperationTagResourceMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*TagResourceOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
// TagResourceInput is the request payload for the TagResource operation.
type TagResourceInput struct {

	// Identifies the Amazon DynamoDB resource to which tags should be added. This
	// value is an Amazon Resource Name (ARN).
	//
	// This member is required.
	ResourceArn *string

	// The tags to be assigned to the Amazon DynamoDB resource.
	//
	// This member is required.
	Tags []types.Tag

	// Marker excluding this struct from smithy document serialization.
	noSmithyDocumentSerde
}
+
+func (in *TagResourceInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.ResourceArn
+
+}
+
// TagResourceOutput is the (empty) response payload for TagResource.
type TagResourceOutput struct {
	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	// Marker excluding this struct from smithy document serialization.
	noSmithyDocumentSerde
}
+
// addOperationTagResourceMiddlewares registers the complete middleware stack
// for the TagResource operation. Registration order is significant: the
// protocol (de)serializers go in first, then the shared client middleware,
// then operation-specific validation, metadata, and tracing steps.
func (c *Client) addOperationTagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) {
	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
		return err
	}
	// Operation-specific wire format: AWS JSON 1.0.
	err = stack.Serialize.Add(&awsAwsjson10_serializeOpTagResource{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpTagResource{}, middleware.After)
	if err != nil {
		return err
	}
	if err := addProtocolFinalizerMiddlewares(stack, options, "TagResource"); err != nil {
		return fmt.Errorf("add protocol finalizers: %v", err)
	}

	// Shared client middleware: logging, request IDs, endpoint resolution,
	// payload hashing, retries, and response handling.
	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = addClientRequestID(stack); err != nil {
		return err
	}
	if err = addComputeContentLength(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = addComputePayloadSHA256(stack); err != nil {
		return err
	}
	if err = addRetry(stack, options); err != nil {
		return err
	}
	if err = addRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = addRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addSpanRetryLoop(stack, options); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	// Operation-specific: optional endpoint discovery, input validation, and
	// service metadata registration.
	if err = addOpTagResourceDiscoverEndpointMiddleware(stack, options, c); err != nil {
		return err
	}
	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
		return err
	}
	if err = addTimeOffsetBuild(stack, c); err != nil {
		return err
	}
	if err = addUserAgentRetryMode(stack, options); err != nil {
		return err
	}
	if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
		return err
	}
	if err = addCredentialSource(stack, options); err != nil {
		return err
	}
	if err = addOpTagResourceValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTagResource(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addValidateResponseChecksum(stack, options); err != nil {
		return err
	}
	if err = addAcceptEncodingGzip(stack, options); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	// Interceptor hooks bracketing each phase of the request lifecycle.
	if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
		return err
	}
	if err = addInterceptAttempt(stack, options); err != nil {
		return err
	}
	if err = addInterceptExecution(stack, options); err != nil {
		return err
	}
	if err = addInterceptBeforeSerialization(stack, options); err != nil {
		return err
	}
	if err = addInterceptAfterSerialization(stack, options); err != nil {
		return err
	}
	if err = addInterceptBeforeSigning(stack, options); err != nil {
		return err
	}
	if err = addInterceptAfterSigning(stack, options); err != nil {
		return err
	}
	if err = addInterceptTransmit(stack, options); err != nil {
		return err
	}
	if err = addInterceptBeforeDeserialization(stack, options); err != nil {
		return err
	}
	if err = addInterceptAfterDeserialization(stack, options); err != nil {
		return err
	}
	// Tracing spans for initialization and request building.
	if err = addSpanInitializeStart(stack); err != nil {
		return err
	}
	if err = addSpanInitializeEnd(stack); err != nil {
		return err
	}
	if err = addSpanBuildRequestStart(stack); err != nil {
		return err
	}
	if err = addSpanBuildRequestEnd(stack); err != nil {
		return err
	}
	return nil
}
+
+func addOpTagResourceDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpTagResourceDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpTagResourceDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*TagResourceInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opTagResource(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "TagResource",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go
new file mode 100644
index 000000000..ce6db313c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactGetItems.go
@@ -0,0 +1,303 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// TransactGetItems is a synchronous operation that atomically retrieves multiple
+// items from one or more tables (but not from indexes) in a single account and
+// Region. A TransactGetItems call can contain up to 100 TransactGetItem objects,
+// each of which contains a Get structure that specifies an item to retrieve from
+// a table in the account and Region. A call to TransactGetItems cannot retrieve
+// items from tables in more than one Amazon Web Services account or Region. The
+// aggregate size of the items in the transaction cannot exceed 4 MB.
+//
+// DynamoDB rejects the entire TransactGetItems request if any of the following is
+// true:
+//
+// - A conflicting operation is in the process of updating an item to be read.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - There is a user error, such as an invalid data format.
+//
+// - The aggregate size of the items in the transaction exceeded 4 MB.
+func (c *Client) TransactGetItems(ctx context.Context, params *TransactGetItemsInput, optFns ...func(*Options)) (*TransactGetItemsOutput, error) {
+ if params == nil {
+ params = &TransactGetItemsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "TransactGetItems", params, optFns, c.addOperationTransactGetItemsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*TransactGetItemsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
// TransactGetItemsInput is the request payload for the TransactGetItems
// operation.
type TransactGetItemsInput struct {

	// An ordered array of up to 100 TransactGetItem objects, each of which contains a
	// Get structure.
	//
	// This member is required.
	TransactItems []types.TransactGetItem

	// A value of TOTAL causes consumed capacity information to be returned, and a
	// value of NONE prevents that information from being returned. No other value is
	// valid.
	ReturnConsumedCapacity types.ReturnConsumedCapacity

	// Marker excluding this struct from smithy document serialization.
	noSmithyDocumentSerde
}
+
+func (in *TransactGetItemsInput) bindEndpointParams(p *EndpointParameters) {
+ func() {
+ v1 := in.TransactItems
+ var v2 []string
+ for _, v := range v1 {
+ v3 := v.Get
+ var v4 *string
+ if v3 != nil {
+ v5 := v3.TableName
+ v4 = v5
+ }
+ if v4 != nil {
+ v2 = append(v2, *v4)
+ }
+ }
+ p.ResourceArnList = v2
+ }()
+
+}
+
// TransactGetItemsOutput is the response payload for TransactGetItems.
type TransactGetItemsOutput struct {

	// If the ReturnConsumedCapacity value was TOTAL , this is an array of
	// ConsumedCapacity objects, one for each table addressed by TransactGetItem
	// objects in the TransactItems parameter. These ConsumedCapacity objects report
	// the read-capacity units consumed by the TransactGetItems call in that table.
	ConsumedCapacity []types.ConsumedCapacity

	// An ordered array of up to 100 ItemResponse objects, each of which corresponds
	// to the TransactGetItem object in the same position in the TransactItems array.
	// Each ItemResponse object contains a Map of the name-value pairs that are the
	// projected attributes of the requested item.
	//
	// If a requested item could not be retrieved, the corresponding ItemResponse
	// object is Null, or if the requested item has no projected attributes, the
	// corresponding ItemResponse object is an empty Map.
	Responses []types.ItemResponse

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	// Marker excluding this struct from smithy document serialization.
	noSmithyDocumentSerde
}
+
// addOperationTransactGetItemsMiddlewares registers the complete middleware
// stack for the TransactGetItems operation. Registration order is significant:
// the protocol (de)serializers go in first, then the shared client middleware,
// then operation-specific validation, metadata, and tracing steps.
func (c *Client) addOperationTransactGetItemsMiddlewares(stack *middleware.Stack, options Options) (err error) {
	if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
		return err
	}
	// Operation-specific wire format: AWS JSON 1.0.
	err = stack.Serialize.Add(&awsAwsjson10_serializeOpTransactGetItems{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpTransactGetItems{}, middleware.After)
	if err != nil {
		return err
	}
	if err := addProtocolFinalizerMiddlewares(stack, options, "TransactGetItems"); err != nil {
		return fmt.Errorf("add protocol finalizers: %v", err)
	}

	// Shared client middleware: logging, request IDs, endpoint resolution,
	// payload hashing, retries, and response handling.
	if err = addlegacyEndpointContextSetter(stack, options); err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = addClientRequestID(stack); err != nil {
		return err
	}
	if err = addComputeContentLength(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = addComputePayloadSHA256(stack); err != nil {
		return err
	}
	if err = addRetry(stack, options); err != nil {
		return err
	}
	if err = addRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = addRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addSpanRetryLoop(stack, options); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	// Operation-specific: optional endpoint discovery, input validation, and
	// service metadata registration.
	if err = addOpTransactGetItemsDiscoverEndpointMiddleware(stack, options, c); err != nil {
		return err
	}
	if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
		return err
	}
	if err = addTimeOffsetBuild(stack, c); err != nil {
		return err
	}
	if err = addUserAgentRetryMode(stack, options); err != nil {
		return err
	}
	if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
		return err
	}
	if err = addCredentialSource(stack, options); err != nil {
		return err
	}
	if err = addOpTransactGetItemsValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTransactGetItems(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = addRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addValidateResponseChecksum(stack, options); err != nil {
		return err
	}
	if err = addAcceptEncodingGzip(stack, options); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	if err = addDisableHTTPSMiddleware(stack, options); err != nil {
		return err
	}
	// Interceptor hooks bracketing each phase of the request lifecycle.
	if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
		return err
	}
	if err = addInterceptAttempt(stack, options); err != nil {
		return err
	}
	if err = addInterceptExecution(stack, options); err != nil {
		return err
	}
	if err = addInterceptBeforeSerialization(stack, options); err != nil {
		return err
	}
	if err = addInterceptAfterSerialization(stack, options); err != nil {
		return err
	}
	if err = addInterceptBeforeSigning(stack, options); err != nil {
		return err
	}
	if err = addInterceptAfterSigning(stack, options); err != nil {
		return err
	}
	if err = addInterceptTransmit(stack, options); err != nil {
		return err
	}
	if err = addInterceptBeforeDeserialization(stack, options); err != nil {
		return err
	}
	if err = addInterceptAfterDeserialization(stack, options); err != nil {
		return err
	}
	// Tracing spans for initialization and request building.
	if err = addSpanInitializeStart(stack); err != nil {
		return err
	}
	if err = addSpanInitializeEnd(stack); err != nil {
		return err
	}
	if err = addSpanBuildRequestStart(stack); err != nil {
		return err
	}
	if err = addSpanBuildRequestEnd(stack); err != nil {
		return err
	}
	return nil
}
+
+func addOpTransactGetItemsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpTransactGetItemsDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpTransactGetItemsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*TransactGetItemsInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opTransactGetItems(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "TransactGetItems",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go
new file mode 100644
index 000000000..a345302a8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_TransactWriteItems.go
@@ -0,0 +1,441 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// TransactWriteItems is a synchronous write operation that groups up to 100
+// action requests. These actions can target items in different tables, but not in
+// different Amazon Web Services accounts or Regions, and no two actions can target
+// the same item. For example, you cannot both ConditionCheck and Update the same
+// item. The aggregate size of the items in the transaction cannot exceed 4 MB.
+//
+// The actions are completed atomically so that either all of them succeed, or all
+// of them fail. They are defined by the following objects:
+//
+// - Put — Initiates a PutItem operation to write a new item. This structure
+// specifies the primary key of the item to be written, the name of the table to
+// write it in, an optional condition expression that must be satisfied for the
+// write to succeed, a list of the item's attributes, and a field indicating
+// whether to retrieve the item's attributes if the condition is not met.
+//
+// - Update — Initiates an UpdateItem operation to update an existing item. This
+// structure specifies the primary key of the item to be updated, the name of the
+// table where it resides, an optional condition expression that must be satisfied
+// for the update to succeed, an expression that defines one or more attributes to
+// be updated, and a field indicating whether to retrieve the item's attributes if
+// the condition is not met.
+//
+// - Delete — Initiates a DeleteItem operation to delete an existing item. This
+// structure specifies the primary key of the item to be deleted, the name of the
+// table where it resides, an optional condition expression that must be satisfied
+// for the deletion to succeed, and a field indicating whether to retrieve the
+// item's attributes if the condition is not met.
+//
+// - ConditionCheck — Applies a condition to an item that is not being modified
+// by the transaction. This structure specifies the primary key of the item to be
+// checked, the name of the table where it resides, a condition expression that
+// must be satisfied for the transaction to succeed, and a field indicating whether
+// to retrieve the item's attributes if the condition is not met.
+//
+// DynamoDB rejects the entire TransactWriteItems request if any of the following
+// is true:
+//
+// - A condition in one of the condition expressions is not met.
+//
+// - An ongoing operation is in the process of updating the same item.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - An item size becomes too large (bigger than 400 KB), a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because of
+// changes made by the transaction.
+//
+// - The aggregate size of the items in the transaction exceeds 4 MB.
+//
+// - There is a user error, such as an invalid data format.
+func (c *Client) TransactWriteItems(ctx context.Context, params *TransactWriteItemsInput, optFns ...func(*Options)) (*TransactWriteItemsOutput, error) {
+ if params == nil {
+ params = &TransactWriteItemsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "TransactWriteItems", params, optFns, c.addOperationTransactWriteItemsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*TransactWriteItemsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type TransactWriteItemsInput struct {
+
+ // An ordered array of up to 100 TransactWriteItem objects, each of which contains
+ // a ConditionCheck , Put , Update , or Delete object. These can operate on items
+ // in different tables, but the tables must reside in the same Amazon Web Services
+ // account and Region, and no two of them can operate on the same item.
+ //
+ // This member is required.
+ TransactItems []types.TransactWriteItem
+
+ // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent,
+ // meaning that multiple identical calls have the same effect as one single call.
+ //
+ // Although multiple identical calls using the same client request token produce
+ // the same result on the server (no side effects), the responses to the calls
+ // might not be the same. If the ReturnConsumedCapacity parameter is set, then the
+ // initial TransactWriteItems call returns the amount of write capacity units
+ // consumed in making the changes. Subsequent TransactWriteItems calls with the
+ // same client token return the number of read capacity units consumed in reading
+ // the item.
+ //
+ // A client request token is valid for 10 minutes after the first request that
+ // uses it is completed. After 10 minutes, any request with the same client token
+ // is treated as a new request. Do not resubmit the same request with the same
+ // client token for more than 10 minutes, or the result might not be idempotent.
+ //
+ // If you submit a request with the same client token but a change in other
+ // parameters within the 10-minute idempotency window, DynamoDB returns an
+ // IdempotentParameterMismatch exception.
+ ClientRequestToken *string
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ // Determines whether item collection metrics are returned. If set to SIZE , the
+ // response includes statistics about item collections (if any), that were modified
+ // during the operation and are returned in the response. If set to NONE (the
+ // default), no statistics are returned.
+ ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics
+
+ noSmithyDocumentSerde
+}
+
+func (in *TransactWriteItemsInput) bindEndpointParams(p *EndpointParameters) {
+ func() {
+ v1 := in.TransactItems
+ var v2 [][]string
+ for _, v := range v1 {
+ v3 := v.ConditionCheck
+ var v4 *string
+ if v3 != nil {
+ v5 := v3.TableName
+ v4 = v5
+ }
+ v6 := v.Put
+ var v7 *string
+ if v6 != nil {
+ v8 := v6.TableName
+ v7 = v8
+ }
+ v9 := v.Delete
+ var v10 *string
+ if v9 != nil {
+ v11 := v9.TableName
+ v10 = v11
+ }
+ v12 := v.Update
+ var v13 *string
+ if v12 != nil {
+ v14 := v12.TableName
+ v13 = v14
+ }
+ v15 := []string{}
+ if v4 != nil {
+ v15 = append(v15, *v4)
+ }
+ if v7 != nil {
+ v15 = append(v15, *v7)
+ }
+ if v10 != nil {
+ v15 = append(v15, *v10)
+ }
+ if v13 != nil {
+ v15 = append(v15, *v13)
+ }
+ if v15 != nil {
+ v2 = append(v2, v15)
+ }
+ }
+ var v16 []string
+ for _, v := range v2 {
+ v16 = append(v16, v...)
+ }
+ p.ResourceArnList = v16
+ }()
+
+}
+
+type TransactWriteItemsOutput struct {
+
+ // The capacity units consumed by the entire TransactWriteItems operation. The
+ // values of the list are ordered according to the ordering of the TransactItems
+ // request parameter.
+ ConsumedCapacity []types.ConsumedCapacity
+
+ // A list of tables that were processed by TransactWriteItems and, for each table,
+ // information about any item collections that were affected by individual
+ // UpdateItem , PutItem , or DeleteItem operations.
+ ItemCollectionMetrics map[string][]types.ItemCollectionMetrics
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationTransactWriteItemsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpTransactWriteItems{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpTransactWriteItems{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "TransactWriteItems"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpTransactWriteItemsDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addIdempotencyToken_opTransactWriteItemsMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addOpTransactWriteItemsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opTransactWriteItems(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpTransactWriteItemsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpTransactWriteItemsDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpTransactWriteItemsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*TransactWriteItemsInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+type idempotencyToken_initializeOpTransactWriteItems struct {
+ tokenProvider IdempotencyTokenProvider
+}
+
+func (*idempotencyToken_initializeOpTransactWriteItems) ID() string {
+ return "OperationIdempotencyTokenAutoFill"
+}
+
+func (m *idempotencyToken_initializeOpTransactWriteItems) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.tokenProvider == nil {
+ return next.HandleInitialize(ctx, in)
+ }
+
+ input, ok := in.Parameters.(*TransactWriteItemsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("expected middleware input to be of type *TransactWriteItemsInput ")
+ }
+
+ if input.ClientRequestToken == nil {
+ t, err := m.tokenProvider.GetIdempotencyToken()
+ if err != nil {
+ return out, metadata, err
+ }
+ input.ClientRequestToken = &t
+ }
+ return next.HandleInitialize(ctx, in)
+}
+func addIdempotencyToken_opTransactWriteItemsMiddleware(stack *middleware.Stack, cfg Options) error {
+ return stack.Initialize.Add(&idempotencyToken_initializeOpTransactWriteItems{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
+}
+
+func newServiceMetadataMiddleware_opTransactWriteItems(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "TransactWriteItems",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go
new file mode 100644
index 000000000..c481bb262
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UntagResource.go
@@ -0,0 +1,271 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Removes the association of tags from an Amazon DynamoDB resource. You can call
+// UntagResource up to five times per second, per account.
+//
+// - UntagResource is an asynchronous operation. If you issue a ListTagsOfResourcerequest
+// immediately after an UntagResource request, DynamoDB might return your
+// previous tag set, if there was one, or an empty tag set. This is because
+// ListTagsOfResource uses an eventually consistent query, and the metadata for
+// your tags or table might not be available at that moment. Wait for a few
+// seconds, and then try the ListTagsOfResource request again.
+//
+// - The application or removal of tags using TagResource and UntagResource APIs
+// is eventually consistent. ListTagsOfResource API will only reflect the changes
+// after a few seconds.
+//
+// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB
+// Developer Guide.
+//
+// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html
+func (c *Client) UntagResource(ctx context.Context, params *UntagResourceInput, optFns ...func(*Options)) (*UntagResourceOutput, error) {
+ if params == nil {
+ params = &UntagResourceInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UntagResource", params, optFns, c.addOperationUntagResourceMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UntagResourceOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UntagResourceInput struct {
+
+ // The DynamoDB resource that the tags will be removed from. This value is an
+ // Amazon Resource Name (ARN).
+ //
+ // This member is required.
+ ResourceArn *string
+
+ // A list of tag keys. Existing tags of the resource whose keys are members of
+ // this list will be removed from the DynamoDB resource.
+ //
+ // This member is required.
+ TagKeys []string
+
+ noSmithyDocumentSerde
+}
+
+func (in *UntagResourceInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.ResourceArn
+
+}
+
+type UntagResourceOutput struct {
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUntagResourceMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUntagResource{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUntagResource{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UntagResource"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUntagResourceDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUntagResourceValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUntagResource(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpUntagResourceDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpUntagResourceDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpUntagResourceDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*UntagResourceInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opUntagResource(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UntagResource",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go
new file mode 100644
index 000000000..4d9dcb86a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContinuousBackups.go
@@ -0,0 +1,271 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// UpdateContinuousBackups enables or disables point in time recovery for the
+// specified table. A successful UpdateContinuousBackups call returns the current
+// ContinuousBackupsDescription . Continuous backups are ENABLED on all tables at
+// table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus
+// will be set to ENABLED.
+//
+// Once continuous backups and point in time recovery are enabled, you can restore
+// to any point in time within EarliestRestorableDateTime and
+// LatestRestorableDateTime .
+//
+// LatestRestorableDateTime is typically 5 minutes before the current time. You
+// can restore your table to any point in time in the last 35 days. You can set the
+// RecoveryPeriodInDays to any value between 1 and 35 days.
+func (c *Client) UpdateContinuousBackups(ctx context.Context, params *UpdateContinuousBackupsInput, optFns ...func(*Options)) (*UpdateContinuousBackupsOutput, error) {
+ if params == nil {
+ params = &UpdateContinuousBackupsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateContinuousBackups", params, optFns, c.addOperationUpdateContinuousBackupsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateContinuousBackupsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UpdateContinuousBackupsInput struct {
+
+ // Represents the settings used to enable point in time recovery.
+ //
+ // This member is required.
+ PointInTimeRecoverySpecification *types.PointInTimeRecoverySpecification
+
+ // The name of the table. You can also provide the Amazon Resource Name (ARN) of
+ // the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateContinuousBackupsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type UpdateContinuousBackupsOutput struct {
+
+ // Represents the continuous backups and point in time recovery settings on the
+ // table.
+ ContinuousBackupsDescription *types.ContinuousBackupsDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateContinuousBackupsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateContinuousBackups{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateContinuousBackups{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateContinuousBackups"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUpdateContinuousBackupsDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateContinuousBackupsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateContinuousBackups(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpUpdateContinuousBackupsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpUpdateContinuousBackupsDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpUpdateContinuousBackupsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*UpdateContinuousBackupsInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opUpdateContinuousBackups(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateContinuousBackups",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go
new file mode 100644
index 000000000..edb045481
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateContributorInsights.go
@@ -0,0 +1,235 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Updates the status for contributor insights for a specific table or index.
+// CloudWatch Contributor Insights for DynamoDB graphs display the partition key
+// and (if applicable) sort key of frequently accessed items and frequently
+// throttled items in plaintext. If you require the use of Amazon Web Services Key
+// Management Service (KMS) to encrypt this table’s partition key and sort key data
+// with an Amazon Web Services managed key or customer managed key, you should not
+// enable CloudWatch Contributor Insights for DynamoDB for this table.
+func (c *Client) UpdateContributorInsights(ctx context.Context, params *UpdateContributorInsightsInput, optFns ...func(*Options)) (*UpdateContributorInsightsOutput, error) {
+ if params == nil {
+ params = &UpdateContributorInsightsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateContributorInsights", params, optFns, c.addOperationUpdateContributorInsightsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateContributorInsightsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UpdateContributorInsightsInput struct {
+
+ // Represents the contributor insights action.
+ //
+ // This member is required.
+ ContributorInsightsAction types.ContributorInsightsAction
+
+ // The name of the table. You can also provide the Amazon Resource Name (ARN) of
+ // the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // Specifies whether to track all access and throttled events or throttled events
+ // only for the DynamoDB table or index.
+ ContributorInsightsMode types.ContributorInsightsMode
+
+ // The global secondary index name, if applicable.
+ IndexName *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateContributorInsightsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type UpdateContributorInsightsOutput struct {
+
+ // The updated mode of CloudWatch Contributor Insights that determines whether to
+ // monitor all access and throttled events or to track throttled events
+ // exclusively.
+ ContributorInsightsMode types.ContributorInsightsMode
+
+ // The status of contributor insights
+ ContributorInsightsStatus types.ContributorInsightsStatus
+
+ // The name of the global secondary index, if applicable.
+ IndexName *string
+
+ // The name of the table.
+ TableName *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateContributorInsightsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateContributorInsights{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateContributorInsights{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateContributorInsights"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateContributorInsightsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateContributorInsights(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opUpdateContributorInsights(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateContributorInsights",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go
new file mode 100644
index 000000000..25878256f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTable.go
@@ -0,0 +1,292 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Adds or removes replicas in the specified global table. The global table must
+// already exist to be able to use this operation. Any replica to be added must be
+// empty, have the same name as the global table, have the same key schema, have
+// DynamoDB Streams enabled, and have the same provisioned and maximum write
+// capacity units.
+//
+// This documentation is for version 2017.11.29 (Legacy) of global tables, which
+// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible,
+// because it provides greater flexibility, higher efficiency, and consumes less
+// write capacity than 2017.11.29 (Legacy).
+//
+// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables
+// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables].
+//
+// If you are using global tables [Version 2019.11.21] (Current) you can use [UpdateTable] instead.
+//
+// Although you can use UpdateGlobalTable to add replicas and remove replicas in a
+// single request, for simplicity we recommend that you issue separate requests for
+// adding or removing replicas.
+//
+// If global secondary indexes are specified, then the following conditions must
+// also be met:
+//
+// - The global secondary indexes must have the same name.
+//
+// - The global secondary indexes must have the same hash key and sort key (if
+// present).
+//
+// - The global secondary indexes must have the same provisioned and maximum
+// write capacity units.
+//
+// [UpdateTable]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html
+// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html
+// [Version 2019.11.21]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html
+func (c *Client) UpdateGlobalTable(ctx context.Context, params *UpdateGlobalTableInput, optFns ...func(*Options)) (*UpdateGlobalTableOutput, error) {
+ if params == nil {
+ params = &UpdateGlobalTableInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateGlobalTable", params, optFns, c.addOperationUpdateGlobalTableMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateGlobalTableOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UpdateGlobalTableInput struct {
+
+ // The global table name.
+ //
+ // This member is required.
+ GlobalTableName *string
+
+ // A list of Regions that should be added or removed from the global table.
+ //
+ // This member is required.
+ ReplicaUpdates []types.ReplicaUpdate
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateGlobalTableInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.GlobalTableName
+
+}
+
+type UpdateGlobalTableOutput struct {
+
+ // Contains the details of the global table.
+ GlobalTableDescription *types.GlobalTableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateGlobalTableMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateGlobalTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateGlobalTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateGlobalTable"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUpdateGlobalTableDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateGlobalTableValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateGlobalTable(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpUpdateGlobalTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpUpdateGlobalTableDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpUpdateGlobalTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*UpdateGlobalTableInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opUpdateGlobalTable(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateGlobalTable",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go
new file mode 100644
index 000000000..6505381e8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateGlobalTableSettings.go
@@ -0,0 +1,295 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Updates settings for a global table.
+//
+// This documentation is for version 2017.11.29 (Legacy) of global tables, which
+// should be avoided for new global tables. Customers should use [Global Tables version 2019.11.21 (Current)]when possible,
+// because it provides greater flexibility, higher efficiency, and consumes less
+// write capacity than 2017.11.29 (Legacy).
+//
+// To determine which version you're using, see [Determining the global table version you are using]. To update existing global tables
+// from version 2017.11.29 (Legacy) to version 2019.11.21 (Current), see [Upgrading global tables].
+//
+// [Global Tables version 2019.11.21 (Current)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+// [Upgrading global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html
+// [Determining the global table version you are using]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html
+func (c *Client) UpdateGlobalTableSettings(ctx context.Context, params *UpdateGlobalTableSettingsInput, optFns ...func(*Options)) (*UpdateGlobalTableSettingsOutput, error) {
+ if params == nil {
+ params = &UpdateGlobalTableSettingsInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateGlobalTableSettings", params, optFns, c.addOperationUpdateGlobalTableSettingsMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateGlobalTableSettingsOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UpdateGlobalTableSettingsInput struct {
+
+ // The name of the global table
+ //
+ // This member is required.
+ GlobalTableName *string
+
+ // The billing mode of the global table. If GlobalTableBillingMode is not
+ // specified, the global table defaults to PROVISIONED capacity billing mode.
+ //
+ // - PROVISIONED - We recommend using PROVISIONED for predictable workloads.
+ // PROVISIONED sets the billing mode to [Provisioned capacity mode].
+ //
+ // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable
+ // workloads. PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode].
+ //
+ // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+ // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html
+ GlobalTableBillingMode types.BillingMode
+
+ // Represents the settings of a global secondary index for a global table that
+ // will be modified.
+ GlobalTableGlobalSecondaryIndexSettingsUpdate []types.GlobalTableGlobalSecondaryIndexSettingsUpdate
+
+ // Auto scaling settings for managing provisioned write capacity for the global
+ // table.
+ GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *types.AutoScalingSettingsUpdate
+
+ // The maximum number of writes consumed per second before DynamoDB returns a
+ // ThrottlingException.
+ GlobalTableProvisionedWriteCapacityUnits *int64
+
+ // Represents the settings for a global table in a Region that will be modified.
+ ReplicaSettingsUpdate []types.ReplicaSettingsUpdate
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateGlobalTableSettingsInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.GlobalTableName
+
+}
+
+type UpdateGlobalTableSettingsOutput struct {
+
+ // The name of the global table.
+ GlobalTableName *string
+
+ // The Region-specific settings for the global table.
+ ReplicaSettings []types.ReplicaSettingsDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateGlobalTableSettingsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateGlobalTableSettings{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateGlobalTableSettings{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateGlobalTableSettings"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUpdateGlobalTableSettingsDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateGlobalTableSettingsValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateGlobalTableSettings(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpUpdateGlobalTableSettingsDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpUpdateGlobalTableSettingsDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpUpdateGlobalTableSettingsDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*UpdateGlobalTableSettingsInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opUpdateGlobalTableSettings(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateGlobalTableSettings",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go
new file mode 100644
index 000000000..8416ad88e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateItem.go
@@ -0,0 +1,536 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Edits an existing item's attributes, or adds a new item to the table if it does
+// not already exist. You can put, delete, or add attribute values. You can also
+// perform a conditional update on an existing item (insert a new attribute
+// name-value pair if it doesn't exist, or replace an existing name-value pair if
+// it has certain expected attribute values).
+//
+// You can also return the item's attribute values in the same UpdateItem
+// operation using the ReturnValues parameter.
+func (c *Client) UpdateItem(ctx context.Context, params *UpdateItemInput, optFns ...func(*Options)) (*UpdateItemOutput, error) {
+ if params == nil {
+ params = &UpdateItemInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateItem", params, optFns, c.addOperationUpdateItemMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateItemOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of an UpdateItem operation.
+type UpdateItemInput struct {
+
+ // The primary key of the item to be updated. Each element consists of an
+ // attribute name and a value for that attribute.
+ //
+ // For the primary key, you must provide all of the attributes. For example, with
+ // a simple primary key, you only need to provide a value for the partition key.
+ // For a composite primary key, you must provide values for both the partition key
+ // and the sort key.
+ //
+ // This member is required.
+ Key map[string]types.AttributeValue
+
+ // The name of the table containing the item to update. You can also provide the
+ // Amazon Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // This is a legacy parameter. Use UpdateExpression instead. For more information,
+ // see [AttributeUpdates]in the Amazon DynamoDB Developer Guide.
+ //
+ // [AttributeUpdates]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html
+ AttributeUpdates map[string]types.AttributeValueUpdate
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ //
+ // An expression can contain any of the following:
+ //
+ // - Functions: attribute_exists | attribute_not_exists | attribute_type |
+ // contains | begins_with | size
+ //
+ // These function names are case-sensitive.
+ //
+ // - Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
+ //
+ // - Logical operators: AND | OR | NOT
+ //
+ // For more information about condition expressions, see [Specifying Conditions] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Specifying Conditions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+ ConditionExpression *string
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more
+ // information, see [ConditionalOperator]in the Amazon DynamoDB Developer Guide.
+ //
+ // [ConditionalOperator]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html
+ ConditionalOperator types.ConditionalOperator
+
+ // This is a legacy parameter. Use ConditionExpression instead. For more
+ // information, see [Expected]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Expected]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html
+ Expected map[string]types.ExpectedAttributeValue
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames :
+ //
+ // - To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // - To create a placeholder for repeating occurrences of an attribute name in
+ // an expression.
+ //
+ // - To prevent special characters in an attribute name from being
+ // misinterpreted in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // - Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be used
+ // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the
+ // Amazon DynamoDB Developer Guide.) To work around this, you could specify the
+ // following for ExpressionAttributeNames :
+ //
+ // - {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // - #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information about expression attribute names, see [Specifying Item Attributes] in the Amazon
+ // DynamoDB Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Specifying Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ //
+ // Use the : (colon) character in an expression to dereference an attribute value.
+ // For example, suppose that you wanted to check whether the value of the
+ // ProductStatus attribute was one of the following:
+ //
+ // Available | Backordered | Discontinued
+ //
+ // You would first need to specify ExpressionAttributeValues as follows:
+ //
+ // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"},
+ // ":disc":{"S":"Discontinued"} }
+ //
+ // You could then use these values in an expression, such as this:
+ //
+ // ProductStatus IN (:avail, :back, :disc)
+ //
+ // For more information on expression attribute values, see [Condition Expressions] in the Amazon
+ // DynamoDB Developer Guide.
+ //
+ // [Condition Expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html
+ ExpressionAttributeValues map[string]types.AttributeValue
+
+ // Determines the level of detail about either provisioned or on-demand throughput
+ // consumption that is returned in the response:
+ //
+ // - INDEXES - The response includes the aggregate ConsumedCapacity for the
+ // operation, together with ConsumedCapacity for each table and secondary index
+ // that was accessed.
+ //
+ // Note that some operations, such as GetItem and BatchGetItem , do not access any
+ // indexes at all. In these cases, specifying INDEXES will only return
+ // ConsumedCapacity information for table(s).
+ //
+ // - TOTAL - The response includes only the aggregate ConsumedCapacity for the
+ // operation.
+ //
+ // - NONE - No ConsumedCapacity details are included in the response.
+ ReturnConsumedCapacity types.ReturnConsumedCapacity
+
+ // Determines whether item collection metrics are returned. If set to SIZE , the
+ // response includes statistics about item collections, if any, that were modified
+ // during the operation are returned in the response. If set to NONE (the
+ // default), no statistics are returned.
+ ReturnItemCollectionMetrics types.ReturnItemCollectionMetrics
+
+ // Use ReturnValues if you want to get the item attributes as they appear before
+ // or after they are successfully updated. For UpdateItem , the valid values are:
+ //
+ // - NONE - If ReturnValues is not specified, or if its value is NONE , then
+ // nothing is returned. (This setting is the default for ReturnValues .)
+ //
+ // - ALL_OLD - Returns all of the attributes of the item, as they appeared before
+ // the UpdateItem operation.
+ //
+ // - UPDATED_OLD - Returns only the updated attributes, as they appeared before
+ // the UpdateItem operation.
+ //
+ // - ALL_NEW - Returns all of the attributes of the item, as they appear after
+ // the UpdateItem operation.
+ //
+ // - UPDATED_NEW - Returns only the updated attributes, as they appear after the
+ // UpdateItem operation.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ //
+ // The values returned are strongly consistent.
+ ReturnValues types.ReturnValue
+
+ // An optional parameter that returns the item attributes for an UpdateItem
+ // operation that failed a condition check.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ ReturnValuesOnConditionCheckFailure types.ReturnValuesOnConditionCheckFailure
+
+ // An expression that defines one or more attributes to be updated, the action to
+ // be performed on them, and new values for them.
+ //
+ // The following action values are available for UpdateExpression .
+ //
+ // - SET - Adds one or more attributes and values to an item. If any of these
+ // attributes already exist, they are replaced by the new values. You can also use
+ // SET to add or subtract from an attribute that is of type Number. For example:
+ // SET myNum = myNum + :val
+ //
+ // SET supports the following functions:
+ //
+ // - if_not_exists (path, operand) - if the item does not contain an attribute at
+ // the specified path, then if_not_exists evaluates to operand; otherwise, it
+ // evaluates to path. You can use this function to avoid overwriting an attribute
+ // that may already be present in the item.
+ //
+ // - list_append (operand, operand) - evaluates to a list with a new element
+ // added to it. You can append the new element to the start or the end of the list
+ // by reversing the order of the operands.
+ //
+ // These function names are case-sensitive.
+ //
+ // - REMOVE - Removes one or more attributes from an item.
+ //
+ // - ADD - Adds the specified value to the item, if the attribute does not
+ // already exist. If the attribute does exist, then the behavior of ADD depends
+ // on the data type of the attribute:
+ //
+ // - If the existing attribute is a number, and if Value is also a number, then
+ // Value is mathematically added to the existing attribute. If Value is a
+ // negative number, then it is subtracted from the existing attribute.
+ //
+ // If you use ADD to increment or decrement a number value for an item that doesn't
+ // exist before the update, DynamoDB uses 0 as the initial value.
+ //
+ // Similarly, if you use ADD for an existing item to increment or decrement an
+ // attribute value that doesn't exist before the update, DynamoDB uses 0 as the
+ // initial value. For example, suppose that the item you want to update doesn't
+ // have an attribute named itemcount , but you decide to ADD the number 3 to this
+ // attribute anyway. DynamoDB will create the itemcount attribute, set its
+ // initial value to 0 , and finally add 3 to it. The result will be a new
+ // itemcount attribute in the item, with a value of 3 .
+ //
+ // - If the existing data type is a set and if Value is also a set, then Value is
+ // added to the existing set. For example, if the attribute value is the set
+ // [1,2] , and the ADD action specified [3] , then the final attribute value is
+ // [1,2,3] . An error occurs if an ADD action is specified for a set attribute
+ // and the attribute type specified does not match the existing set type.
+ //
+ // Both sets must have the same primitive data type. For example, if the existing
+ // data type is a set of strings, the Value must also be a set of strings.
+ //
+ // The ADD action only supports Number and set data types. In addition, ADD can
+ // only be used on top-level attributes, not nested attributes.
+ //
+ // - DELETE - Deletes an element from a set.
+ //
+ // If a set of values is specified, then those values are subtracted from the old
+ // set. For example, if the attribute value was the set [a,b,c] and the DELETE
+ // action specifies [a,c] , then the final attribute value is [b] . Specifying an
+ // empty set is an error.
+ //
+ // The DELETE action only supports set data types. In addition, DELETE can only be
+ // used on top-level attributes, not nested attributes.
+ //
+ // You can have many actions in a single expression, such as the following: SET
+ // a=:value1, b=:value2 DELETE :value3, :value4, :value5
+ //
+ // For more information on update expressions, see [Modifying Items and Attributes] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Modifying Items and Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html
+ UpdateExpression *string
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateItemInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of an UpdateItem operation.
+type UpdateItemOutput struct {
+
+ // A map of attribute values as they appear before or after the UpdateItem
+ // operation, as determined by the ReturnValues parameter.
+ //
+ // The Attributes map is only present if the update was successful and ReturnValues
+ // was specified as something other than NONE in the request. Each element
+ // represents one attribute.
+ Attributes map[string]types.AttributeValue
+
+ // The capacity units consumed by the UpdateItem operation. The data returned
+ // includes the total provisioned throughput consumed, along with statistics for
+ // the table and any indexes involved in the operation. ConsumedCapacity is only
+ // returned if the ReturnConsumedCapacity parameter was specified. For more
+ // information, see [Capacity unity consumption for write operations]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Capacity unity consumption for write operations]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption
+ ConsumedCapacity *types.ConsumedCapacity
+
+ // Information about item collections, if any, that were affected by the UpdateItem
+ // operation. ItemCollectionMetrics is only returned if the
+ // ReturnItemCollectionMetrics parameter was specified. If the table does not have
+ // any local secondary indexes, this information is not returned in the response.
+ //
+ // Each ItemCollectionMetrics element consists of:
+ //
+ // - ItemCollectionKey - The partition key value of the item collection. This is
+ // the same as the partition key value of the item itself.
+ //
+ // - SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
+ // This value is a two-element array containing a lower bound and an upper bound
+ // for the estimate. The estimate includes the size of all the items in the table,
+ // plus the size of all attributes projected into all of the local secondary
+ // indexes on that table. Use this estimate to measure whether a local secondary
+ // index is approaching its size limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ ItemCollectionMetrics *types.ItemCollectionMetrics
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateItemMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateItem{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateItem"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUpdateItemDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateItemValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateItem(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpUpdateItemDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpUpdateItemDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpUpdateItemDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*UpdateItemInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opUpdateItem(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateItem",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go
new file mode 100644
index 000000000..9a1b68ebd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateKinesisStreamingDestination.go
@@ -0,0 +1,270 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// The command to update the Kinesis stream destination.
+func (c *Client) UpdateKinesisStreamingDestination(ctx context.Context, params *UpdateKinesisStreamingDestinationInput, optFns ...func(*Options)) (*UpdateKinesisStreamingDestinationOutput, error) {
+ if params == nil {
+ params = &UpdateKinesisStreamingDestinationInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateKinesisStreamingDestination", params, optFns, c.addOperationUpdateKinesisStreamingDestinationMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateKinesisStreamingDestinationOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UpdateKinesisStreamingDestinationInput struct {
+
+ // The Amazon Resource Name (ARN) for the Kinesis stream input.
+ //
+ // This member is required.
+ StreamArn *string
+
+ // The table name for the Kinesis streaming destination input. You can also
+ // provide the ARN of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // The command to update the Kinesis stream configuration.
+ UpdateKinesisStreamingConfiguration *types.UpdateKinesisStreamingConfiguration
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateKinesisStreamingDestinationInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type UpdateKinesisStreamingDestinationOutput struct {
+
+ // The status of the attempt to update the Kinesis streaming destination output.
+ DestinationStatus types.DestinationStatus
+
+ // The ARN for the Kinesis stream input.
+ StreamArn *string
+
+ // The table name for the Kinesis streaming destination output.
+ TableName *string
+
+ // The command to update the Kinesis streaming destination configuration.
+ UpdateKinesisStreamingConfiguration *types.UpdateKinesisStreamingConfiguration
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateKinesisStreamingDestinationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateKinesisStreamingDestination{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateKinesisStreamingDestination"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUpdateKinesisStreamingDestinationDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateKinesisStreamingDestinationValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateKinesisStreamingDestination(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpUpdateKinesisStreamingDestinationDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpUpdateKinesisStreamingDestinationDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpUpdateKinesisStreamingDestinationDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*UpdateKinesisStreamingDestinationInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opUpdateKinesisStreamingDestination(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateKinesisStreamingDestination",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go
new file mode 100644
index 000000000..325db35c8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTable.go
@@ -0,0 +1,381 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Modifies the provisioned throughput settings, global secondary indexes, or
+// DynamoDB Streams settings for a given table.
+//
+// You can only perform one of the following operations at once:
+//
+// - Modify the provisioned throughput settings of the table.
+//
+// - Remove a global secondary index from the table.
+//
+// - Create a new global secondary index on the table. After the index begins
+// backfilling, you can use UpdateTable to perform other operations.
+//
+// UpdateTable is an asynchronous operation; while it's executing, the table
+// status changes from ACTIVE to UPDATING . While it's UPDATING , you can't issue
+// another UpdateTable request. When the table returns to the ACTIVE state, the
+// UpdateTable operation is complete.
+func (c *Client) UpdateTable(ctx context.Context, params *UpdateTableInput, optFns ...func(*Options)) (*UpdateTableOutput, error) {
+ if params == nil {
+ params = &UpdateTableInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateTable", params, optFns, c.addOperationUpdateTableMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateTableOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of an UpdateTable operation.
+type UpdateTableInput struct {
+
+ // The name of the table to be updated. You can also provide the Amazon Resource
+ // Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // An array of attributes that describe the key schema for the table and indexes.
+ // If you are adding a new global secondary index to the table,
+ // AttributeDefinitions must include the key element(s) of the new index.
+ AttributeDefinitions []types.AttributeDefinition
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. When switching from pay-per-request to provisioned capacity, initial
+ // provisioned capacity values must be set. The initial provisioned capacity values
+ // are estimated based on the consumed read and write capacity of your table and
+ // global secondary indexes over the past 30 minutes.
+ //
+ // - PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for most DynamoDB
+ // workloads. PAY_PER_REQUEST sets the billing mode to [On-demand capacity mode].
+ //
+ // - PROVISIONED - We recommend using PROVISIONED for steady workloads with
+ // predictable growth where capacity requirements can be reliably forecasted.
+ // PROVISIONED sets the billing mode to [Provisioned capacity mode].
+ //
+ // [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+ // [On-demand capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html
+ BillingMode types.BillingMode
+
+ // Indicates whether deletion protection is to be enabled (true) or disabled
+ // (false) on the table.
+ DeletionProtectionEnabled *bool
+
+ // An array of one or more global secondary indexes for the table. For each index
+ // in the array, you can request one action:
+ //
+ // - Create - add a new global secondary index to the table.
+ //
+ // - Update - modify the provisioned throughput settings of an existing global
+ // secondary index.
+ //
+ // - Delete - remove a global secondary index from the table.
+ //
+ // You can create or delete only one global secondary index per UpdateTable
+ // operation.
+ //
+ // For more information, see [Managing Global Secondary Indexes] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Managing Global Secondary Indexes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html
+ GlobalSecondaryIndexUpdates []types.GlobalSecondaryIndexUpdate
+
+ // A list of witness updates for a MRSC global table. A witness provides a
+ // cost-effective alternative to a full replica in a MRSC global table by
+ // maintaining replicated change data written to global table replicas. You cannot
+ // perform read or write operations on a witness. For each witness, you can request
+ // one action:
+ //
+ // - Create - add a new witness to the global table.
+ //
+ // - Delete - remove a witness from the global table.
+ //
+ // You can create or delete only one witness per UpdateTable operation.
+ //
+ // For more information, see [Multi-Region strong consistency (MRSC)] in the Amazon DynamoDB Developer Guide
+ //
+ // [Multi-Region strong consistency (MRSC)]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_HowItWorks.html#V2globaltables_HowItWorks.consistency-modes
+ GlobalTableWitnessUpdates []types.GlobalTableWitnessGroupUpdate
+
+ // Specifies the consistency mode for a new global table. This parameter is only
+ // valid when you create a global table by specifying one or more [Create]actions in the [ReplicaUpdates]
+ // action list.
+ //
+ // You can specify one of the following consistency modes:
+ //
+ // - EVENTUAL : Configures a new global table for multi-Region eventual
+ // consistency (MREC). This is the default consistency mode for global tables.
+ //
+ // - STRONG : Configures a new global table for multi-Region strong consistency
+ // (MRSC).
+ //
+ // If you don't specify this field, the global table consistency mode defaults to
+ // EVENTUAL . For more information about global tables consistency modes, see [Consistency modes] in
+ // DynamoDB developer guide.
+ //
+ // [ReplicaUpdates]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html#DDB-UpdateTable-request-ReplicaUpdates
+ // [Create]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ReplicationGroupUpdate.html#DDB-Type-ReplicationGroupUpdate-Create
+ // [Consistency modes]: https://docs.aws.amazon.com/V2globaltables_HowItWorks.html#V2globaltables_HowItWorks.consistency-modes
+ MultiRegionConsistency types.MultiRegionConsistency
+
+ // Updates the maximum number of read and write units for the specified table in
+ // on-demand capacity mode. If you use this parameter, you must specify
+ // MaxReadRequestUnits , MaxWriteRequestUnits , or both.
+ OnDemandThroughput *types.OnDemandThroughput
+
+ // The new provisioned throughput settings for the specified table or index.
+ ProvisionedThroughput *types.ProvisionedThroughput
+
+ // A list of replica update actions (create, delete, or update) for the table.
+ ReplicaUpdates []types.ReplicationGroupUpdate
+
+ // The new server-side encryption settings for the specified table.
+ SSESpecification *types.SSESpecification
+
+ // Represents the DynamoDB Streams configuration for the table.
+ //
+ // You receive a ValidationException if you try to enable a stream on a table that
+ // already has a stream, or if you try to disable a stream on a table that doesn't
+ // have a stream.
+ StreamSpecification *types.StreamSpecification
+
+ // The table class of the table to be updated. Valid values are STANDARD and
+ // STANDARD_INFREQUENT_ACCESS .
+ TableClass types.TableClass
+
+ // Represents the warm throughput (in read units per second and write units per
+ // second) for updating a table.
+ WarmThroughput *types.WarmThroughput
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateTableInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+// Represents the output of an UpdateTable operation.
+type UpdateTableOutput struct {
+
+ // Represents the properties of the table.
+ TableDescription *types.TableDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateTableMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateTable{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateTable"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUpdateTableDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateTableValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTable(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpUpdateTableDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpUpdateTableDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpUpdateTableDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*UpdateTableInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opUpdateTable(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateTable",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go
new file mode 100644
index 000000000..26b33641d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTableReplicaAutoScaling.go
@@ -0,0 +1,218 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Updates auto scaling settings on your global tables at once.
+func (c *Client) UpdateTableReplicaAutoScaling(ctx context.Context, params *UpdateTableReplicaAutoScalingInput, optFns ...func(*Options)) (*UpdateTableReplicaAutoScalingOutput, error) {
+ if params == nil {
+ params = &UpdateTableReplicaAutoScalingInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateTableReplicaAutoScaling", params, optFns, c.addOperationUpdateTableReplicaAutoScalingMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateTableReplicaAutoScalingOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type UpdateTableReplicaAutoScalingInput struct {
+
+ // The name of the global table to be updated. You can also provide the Amazon
+ // Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // Represents the auto scaling settings of the global secondary indexes of the
+ // replica to be updated.
+ GlobalSecondaryIndexUpdates []types.GlobalSecondaryIndexAutoScalingUpdate
+
+ // Represents the auto scaling settings to be modified for a global table or
+ // global secondary index.
+ ProvisionedWriteCapacityAutoScalingUpdate *types.AutoScalingSettingsUpdate
+
+ // Represents the auto scaling settings of replicas of the table that will be
+ // modified.
+ ReplicaUpdates []types.ReplicaAutoScalingUpdate
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateTableReplicaAutoScalingInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type UpdateTableReplicaAutoScalingOutput struct {
+
+ // Returns information about the auto scaling settings of a table with replicas.
+ TableAutoScalingDescription *types.TableAutoScalingDescription
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateTableReplicaAutoScalingMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateTableReplicaAutoScaling{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateTableReplicaAutoScaling{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateTableReplicaAutoScaling"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateTableReplicaAutoScalingValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTableReplicaAutoScaling(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opUpdateTableReplicaAutoScaling(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateTableReplicaAutoScaling",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go
new file mode 100644
index 000000000..6c0fc24a1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/api_op_UpdateTimeToLive.go
@@ -0,0 +1,288 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ internalEndpointDiscovery "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the
+// specified table. A successful UpdateTimeToLive call returns the current
+// TimeToLiveSpecification . It can take up to one hour for the change to fully
+// process. Any additional UpdateTimeToLive calls for the same table during this
+// one hour duration result in a ValidationException .
+//
+// TTL compares the current time in epoch time format to the time stored in the
+// TTL attribute of an item. If the epoch time value stored in the attribute is
+// less than the current time, the item is marked as expired and subsequently
+// deleted.
+//
+// The epoch time format is the number of seconds elapsed since 12:00:00 AM
+// January 1, 1970 UTC.
+//
+// DynamoDB deletes expired items on a best-effort basis to ensure availability of
+// throughput for other data operations.
+//
+// DynamoDB typically deletes expired items within two days of expiration. The
+// exact duration within which an item gets deleted after expiration is specific to
+// the nature of the workload. Items that have expired and not been deleted will
+// still show up in reads, queries, and scans.
+//
+// As items are deleted, they are removed from any local secondary index and
+// global secondary index immediately in the same eventually consistent way as a
+// standard delete operation.
+//
+// For more information, see [Time To Live] in the Amazon DynamoDB Developer Guide.
+//
+// [Time To Live]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html
+func (c *Client) UpdateTimeToLive(ctx context.Context, params *UpdateTimeToLiveInput, optFns ...func(*Options)) (*UpdateTimeToLiveOutput, error) {
+ if params == nil {
+ params = &UpdateTimeToLiveInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "UpdateTimeToLive", params, optFns, c.addOperationUpdateTimeToLiveMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*UpdateTimeToLiveOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+// Represents the input of an UpdateTimeToLive operation.
+type UpdateTimeToLiveInput struct {
+
+ // The name of the table to be configured. You can also provide the Amazon
+ // Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // Represents the settings used to enable or disable Time to Live for the
+ // specified table.
+ //
+ // This member is required.
+ TimeToLiveSpecification *types.TimeToLiveSpecification
+
+ noSmithyDocumentSerde
+}
+
+func (in *UpdateTimeToLiveInput) bindEndpointParams(p *EndpointParameters) {
+
+ p.ResourceArn = in.TableName
+
+}
+
+type UpdateTimeToLiveOutput struct {
+
+ // Represents the output of an UpdateTimeToLive operation.
+ TimeToLiveSpecification *types.TimeToLiveSpecification
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationUpdateTimeToLiveMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsjson10_serializeOpUpdateTimeToLive{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpUpdateTimeToLive{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "UpdateTimeToLive"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addOpUpdateTimeToLiveDiscoverEndpointMiddleware(stack, options, c); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addUserAgentAccountIDEndpointMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpUpdateTimeToLiveValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opUpdateTimeToLive(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addValidateResponseChecksum(stack, options); err != nil {
+ return err
+ }
+ if err = addAcceptEncodingGzip(stack, options); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAttempt(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptExecution(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSerialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterSigning(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptTransmit(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptBeforeDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addInterceptAfterDeserialization(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func addOpUpdateTimeToLiveDiscoverEndpointMiddleware(stack *middleware.Stack, o Options, c *Client) error {
+ return stack.Finalize.Insert(&internalEndpointDiscovery.DiscoverEndpoint{
+ Options: []func(*internalEndpointDiscovery.DiscoverEndpointOptions){
+ func(opt *internalEndpointDiscovery.DiscoverEndpointOptions) {
+ opt.DisableHTTPS = o.EndpointOptions.DisableHTTPS
+ opt.Logger = o.Logger
+ },
+ },
+ DiscoverOperation: c.fetchOpUpdateTimeToLiveDiscoverEndpoint,
+ EndpointDiscoveryEnableState: o.EndpointDiscovery.EnableEndpointDiscovery,
+ EndpointDiscoveryRequired: false,
+ Region: o.Region,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+func (c *Client) fetchOpUpdateTimeToLiveDiscoverEndpoint(ctx context.Context, region string, optFns ...func(*internalEndpointDiscovery.DiscoverEndpointOptions)) (internalEndpointDiscovery.WeightedAddress, error) {
+ input := getOperationInput(ctx)
+ in, ok := input.(*UpdateTimeToLiveInput)
+ if !ok {
+ return internalEndpointDiscovery.WeightedAddress{}, fmt.Errorf("unknown input type %T", input)
+ }
+ _ = in
+
+ identifierMap := make(map[string]string, 0)
+ identifierMap["sdk#Region"] = region
+
+ key := fmt.Sprintf("DynamoDB.%v", identifierMap)
+
+ if v, ok := c.endpointCache.Get(key); ok {
+ return v, nil
+ }
+
+ discoveryOperationInput := &DescribeEndpointsInput{}
+
+ opt := internalEndpointDiscovery.DiscoverEndpointOptions{}
+ for _, fn := range optFns {
+ fn(&opt)
+ }
+
+ go c.handleEndpointDiscoveryFromService(ctx, discoveryOperationInput, region, key, opt)
+ return internalEndpointDiscovery.WeightedAddress{}, nil
+}
+
+func newServiceMetadataMiddleware_opUpdateTimeToLive(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "UpdateTimeToLive",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go
new file mode 100644
index 000000000..b1d605c82
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/auth.go
@@ -0,0 +1,339 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "slices"
+ "strings"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+ params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+ return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+ }
+ }
+
+ if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+ // The name of the operation being invoked.
+ Operation string
+
+ // The region in which the operation is being invoked.
+ Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+ params := &AuthResolverParameters{
+ Operation: operation,
+ }
+
+ bindAuthParamsRegion(ctx, params, input, options)
+
+ return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+ ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ if overrides, ok := operationAuthOptions[params.Operation]; ok {
+ return overrides(params), nil
+ }
+ return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "dynamodb")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ return props
+ }(),
+ },
+ }
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
+ params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+ options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+ }
+
+ scheme, ok := m.selectScheme(options)
+ if !ok {
+ return out, metadata, fmt.Errorf("could not select an auth scheme")
+ }
+
+ ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+ sorted := sortAuthOptions(options, m.options.AuthSchemePreference)
+ for _, option := range sorted {
+ if option.SchemeID == smithyauth.SchemeIDAnonymous {
+ return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+ }
+
+ for _, scheme := range m.options.AuthSchemes {
+ if scheme.SchemeID() != option.SchemeID {
+ continue
+ }
+
+ if scheme.IdentityResolver(m.options) != nil {
+ return newResolvedAuthScheme(scheme, option), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+func sortAuthOptions(options []*smithyauth.Option, preferred []string) []*smithyauth.Option {
+ byPriority := make([]*smithyauth.Option, 0, len(options))
+ for _, prefName := range preferred {
+ for _, option := range options {
+ optName := option.SchemeID
+ if parts := strings.Split(option.SchemeID, "#"); len(parts) == 2 {
+ optName = parts[1]
+ }
+ if prefName == optName {
+ byPriority = append(byPriority, option)
+ }
+ }
+ }
+ for _, option := range options {
+ if !slices.ContainsFunc(byPriority, func(o *smithyauth.Option) bool {
+ return o.SchemeID == option.SchemeID
+ }) {
+ byPriority = append(byPriority, option)
+ }
+ }
+ return byPriority
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+ Scheme smithyhttp.AuthScheme
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+ return &resolvedAuthScheme{
+ Scheme: scheme,
+ IdentityProperties: option.IdentityProperties,
+ SignerProperties: option.SignerProperties,
+ }
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+ return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+ v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+ return v
+}
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ resolver := rscheme.Scheme.IdentityResolver(m.options)
+ if resolver == nil {
+ return out, metadata, fmt.Errorf("no identity resolver")
+ }
+
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("get identity: %w", err)
+ }
+
+ ctx = setIdentity(ctx, identity)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+ return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+ v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+ return v
+}
+
+type signRequestMiddleware struct {
+ options Options
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ identity := getIdentity(ctx)
+ if identity == nil {
+ return out, metadata, fmt.Errorf("no identity")
+ }
+
+ signer := rscheme.Scheme.Signer()
+ if signer == nil {
+ return out, metadata, fmt.Errorf("no signer")
+ }
+
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) {
+ return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties)
+ }, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("sign request: %w", err)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go
new file mode 100644
index 000000000..fdf566c4c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/deserializers.go
@@ -0,0 +1,19498 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws/protocol/restjson"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ smithy "github.com/aws/smithy-go"
+ smithyio "github.com/aws/smithy-go/io"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "io/ioutil"
+ "math"
+ "strings"
+)
+
+type awsAwsjson10_deserializeOpBatchExecuteStatement struct {
+}
+
+func (*awsAwsjson10_deserializeOpBatchExecuteStatement) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpBatchExecuteStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorBatchExecuteStatement(response, &metadata)
+ }
+ output := &BatchExecuteStatementOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentBatchExecuteStatementOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorBatchExecuteStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpBatchGetItem struct {
+}
+
+func (*awsAwsjson10_deserializeOpBatchGetItem) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpBatchGetItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorBatchGetItem(response, &metadata)
+ }
+ output := &BatchGetItemOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentBatchGetItemOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorBatchGetItem(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpBatchWriteItem struct {
+}
+
+func (*awsAwsjson10_deserializeOpBatchWriteItem) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpBatchWriteItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorBatchWriteItem(response, &metadata)
+ }
+ output := &BatchWriteItemOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentBatchWriteItemOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorBatchWriteItem(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("ReplicatedWriteConflictException", errorCode):
+ return awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpCreateBackup struct {
+}
+
+func (*awsAwsjson10_deserializeOpCreateBackup) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpCreateBackup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorCreateBackup(response, &metadata)
+ }
+ output := &CreateBackupOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentCreateBackupOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorCreateBackup(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("BackupInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorBackupInUseException(response, errorBody)
+
+ case strings.EqualFold("ContinuousBackupsUnavailableException", errorCode):
+ return awsAwsjson10_deserializeErrorContinuousBackupsUnavailableException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("TableInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorTableInUseException(response, errorBody)
+
+ case strings.EqualFold("TableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpCreateGlobalTable struct {
+}
+
+func (*awsAwsjson10_deserializeOpCreateGlobalTable) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpCreateGlobalTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorCreateGlobalTable(response, &metadata)
+ }
+ output := &CreateGlobalTableOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentCreateGlobalTableOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorCreateGlobalTable(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("GlobalTableAlreadyExistsException", errorCode):
+ return awsAwsjson10_deserializeErrorGlobalTableAlreadyExistsException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("TableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpCreateTable struct {
+}
+
+func (*awsAwsjson10_deserializeOpCreateTable) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpCreateTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorCreateTable(response, &metadata)
+ }
+ output := &CreateTableOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentCreateTableOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+// awsAwsjson10_deserializeOpErrorCreateTable maps a non-2xx CreateTable
+// response onto a modeled API error. The error code is resolved from the
+// X-Amzn-ErrorType header and/or the JSON body; unrecognized codes fall back
+// to a smithy.GenericAPIError carrying the code and message.
+func awsAwsjson10_deserializeOpErrorCreateTable(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	// Rewind so the matched error deserializer can re-read the full body.
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("InternalServerError", errorCode):
+		return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+	case strings.EqualFold("InvalidEndpointException", errorCode):
+		return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("ResourceInUseException", errorCode):
+		return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsjson10_deserializeOpDeleteBackup is the deserialize middleware for the
+// DeleteBackup operation. (Generated code — do not hand-edit.)
+type awsAwsjson10_deserializeOpDeleteBackup struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsAwsjson10_deserializeOpDeleteBackup) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes a successful HTTP response into a DeleteBackupOutput
+// and stores it in out.Result. Non-2xx responses are delegated to
+// awsAwsjson10_deserializeOpErrorDeleteBackup. The body is teed through a 1KB
+// ring buffer so a snapshot of the most recently read bytes can be attached to
+// any smithy.DeserializationError.
+func (m *awsAwsjson10_deserializeOpDeleteBackup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson10_deserializeOpErrorDeleteBackup(response, &metadata)
+	}
+	output := &DeleteBackupOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson10_deserializeOpDocumentDeleteBackupOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsjson10_deserializeOpErrorDeleteBackup maps a non-2xx DeleteBackup
+// response onto a modeled API error. The error code is resolved from the
+// X-Amzn-ErrorType header and/or the JSON body; unrecognized codes fall back
+// to a smithy.GenericAPIError carrying the code and message.
+func awsAwsjson10_deserializeOpErrorDeleteBackup(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	// Rewind so the matched error deserializer can re-read the full body.
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("BackupInUseException", errorCode):
+		return awsAwsjson10_deserializeErrorBackupInUseException(response, errorBody)
+
+	case strings.EqualFold("BackupNotFoundException", errorCode):
+		return awsAwsjson10_deserializeErrorBackupNotFoundException(response, errorBody)
+
+	case strings.EqualFold("InternalServerError", errorCode):
+		return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+	case strings.EqualFold("InvalidEndpointException", errorCode):
+		return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsjson10_deserializeOpDeleteItem is the deserialize middleware for the
+// DeleteItem operation. (Generated code — do not hand-edit.)
+type awsAwsjson10_deserializeOpDeleteItem struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsAwsjson10_deserializeOpDeleteItem) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes a successful HTTP response into a DeleteItemOutput
+// and stores it in out.Result. Non-2xx responses are delegated to
+// awsAwsjson10_deserializeOpErrorDeleteItem. The body is teed through a 1KB
+// ring buffer so a snapshot of the most recently read bytes can be attached to
+// any smithy.DeserializationError.
+func (m *awsAwsjson10_deserializeOpDeleteItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson10_deserializeOpErrorDeleteItem(response, &metadata)
+	}
+	output := &DeleteItemOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson10_deserializeOpDocumentDeleteItemOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsjson10_deserializeOpErrorDeleteItem maps a non-2xx DeleteItem
+// response onto a modeled API error. The error code is resolved from the
+// X-Amzn-ErrorType header and/or the JSON body; unrecognized codes fall back
+// to a smithy.GenericAPIError carrying the code and message.
+func awsAwsjson10_deserializeOpErrorDeleteItem(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	// Rewind so the matched error deserializer can re-read the full body.
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("ConditionalCheckFailedException", errorCode):
+		return awsAwsjson10_deserializeErrorConditionalCheckFailedException(response, errorBody)
+
+	case strings.EqualFold("InternalServerError", errorCode):
+		return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+	case strings.EqualFold("InvalidEndpointException", errorCode):
+		return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+	case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode):
+		return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+		return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+	case strings.EqualFold("ReplicatedWriteConflictException", errorCode):
+		return awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response, errorBody)
+
+	case strings.EqualFold("RequestLimitExceeded", errorCode):
+		return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+	case strings.EqualFold("ResourceNotFoundException", errorCode):
+		return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+	case strings.EqualFold("ThrottlingException", errorCode):
+		return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+	case strings.EqualFold("TransactionConflictException", errorCode):
+		return awsAwsjson10_deserializeErrorTransactionConflictException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsjson10_deserializeOpDeleteResourcePolicy is the deserialize middleware
+// for the DeleteResourcePolicy operation. (Generated code — do not hand-edit.)
+type awsAwsjson10_deserializeOpDeleteResourcePolicy struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsAwsjson10_deserializeOpDeleteResourcePolicy) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes a successful HTTP response into a
+// DeleteResourcePolicyOutput and stores it in out.Result. Non-2xx responses are
+// delegated to awsAwsjson10_deserializeOpErrorDeleteResourcePolicy. The body is
+// teed through a 1KB ring buffer so a snapshot of the most recently read bytes
+// can be attached to any smithy.DeserializationError.
+func (m *awsAwsjson10_deserializeOpDeleteResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson10_deserializeOpErrorDeleteResourcePolicy(response, &metadata)
+	}
+	output := &DeleteResourcePolicyOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson10_deserializeOpDocumentDeleteResourcePolicyOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsjson10_deserializeOpErrorDeleteResourcePolicy maps a non-2xx
+// DeleteResourcePolicy response onto a modeled API error. The error code is
+// resolved from the X-Amzn-ErrorType header and/or the JSON body; unrecognized
+// codes fall back to a smithy.GenericAPIError carrying the code and message.
+func awsAwsjson10_deserializeOpErrorDeleteResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	// Rewind so the matched error deserializer can re-read the full body.
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("InternalServerError", errorCode):
+		return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+	case strings.EqualFold("InvalidEndpointException", errorCode):
+		return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("PolicyNotFoundException", errorCode):
+		return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody)
+
+	case strings.EqualFold("ResourceInUseException", errorCode):
+		return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+	case strings.EqualFold("ResourceNotFoundException", errorCode):
+		return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsjson10_deserializeOpDeleteTable is the deserialize middleware for the
+// DeleteTable operation. (Generated code — do not hand-edit.)
+type awsAwsjson10_deserializeOpDeleteTable struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsAwsjson10_deserializeOpDeleteTable) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes a successful HTTP response into a DeleteTableOutput
+// and stores it in out.Result. Non-2xx responses are delegated to
+// awsAwsjson10_deserializeOpErrorDeleteTable. The body is teed through a 1KB
+// ring buffer so a snapshot of the most recently read bytes can be attached to
+// any smithy.DeserializationError.
+func (m *awsAwsjson10_deserializeOpDeleteTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson10_deserializeOpErrorDeleteTable(response, &metadata)
+	}
+	output := &DeleteTableOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson10_deserializeOpDocumentDeleteTableOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsjson10_deserializeOpErrorDeleteTable maps a non-2xx DeleteTable
+// response onto a modeled API error. The error code is resolved from the
+// X-Amzn-ErrorType header and/or the JSON body; unrecognized codes fall back
+// to a smithy.GenericAPIError carrying the code and message.
+func awsAwsjson10_deserializeOpErrorDeleteTable(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	// Rewind so the matched error deserializer can re-read the full body.
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("InternalServerError", errorCode):
+		return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+	case strings.EqualFold("InvalidEndpointException", errorCode):
+		return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+	case strings.EqualFold("LimitExceededException", errorCode):
+		return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+	case strings.EqualFold("ResourceInUseException", errorCode):
+		return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+	case strings.EqualFold("ResourceNotFoundException", errorCode):
+		return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsjson10_deserializeOpDescribeBackup is the deserialize middleware for
+// the DescribeBackup operation. (Generated code — do not hand-edit.)
+type awsAwsjson10_deserializeOpDescribeBackup struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsAwsjson10_deserializeOpDescribeBackup) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes a successful HTTP response into a
+// DescribeBackupOutput and stores it in out.Result. Non-2xx responses are
+// delegated to awsAwsjson10_deserializeOpErrorDescribeBackup. The body is teed
+// through a 1KB ring buffer so a snapshot of the most recently read bytes can
+// be attached to any smithy.DeserializationError.
+func (m *awsAwsjson10_deserializeOpDescribeBackup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson10_deserializeOpErrorDescribeBackup(response, &metadata)
+	}
+	output := &DescribeBackupOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson10_deserializeOpDocumentDescribeBackupOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsjson10_deserializeOpErrorDescribeBackup maps a non-2xx DescribeBackup
+// response onto a modeled API error. The error code is resolved from the
+// X-Amzn-ErrorType header and/or the JSON body; unrecognized codes fall back
+// to a smithy.GenericAPIError carrying the code and message.
+func awsAwsjson10_deserializeOpErrorDescribeBackup(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	// Rewind so the matched error deserializer can re-read the full body.
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("BackupNotFoundException", errorCode):
+		return awsAwsjson10_deserializeErrorBackupNotFoundException(response, errorBody)
+
+	case strings.EqualFold("InternalServerError", errorCode):
+		return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+	case strings.EqualFold("InvalidEndpointException", errorCode):
+		return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsjson10_deserializeOpDescribeContinuousBackups is the deserialize
+// middleware for the DescribeContinuousBackups operation. (Generated code — do
+// not hand-edit.)
+type awsAwsjson10_deserializeOpDescribeContinuousBackups struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsAwsjson10_deserializeOpDescribeContinuousBackups) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes a successful HTTP response into a
+// DescribeContinuousBackupsOutput and stores it in out.Result. Non-2xx
+// responses are delegated to
+// awsAwsjson10_deserializeOpErrorDescribeContinuousBackups. The body is teed
+// through a 1KB ring buffer so a snapshot of the most recently read bytes can
+// be attached to any smithy.DeserializationError.
+func (m *awsAwsjson10_deserializeOpDescribeContinuousBackups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson10_deserializeOpErrorDescribeContinuousBackups(response, &metadata)
+	}
+	output := &DescribeContinuousBackupsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson10_deserializeOpDocumentDescribeContinuousBackupsOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsjson10_deserializeOpErrorDescribeContinuousBackups maps a non-2xx
+// DescribeContinuousBackups response onto a modeled API error. The error code
+// is resolved from the X-Amzn-ErrorType header and/or the JSON body;
+// unrecognized codes fall back to a smithy.GenericAPIError.
+func awsAwsjson10_deserializeOpErrorDescribeContinuousBackups(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	// Rewind so the matched error deserializer can re-read the full body.
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("InternalServerError", errorCode):
+		return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+	case strings.EqualFold("InvalidEndpointException", errorCode):
+		return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+	case strings.EqualFold("TableNotFoundException", errorCode):
+		return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsjson10_deserializeOpDescribeContributorInsights is the deserialize
+// middleware for the DescribeContributorInsights operation. (Generated code —
+// do not hand-edit.)
+type awsAwsjson10_deserializeOpDescribeContributorInsights struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsAwsjson10_deserializeOpDescribeContributorInsights) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes a successful HTTP response into a
+// DescribeContributorInsightsOutput and stores it in out.Result. Non-2xx
+// responses are delegated to
+// awsAwsjson10_deserializeOpErrorDescribeContributorInsights. The body is teed
+// through a 1KB ring buffer so a snapshot of the most recently read bytes can
+// be attached to any smithy.DeserializationError.
+func (m *awsAwsjson10_deserializeOpDescribeContributorInsights) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson10_deserializeOpErrorDescribeContributorInsights(response, &metadata)
+	}
+	output := &DescribeContributorInsightsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson10_deserializeOpDocumentDescribeContributorInsightsOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+// awsAwsjson10_deserializeOpErrorDescribeContributorInsights maps a non-2xx
+// DescribeContributorInsights response onto a modeled API error. The error
+// code is resolved from the X-Amzn-ErrorType header and/or the JSON body;
+// unrecognized codes fall back to a smithy.GenericAPIError.
+func awsAwsjson10_deserializeOpErrorDescribeContributorInsights(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+	var errorBuffer bytes.Buffer
+	if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+		return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+	}
+	errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+	errorCode := "UnknownError"
+	errorMessage := errorCode
+
+	headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(errorBody, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	bodyInfo, err := getProtocolErrorInfo(decoder)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return err
+	}
+
+	// Rewind so the matched error deserializer can re-read the full body.
+	errorBody.Seek(0, io.SeekStart)
+	if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+		errorCode = restjson.SanitizeErrorCode(typ)
+	}
+	if len(bodyInfo.Message) != 0 {
+		errorMessage = bodyInfo.Message
+	}
+	switch {
+	case strings.EqualFold("InternalServerError", errorCode):
+		return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+	case strings.EqualFold("ResourceNotFoundException", errorCode):
+		return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+	default:
+		genericError := &smithy.GenericAPIError{
+			Code:    errorCode,
+			Message: errorMessage,
+		}
+		return genericError
+
+	}
+}
+
+// awsAwsjson10_deserializeOpDescribeEndpoints is the deserialize middleware
+// for the DescribeEndpoints operation. (Generated code — do not hand-edit.)
+type awsAwsjson10_deserializeOpDescribeEndpoints struct {
+}
+
+// ID identifies this middleware within the operation's middleware stack.
+func (*awsAwsjson10_deserializeOpDescribeEndpoints) ID() string {
+	return "OperationDeserializer"
+}
+
+// HandleDeserialize decodes a successful HTTP response into a
+// DescribeEndpointsOutput and stores it in out.Result. Non-2xx responses are
+// delegated to awsAwsjson10_deserializeOpErrorDescribeEndpoints. The body is
+// teed through a 1KB ring buffer so a snapshot of the most recently read bytes
+// can be attached to any smithy.DeserializationError.
+func (m *awsAwsjson10_deserializeOpDescribeEndpoints) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+	out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	out, metadata, err = next.HandleDeserialize(ctx, in)
+	if err != nil {
+		return out, metadata, err
+	}
+
+	_, span := tracing.StartSpan(ctx, "OperationDeserializer")
+	endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+	defer endTimer()
+	defer span.End()
+	response, ok := out.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+	}
+
+	if response.StatusCode < 200 || response.StatusCode >= 300 {
+		return out, metadata, awsAwsjson10_deserializeOpErrorDescribeEndpoints(response, &metadata)
+	}
+	output := &DescribeEndpointsOutput{}
+	out.Result = output
+
+	var buff [1024]byte
+	ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+	body := io.TeeReader(response.Body, ringBuffer)
+	decoder := json.NewDecoder(body)
+	decoder.UseNumber()
+	var shape interface{}
+	if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	err = awsAwsjson10_deserializeOpDocumentDescribeEndpointsOutput(&output, shape)
+	if err != nil {
+		var snapshot bytes.Buffer
+		io.Copy(&snapshot, ringBuffer)
+		err = &smithy.DeserializationError{
+			Err:      fmt.Errorf("failed to decode response body, %w", err),
+			Snapshot: snapshot.Bytes(),
+		}
+		return out, metadata, err
+	}
+
+	return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeEndpoints(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeExport struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeExport) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeExport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeExport(response, &metadata)
+ }
+ output := &DescribeExportOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeExportOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeExport(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("ExportNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorExportNotFoundException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeGlobalTable struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeGlobalTable) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeGlobalTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeGlobalTable(response, &metadata)
+ }
+ output := &DescribeGlobalTableOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeGlobalTableOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeGlobalTable(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("GlobalTableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeGlobalTableSettings struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeGlobalTableSettings) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeGlobalTableSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeGlobalTableSettings(response, &metadata)
+ }
+ output := &DescribeGlobalTableSettingsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeGlobalTableSettingsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeGlobalTableSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("GlobalTableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeImport struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeImport) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeImport) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeImport(response, &metadata)
+ }
+ output := &DescribeImportOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeImportOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeImport(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("ImportNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorImportNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeKinesisStreamingDestination struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeKinesisStreamingDestination) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeKinesisStreamingDestination(response, &metadata)
+ }
+ output := &DescribeKinesisStreamingDestinationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeKinesisStreamingDestinationOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeLimits struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeLimits) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeLimits) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeLimits(response, &metadata)
+ }
+ output := &DescribeLimitsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeLimitsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeLimits(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeTable struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeTable) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeTable(response, &metadata)
+ }
+ output := &DescribeTableOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeTableOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeTable(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeTableReplicaAutoScaling struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeTableReplicaAutoScaling) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeTableReplicaAutoScaling) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeTableReplicaAutoScaling(response, &metadata)
+ }
+ output := &DescribeTableReplicaAutoScalingOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeTableReplicaAutoScalingOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeTableReplicaAutoScaling(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDescribeTimeToLive struct {
+}
+
+func (*awsAwsjson10_deserializeOpDescribeTimeToLive) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDescribeTimeToLive) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDescribeTimeToLive(response, &metadata)
+ }
+ output := &DescribeTimeToLiveOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDescribeTimeToLiveOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDescribeTimeToLive(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpDisableKinesisStreamingDestination struct {
+}
+
+func (*awsAwsjson10_deserializeOpDisableKinesisStreamingDestination) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpDisableKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorDisableKinesisStreamingDestination(response, &metadata)
+ }
+ output := &DisableKinesisStreamingDestinationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentDisableKinesisStreamingDestinationOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorDisableKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpEnableKinesisStreamingDestination struct {
+}
+
+func (*awsAwsjson10_deserializeOpEnableKinesisStreamingDestination) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpEnableKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorEnableKinesisStreamingDestination(response, &metadata)
+ }
+ output := &EnableKinesisStreamingDestinationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentEnableKinesisStreamingDestinationOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorEnableKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpExecuteStatement struct {
+}
+
+func (*awsAwsjson10_deserializeOpExecuteStatement) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpExecuteStatement) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorExecuteStatement(response, &metadata)
+ }
+ output := &ExecuteStatementOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentExecuteStatementOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorExecuteStatement(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("ConditionalCheckFailedException", errorCode):
+ return awsAwsjson10_deserializeErrorConditionalCheckFailedException(response, errorBody)
+
+ case strings.EqualFold("DuplicateItemException", errorCode):
+ return awsAwsjson10_deserializeErrorDuplicateItemException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ case strings.EqualFold("TransactionConflictException", errorCode):
+ return awsAwsjson10_deserializeErrorTransactionConflictException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpExecuteTransaction struct {
+}
+
+func (*awsAwsjson10_deserializeOpExecuteTransaction) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpExecuteTransaction) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorExecuteTransaction(response, &metadata)
+ }
+ output := &ExecuteTransactionOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentExecuteTransactionOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorExecuteTransaction(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("IdempotentParameterMismatchException", errorCode):
+ return awsAwsjson10_deserializeErrorIdempotentParameterMismatchException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ case strings.EqualFold("TransactionCanceledException", errorCode):
+ return awsAwsjson10_deserializeErrorTransactionCanceledException(response, errorBody)
+
+ case strings.EqualFold("TransactionInProgressException", errorCode):
+ return awsAwsjson10_deserializeErrorTransactionInProgressException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpExportTableToPointInTime struct {
+}
+
+func (*awsAwsjson10_deserializeOpExportTableToPointInTime) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpExportTableToPointInTime) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorExportTableToPointInTime(response, &metadata)
+ }
+ output := &ExportTableToPointInTimeOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentExportTableToPointInTimeOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorExportTableToPointInTime(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("ExportConflictException", errorCode):
+ return awsAwsjson10_deserializeErrorExportConflictException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidExportTimeException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidExportTimeException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("PointInTimeRecoveryUnavailableException", errorCode):
+ return awsAwsjson10_deserializeErrorPointInTimeRecoveryUnavailableException(response, errorBody)
+
+ case strings.EqualFold("TableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpGetItem struct {
+}
+
+func (*awsAwsjson10_deserializeOpGetItem) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpGetItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorGetItem(response, &metadata)
+ }
+ output := &GetItemOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentGetItemOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorGetItem(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpGetResourcePolicy struct {
+}
+
+func (*awsAwsjson10_deserializeOpGetResourcePolicy) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpGetResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorGetResourcePolicy(response, &metadata)
+ }
+ output := &GetResourcePolicyOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentGetResourcePolicyOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorGetResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("PolicyNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpImportTable struct {
+}
+
+func (*awsAwsjson10_deserializeOpImportTable) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpImportTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorImportTable(response, &metadata)
+ }
+ output := &ImportTableOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentImportTableOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorImportTable(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("ImportConflictException", errorCode):
+ return awsAwsjson10_deserializeErrorImportConflictException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpListBackups struct {
+}
+
+func (*awsAwsjson10_deserializeOpListBackups) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpListBackups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorListBackups(response, &metadata)
+ }
+ output := &ListBackupsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentListBackupsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorListBackups(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpListContributorInsights struct {
+}
+
+func (*awsAwsjson10_deserializeOpListContributorInsights) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpListContributorInsights) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorListContributorInsights(response, &metadata)
+ }
+ output := &ListContributorInsightsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentListContributorInsightsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorListContributorInsights(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpListExports struct {
+}
+
+func (*awsAwsjson10_deserializeOpListExports) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpListExports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorListExports(response, &metadata)
+ }
+ output := &ListExportsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentListExportsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorListExports(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpListGlobalTables struct {
+}
+
+func (*awsAwsjson10_deserializeOpListGlobalTables) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpListGlobalTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorListGlobalTables(response, &metadata)
+ }
+ output := &ListGlobalTablesOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentListGlobalTablesOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorListGlobalTables(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpListImports struct {
+}
+
+func (*awsAwsjson10_deserializeOpListImports) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpListImports) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorListImports(response, &metadata)
+ }
+ output := &ListImportsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentListImportsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorListImports(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpListTables struct {
+}
+
+func (*awsAwsjson10_deserializeOpListTables) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpListTables) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorListTables(response, &metadata)
+ }
+ output := &ListTablesOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentListTablesOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorListTables(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpListTagsOfResource struct {
+}
+
+func (*awsAwsjson10_deserializeOpListTagsOfResource) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpListTagsOfResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorListTagsOfResource(response, &metadata)
+ }
+ output := &ListTagsOfResourceOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentListTagsOfResourceOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorListTagsOfResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpPutItem struct {
+}
+
+func (*awsAwsjson10_deserializeOpPutItem) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpPutItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorPutItem(response, &metadata)
+ }
+ output := &PutItemOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentPutItemOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorPutItem(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("ConditionalCheckFailedException", errorCode):
+ return awsAwsjson10_deserializeErrorConditionalCheckFailedException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("ReplicatedWriteConflictException", errorCode):
+ return awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ case strings.EqualFold("TransactionConflictException", errorCode):
+ return awsAwsjson10_deserializeErrorTransactionConflictException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpPutResourcePolicy struct {
+}
+
+func (*awsAwsjson10_deserializeOpPutResourcePolicy) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpPutResourcePolicy) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorPutResourcePolicy(response, &metadata)
+ }
+ output := &PutResourcePolicyOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentPutResourcePolicyOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorPutResourcePolicy(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("PolicyNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorPolicyNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpQuery struct {
+}
+
+func (*awsAwsjson10_deserializeOpQuery) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpQuery) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorQuery(response, &metadata)
+ }
+ output := &QueryOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentQueryOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorQuery(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpRestoreTableFromBackup struct {
+}
+
+func (*awsAwsjson10_deserializeOpRestoreTableFromBackup) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpRestoreTableFromBackup) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorRestoreTableFromBackup(response, &metadata)
+ }
+ output := &RestoreTableFromBackupOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentRestoreTableFromBackupOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorRestoreTableFromBackup(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("BackupInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorBackupInUseException(response, errorBody)
+
+ case strings.EqualFold("BackupNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorBackupNotFoundException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("TableAlreadyExistsException", errorCode):
+ return awsAwsjson10_deserializeErrorTableAlreadyExistsException(response, errorBody)
+
+ case strings.EqualFold("TableInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorTableInUseException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpRestoreTableToPointInTime struct {
+}
+
+func (*awsAwsjson10_deserializeOpRestoreTableToPointInTime) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpRestoreTableToPointInTime) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorRestoreTableToPointInTime(response, &metadata)
+ }
+ output := &RestoreTableToPointInTimeOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentRestoreTableToPointInTimeOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorRestoreTableToPointInTime(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("InvalidRestoreTimeException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidRestoreTimeException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("PointInTimeRecoveryUnavailableException", errorCode):
+ return awsAwsjson10_deserializeErrorPointInTimeRecoveryUnavailableException(response, errorBody)
+
+ case strings.EqualFold("TableAlreadyExistsException", errorCode):
+ return awsAwsjson10_deserializeErrorTableAlreadyExistsException(response, errorBody)
+
+ case strings.EqualFold("TableInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorTableInUseException(response, errorBody)
+
+ case strings.EqualFold("TableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpScan struct {
+}
+
+func (*awsAwsjson10_deserializeOpScan) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpScan) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorScan(response, &metadata)
+ }
+ output := &ScanOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentScanOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorScan(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpTagResource struct {
+}
+
+func (*awsAwsjson10_deserializeOpTagResource) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpTagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorTagResource(response, &metadata)
+ }
+ output := &TagResourceOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorTagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpTransactGetItems struct {
+}
+
+func (*awsAwsjson10_deserializeOpTransactGetItems) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpTransactGetItems) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorTransactGetItems(response, &metadata)
+ }
+ output := &TransactGetItemsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentTransactGetItemsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorTransactGetItems(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ case strings.EqualFold("TransactionCanceledException", errorCode):
+ return awsAwsjson10_deserializeErrorTransactionCanceledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpTransactWriteItems struct {
+}
+
+func (*awsAwsjson10_deserializeOpTransactWriteItems) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpTransactWriteItems) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorTransactWriteItems(response, &metadata)
+ }
+ output := &TransactWriteItemsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentTransactWriteItemsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorTransactWriteItems(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("IdempotentParameterMismatchException", errorCode):
+ return awsAwsjson10_deserializeErrorIdempotentParameterMismatchException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ case strings.EqualFold("TransactionCanceledException", errorCode):
+ return awsAwsjson10_deserializeErrorTransactionCanceledException(response, errorBody)
+
+ case strings.EqualFold("TransactionInProgressException", errorCode):
+ return awsAwsjson10_deserializeErrorTransactionInProgressException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUntagResource struct {
+}
+
+func (*awsAwsjson10_deserializeOpUntagResource) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUntagResource) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUntagResource(response, &metadata)
+ }
+ output := &UntagResourceOutput{}
+ out.Result = output
+
+ if _, err = io.Copy(ioutil.Discard, response.Body); err != nil {
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to discard response body, %w", err),
+ }
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUntagResource(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateContinuousBackups struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateContinuousBackups) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateContinuousBackups) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateContinuousBackups(response, &metadata)
+ }
+ output := &UpdateContinuousBackupsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateContinuousBackupsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateContinuousBackups(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("ContinuousBackupsUnavailableException", errorCode):
+ return awsAwsjson10_deserializeErrorContinuousBackupsUnavailableException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("TableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateContributorInsights struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateContributorInsights) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateContributorInsights) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateContributorInsights(response, &metadata)
+ }
+ output := &UpdateContributorInsightsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateContributorInsightsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateContributorInsights(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateGlobalTable struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateGlobalTable) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateGlobalTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateGlobalTable(response, &metadata)
+ }
+ output := &UpdateGlobalTableOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateGlobalTableOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateGlobalTable(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("GlobalTableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ReplicaAlreadyExistsException", errorCode):
+ return awsAwsjson10_deserializeErrorReplicaAlreadyExistsException(response, errorBody)
+
+ case strings.EqualFold("ReplicaNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorReplicaNotFoundException(response, errorBody)
+
+ case strings.EqualFold("TableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorTableNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateGlobalTableSettings struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateGlobalTableSettings) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateGlobalTableSettings) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateGlobalTableSettings(response, &metadata)
+ }
+ output := &UpdateGlobalTableSettingsOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateGlobalTableSettingsOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateGlobalTableSettings(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("GlobalTableNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response, errorBody)
+
+ case strings.EqualFold("IndexNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorIndexNotFoundException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ReplicaNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorReplicaNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateItem struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateItem) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateItem) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateItem(response, &metadata)
+ }
+ output := &UpdateItemOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateItemOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateItem(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("ConditionalCheckFailedException", errorCode):
+ return awsAwsjson10_deserializeErrorConditionalCheckFailedException(response, errorBody)
+
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("ItemCollectionSizeLimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ProvisionedThroughputExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response, errorBody)
+
+ case strings.EqualFold("ReplicatedWriteConflictException", errorCode):
+ return awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response, errorBody)
+
+ case strings.EqualFold("RequestLimitExceeded", errorCode):
+ return awsAwsjson10_deserializeErrorRequestLimitExceeded(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ case strings.EqualFold("ThrottlingException", errorCode):
+ return awsAwsjson10_deserializeErrorThrottlingException(response, errorBody)
+
+ case strings.EqualFold("TransactionConflictException", errorCode):
+ return awsAwsjson10_deserializeErrorTransactionConflictException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateKinesisStreamingDestination) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateKinesisStreamingDestination(response, &metadata)
+ }
+ output := &UpdateKinesisStreamingDestinationOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateKinesisStreamingDestinationOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateKinesisStreamingDestination(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateTable struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateTable) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateTable) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateTable(response, &metadata)
+ }
+ output := &UpdateTableOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateTableOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateTable(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateTableReplicaAutoScaling struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateTableReplicaAutoScaling) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateTableReplicaAutoScaling) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateTableReplicaAutoScaling(response, &metadata)
+ }
+ output := &UpdateTableReplicaAutoScalingOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateTableReplicaAutoScalingOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateTableReplicaAutoScaling(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+type awsAwsjson10_deserializeOpUpdateTimeToLive struct {
+}
+
+func (*awsAwsjson10_deserializeOpUpdateTimeToLive) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsjson10_deserializeOpUpdateTimeToLive) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsjson10_deserializeOpErrorUpdateTimeToLive(response, &metadata)
+ }
+ output := &UpdateTimeToLiveOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsAwsjson10_deserializeOpDocumentUpdateTimeToLiveOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsjson10_deserializeOpErrorUpdateTimeToLive(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ bodyInfo, err := getProtocolErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if typ, ok := resolveProtocolErrorType(headerCode, bodyInfo); ok {
+ errorCode = restjson.SanitizeErrorCode(typ)
+ }
+ if len(bodyInfo.Message) != 0 {
+ errorMessage = bodyInfo.Message
+ }
+ switch {
+ case strings.EqualFold("InternalServerError", errorCode):
+ return awsAwsjson10_deserializeErrorInternalServerError(response, errorBody)
+
+ case strings.EqualFold("InvalidEndpointException", errorCode):
+ return awsAwsjson10_deserializeErrorInvalidEndpointException(response, errorBody)
+
+ case strings.EqualFold("LimitExceededException", errorCode):
+ return awsAwsjson10_deserializeErrorLimitExceededException(response, errorBody)
+
+ case strings.EqualFold("ResourceInUseException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceInUseException(response, errorBody)
+
+ case strings.EqualFold("ResourceNotFoundException", errorCode):
+ return awsAwsjson10_deserializeErrorResourceNotFoundException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsAwsjson10_deserializeErrorBackupInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.BackupInUseException{}
+ err := awsAwsjson10_deserializeDocumentBackupInUseException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorBackupNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.BackupNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentBackupNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorConditionalCheckFailedException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ConditionalCheckFailedException{}
+ err := awsAwsjson10_deserializeDocumentConditionalCheckFailedException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorContinuousBackupsUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ContinuousBackupsUnavailableException{}
+ err := awsAwsjson10_deserializeDocumentContinuousBackupsUnavailableException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorDuplicateItemException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.DuplicateItemException{}
+ err := awsAwsjson10_deserializeDocumentDuplicateItemException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorExportConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ExportConflictException{}
+ err := awsAwsjson10_deserializeDocumentExportConflictException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorExportNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ExportNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentExportNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorGlobalTableAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.GlobalTableAlreadyExistsException{}
+ err := awsAwsjson10_deserializeDocumentGlobalTableAlreadyExistsException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorGlobalTableNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.GlobalTableNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentGlobalTableNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorIdempotentParameterMismatchException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.IdempotentParameterMismatchException{}
+ err := awsAwsjson10_deserializeDocumentIdempotentParameterMismatchException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorImportConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ImportConflictException{}
+ err := awsAwsjson10_deserializeDocumentImportConflictException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorImportNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ImportNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentImportNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorIndexNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.IndexNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentIndexNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorInternalServerError(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.InternalServerError{}
+ err := awsAwsjson10_deserializeDocumentInternalServerError(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorInvalidEndpointException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.InvalidEndpointException{}
+ err := awsAwsjson10_deserializeDocumentInvalidEndpointException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorInvalidExportTimeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.InvalidExportTimeException{}
+ err := awsAwsjson10_deserializeDocumentInvalidExportTimeException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorInvalidRestoreTimeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.InvalidRestoreTimeException{}
+ err := awsAwsjson10_deserializeDocumentInvalidRestoreTimeException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorItemCollectionSizeLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ItemCollectionSizeLimitExceededException{}
+ err := awsAwsjson10_deserializeDocumentItemCollectionSizeLimitExceededException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorLimitExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.LimitExceededException{}
+ err := awsAwsjson10_deserializeDocumentLimitExceededException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorPointInTimeRecoveryUnavailableException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.PointInTimeRecoveryUnavailableException{}
+ err := awsAwsjson10_deserializeDocumentPointInTimeRecoveryUnavailableException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorPolicyNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.PolicyNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentPolicyNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorProvisionedThroughputExceededException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ProvisionedThroughputExceededException{}
+ err := awsAwsjson10_deserializeDocumentProvisionedThroughputExceededException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorReplicaAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ReplicaAlreadyExistsException{}
+ err := awsAwsjson10_deserializeDocumentReplicaAlreadyExistsException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorReplicaNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ReplicaNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentReplicaNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorReplicatedWriteConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ReplicatedWriteConflictException{}
+ err := awsAwsjson10_deserializeDocumentReplicatedWriteConflictException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorRequestLimitExceeded(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.RequestLimitExceeded{}
+ err := awsAwsjson10_deserializeDocumentRequestLimitExceeded(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorResourceInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ResourceInUseException{}
+ err := awsAwsjson10_deserializeDocumentResourceInUseException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorResourceNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ResourceNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentResourceNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorTableAlreadyExistsException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.TableAlreadyExistsException{}
+ err := awsAwsjson10_deserializeDocumentTableAlreadyExistsException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorTableInUseException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.TableInUseException{}
+ err := awsAwsjson10_deserializeDocumentTableInUseException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorTableNotFoundException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.TableNotFoundException{}
+ err := awsAwsjson10_deserializeDocumentTableNotFoundException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorThrottlingException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.ThrottlingException{}
+ err := awsAwsjson10_deserializeDocumentThrottlingException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorTransactionCanceledException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.TransactionCanceledException{}
+ err := awsAwsjson10_deserializeDocumentTransactionCanceledException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorTransactionConflictException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.TransactionConflictException{}
+ err := awsAwsjson10_deserializeDocumentTransactionConflictException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeErrorTransactionInProgressException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ output := &types.TransactionInProgressException{}
+ err := awsAwsjson10_deserializeDocumentTransactionInProgressException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ return output
+}
+
+func awsAwsjson10_deserializeDocumentArchivalSummary(v **types.ArchivalSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ArchivalSummary
+ if *v == nil {
+ sv = &types.ArchivalSummary{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ArchivalBackupArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value)
+ }
+ sv.ArchivalBackupArn = ptr.String(jtv)
+ }
+
+ case "ArchivalDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.ArchivalDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ArchivalReason":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ArchivalReason to be of type string, got %T instead", value)
+ }
+ sv.ArchivalReason = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeDefinition(v **types.AttributeDefinition, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AttributeDefinition
+ if *v == nil {
+ sv = &types.AttributeDefinition{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "AttributeName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected KeySchemaAttributeName to be of type string, got %T instead", value)
+ }
+ sv.AttributeName = ptr.String(jtv)
+ }
+
+ case "AttributeType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ScalarAttributeType to be of type string, got %T instead", value)
+ }
+ sv.AttributeType = types.ScalarAttributeType(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeDefinitions(v *[]types.AttributeDefinition, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.AttributeDefinition
+ if *v == nil {
+ cv = []types.AttributeDefinition{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.AttributeDefinition
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentAttributeDefinition(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeMap(v *map[string]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string]types.AttributeValue
+ if *v == nil {
+ mv = map[string]types.AttributeValue{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal types.AttributeValue
+ mapVar := parsedVal
+ if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil {
+ return err
+ }
+ parsedVal = mapVar
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeNameList(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAttributeValue(v *types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var uv types.AttributeValue
+loop:
+ for key, value := range shape {
+ if value == nil {
+ continue
+ }
+ switch key {
+ case "B":
+ var mv []byte
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BinaryAttributeValue to be []byte, got %T instead", value)
+ }
+ dv, err := base64.StdEncoding.DecodeString(jtv)
+ if err != nil {
+ return fmt.Errorf("failed to base64 decode BinaryAttributeValue, %w", err)
+ }
+ mv = dv
+ }
+ uv = &types.AttributeValueMemberB{Value: mv}
+ break loop
+
+ case "BOOL":
+ var mv bool
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected BooleanAttributeValue to be of type *bool, got %T instead", value)
+ }
+ mv = jtv
+ }
+ uv = &types.AttributeValueMemberBOOL{Value: mv}
+ break loop
+
+ case "BS":
+ var mv [][]byte
+ if err := awsAwsjson10_deserializeDocumentBinarySetAttributeValue(&mv, value); err != nil {
+ return err
+ }
+ uv = &types.AttributeValueMemberBS{Value: mv}
+ break loop
+
+ case "L":
+ var mv []types.AttributeValue
+ if err := awsAwsjson10_deserializeDocumentListAttributeValue(&mv, value); err != nil {
+ return err
+ }
+ uv = &types.AttributeValueMemberL{Value: mv}
+ break loop
+
+ case "M":
+ var mv map[string]types.AttributeValue
+ if err := awsAwsjson10_deserializeDocumentMapAttributeValue(&mv, value); err != nil {
+ return err
+ }
+ uv = &types.AttributeValueMemberM{Value: mv}
+ break loop
+
+ case "N":
+ var mv string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NumberAttributeValue to be of type string, got %T instead", value)
+ }
+ mv = jtv
+ }
+ uv = &types.AttributeValueMemberN{Value: mv}
+ break loop
+
+ case "NS":
+ var mv []string
+ if err := awsAwsjson10_deserializeDocumentNumberSetAttributeValue(&mv, value); err != nil {
+ return err
+ }
+ uv = &types.AttributeValueMemberNS{Value: mv}
+ break loop
+
+ case "NULL":
+ var mv bool
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected NullAttributeValue to be of type *bool, got %T instead", value)
+ }
+ mv = jtv
+ }
+ uv = &types.AttributeValueMemberNULL{Value: mv}
+ break loop
+
+ case "S":
+ var mv string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected StringAttributeValue to be of type string, got %T instead", value)
+ }
+ mv = jtv
+ }
+ uv = &types.AttributeValueMemberS{Value: mv}
+ break loop
+
+ case "SS":
+ var mv []string
+ if err := awsAwsjson10_deserializeDocumentStringSetAttributeValue(&mv, value); err != nil {
+ return err
+ }
+ uv = &types.AttributeValueMemberSS{Value: mv}
+ break loop
+
+ default:
+ uv = &types.UnknownUnionMember{Tag: key}
+ break loop
+
+ }
+ }
+ *v = uv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAutoScalingPolicyDescription(v **types.AutoScalingPolicyDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AutoScalingPolicyDescription
+ if *v == nil {
+ sv = &types.AutoScalingPolicyDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "PolicyName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AutoScalingPolicyName to be of type string, got %T instead", value)
+ }
+ sv.PolicyName = ptr.String(jtv)
+ }
+
+ case "TargetTrackingScalingPolicyConfiguration":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingTargetTrackingScalingPolicyConfigurationDescription(&sv.TargetTrackingScalingPolicyConfiguration, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAutoScalingPolicyDescriptionList(v *[]types.AutoScalingPolicyDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.AutoScalingPolicyDescription
+ if *v == nil {
+ cv = []types.AutoScalingPolicyDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.AutoScalingPolicyDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentAutoScalingPolicyDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(v **types.AutoScalingSettingsDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AutoScalingSettingsDescription
+ if *v == nil {
+ sv = &types.AutoScalingSettingsDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "AutoScalingDisabled":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected BooleanObject to be of type *bool, got %T instead", value)
+ }
+ sv.AutoScalingDisabled = ptr.Bool(jtv)
+ }
+
+ case "AutoScalingRoleArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.AutoScalingRoleArn = ptr.String(jtv)
+ }
+
+ case "MaximumUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.MaximumUnits = ptr.Int64(i64)
+ }
+
+ case "MinimumUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.MinimumUnits = ptr.Int64(i64)
+ }
+
+ case "ScalingPolicies":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingPolicyDescriptionList(&sv.ScalingPolicies, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentAutoScalingTargetTrackingScalingPolicyConfigurationDescription(v **types.AutoScalingTargetTrackingScalingPolicyConfigurationDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AutoScalingTargetTrackingScalingPolicyConfigurationDescription
+ if *v == nil {
+ sv = &types.AutoScalingTargetTrackingScalingPolicyConfigurationDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "DisableScaleIn":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected BooleanObject to be of type *bool, got %T instead", value)
+ }
+ sv.DisableScaleIn = ptr.Bool(jtv)
+ }
+
+ case "ScaleInCooldown":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected IntegerObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ScaleInCooldown = ptr.Int32(int32(i64))
+ }
+
+ case "ScaleOutCooldown":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected IntegerObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ScaleOutCooldown = ptr.Int32(int32(i64))
+ }
+
+ case "TargetValue":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.TargetValue = ptr.Float64(f64)
+
+ case string:
+ var f64 float64
+ switch {
+ case strings.EqualFold(jtv, "NaN"):
+ f64 = math.NaN()
+
+ case strings.EqualFold(jtv, "Infinity"):
+ f64 = math.Inf(1)
+
+ case strings.EqualFold(jtv, "-Infinity"):
+ f64 = math.Inf(-1)
+
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+ }
+ sv.TargetValue = ptr.Float64(f64)
+
+ default:
+ return fmt.Errorf("expected DoubleObject to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBackupDescription(v **types.BackupDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.BackupDescription
+ if *v == nil {
+ sv = &types.BackupDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BackupDetails":
+ if err := awsAwsjson10_deserializeDocumentBackupDetails(&sv.BackupDetails, value); err != nil {
+ return err
+ }
+
+ case "SourceTableDetails":
+ if err := awsAwsjson10_deserializeDocumentSourceTableDetails(&sv.SourceTableDetails, value); err != nil {
+ return err
+ }
+
+ case "SourceTableFeatureDetails":
+ if err := awsAwsjson10_deserializeDocumentSourceTableFeatureDetails(&sv.SourceTableFeatureDetails, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBackupDetails(v **types.BackupDetails, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.BackupDetails
+ if *v == nil {
+ sv = &types.BackupDetails{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BackupArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value)
+ }
+ sv.BackupArn = ptr.String(jtv)
+ }
+
+ case "BackupCreationDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.BackupCreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected BackupCreationDateTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "BackupExpiryDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.BackupExpiryDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "BackupName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupName to be of type string, got %T instead", value)
+ }
+ sv.BackupName = ptr.String(jtv)
+ }
+
+ case "BackupSizeBytes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected BackupSizeBytes to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.BackupSizeBytes = ptr.Int64(i64)
+ }
+
+ case "BackupStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupStatus to be of type string, got %T instead", value)
+ }
+ sv.BackupStatus = types.BackupStatus(jtv)
+ }
+
+ case "BackupType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupType to be of type string, got %T instead", value)
+ }
+ sv.BackupType = types.BackupType(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBackupInUseException(v **types.BackupInUseException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.BackupInUseException
+ if *v == nil {
+ sv = &types.BackupInUseException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBackupNotFoundException(v **types.BackupNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.BackupNotFoundException
+ if *v == nil {
+ sv = &types.BackupNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBackupSummaries(v *[]types.BackupSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.BackupSummary
+ if *v == nil {
+ cv = []types.BackupSummary{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.BackupSummary
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentBackupSummary(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBackupSummary(v **types.BackupSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.BackupSummary
+ if *v == nil {
+ sv = &types.BackupSummary{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BackupArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value)
+ }
+ sv.BackupArn = ptr.String(jtv)
+ }
+
+ case "BackupCreationDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.BackupCreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected BackupCreationDateTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "BackupExpiryDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.BackupExpiryDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "BackupName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupName to be of type string, got %T instead", value)
+ }
+ sv.BackupName = ptr.String(jtv)
+ }
+
+ case "BackupSizeBytes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected BackupSizeBytes to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.BackupSizeBytes = ptr.Int64(i64)
+ }
+
+ case "BackupStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupStatus to be of type string, got %T instead", value)
+ }
+ sv.BackupStatus = types.BackupStatus(jtv)
+ }
+
+ case "BackupType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupType to be of type string, got %T instead", value)
+ }
+ sv.BackupType = types.BackupType(jtv)
+ }
+
+ case "TableArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+ }
+ sv.TableArn = ptr.String(jtv)
+ }
+
+ case "TableId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableId to be of type string, got %T instead", value)
+ }
+ sv.TableId = ptr.String(jtv)
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBatchGetRequestMap(v *map[string]types.KeysAndAttributes, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string]types.KeysAndAttributes
+ if *v == nil {
+ mv = map[string]types.KeysAndAttributes{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal types.KeysAndAttributes
+ mapVar := parsedVal
+ destAddr := &mapVar
+ if err := awsAwsjson10_deserializeDocumentKeysAndAttributes(&destAddr, value); err != nil {
+ return err
+ }
+ parsedVal = *destAddr
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBatchGetResponseMap(v *map[string][]map[string]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string][]map[string]types.AttributeValue
+ if *v == nil {
+ mv = map[string][]map[string]types.AttributeValue{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal []map[string]types.AttributeValue
+ mapVar := parsedVal
+ if err := awsAwsjson10_deserializeDocumentItemList(&mapVar, value); err != nil {
+ return err
+ }
+ parsedVal = mapVar
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBatchStatementError(v **types.BatchStatementError, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.BatchStatementError
+ if *v == nil {
+ sv = &types.BatchStatementError{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Code":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BatchStatementErrorCodeEnum to be of type string, got %T instead", value)
+ }
+ sv.Code = types.BatchStatementErrorCodeEnum(jtv)
+ }
+
+ case "Item":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil {
+ return err
+ }
+
+ case "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBatchStatementResponse(v **types.BatchStatementResponse, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.BatchStatementResponse
+ if *v == nil {
+ sv = &types.BatchStatementResponse{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Error":
+ if err := awsAwsjson10_deserializeDocumentBatchStatementError(&sv.Error, value); err != nil {
+ return err
+ }
+
+ case "Item":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil {
+ return err
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBatchWriteItemRequestMap(v *map[string][]types.WriteRequest, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string][]types.WriteRequest
+ if *v == nil {
+ mv = map[string][]types.WriteRequest{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal []types.WriteRequest
+ mapVar := parsedVal
+ if err := awsAwsjson10_deserializeDocumentWriteRequests(&mapVar, value); err != nil {
+ return err
+ }
+ parsedVal = mapVar
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBillingModeSummary(v **types.BillingModeSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.BillingModeSummary
+ if *v == nil {
+ sv = &types.BillingModeSummary{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BillingMode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BillingMode to be of type string, got %T instead", value)
+ }
+ sv.BillingMode = types.BillingMode(jtv)
+ }
+
+ case "LastUpdateToPayPerRequestDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.LastUpdateToPayPerRequestDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentBinarySetAttributeValue(v *[][]byte, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv [][]byte
+ if *v == nil {
+ cv = [][]byte{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col []byte
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BinaryAttributeValue to be []byte, got %T instead", value)
+ }
+ dv, err := base64.StdEncoding.DecodeString(jtv)
+ if err != nil {
+ return fmt.Errorf("failed to base64 decode BinaryAttributeValue, %w", err)
+ }
+ col = dv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentCancellationReason(v **types.CancellationReason, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.CancellationReason
+ if *v == nil {
+ sv = &types.CancellationReason{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Code":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Code to be of type string, got %T instead", value)
+ }
+ sv.Code = ptr.String(jtv)
+ }
+
+ case "Item":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil {
+ return err
+ }
+
+ case "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentCancellationReasonList(v *[]types.CancellationReason, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.CancellationReason
+ if *v == nil {
+ cv = []types.CancellationReason{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.CancellationReason
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentCancellationReason(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentCapacity(v **types.Capacity, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.Capacity
+ if *v == nil {
+ sv = &types.Capacity{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "CapacityUnits":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.CapacityUnits = ptr.Float64(f64)
+
+ case string:
+ var f64 float64
+ switch {
+ case strings.EqualFold(jtv, "NaN"):
+ f64 = math.NaN()
+
+ case strings.EqualFold(jtv, "Infinity"):
+ f64 = math.Inf(1)
+
+ case strings.EqualFold(jtv, "-Infinity"):
+ f64 = math.Inf(-1)
+
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+ }
+ sv.CapacityUnits = ptr.Float64(f64)
+
+ default:
+ return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ReadCapacityUnits":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.ReadCapacityUnits = ptr.Float64(f64)
+
+ case string:
+ var f64 float64
+ switch {
+ case strings.EqualFold(jtv, "NaN"):
+ f64 = math.NaN()
+
+ case strings.EqualFold(jtv, "Infinity"):
+ f64 = math.Inf(1)
+
+ case strings.EqualFold(jtv, "-Infinity"):
+ f64 = math.Inf(-1)
+
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+ }
+ sv.ReadCapacityUnits = ptr.Float64(f64)
+
+ default:
+ return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "WriteCapacityUnits":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.WriteCapacityUnits = ptr.Float64(f64)
+
+ case string:
+ var f64 float64
+ switch {
+ case strings.EqualFold(jtv, "NaN"):
+ f64 = math.NaN()
+
+ case strings.EqualFold(jtv, "Infinity"):
+ f64 = math.Inf(1)
+
+ case strings.EqualFold(jtv, "-Infinity"):
+ f64 = math.Inf(-1)
+
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+ }
+ sv.WriteCapacityUnits = ptr.Float64(f64)
+
+ default:
+ return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentConditionalCheckFailedException(v **types.ConditionalCheckFailedException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ConditionalCheckFailedException
+ if *v == nil {
+ sv = &types.ConditionalCheckFailedException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Item":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil {
+ return err
+ }
+
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentConsumedCapacity(v **types.ConsumedCapacity, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ConsumedCapacity
+ if *v == nil {
+ sv = &types.ConsumedCapacity{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "CapacityUnits":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.CapacityUnits = ptr.Float64(f64)
+
+ case string:
+ var f64 float64
+ switch {
+ case strings.EqualFold(jtv, "NaN"):
+ f64 = math.NaN()
+
+ case strings.EqualFold(jtv, "Infinity"):
+ f64 = math.Inf(1)
+
+ case strings.EqualFold(jtv, "-Infinity"):
+ f64 = math.Inf(-1)
+
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+ }
+ sv.CapacityUnits = ptr.Float64(f64)
+
+ default:
+ return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "GlobalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentSecondaryIndexesCapacityMap(&sv.GlobalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "LocalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentSecondaryIndexesCapacityMap(&sv.LocalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "ReadCapacityUnits":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.ReadCapacityUnits = ptr.Float64(f64)
+
+ case string:
+ var f64 float64
+ switch {
+ case strings.EqualFold(jtv, "NaN"):
+ f64 = math.NaN()
+
+ case strings.EqualFold(jtv, "Infinity"):
+ f64 = math.Inf(1)
+
+ case strings.EqualFold(jtv, "-Infinity"):
+ f64 = math.Inf(-1)
+
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+ }
+ sv.ReadCapacityUnits = ptr.Float64(f64)
+
+ default:
+ return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "Table":
+ if err := awsAwsjson10_deserializeDocumentCapacity(&sv.Table, value); err != nil {
+ return err
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ case "WriteCapacityUnits":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.WriteCapacityUnits = ptr.Float64(f64)
+
+ case string:
+ var f64 float64
+ switch {
+ case strings.EqualFold(jtv, "NaN"):
+ f64 = math.NaN()
+
+ case strings.EqualFold(jtv, "Infinity"):
+ f64 = math.Inf(1)
+
+ case strings.EqualFold(jtv, "-Infinity"):
+ f64 = math.Inf(-1)
+
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+ }
+ sv.WriteCapacityUnits = ptr.Float64(f64)
+
+ default:
+ return fmt.Errorf("expected ConsumedCapacityUnits to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(v *[]types.ConsumedCapacity, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ConsumedCapacity
+ if *v == nil {
+ cv = []types.ConsumedCapacity{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ConsumedCapacity
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentContinuousBackupsDescription(v **types.ContinuousBackupsDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ContinuousBackupsDescription
+ if *v == nil {
+ sv = &types.ContinuousBackupsDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ContinuousBackupsStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ContinuousBackupsStatus to be of type string, got %T instead", value)
+ }
+ sv.ContinuousBackupsStatus = types.ContinuousBackupsStatus(jtv)
+ }
+
+ case "PointInTimeRecoveryDescription":
+ if err := awsAwsjson10_deserializeDocumentPointInTimeRecoveryDescription(&sv.PointInTimeRecoveryDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentContinuousBackupsUnavailableException(v **types.ContinuousBackupsUnavailableException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ContinuousBackupsUnavailableException
+ if *v == nil {
+ sv = &types.ContinuousBackupsUnavailableException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentContributorInsightsRuleList(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ContributorInsightsRule to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentContributorInsightsSummaries(v *[]types.ContributorInsightsSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ContributorInsightsSummary
+ if *v == nil {
+ cv = []types.ContributorInsightsSummary{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ContributorInsightsSummary
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentContributorInsightsSummary(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentContributorInsightsSummary(v **types.ContributorInsightsSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ContributorInsightsSummary
+ if *v == nil {
+ sv = &types.ContributorInsightsSummary{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ContributorInsightsMode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ContributorInsightsMode to be of type string, got %T instead", value)
+ }
+ sv.ContributorInsightsMode = types.ContributorInsightsMode(jtv)
+ }
+
+ case "ContributorInsightsStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ContributorInsightsStatus to be of type string, got %T instead", value)
+ }
+ sv.ContributorInsightsStatus = types.ContributorInsightsStatus(jtv)
+ }
+
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentCsvHeaderList(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected CsvHeader to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentCsvOptions(v **types.CsvOptions, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.CsvOptions
+ if *v == nil {
+ sv = &types.CsvOptions{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Delimiter":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected CsvDelimiter to be of type string, got %T instead", value)
+ }
+ sv.Delimiter = ptr.String(jtv)
+ }
+
+ case "HeaderList":
+ if err := awsAwsjson10_deserializeDocumentCsvHeaderList(&sv.HeaderList, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentDeleteRequest(v **types.DeleteRequest, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.DeleteRequest
+ if *v == nil {
+ sv = &types.DeleteRequest{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Key":
+ if err := awsAwsjson10_deserializeDocumentKey(&sv.Key, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentDuplicateItemException(v **types.DuplicateItemException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.DuplicateItemException
+ if *v == nil {
+ sv = &types.DuplicateItemException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(v **types.EnableKinesisStreamingConfiguration, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.EnableKinesisStreamingConfiguration
+ if *v == nil {
+ sv = &types.EnableKinesisStreamingConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ApproximateCreationDateTimePrecision":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value)
+ }
+ sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentEndpoint(v **types.Endpoint, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.Endpoint
+ if *v == nil {
+ sv = &types.Endpoint{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Address":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.Address = ptr.String(jtv)
+ }
+
+ case "CachePeriodInMinutes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected Long to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.CachePeriodInMinutes = i64
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentEndpoints(v *[]types.Endpoint, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.Endpoint
+ if *v == nil {
+ cv = []types.Endpoint{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.Endpoint
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentEndpoint(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentExportConflictException(v **types.ExportConflictException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ExportConflictException
+ if *v == nil {
+ sv = &types.ExportConflictException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentExportDescription(v **types.ExportDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ExportDescription
+ if *v == nil {
+ sv = &types.ExportDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BilledSizeBytes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected BilledSizeBytes to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.BilledSizeBytes = ptr.Int64(i64)
+ }
+
+ case "ClientToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ClientToken to be of type string, got %T instead", value)
+ }
+ sv.ClientToken = ptr.String(jtv)
+ }
+
+ case "EndTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ExportEndTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ExportArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportArn to be of type string, got %T instead", value)
+ }
+ sv.ExportArn = ptr.String(jtv)
+ }
+
+ case "ExportFormat":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportFormat to be of type string, got %T instead", value)
+ }
+ sv.ExportFormat = types.ExportFormat(jtv)
+ }
+
+ case "ExportManifest":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportManifest to be of type string, got %T instead", value)
+ }
+ sv.ExportManifest = ptr.String(jtv)
+ }
+
+ case "ExportStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportStatus to be of type string, got %T instead", value)
+ }
+ sv.ExportStatus = types.ExportStatus(jtv)
+ }
+
+ case "ExportTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.ExportTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ExportTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ExportType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportType to be of type string, got %T instead", value)
+ }
+ sv.ExportType = types.ExportType(jtv)
+ }
+
+ case "FailureCode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected FailureCode to be of type string, got %T instead", value)
+ }
+ sv.FailureCode = ptr.String(jtv)
+ }
+
+ case "FailureMessage":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected FailureMessage to be of type string, got %T instead", value)
+ }
+ sv.FailureMessage = ptr.String(jtv)
+ }
+
+ case "IncrementalExportSpecification":
+ if err := awsAwsjson10_deserializeDocumentIncrementalExportSpecification(&sv.IncrementalExportSpecification, value); err != nil {
+ return err
+ }
+
+ case "ItemCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ItemCount to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ItemCount = ptr.Int64(i64)
+ }
+
+ case "S3Bucket":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value)
+ }
+ sv.S3Bucket = ptr.String(jtv)
+ }
+
+ case "S3BucketOwner":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected S3BucketOwner to be of type string, got %T instead", value)
+ }
+ sv.S3BucketOwner = ptr.String(jtv)
+ }
+
+ case "S3Prefix":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected S3Prefix to be of type string, got %T instead", value)
+ }
+ sv.S3Prefix = ptr.String(jtv)
+ }
+
+ case "S3SseAlgorithm":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected S3SseAlgorithm to be of type string, got %T instead", value)
+ }
+ sv.S3SseAlgorithm = types.S3SseAlgorithm(jtv)
+ }
+
+ case "S3SseKmsKeyId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected S3SseKmsKeyId to be of type string, got %T instead", value)
+ }
+ sv.S3SseKmsKeyId = ptr.String(jtv)
+ }
+
+ case "StartTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ExportStartTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "TableArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+ }
+ sv.TableArn = ptr.String(jtv)
+ }
+
+ case "TableId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableId to be of type string, got %T instead", value)
+ }
+ sv.TableId = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentExportNotFoundException(v **types.ExportNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ExportNotFoundException
+ if *v == nil {
+ sv = &types.ExportNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentExportSummaries(v *[]types.ExportSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ExportSummary
+ if *v == nil {
+ cv = []types.ExportSummary{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ExportSummary
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentExportSummary(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentExportSummary(v **types.ExportSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ExportSummary
+ if *v == nil {
+ sv = &types.ExportSummary{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ExportArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportArn to be of type string, got %T instead", value)
+ }
+ sv.ExportArn = ptr.String(jtv)
+ }
+
+ case "ExportStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportStatus to be of type string, got %T instead", value)
+ }
+ sv.ExportStatus = types.ExportStatus(jtv)
+ }
+
+ case "ExportType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportType to be of type string, got %T instead", value)
+ }
+ sv.ExportType = types.ExportType(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentExpressionAttributeNameMap(v *map[string]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string]string
+ if *v == nil {
+ mv = map[string]string{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AttributeName to be of type string, got %T instead", value)
+ }
+ parsedVal = jtv
+ }
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentFailureException(v **types.FailureException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.FailureException
+ if *v == nil {
+ sv = &types.FailureException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ExceptionDescription":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExceptionDescription to be of type string, got %T instead", value)
+ }
+ sv.ExceptionDescription = ptr.String(jtv)
+ }
+
+ case "ExceptionName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExceptionName to be of type string, got %T instead", value)
+ }
+ sv.ExceptionName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndex(v **types.GlobalSecondaryIndex, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalSecondaryIndex
+ if *v == nil {
+ sv = &types.GlobalSecondaryIndex{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "KeySchema":
+ if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+ return err
+ }
+
+ case "OnDemandThroughput":
+ if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+ return err
+ }
+
+ case "Projection":
+ if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedThroughput":
+ if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil {
+ return err
+ }
+
+ case "WarmThroughput":
+ if err := awsAwsjson10_deserializeDocumentWarmThroughput(&sv.WarmThroughput, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescription(v **types.GlobalSecondaryIndexDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalSecondaryIndexDescription
+ if *v == nil {
+ sv = &types.GlobalSecondaryIndexDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Backfilling":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected Backfilling to be of type *bool, got %T instead", value)
+ }
+ sv.Backfilling = ptr.Bool(jtv)
+ }
+
+ case "IndexArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.IndexArn = ptr.String(jtv)
+ }
+
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "IndexSizeBytes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.IndexSizeBytes = ptr.Int64(i64)
+ }
+
+ case "IndexStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexStatus to be of type string, got %T instead", value)
+ }
+ sv.IndexStatus = types.IndexStatus(jtv)
+ }
+
+ case "ItemCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ItemCount = ptr.Int64(i64)
+ }
+
+ case "KeySchema":
+ if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+ return err
+ }
+
+ case "OnDemandThroughput":
+ if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+ return err
+ }
+
+ case "Projection":
+ if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedThroughput":
+ if err := awsAwsjson10_deserializeDocumentProvisionedThroughputDescription(&sv.ProvisionedThroughput, value); err != nil {
+ return err
+ }
+
+ case "WarmThroughput":
+ if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexWarmThroughputDescription(&sv.WarmThroughput, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescriptionList(v *[]types.GlobalSecondaryIndexDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.GlobalSecondaryIndexDescription
+ if *v == nil {
+ cv = []types.GlobalSecondaryIndexDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.GlobalSecondaryIndexDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexes(v *[]types.GlobalSecondaryIndexInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.GlobalSecondaryIndexInfo
+ if *v == nil {
+ cv = []types.GlobalSecondaryIndexInfo{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.GlobalSecondaryIndexInfo
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexInfo(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexInfo(v **types.GlobalSecondaryIndexInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalSecondaryIndexInfo
+ if *v == nil {
+ sv = &types.GlobalSecondaryIndexInfo{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "KeySchema":
+ if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+ return err
+ }
+
+ case "OnDemandThroughput":
+ if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+ return err
+ }
+
+ case "Projection":
+ if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedThroughput":
+ if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexList(v *[]types.GlobalSecondaryIndex, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.GlobalSecondaryIndex
+ if *v == nil {
+ cv = []types.GlobalSecondaryIndex{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.GlobalSecondaryIndex
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndex(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalSecondaryIndexWarmThroughputDescription(v **types.GlobalSecondaryIndexWarmThroughputDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalSecondaryIndexWarmThroughputDescription
+ if *v == nil {
+ sv = &types.GlobalSecondaryIndexWarmThroughputDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ReadUnitsPerSecond":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ReadUnitsPerSecond = ptr.Int64(i64)
+ }
+
+ case "Status":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexStatus to be of type string, got %T instead", value)
+ }
+ sv.Status = types.IndexStatus(jtv)
+ }
+
+ case "WriteUnitsPerSecond":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.WriteUnitsPerSecond = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTable(v **types.GlobalTable, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalTable
+ if *v == nil {
+ sv = &types.GlobalTable{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalTableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.GlobalTableName = ptr.String(jtv)
+ }
+
+ case "ReplicationGroup":
+ if err := awsAwsjson10_deserializeDocumentReplicaList(&sv.ReplicationGroup, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableAlreadyExistsException(v **types.GlobalTableAlreadyExistsException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalTableAlreadyExistsException
+ if *v == nil {
+ sv = &types.GlobalTableAlreadyExistsException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableDescription(v **types.GlobalTableDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalTableDescription
+ if *v == nil {
+ sv = &types.GlobalTableDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "CreationDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.CreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "GlobalTableArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected GlobalTableArnString to be of type string, got %T instead", value)
+ }
+ sv.GlobalTableArn = ptr.String(jtv)
+ }
+
+ case "GlobalTableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.GlobalTableName = ptr.String(jtv)
+ }
+
+ case "GlobalTableStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected GlobalTableStatus to be of type string, got %T instead", value)
+ }
+ sv.GlobalTableStatus = types.GlobalTableStatus(jtv)
+ }
+
+ case "ReplicationGroup":
+ if err := awsAwsjson10_deserializeDocumentReplicaDescriptionList(&sv.ReplicationGroup, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableList(v *[]types.GlobalTable, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.GlobalTable
+ if *v == nil {
+ cv = []types.GlobalTable{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.GlobalTable
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentGlobalTable(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableNotFoundException(v **types.GlobalTableNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalTableNotFoundException
+ if *v == nil {
+ sv = &types.GlobalTableNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableWitnessDescription(v **types.GlobalTableWitnessDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.GlobalTableWitnessDescription
+ if *v == nil {
+ sv = &types.GlobalTableWitnessDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "RegionName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RegionName to be of type string, got %T instead", value)
+ }
+ sv.RegionName = ptr.String(jtv)
+ }
+
+ case "WitnessStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected WitnessStatus to be of type string, got %T instead", value)
+ }
+ sv.WitnessStatus = types.WitnessStatus(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentGlobalTableWitnessDescriptionList(v *[]types.GlobalTableWitnessDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.GlobalTableWitnessDescription
+ if *v == nil {
+ cv = []types.GlobalTableWitnessDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.GlobalTableWitnessDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentGlobalTableWitnessDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentIdempotentParameterMismatchException(v **types.IdempotentParameterMismatchException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.IdempotentParameterMismatchException
+ if *v == nil {
+ sv = &types.IdempotentParameterMismatchException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportConflictException(v **types.ImportConflictException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ImportConflictException
+ if *v == nil {
+ sv = &types.ImportConflictException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportNotFoundException(v **types.ImportNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ImportNotFoundException
+ if *v == nil {
+ sv = &types.ImportNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportSummary(v **types.ImportSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ImportSummary
+ if *v == nil {
+ sv = &types.ImportSummary{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "CloudWatchLogGroupArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected CloudWatchLogGroupArn to be of type string, got %T instead", value)
+ }
+ sv.CloudWatchLogGroupArn = ptr.String(jtv)
+ }
+
+ case "EndTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ImportEndTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ImportArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ImportArn to be of type string, got %T instead", value)
+ }
+ sv.ImportArn = ptr.String(jtv)
+ }
+
+ case "ImportStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ImportStatus to be of type string, got %T instead", value)
+ }
+ sv.ImportStatus = types.ImportStatus(jtv)
+ }
+
+ case "InputFormat":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected InputFormat to be of type string, got %T instead", value)
+ }
+ sv.InputFormat = types.InputFormat(jtv)
+ }
+
+ case "S3BucketSource":
+ if err := awsAwsjson10_deserializeDocumentS3BucketSource(&sv.S3BucketSource, value); err != nil {
+ return err
+ }
+
+ case "StartTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ImportStartTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "TableArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+ }
+ sv.TableArn = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportSummaryList(v *[]types.ImportSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ImportSummary
+ if *v == nil {
+ cv = []types.ImportSummary{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ImportSummary
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentImportSummary(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentImportTableDescription(v **types.ImportTableDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ImportTableDescription
+ if *v == nil {
+ sv = &types.ImportTableDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ClientToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ClientToken to be of type string, got %T instead", value)
+ }
+ sv.ClientToken = ptr.String(jtv)
+ }
+
+ case "CloudWatchLogGroupArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected CloudWatchLogGroupArn to be of type string, got %T instead", value)
+ }
+ sv.CloudWatchLogGroupArn = ptr.String(jtv)
+ }
+
+ case "EndTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.EndTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ImportEndTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ErrorCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ErrorCount to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ErrorCount = i64
+ }
+
+ case "FailureCode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected FailureCode to be of type string, got %T instead", value)
+ }
+ sv.FailureCode = ptr.String(jtv)
+ }
+
+ case "FailureMessage":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected FailureMessage to be of type string, got %T instead", value)
+ }
+ sv.FailureMessage = ptr.String(jtv)
+ }
+
+ case "ImportArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ImportArn to be of type string, got %T instead", value)
+ }
+ sv.ImportArn = ptr.String(jtv)
+ }
+
+ case "ImportedItemCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ImportedItemCount to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ImportedItemCount = i64
+ }
+
+ case "ImportStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ImportStatus to be of type string, got %T instead", value)
+ }
+ sv.ImportStatus = types.ImportStatus(jtv)
+ }
+
+ case "InputCompressionType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected InputCompressionType to be of type string, got %T instead", value)
+ }
+ sv.InputCompressionType = types.InputCompressionType(jtv)
+ }
+
+ case "InputFormat":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected InputFormat to be of type string, got %T instead", value)
+ }
+ sv.InputFormat = types.InputFormat(jtv)
+ }
+
+ case "InputFormatOptions":
+ if err := awsAwsjson10_deserializeDocumentInputFormatOptions(&sv.InputFormatOptions, value); err != nil {
+ return err
+ }
+
+ case "ProcessedItemCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ProcessedItemCount to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ProcessedItemCount = i64
+ }
+
+ case "ProcessedSizeBytes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ProcessedSizeBytes = ptr.Int64(i64)
+ }
+
+ case "S3BucketSource":
+ if err := awsAwsjson10_deserializeDocumentS3BucketSource(&sv.S3BucketSource, value); err != nil {
+ return err
+ }
+
+ case "StartTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.StartTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ImportStartTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "TableArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+ }
+ sv.TableArn = ptr.String(jtv)
+ }
+
+ case "TableCreationParameters":
+ if err := awsAwsjson10_deserializeDocumentTableCreationParameters(&sv.TableCreationParameters, value); err != nil {
+ return err
+ }
+
+ case "TableId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableId to be of type string, got %T instead", value)
+ }
+ sv.TableId = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentIncrementalExportSpecification(v **types.IncrementalExportSpecification, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.IncrementalExportSpecification
+ if *v == nil {
+ sv = &types.IncrementalExportSpecification{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ExportFromTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.ExportFromTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ExportFromTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ExportToTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.ExportToTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected ExportToTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ExportViewType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportViewType to be of type string, got %T instead", value)
+ }
+ sv.ExportViewType = types.ExportViewType(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentIndexNotFoundException(v **types.IndexNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.IndexNotFoundException
+ if *v == nil {
+ sv = &types.IndexNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentInputFormatOptions(v **types.InputFormatOptions, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InputFormatOptions
+ if *v == nil {
+ sv = &types.InputFormatOptions{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Csv":
+ if err := awsAwsjson10_deserializeDocumentCsvOptions(&sv.Csv, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentInternalServerError(v **types.InternalServerError, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InternalServerError
+ if *v == nil {
+ sv = &types.InternalServerError{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentInvalidEndpointException(v **types.InvalidEndpointException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidEndpointException
+ if *v == nil {
+ sv = &types.InvalidEndpointException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentInvalidExportTimeException(v **types.InvalidExportTimeException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidExportTimeException
+ if *v == nil {
+ sv = &types.InvalidExportTimeException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentInvalidRestoreTimeException(v **types.InvalidRestoreTimeException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRestoreTimeException
+ if *v == nil {
+ sv = &types.InvalidRestoreTimeException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemCollectionKeyAttributeMap(v *map[string]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string]types.AttributeValue
+ if *v == nil {
+ mv = map[string]types.AttributeValue{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal types.AttributeValue
+ mapVar := parsedVal
+ if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil {
+ return err
+ }
+ parsedVal = mapVar
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemCollectionMetrics(v **types.ItemCollectionMetrics, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ItemCollectionMetrics
+ if *v == nil {
+ sv = &types.ItemCollectionMetrics{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ItemCollectionKey":
+ if err := awsAwsjson10_deserializeDocumentItemCollectionKeyAttributeMap(&sv.ItemCollectionKey, value); err != nil {
+ return err
+ }
+
+ case "SizeEstimateRangeGB":
+ if err := awsAwsjson10_deserializeDocumentItemCollectionSizeEstimateRange(&sv.SizeEstimateRangeGB, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemCollectionMetricsMultiple(v *[]types.ItemCollectionMetrics, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ItemCollectionMetrics
+ if *v == nil {
+ cv = []types.ItemCollectionMetrics{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ItemCollectionMetrics
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentItemCollectionMetrics(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemCollectionMetricsPerTable(v *map[string][]types.ItemCollectionMetrics, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string][]types.ItemCollectionMetrics
+ if *v == nil {
+ mv = map[string][]types.ItemCollectionMetrics{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal []types.ItemCollectionMetrics
+ mapVar := parsedVal
+ if err := awsAwsjson10_deserializeDocumentItemCollectionMetricsMultiple(&mapVar, value); err != nil {
+ return err
+ }
+ parsedVal = mapVar
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemCollectionSizeEstimateRange(v *[]float64, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []float64
+ if *v == nil {
+ cv = []float64{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col float64
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ col = f64
+
+ case string:
+ var f64 float64
+ switch {
+ case strings.EqualFold(jtv, "NaN"):
+ f64 = math.NaN()
+
+ case strings.EqualFold(jtv, "Infinity"):
+ f64 = math.Inf(1)
+
+ case strings.EqualFold(jtv, "-Infinity"):
+ f64 = math.Inf(-1)
+
+ default:
+ return fmt.Errorf("unknown JSON number value: %s", jtv)
+
+ }
+ col = f64
+
+ default:
+ return fmt.Errorf("expected ItemCollectionSizeEstimateBound to be a JSON Number, got %T instead", value)
+
+ }
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemCollectionSizeLimitExceededException(v **types.ItemCollectionSizeLimitExceededException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ItemCollectionSizeLimitExceededException
+ if *v == nil {
+ sv = &types.ItemCollectionSizeLimitExceededException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemList(v *[]map[string]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []map[string]types.AttributeValue
+ if *v == nil {
+ cv = []map[string]types.AttributeValue{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col map[string]types.AttributeValue
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&col, value); err != nil {
+ return err
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemResponse(v **types.ItemResponse, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ItemResponse
+ if *v == nil {
+ sv = &types.ItemResponse{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Item":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentItemResponseList(v *[]types.ItemResponse, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ItemResponse
+ if *v == nil {
+ cv = []types.ItemResponse{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ItemResponse
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentItemResponse(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentKey(v *map[string]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string]types.AttributeValue
+ if *v == nil {
+ mv = map[string]types.AttributeValue{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal types.AttributeValue
+ mapVar := parsedVal
+ if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil {
+ return err
+ }
+ parsedVal = mapVar
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentKeyList(v *[]map[string]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []map[string]types.AttributeValue
+ if *v == nil {
+ cv = []map[string]types.AttributeValue{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col map[string]types.AttributeValue
+ if err := awsAwsjson10_deserializeDocumentKey(&col, value); err != nil {
+ return err
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentKeysAndAttributes(v **types.KeysAndAttributes, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.KeysAndAttributes
+ if *v == nil {
+ sv = &types.KeysAndAttributes{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "AttributesToGet":
+ if err := awsAwsjson10_deserializeDocumentAttributeNameList(&sv.AttributesToGet, value); err != nil {
+ return err
+ }
+
+ case "ConsistentRead":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected ConsistentRead to be of type *bool, got %T instead", value)
+ }
+ sv.ConsistentRead = ptr.Bool(jtv)
+ }
+
+ case "ExpressionAttributeNames":
+ if err := awsAwsjson10_deserializeDocumentExpressionAttributeNameMap(&sv.ExpressionAttributeNames, value); err != nil {
+ return err
+ }
+
+ case "Keys":
+ if err := awsAwsjson10_deserializeDocumentKeyList(&sv.Keys, value); err != nil {
+ return err
+ }
+
+ case "ProjectionExpression":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ProjectionExpression to be of type string, got %T instead", value)
+ }
+ sv.ProjectionExpression = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentKeySchema(v *[]types.KeySchemaElement, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.KeySchemaElement
+ if *v == nil {
+ cv = []types.KeySchemaElement{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.KeySchemaElement
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentKeySchemaElement(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentKeySchemaElement(v **types.KeySchemaElement, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.KeySchemaElement
+ if *v == nil {
+ sv = &types.KeySchemaElement{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "AttributeName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected KeySchemaAttributeName to be of type string, got %T instead", value)
+ }
+ sv.AttributeName = ptr.String(jtv)
+ }
+
+ case "KeyType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected KeyType to be of type string, got %T instead", value)
+ }
+ sv.KeyType = types.KeyType(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentKinesisDataStreamDestination(v **types.KinesisDataStreamDestination, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.KinesisDataStreamDestination
+ if *v == nil {
+ sv = &types.KinesisDataStreamDestination{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ApproximateCreationDateTimePrecision":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value)
+ }
+ sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv)
+ }
+
+ case "DestinationStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value)
+ }
+ sv.DestinationStatus = types.DestinationStatus(jtv)
+ }
+
+ case "DestinationStatusDescription":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.DestinationStatusDescription = ptr.String(jtv)
+ }
+
+ case "StreamArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value)
+ }
+ sv.StreamArn = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentKinesisDataStreamDestinations(v *[]types.KinesisDataStreamDestination, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.KinesisDataStreamDestination
+ if *v == nil {
+ cv = []types.KinesisDataStreamDestination{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.KinesisDataStreamDestination
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentKinesisDataStreamDestination(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentLimitExceededException(v **types.LimitExceededException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.LimitExceededException
+ if *v == nil {
+ sv = &types.LimitExceededException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentListAttributeValue(v *[]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.AttributeValue
+ if *v == nil {
+ cv = []types.AttributeValue{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.AttributeValue
+ if err := awsAwsjson10_deserializeDocumentAttributeValue(&col, value); err != nil {
+ return err
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentLocalSecondaryIndexDescription(v **types.LocalSecondaryIndexDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.LocalSecondaryIndexDescription
+ if *v == nil {
+ sv = &types.LocalSecondaryIndexDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "IndexArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.IndexArn = ptr.String(jtv)
+ }
+
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "IndexSizeBytes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.IndexSizeBytes = ptr.Int64(i64)
+ }
+
+ case "ItemCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ItemCount = ptr.Int64(i64)
+ }
+
+ case "KeySchema":
+ if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+ return err
+ }
+
+ case "Projection":
+ if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentLocalSecondaryIndexDescriptionList(v *[]types.LocalSecondaryIndexDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.LocalSecondaryIndexDescription
+ if *v == nil {
+ cv = []types.LocalSecondaryIndexDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.LocalSecondaryIndexDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentLocalSecondaryIndexDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentLocalSecondaryIndexes(v *[]types.LocalSecondaryIndexInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.LocalSecondaryIndexInfo
+ if *v == nil {
+ cv = []types.LocalSecondaryIndexInfo{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.LocalSecondaryIndexInfo
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentLocalSecondaryIndexInfo(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentLocalSecondaryIndexInfo(v **types.LocalSecondaryIndexInfo, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.LocalSecondaryIndexInfo
+ if *v == nil {
+ sv = &types.LocalSecondaryIndexInfo{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "KeySchema":
+ if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+ return err
+ }
+
+ case "Projection":
+ if err := awsAwsjson10_deserializeDocumentProjection(&sv.Projection, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentMapAttributeValue(v *map[string]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string]types.AttributeValue
+ if *v == nil {
+ mv = map[string]types.AttributeValue{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal types.AttributeValue
+ mapVar := parsedVal
+ if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil {
+ return err
+ }
+ parsedVal = mapVar
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentNonKeyAttributeNameList(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NonKeyAttributeName to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentNumberSetAttributeValue(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NumberAttributeValue to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentOnDemandThroughput(v **types.OnDemandThroughput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.OnDemandThroughput
+ if *v == nil {
+ sv = &types.OnDemandThroughput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "MaxReadRequestUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.MaxReadRequestUnits = ptr.Int64(i64)
+ }
+
+ case "MaxWriteRequestUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.MaxWriteRequestUnits = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(v **types.OnDemandThroughputOverride, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.OnDemandThroughputOverride
+ if *v == nil {
+ sv = &types.OnDemandThroughputOverride{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "MaxReadRequestUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.MaxReadRequestUnits = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentPartiQLBatchResponse(v *[]types.BatchStatementResponse, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.BatchStatementResponse
+ if *v == nil {
+ cv = []types.BatchStatementResponse{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.BatchStatementResponse
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentBatchStatementResponse(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentPointInTimeRecoveryDescription(v **types.PointInTimeRecoveryDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.PointInTimeRecoveryDescription
+ if *v == nil {
+ sv = &types.PointInTimeRecoveryDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "EarliestRestorableDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.EarliestRestorableDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "LatestRestorableDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.LatestRestorableDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "PointInTimeRecoveryStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected PointInTimeRecoveryStatus to be of type string, got %T instead", value)
+ }
+ sv.PointInTimeRecoveryStatus = types.PointInTimeRecoveryStatus(jtv)
+ }
+
+ case "RecoveryPeriodInDays":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected RecoveryPeriodInDays to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.RecoveryPeriodInDays = ptr.Int32(int32(i64))
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentPointInTimeRecoveryUnavailableException(v **types.PointInTimeRecoveryUnavailableException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.PointInTimeRecoveryUnavailableException
+ if *v == nil {
+ sv = &types.PointInTimeRecoveryUnavailableException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentPolicyNotFoundException(v **types.PolicyNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.PolicyNotFoundException
+ if *v == nil {
+ sv = &types.PolicyNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentProjection(v **types.Projection, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.Projection
+ if *v == nil {
+ sv = &types.Projection{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "NonKeyAttributes":
+ if err := awsAwsjson10_deserializeDocumentNonKeyAttributeNameList(&sv.NonKeyAttributes, value); err != nil {
+ return err
+ }
+
+ case "ProjectionType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ProjectionType to be of type string, got %T instead", value)
+ }
+ sv.ProjectionType = types.ProjectionType(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentProvisionedThroughput(v **types.ProvisionedThroughput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ProvisionedThroughput
+ if *v == nil {
+ sv = &types.ProvisionedThroughput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ReadCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ReadCapacityUnits = ptr.Int64(i64)
+ }
+
+ case "WriteCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.WriteCapacityUnits = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentProvisionedThroughputDescription(v **types.ProvisionedThroughputDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ProvisionedThroughputDescription
+ if *v == nil {
+ sv = &types.ProvisionedThroughputDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "LastDecreaseDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.LastDecreaseDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "LastIncreaseDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.LastIncreaseDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "NumberOfDecreasesToday":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.NumberOfDecreasesToday = ptr.Int64(i64)
+ }
+
+ case "ReadCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected NonNegativeLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ReadCapacityUnits = ptr.Int64(i64)
+ }
+
+ case "WriteCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected NonNegativeLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.WriteCapacityUnits = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentProvisionedThroughputExceededException(v **types.ProvisionedThroughputExceededException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ProvisionedThroughputExceededException
+ if *v == nil {
+ sv = &types.ProvisionedThroughputExceededException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ case "ThrottlingReasons":
+ if err := awsAwsjson10_deserializeDocumentThrottlingReasonList(&sv.ThrottlingReasons, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(v **types.ProvisionedThroughputOverride, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ProvisionedThroughputOverride
+ if *v == nil {
+ sv = &types.ProvisionedThroughputOverride{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ReadCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ReadCapacityUnits = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentPutItemInputAttributeMap(v *map[string]types.AttributeValue, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string]types.AttributeValue
+ if *v == nil {
+ mv = map[string]types.AttributeValue{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal types.AttributeValue
+ mapVar := parsedVal
+ if err := awsAwsjson10_deserializeDocumentAttributeValue(&mapVar, value); err != nil {
+ return err
+ }
+ parsedVal = mapVar
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentPutRequest(v **types.PutRequest, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.PutRequest
+ if *v == nil {
+ sv = &types.PutRequest{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Item":
+ if err := awsAwsjson10_deserializeDocumentPutItemInputAttributeMap(&sv.Item, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplica(v **types.Replica, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.Replica
+ if *v == nil {
+ sv = &types.Replica{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "RegionName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RegionName to be of type string, got %T instead", value)
+ }
+ sv.RegionName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaAlreadyExistsException(v **types.ReplicaAlreadyExistsException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicaAlreadyExistsException
+ if *v == nil {
+ sv = &types.ReplicaAlreadyExistsException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaAutoScalingDescription(v **types.ReplicaAutoScalingDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicaAutoScalingDescription
+ if *v == nil {
+ sv = &types.ReplicaAutoScalingDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexAutoScalingDescriptionList(&sv.GlobalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "RegionName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RegionName to be of type string, got %T instead", value)
+ }
+ sv.RegionName = ptr.String(jtv)
+ }
+
+ case "ReplicaProvisionedReadCapacityAutoScalingSettings":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ReplicaProvisionedReadCapacityAutoScalingSettings, value); err != nil {
+ return err
+ }
+
+ case "ReplicaProvisionedWriteCapacityAutoScalingSettings":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ReplicaProvisionedWriteCapacityAutoScalingSettings, value); err != nil {
+ return err
+ }
+
+ case "ReplicaStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ReplicaStatus to be of type string, got %T instead", value)
+ }
+ sv.ReplicaStatus = types.ReplicaStatus(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaAutoScalingDescriptionList(v *[]types.ReplicaAutoScalingDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ReplicaAutoScalingDescription
+ if *v == nil {
+ cv = []types.ReplicaAutoScalingDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ReplicaAutoScalingDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentReplicaAutoScalingDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaDescription(v **types.ReplicaDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicaDescription
+ if *v == nil {
+ sv = &types.ReplicaDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescriptionList(&sv.GlobalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "KMSMasterKeyId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected KMSMasterKeyId to be of type string, got %T instead", value)
+ }
+ sv.KMSMasterKeyId = ptr.String(jtv)
+ }
+
+ case "OnDemandThroughputOverride":
+ if err := awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(&sv.OnDemandThroughputOverride, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedThroughputOverride":
+ if err := awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(&sv.ProvisionedThroughputOverride, value); err != nil {
+ return err
+ }
+
+ case "RegionName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RegionName to be of type string, got %T instead", value)
+ }
+ sv.RegionName = ptr.String(jtv)
+ }
+
+ case "ReplicaInaccessibleDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.ReplicaInaccessibleDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "ReplicaStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ReplicaStatus to be of type string, got %T instead", value)
+ }
+ sv.ReplicaStatus = types.ReplicaStatus(jtv)
+ }
+
+ case "ReplicaStatusDescription":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ReplicaStatusDescription to be of type string, got %T instead", value)
+ }
+ sv.ReplicaStatusDescription = ptr.String(jtv)
+ }
+
+ case "ReplicaStatusPercentProgress":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ReplicaStatusPercentProgress to be of type string, got %T instead", value)
+ }
+ sv.ReplicaStatusPercentProgress = ptr.String(jtv)
+ }
+
+ case "ReplicaTableClassSummary":
+ if err := awsAwsjson10_deserializeDocumentTableClassSummary(&sv.ReplicaTableClassSummary, value); err != nil {
+ return err
+ }
+
+ case "WarmThroughput":
+ if err := awsAwsjson10_deserializeDocumentTableWarmThroughputDescription(&sv.WarmThroughput, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaDescriptionList(v *[]types.ReplicaDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ReplicaDescription
+ if *v == nil {
+ cv = []types.ReplicaDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ReplicaDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentReplicaDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexAutoScalingDescription(v **types.ReplicaGlobalSecondaryIndexAutoScalingDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicaGlobalSecondaryIndexAutoScalingDescription
+ if *v == nil {
+ sv = &types.ReplicaGlobalSecondaryIndexAutoScalingDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "IndexStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexStatus to be of type string, got %T instead", value)
+ }
+ sv.IndexStatus = types.IndexStatus(jtv)
+ }
+
+ case "ProvisionedReadCapacityAutoScalingSettings":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ProvisionedReadCapacityAutoScalingSettings, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedWriteCapacityAutoScalingSettings":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ProvisionedWriteCapacityAutoScalingSettings, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexAutoScalingDescriptionList(v *[]types.ReplicaGlobalSecondaryIndexAutoScalingDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ReplicaGlobalSecondaryIndexAutoScalingDescription
+ if *v == nil {
+ cv = []types.ReplicaGlobalSecondaryIndexAutoScalingDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ReplicaGlobalSecondaryIndexAutoScalingDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexAutoScalingDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescription(v **types.ReplicaGlobalSecondaryIndexDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicaGlobalSecondaryIndexDescription
+ if *v == nil {
+ sv = &types.ReplicaGlobalSecondaryIndexDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "OnDemandThroughputOverride":
+ if err := awsAwsjson10_deserializeDocumentOnDemandThroughputOverride(&sv.OnDemandThroughputOverride, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedThroughputOverride":
+ if err := awsAwsjson10_deserializeDocumentProvisionedThroughputOverride(&sv.ProvisionedThroughputOverride, value); err != nil {
+ return err
+ }
+
+ case "WarmThroughput":
+ if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexWarmThroughputDescription(&sv.WarmThroughput, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescriptionList(v *[]types.ReplicaGlobalSecondaryIndexDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ReplicaGlobalSecondaryIndexDescription
+ if *v == nil {
+ cv = []types.ReplicaGlobalSecondaryIndexDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ReplicaGlobalSecondaryIndexDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexSettingsDescription(v **types.ReplicaGlobalSecondaryIndexSettingsDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicaGlobalSecondaryIndexSettingsDescription
+ if *v == nil {
+ sv = &types.ReplicaGlobalSecondaryIndexSettingsDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "IndexStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexStatus to be of type string, got %T instead", value)
+ }
+ sv.IndexStatus = types.IndexStatus(jtv)
+ }
+
+ case "ProvisionedReadCapacityAutoScalingSettings":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ProvisionedReadCapacityAutoScalingSettings, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedReadCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ProvisionedReadCapacityUnits = ptr.Int64(i64)
+ }
+
+ case "ProvisionedWriteCapacityAutoScalingSettings":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ProvisionedWriteCapacityAutoScalingSettings, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedWriteCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ProvisionedWriteCapacityUnits = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexSettingsDescriptionList(v *[]types.ReplicaGlobalSecondaryIndexSettingsDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ReplicaGlobalSecondaryIndexSettingsDescription
+ if *v == nil {
+ cv = []types.ReplicaGlobalSecondaryIndexSettingsDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ReplicaGlobalSecondaryIndexSettingsDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexSettingsDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaList(v *[]types.Replica, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.Replica
+ if *v == nil {
+ cv = []types.Replica{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.Replica
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentReplica(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaNotFoundException(v **types.ReplicaNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicaNotFoundException
+ if *v == nil {
+ sv = &types.ReplicaNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaSettingsDescription(v **types.ReplicaSettingsDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicaSettingsDescription
+ if *v == nil {
+ sv = &types.ReplicaSettingsDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "RegionName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RegionName to be of type string, got %T instead", value)
+ }
+ sv.RegionName = ptr.String(jtv)
+ }
+
+ case "ReplicaBillingModeSummary":
+ if err := awsAwsjson10_deserializeDocumentBillingModeSummary(&sv.ReplicaBillingModeSummary, value); err != nil {
+ return err
+ }
+
+ case "ReplicaGlobalSecondaryIndexSettings":
+ if err := awsAwsjson10_deserializeDocumentReplicaGlobalSecondaryIndexSettingsDescriptionList(&sv.ReplicaGlobalSecondaryIndexSettings, value); err != nil {
+ return err
+ }
+
+ case "ReplicaProvisionedReadCapacityAutoScalingSettings":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ReplicaProvisionedReadCapacityAutoScalingSettings, value); err != nil {
+ return err
+ }
+
+ case "ReplicaProvisionedReadCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected NonNegativeLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ReplicaProvisionedReadCapacityUnits = ptr.Int64(i64)
+ }
+
+ case "ReplicaProvisionedWriteCapacityAutoScalingSettings":
+ if err := awsAwsjson10_deserializeDocumentAutoScalingSettingsDescription(&sv.ReplicaProvisionedWriteCapacityAutoScalingSettings, value); err != nil {
+ return err
+ }
+
+ case "ReplicaProvisionedWriteCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected NonNegativeLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ReplicaProvisionedWriteCapacityUnits = ptr.Int64(i64)
+ }
+
+ case "ReplicaStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ReplicaStatus to be of type string, got %T instead", value)
+ }
+ sv.ReplicaStatus = types.ReplicaStatus(jtv)
+ }
+
+ case "ReplicaTableClassSummary":
+ if err := awsAwsjson10_deserializeDocumentTableClassSummary(&sv.ReplicaTableClassSummary, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicaSettingsDescriptionList(v *[]types.ReplicaSettingsDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ReplicaSettingsDescription
+ if *v == nil {
+ cv = []types.ReplicaSettingsDescription{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ReplicaSettingsDescription
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentReplicaSettingsDescription(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentReplicatedWriteConflictException(v **types.ReplicatedWriteConflictException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ReplicatedWriteConflictException
+ if *v == nil {
+ sv = &types.ReplicatedWriteConflictException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentRequestLimitExceeded(v **types.RequestLimitExceeded, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.RequestLimitExceeded
+ if *v == nil {
+ sv = &types.RequestLimitExceeded{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ case "ThrottlingReasons":
+ if err := awsAwsjson10_deserializeDocumentThrottlingReasonList(&sv.ThrottlingReasons, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentResourceInUseException(v **types.ResourceInUseException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ResourceInUseException
+ if *v == nil {
+ sv = &types.ResourceInUseException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentResourceNotFoundException(v **types.ResourceNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ResourceNotFoundException
+ if *v == nil {
+ sv = &types.ResourceNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentRestoreSummary(v **types.RestoreSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.RestoreSummary
+ if *v == nil {
+ sv = &types.RestoreSummary{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "RestoreDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.RestoreDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "RestoreInProgress":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected RestoreInProgress to be of type *bool, got %T instead", value)
+ }
+ sv.RestoreInProgress = ptr.Bool(jtv)
+ }
+
+ case "SourceBackupArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value)
+ }
+ sv.SourceBackupArn = ptr.String(jtv)
+ }
+
+ case "SourceTableArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+ }
+ sv.SourceTableArn = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentS3BucketSource(v **types.S3BucketSource, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.S3BucketSource
+ if *v == nil {
+ sv = &types.S3BucketSource{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "S3Bucket":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected S3Bucket to be of type string, got %T instead", value)
+ }
+ sv.S3Bucket = ptr.String(jtv)
+ }
+
+ case "S3BucketOwner":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected S3BucketOwner to be of type string, got %T instead", value)
+ }
+ sv.S3BucketOwner = ptr.String(jtv)
+ }
+
+ case "S3KeyPrefix":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected S3Prefix to be of type string, got %T instead", value)
+ }
+ sv.S3KeyPrefix = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentSecondaryIndexesCapacityMap(v *map[string]types.Capacity, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var mv map[string]types.Capacity
+ if *v == nil {
+ mv = map[string]types.Capacity{}
+ } else {
+ mv = *v
+ }
+
+ for key, value := range shape {
+ var parsedVal types.Capacity
+ mapVar := parsedVal
+ destAddr := &mapVar
+ if err := awsAwsjson10_deserializeDocumentCapacity(&destAddr, value); err != nil {
+ return err
+ }
+ parsedVal = *destAddr
+ mv[key] = parsedVal
+
+ }
+ *v = mv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentSourceTableDetails(v **types.SourceTableDetails, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.SourceTableDetails
+ if *v == nil {
+ sv = &types.SourceTableDetails{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BillingMode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BillingMode to be of type string, got %T instead", value)
+ }
+ sv.BillingMode = types.BillingMode(jtv)
+ }
+
+ case "ItemCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ItemCount to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ItemCount = ptr.Int64(i64)
+ }
+
+ case "KeySchema":
+ if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+ return err
+ }
+
+ case "OnDemandThroughput":
+ if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedThroughput":
+ if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil {
+ return err
+ }
+
+ case "TableArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableArn to be of type string, got %T instead", value)
+ }
+ sv.TableArn = ptr.String(jtv)
+ }
+
+ case "TableCreationDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.TableCreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected TableCreationDateTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "TableId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableId to be of type string, got %T instead", value)
+ }
+ sv.TableId = ptr.String(jtv)
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ case "TableSizeBytes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.TableSizeBytes = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentSourceTableFeatureDetails(v **types.SourceTableFeatureDetails, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.SourceTableFeatureDetails
+ if *v == nil {
+ sv = &types.SourceTableFeatureDetails{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexes(&sv.GlobalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "LocalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentLocalSecondaryIndexes(&sv.LocalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "SSEDescription":
+ if err := awsAwsjson10_deserializeDocumentSSEDescription(&sv.SSEDescription, value); err != nil {
+ return err
+ }
+
+ case "StreamDescription":
+ if err := awsAwsjson10_deserializeDocumentStreamSpecification(&sv.StreamDescription, value); err != nil {
+ return err
+ }
+
+ case "TimeToLiveDescription":
+ if err := awsAwsjson10_deserializeDocumentTimeToLiveDescription(&sv.TimeToLiveDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentSSEDescription(v **types.SSEDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.SSEDescription
+ if *v == nil {
+ sv = &types.SSEDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "InaccessibleEncryptionDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.InaccessibleEncryptionDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "KMSMasterKeyArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected KMSMasterKeyArn to be of type string, got %T instead", value)
+ }
+ sv.KMSMasterKeyArn = ptr.String(jtv)
+ }
+
+ case "SSEType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected SSEType to be of type string, got %T instead", value)
+ }
+ sv.SSEType = types.SSEType(jtv)
+ }
+
+ case "Status":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected SSEStatus to be of type string, got %T instead", value)
+ }
+ sv.Status = types.SSEStatus(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentSSESpecification(v **types.SSESpecification, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.SSESpecification
+ if *v == nil {
+ sv = &types.SSESpecification{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Enabled":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected SSEEnabled to be of type *bool, got %T instead", value)
+ }
+ sv.Enabled = ptr.Bool(jtv)
+ }
+
+ case "KMSMasterKeyId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected KMSMasterKeyId to be of type string, got %T instead", value)
+ }
+ sv.KMSMasterKeyId = ptr.String(jtv)
+ }
+
+ case "SSEType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected SSEType to be of type string, got %T instead", value)
+ }
+ sv.SSEType = types.SSEType(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentStreamSpecification(v **types.StreamSpecification, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.StreamSpecification
+ if *v == nil {
+ sv = &types.StreamSpecification{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "StreamEnabled":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected StreamEnabled to be of type *bool, got %T instead", value)
+ }
+ sv.StreamEnabled = ptr.Bool(jtv)
+ }
+
+ case "StreamViewType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected StreamViewType to be of type string, got %T instead", value)
+ }
+ sv.StreamViewType = types.StreamViewType(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentStringSetAttributeValue(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected StringAttributeValue to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableAlreadyExistsException(v **types.TableAlreadyExistsException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TableAlreadyExistsException
+ if *v == nil {
+ sv = &types.TableAlreadyExistsException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableAutoScalingDescription(v **types.TableAutoScalingDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TableAutoScalingDescription
+ if *v == nil {
+ sv = &types.TableAutoScalingDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Replicas":
+ if err := awsAwsjson10_deserializeDocumentReplicaAutoScalingDescriptionList(&sv.Replicas, value); err != nil {
+ return err
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ case "TableStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableStatus to be of type string, got %T instead", value)
+ }
+ sv.TableStatus = types.TableStatus(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableClassSummary(v **types.TableClassSummary, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TableClassSummary
+ if *v == nil {
+ sv = &types.TableClassSummary{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "LastUpdateDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.LastUpdateDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "TableClass":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableClass to be of type string, got %T instead", value)
+ }
+ sv.TableClass = types.TableClass(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableCreationParameters(v **types.TableCreationParameters, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TableCreationParameters
+ if *v == nil {
+ sv = &types.TableCreationParameters{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "AttributeDefinitions":
+ if err := awsAwsjson10_deserializeDocumentAttributeDefinitions(&sv.AttributeDefinitions, value); err != nil {
+ return err
+ }
+
+ case "BillingMode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BillingMode to be of type string, got %T instead", value)
+ }
+ sv.BillingMode = types.BillingMode(jtv)
+ }
+
+ case "GlobalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexList(&sv.GlobalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "KeySchema":
+ if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+ return err
+ }
+
+ case "OnDemandThroughput":
+ if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedThroughput":
+ if err := awsAwsjson10_deserializeDocumentProvisionedThroughput(&sv.ProvisionedThroughput, value); err != nil {
+ return err
+ }
+
+ case "SSESpecification":
+ if err := awsAwsjson10_deserializeDocumentSSESpecification(&sv.SSESpecification, value); err != nil {
+ return err
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableDescription(v **types.TableDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TableDescription
+ if *v == nil {
+ sv = &types.TableDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ArchivalSummary":
+ if err := awsAwsjson10_deserializeDocumentArchivalSummary(&sv.ArchivalSummary, value); err != nil {
+ return err
+ }
+
+ case "AttributeDefinitions":
+ if err := awsAwsjson10_deserializeDocumentAttributeDefinitions(&sv.AttributeDefinitions, value); err != nil {
+ return err
+ }
+
+ case "BillingModeSummary":
+ if err := awsAwsjson10_deserializeDocumentBillingModeSummary(&sv.BillingModeSummary, value); err != nil {
+ return err
+ }
+
+ case "CreationDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.CreationDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected Date to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "DeletionProtectionEnabled":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected DeletionProtectionEnabled to be of type *bool, got %T instead", value)
+ }
+ sv.DeletionProtectionEnabled = ptr.Bool(jtv)
+ }
+
+ case "GlobalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentGlobalSecondaryIndexDescriptionList(&sv.GlobalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "GlobalTableVersion":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.GlobalTableVersion = ptr.String(jtv)
+ }
+
+ case "GlobalTableWitnesses":
+ if err := awsAwsjson10_deserializeDocumentGlobalTableWitnessDescriptionList(&sv.GlobalTableWitnesses, value); err != nil {
+ return err
+ }
+
+ case "ItemCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ItemCount = ptr.Int64(i64)
+ }
+
+ case "KeySchema":
+ if err := awsAwsjson10_deserializeDocumentKeySchema(&sv.KeySchema, value); err != nil {
+ return err
+ }
+
+ case "LatestStreamArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value)
+ }
+ sv.LatestStreamArn = ptr.String(jtv)
+ }
+
+ case "LatestStreamLabel":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.LatestStreamLabel = ptr.String(jtv)
+ }
+
+ case "LocalSecondaryIndexes":
+ if err := awsAwsjson10_deserializeDocumentLocalSecondaryIndexDescriptionList(&sv.LocalSecondaryIndexes, value); err != nil {
+ return err
+ }
+
+ case "MultiRegionConsistency":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected MultiRegionConsistency to be of type string, got %T instead", value)
+ }
+ sv.MultiRegionConsistency = types.MultiRegionConsistency(jtv)
+ }
+
+ case "OnDemandThroughput":
+ if err := awsAwsjson10_deserializeDocumentOnDemandThroughput(&sv.OnDemandThroughput, value); err != nil {
+ return err
+ }
+
+ case "ProvisionedThroughput":
+ if err := awsAwsjson10_deserializeDocumentProvisionedThroughputDescription(&sv.ProvisionedThroughput, value); err != nil {
+ return err
+ }
+
+ case "Replicas":
+ if err := awsAwsjson10_deserializeDocumentReplicaDescriptionList(&sv.Replicas, value); err != nil {
+ return err
+ }
+
+ case "RestoreSummary":
+ if err := awsAwsjson10_deserializeDocumentRestoreSummary(&sv.RestoreSummary, value); err != nil {
+ return err
+ }
+
+ case "SSEDescription":
+ if err := awsAwsjson10_deserializeDocumentSSEDescription(&sv.SSEDescription, value); err != nil {
+ return err
+ }
+
+ case "StreamSpecification":
+ if err := awsAwsjson10_deserializeDocumentStreamSpecification(&sv.StreamSpecification, value); err != nil {
+ return err
+ }
+
+ case "TableArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected String to be of type string, got %T instead", value)
+ }
+ sv.TableArn = ptr.String(jtv)
+ }
+
+ case "TableClassSummary":
+ if err := awsAwsjson10_deserializeDocumentTableClassSummary(&sv.TableClassSummary, value); err != nil {
+ return err
+ }
+
+ case "TableId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableId to be of type string, got %T instead", value)
+ }
+ sv.TableId = ptr.String(jtv)
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ case "TableSizeBytes":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.TableSizeBytes = ptr.Int64(i64)
+ }
+
+ case "TableStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableStatus to be of type string, got %T instead", value)
+ }
+ sv.TableStatus = types.TableStatus(jtv)
+ }
+
+ case "WarmThroughput":
+ if err := awsAwsjson10_deserializeDocumentTableWarmThroughputDescription(&sv.WarmThroughput, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableInUseException(v **types.TableInUseException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TableInUseException
+ if *v == nil {
+ sv = &types.TableInUseException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableNameList(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableNotFoundException(v **types.TableNotFoundException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TableNotFoundException
+ if *v == nil {
+ sv = &types.TableNotFoundException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTableWarmThroughputDescription(v **types.TableWarmThroughputDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TableWarmThroughputDescription
+ if *v == nil {
+ sv = &types.TableWarmThroughputDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ReadUnitsPerSecond":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ReadUnitsPerSecond = ptr.Int64(i64)
+ }
+
+ case "Status":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableStatus to be of type string, got %T instead", value)
+ }
+ sv.Status = types.TableStatus(jtv)
+ }
+
+ case "WriteUnitsPerSecond":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.WriteUnitsPerSecond = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTag(v **types.Tag, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.Tag
+ if *v == nil {
+ sv = &types.Tag{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Key":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TagKeyString to be of type string, got %T instead", value)
+ }
+ sv.Key = ptr.String(jtv)
+ }
+
+ case "Value":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TagValueString to be of type string, got %T instead", value)
+ }
+ sv.Value = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTagList(v *[]types.Tag, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.Tag
+ if *v == nil {
+ cv = []types.Tag{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.Tag
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentTag(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentThrottlingException(v **types.ThrottlingException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ThrottlingException
+ if *v == nil {
+ sv = &types.ThrottlingException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AvailabilityErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ case "throttlingReasons":
+ if err := awsAwsjson10_deserializeDocumentThrottlingReasonList(&sv.ThrottlingReasons, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentThrottlingReason(v **types.ThrottlingReason, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.ThrottlingReason
+ if *v == nil {
+ sv = &types.ThrottlingReason{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "reason":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Reason to be of type string, got %T instead", value)
+ }
+ sv.Reason = ptr.String(jtv)
+ }
+
+ case "resource":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Resource to be of type string, got %T instead", value)
+ }
+ sv.Resource = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentThrottlingReasonList(v *[]types.ThrottlingReason, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.ThrottlingReason
+ if *v == nil {
+ cv = []types.ThrottlingReason{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.ThrottlingReason
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentThrottlingReason(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTimeToLiveDescription(v **types.TimeToLiveDescription, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TimeToLiveDescription
+ if *v == nil {
+ sv = &types.TimeToLiveDescription{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "AttributeName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TimeToLiveAttributeName to be of type string, got %T instead", value)
+ }
+ sv.AttributeName = ptr.String(jtv)
+ }
+
+ case "TimeToLiveStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TimeToLiveStatus to be of type string, got %T instead", value)
+ }
+ sv.TimeToLiveStatus = types.TimeToLiveStatus(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTimeToLiveSpecification(v **types.TimeToLiveSpecification, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TimeToLiveSpecification
+ if *v == nil {
+ sv = &types.TimeToLiveSpecification{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "AttributeName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TimeToLiveAttributeName to be of type string, got %T instead", value)
+ }
+ sv.AttributeName = ptr.String(jtv)
+ }
+
+ case "Enabled":
+ if value != nil {
+ jtv, ok := value.(bool)
+ if !ok {
+ return fmt.Errorf("expected TimeToLiveEnabled to be of type *bool, got %T instead", value)
+ }
+ sv.Enabled = ptr.Bool(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTransactionCanceledException(v **types.TransactionCanceledException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TransactionCanceledException
+ if *v == nil {
+ sv = &types.TransactionCanceledException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "CancellationReasons":
+ if err := awsAwsjson10_deserializeDocumentCancellationReasonList(&sv.CancellationReasons, value); err != nil {
+ return err
+ }
+
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTransactionConflictException(v **types.TransactionConflictException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TransactionConflictException
+ if *v == nil {
+ sv = &types.TransactionConflictException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentTransactionInProgressException(v **types.TransactionInProgressException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.TransactionInProgressException
+ if *v == nil {
+ sv = &types.TransactionInProgressException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "message", "Message":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorMessage to be of type string, got %T instead", value)
+ }
+ sv.Message = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentUpdateKinesisStreamingConfiguration(v **types.UpdateKinesisStreamingConfiguration, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.UpdateKinesisStreamingConfiguration
+ if *v == nil {
+ sv = &types.UpdateKinesisStreamingConfiguration{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ApproximateCreationDateTimePrecision":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ApproximateCreationDateTimePrecision to be of type string, got %T instead", value)
+ }
+ sv.ApproximateCreationDateTimePrecision = types.ApproximateCreationDateTimePrecision(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentWarmThroughput(v **types.WarmThroughput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.WarmThroughput
+ if *v == nil {
+ sv = &types.WarmThroughput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ReadUnitsPerSecond":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ReadUnitsPerSecond = ptr.Int64(i64)
+ }
+
+ case "WriteUnitsPerSecond":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected LongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.WriteUnitsPerSecond = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentWriteRequest(v **types.WriteRequest, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.WriteRequest
+ if *v == nil {
+ sv = &types.WriteRequest{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "DeleteRequest":
+ if err := awsAwsjson10_deserializeDocumentDeleteRequest(&sv.DeleteRequest, value); err != nil {
+ return err
+ }
+
+ case "PutRequest":
+ if err := awsAwsjson10_deserializeDocumentPutRequest(&sv.PutRequest, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeDocumentWriteRequests(v *[]types.WriteRequest, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []types.WriteRequest
+ if *v == nil {
+ cv = []types.WriteRequest{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col types.WriteRequest
+ destAddr := &col
+ if err := awsAwsjson10_deserializeDocumentWriteRequest(&destAddr, value); err != nil {
+ return err
+ }
+ col = *destAddr
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentBatchExecuteStatementOutput(v **BatchExecuteStatementOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *BatchExecuteStatementOutput
+ if *v == nil {
+ sv = &BatchExecuteStatementOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "Responses":
+ if err := awsAwsjson10_deserializeDocumentPartiQLBatchResponse(&sv.Responses, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentBatchGetItemOutput(v **BatchGetItemOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *BatchGetItemOutput
+ if *v == nil {
+ sv = &BatchGetItemOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "Responses":
+ if err := awsAwsjson10_deserializeDocumentBatchGetResponseMap(&sv.Responses, value); err != nil {
+ return err
+ }
+
+ case "UnprocessedKeys":
+ if err := awsAwsjson10_deserializeDocumentBatchGetRequestMap(&sv.UnprocessedKeys, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentBatchWriteItemOutput(v **BatchWriteItemOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *BatchWriteItemOutput
+ if *v == nil {
+ sv = &BatchWriteItemOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "ItemCollectionMetrics":
+ if err := awsAwsjson10_deserializeDocumentItemCollectionMetricsPerTable(&sv.ItemCollectionMetrics, value); err != nil {
+ return err
+ }
+
+ case "UnprocessedItems":
+ if err := awsAwsjson10_deserializeDocumentBatchWriteItemRequestMap(&sv.UnprocessedItems, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentCreateBackupOutput(v **CreateBackupOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *CreateBackupOutput
+ if *v == nil {
+ sv = &CreateBackupOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BackupDetails":
+ if err := awsAwsjson10_deserializeDocumentBackupDetails(&sv.BackupDetails, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentCreateGlobalTableOutput(v **CreateGlobalTableOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *CreateGlobalTableOutput
+ if *v == nil {
+ sv = &CreateGlobalTableOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalTableDescription":
+ if err := awsAwsjson10_deserializeDocumentGlobalTableDescription(&sv.GlobalTableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentCreateTableOutput(v **CreateTableOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *CreateTableOutput
+ if *v == nil {
+ sv = &CreateTableOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TableDescription":
+ if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDeleteBackupOutput(v **DeleteBackupOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DeleteBackupOutput
+ if *v == nil {
+ sv = &DeleteBackupOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BackupDescription":
+ if err := awsAwsjson10_deserializeDocumentBackupDescription(&sv.BackupDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDeleteItemOutput(v **DeleteItemOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DeleteItemOutput
+ if *v == nil {
+ sv = &DeleteItemOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Attributes":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Attributes, value); err != nil {
+ return err
+ }
+
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "ItemCollectionMetrics":
+ if err := awsAwsjson10_deserializeDocumentItemCollectionMetrics(&sv.ItemCollectionMetrics, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDeleteResourcePolicyOutput(v **DeleteResourcePolicyOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DeleteResourcePolicyOutput
+ if *v == nil {
+ sv = &DeleteResourcePolicyOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "RevisionId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value)
+ }
+ sv.RevisionId = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDeleteTableOutput(v **DeleteTableOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DeleteTableOutput
+ if *v == nil {
+ sv = &DeleteTableOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TableDescription":
+ if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeBackupOutput(v **DescribeBackupOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeBackupOutput
+ if *v == nil {
+ sv = &DescribeBackupOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BackupDescription":
+ if err := awsAwsjson10_deserializeDocumentBackupDescription(&sv.BackupDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeContinuousBackupsOutput(v **DescribeContinuousBackupsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeContinuousBackupsOutput
+ if *v == nil {
+ sv = &DescribeContinuousBackupsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ContinuousBackupsDescription":
+ if err := awsAwsjson10_deserializeDocumentContinuousBackupsDescription(&sv.ContinuousBackupsDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeContributorInsightsOutput(v **DescribeContributorInsightsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeContributorInsightsOutput
+ if *v == nil {
+ sv = &DescribeContributorInsightsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ContributorInsightsMode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ContributorInsightsMode to be of type string, got %T instead", value)
+ }
+ sv.ContributorInsightsMode = types.ContributorInsightsMode(jtv)
+ }
+
+ case "ContributorInsightsRuleList":
+ if err := awsAwsjson10_deserializeDocumentContributorInsightsRuleList(&sv.ContributorInsightsRuleList, value); err != nil {
+ return err
+ }
+
+ case "ContributorInsightsStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ContributorInsightsStatus to be of type string, got %T instead", value)
+ }
+ sv.ContributorInsightsStatus = types.ContributorInsightsStatus(jtv)
+ }
+
+ case "FailureException":
+ if err := awsAwsjson10_deserializeDocumentFailureException(&sv.FailureException, value); err != nil {
+ return err
+ }
+
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "LastUpdateDateTime":
+ if value != nil {
+ switch jtv := value.(type) {
+ case json.Number:
+ f64, err := jtv.Float64()
+ if err != nil {
+ return err
+ }
+ sv.LastUpdateDateTime = ptr.Time(smithytime.ParseEpochSeconds(f64))
+
+ default:
+ return fmt.Errorf("expected LastUpdateDateTime to be a JSON Number, got %T instead", value)
+
+ }
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeEndpointsOutput(v **DescribeEndpointsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeEndpointsOutput
+ if *v == nil {
+ sv = &DescribeEndpointsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Endpoints":
+ if err := awsAwsjson10_deserializeDocumentEndpoints(&sv.Endpoints, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeExportOutput(v **DescribeExportOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeExportOutput
+ if *v == nil {
+ sv = &DescribeExportOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ExportDescription":
+ if err := awsAwsjson10_deserializeDocumentExportDescription(&sv.ExportDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeGlobalTableOutput(v **DescribeGlobalTableOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeGlobalTableOutput
+ if *v == nil {
+ sv = &DescribeGlobalTableOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalTableDescription":
+ if err := awsAwsjson10_deserializeDocumentGlobalTableDescription(&sv.GlobalTableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeGlobalTableSettingsOutput(v **DescribeGlobalTableSettingsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeGlobalTableSettingsOutput
+ if *v == nil {
+ sv = &DescribeGlobalTableSettingsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalTableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.GlobalTableName = ptr.String(jtv)
+ }
+
+ case "ReplicaSettings":
+ if err := awsAwsjson10_deserializeDocumentReplicaSettingsDescriptionList(&sv.ReplicaSettings, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeImportOutput(v **DescribeImportOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeImportOutput
+ if *v == nil {
+ sv = &DescribeImportOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ImportTableDescription":
+ if err := awsAwsjson10_deserializeDocumentImportTableDescription(&sv.ImportTableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeKinesisStreamingDestinationOutput(v **DescribeKinesisStreamingDestinationOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeKinesisStreamingDestinationOutput
+ if *v == nil {
+ sv = &DescribeKinesisStreamingDestinationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "KinesisDataStreamDestinations":
+ if err := awsAwsjson10_deserializeDocumentKinesisDataStreamDestinations(&sv.KinesisDataStreamDestinations, value); err != nil {
+ return err
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeLimitsOutput(v **DescribeLimitsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeLimitsOutput
+ if *v == nil {
+ sv = &DescribeLimitsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "AccountMaxReadCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.AccountMaxReadCapacityUnits = ptr.Int64(i64)
+ }
+
+ case "AccountMaxWriteCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.AccountMaxWriteCapacityUnits = ptr.Int64(i64)
+ }
+
+ case "TableMaxReadCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.TableMaxReadCapacityUnits = ptr.Int64(i64)
+ }
+
+ case "TableMaxWriteCapacityUnits":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected PositiveLongObject to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.TableMaxWriteCapacityUnits = ptr.Int64(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeTableOutput(v **DescribeTableOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeTableOutput
+ if *v == nil {
+ sv = &DescribeTableOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Table":
+ if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.Table, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeTableReplicaAutoScalingOutput(v **DescribeTableReplicaAutoScalingOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeTableReplicaAutoScalingOutput
+ if *v == nil {
+ sv = &DescribeTableReplicaAutoScalingOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TableAutoScalingDescription":
+ if err := awsAwsjson10_deserializeDocumentTableAutoScalingDescription(&sv.TableAutoScalingDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDescribeTimeToLiveOutput(v **DescribeTimeToLiveOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DescribeTimeToLiveOutput
+ if *v == nil {
+ sv = &DescribeTimeToLiveOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TimeToLiveDescription":
+ if err := awsAwsjson10_deserializeDocumentTimeToLiveDescription(&sv.TimeToLiveDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentDisableKinesisStreamingDestinationOutput(v **DisableKinesisStreamingDestinationOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *DisableKinesisStreamingDestinationOutput
+ if *v == nil {
+ sv = &DisableKinesisStreamingDestinationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "DestinationStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value)
+ }
+ sv.DestinationStatus = types.DestinationStatus(jtv)
+ }
+
+ case "EnableKinesisStreamingConfiguration":
+ if err := awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(&sv.EnableKinesisStreamingConfiguration, value); err != nil {
+ return err
+ }
+
+ case "StreamArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value)
+ }
+ sv.StreamArn = ptr.String(jtv)
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentEnableKinesisStreamingDestinationOutput(v **EnableKinesisStreamingDestinationOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *EnableKinesisStreamingDestinationOutput
+ if *v == nil {
+ sv = &EnableKinesisStreamingDestinationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "DestinationStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value)
+ }
+ sv.DestinationStatus = types.DestinationStatus(jtv)
+ }
+
+ case "EnableKinesisStreamingConfiguration":
+ if err := awsAwsjson10_deserializeDocumentEnableKinesisStreamingConfiguration(&sv.EnableKinesisStreamingConfiguration, value); err != nil {
+ return err
+ }
+
+ case "StreamArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value)
+ }
+ sv.StreamArn = ptr.String(jtv)
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentExecuteStatementOutput(v **ExecuteStatementOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ExecuteStatementOutput
+ if *v == nil {
+ sv = &ExecuteStatementOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "Items":
+ if err := awsAwsjson10_deserializeDocumentItemList(&sv.Items, value); err != nil {
+ return err
+ }
+
+ case "LastEvaluatedKey":
+ if err := awsAwsjson10_deserializeDocumentKey(&sv.LastEvaluatedKey, value); err != nil {
+ return err
+ }
+
+ case "NextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected PartiQLNextToken to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentExecuteTransactionOutput(v **ExecuteTransactionOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ExecuteTransactionOutput
+ if *v == nil {
+ sv = &ExecuteTransactionOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "Responses":
+ if err := awsAwsjson10_deserializeDocumentItemResponseList(&sv.Responses, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentExportTableToPointInTimeOutput(v **ExportTableToPointInTimeOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ExportTableToPointInTimeOutput
+ if *v == nil {
+ sv = &ExportTableToPointInTimeOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ExportDescription":
+ if err := awsAwsjson10_deserializeDocumentExportDescription(&sv.ExportDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentGetItemOutput(v **GetItemOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *GetItemOutput
+ if *v == nil {
+ sv = &GetItemOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "Item":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Item, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentGetResourcePolicyOutput(v **GetResourcePolicyOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *GetResourcePolicyOutput
+ if *v == nil {
+ sv = &GetResourcePolicyOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Policy":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ResourcePolicy to be of type string, got %T instead", value)
+ }
+ sv.Policy = ptr.String(jtv)
+ }
+
+ case "RevisionId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value)
+ }
+ sv.RevisionId = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentImportTableOutput(v **ImportTableOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ImportTableOutput
+ if *v == nil {
+ sv = &ImportTableOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ImportTableDescription":
+ if err := awsAwsjson10_deserializeDocumentImportTableDescription(&sv.ImportTableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentListBackupsOutput(v **ListBackupsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListBackupsOutput
+ if *v == nil {
+ sv = &ListBackupsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "BackupSummaries":
+ if err := awsAwsjson10_deserializeDocumentBackupSummaries(&sv.BackupSummaries, value); err != nil {
+ return err
+ }
+
+ case "LastEvaluatedBackupArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected BackupArn to be of type string, got %T instead", value)
+ }
+ sv.LastEvaluatedBackupArn = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentListContributorInsightsOutput(v **ListContributorInsightsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListContributorInsightsOutput
+ if *v == nil {
+ sv = &ListContributorInsightsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ContributorInsightsSummaries":
+ if err := awsAwsjson10_deserializeDocumentContributorInsightsSummaries(&sv.ContributorInsightsSummaries, value); err != nil {
+ return err
+ }
+
+ case "NextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NextTokenString to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentListExportsOutput(v **ListExportsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListExportsOutput
+ if *v == nil {
+ sv = &ListExportsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ExportSummaries":
+ if err := awsAwsjson10_deserializeDocumentExportSummaries(&sv.ExportSummaries, value); err != nil {
+ return err
+ }
+
+ case "NextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ExportNextToken to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentListGlobalTablesOutput(v **ListGlobalTablesOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListGlobalTablesOutput
+ if *v == nil {
+ sv = &ListGlobalTablesOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalTables":
+ if err := awsAwsjson10_deserializeDocumentGlobalTableList(&sv.GlobalTables, value); err != nil {
+ return err
+ }
+
+ case "LastEvaluatedGlobalTableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.LastEvaluatedGlobalTableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentListImportsOutput(v **ListImportsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListImportsOutput
+ if *v == nil {
+ sv = &ListImportsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ImportSummaryList":
+ if err := awsAwsjson10_deserializeDocumentImportSummaryList(&sv.ImportSummaryList, value); err != nil {
+ return err
+ }
+
+ case "NextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ImportNextToken to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentListTablesOutput(v **ListTablesOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListTablesOutput
+ if *v == nil {
+ sv = &ListTablesOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "LastEvaluatedTableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.LastEvaluatedTableName = ptr.String(jtv)
+ }
+
+ case "TableNames":
+ if err := awsAwsjson10_deserializeDocumentTableNameList(&sv.TableNames, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentListTagsOfResourceOutput(v **ListTagsOfResourceOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ListTagsOfResourceOutput
+ if *v == nil {
+ sv = &ListTagsOfResourceOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "NextToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected NextTokenString to be of type string, got %T instead", value)
+ }
+ sv.NextToken = ptr.String(jtv)
+ }
+
+ case "Tags":
+ if err := awsAwsjson10_deserializeDocumentTagList(&sv.Tags, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentPutItemOutput(v **PutItemOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *PutItemOutput
+ if *v == nil {
+ sv = &PutItemOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Attributes":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Attributes, value); err != nil {
+ return err
+ }
+
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "ItemCollectionMetrics":
+ if err := awsAwsjson10_deserializeDocumentItemCollectionMetrics(&sv.ItemCollectionMetrics, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentPutResourcePolicyOutput(v **PutResourcePolicyOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *PutResourcePolicyOutput
+ if *v == nil {
+ sv = &PutResourcePolicyOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "RevisionId":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected PolicyRevisionId to be of type string, got %T instead", value)
+ }
+ sv.RevisionId = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentQueryOutput(v **QueryOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *QueryOutput
+ if *v == nil {
+ sv = &QueryOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "Count":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected Integer to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.Count = int32(i64)
+ }
+
+ case "Items":
+ if err := awsAwsjson10_deserializeDocumentItemList(&sv.Items, value); err != nil {
+ return err
+ }
+
+ case "LastEvaluatedKey":
+ if err := awsAwsjson10_deserializeDocumentKey(&sv.LastEvaluatedKey, value); err != nil {
+ return err
+ }
+
+ case "ScannedCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected Integer to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ScannedCount = int32(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentRestoreTableFromBackupOutput(v **RestoreTableFromBackupOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *RestoreTableFromBackupOutput
+ if *v == nil {
+ sv = &RestoreTableFromBackupOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TableDescription":
+ if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentRestoreTableToPointInTimeOutput(v **RestoreTableToPointInTimeOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *RestoreTableToPointInTimeOutput
+ if *v == nil {
+ sv = &RestoreTableToPointInTimeOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TableDescription":
+ if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentScanOutput(v **ScanOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *ScanOutput
+ if *v == nil {
+ sv = &ScanOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "Count":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected Integer to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.Count = int32(i64)
+ }
+
+ case "Items":
+ if err := awsAwsjson10_deserializeDocumentItemList(&sv.Items, value); err != nil {
+ return err
+ }
+
+ case "LastEvaluatedKey":
+ if err := awsAwsjson10_deserializeDocumentKey(&sv.LastEvaluatedKey, value); err != nil {
+ return err
+ }
+
+ case "ScannedCount":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected Integer to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ScannedCount = int32(i64)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentTransactGetItemsOutput(v **TransactGetItemsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *TransactGetItemsOutput
+ if *v == nil {
+ sv = &TransactGetItemsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "Responses":
+ if err := awsAwsjson10_deserializeDocumentItemResponseList(&sv.Responses, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentTransactWriteItemsOutput(v **TransactWriteItemsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *TransactWriteItemsOutput
+ if *v == nil {
+ sv = &TransactWriteItemsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacityMultiple(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "ItemCollectionMetrics":
+ if err := awsAwsjson10_deserializeDocumentItemCollectionMetricsPerTable(&sv.ItemCollectionMetrics, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateContinuousBackupsOutput(v **UpdateContinuousBackupsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateContinuousBackupsOutput
+ if *v == nil {
+ sv = &UpdateContinuousBackupsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ContinuousBackupsDescription":
+ if err := awsAwsjson10_deserializeDocumentContinuousBackupsDescription(&sv.ContinuousBackupsDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateContributorInsightsOutput(v **UpdateContributorInsightsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateContributorInsightsOutput
+ if *v == nil {
+ sv = &UpdateContributorInsightsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "ContributorInsightsMode":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ContributorInsightsMode to be of type string, got %T instead", value)
+ }
+ sv.ContributorInsightsMode = types.ContributorInsightsMode(jtv)
+ }
+
+ case "ContributorInsightsStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ContributorInsightsStatus to be of type string, got %T instead", value)
+ }
+ sv.ContributorInsightsStatus = types.ContributorInsightsStatus(jtv)
+ }
+
+ case "IndexName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IndexName to be of type string, got %T instead", value)
+ }
+ sv.IndexName = ptr.String(jtv)
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateGlobalTableOutput(v **UpdateGlobalTableOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateGlobalTableOutput
+ if *v == nil {
+ sv = &UpdateGlobalTableOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalTableDescription":
+ if err := awsAwsjson10_deserializeDocumentGlobalTableDescription(&sv.GlobalTableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateGlobalTableSettingsOutput(v **UpdateGlobalTableSettingsOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateGlobalTableSettingsOutput
+ if *v == nil {
+ sv = &UpdateGlobalTableSettingsOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "GlobalTableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.GlobalTableName = ptr.String(jtv)
+ }
+
+ case "ReplicaSettings":
+ if err := awsAwsjson10_deserializeDocumentReplicaSettingsDescriptionList(&sv.ReplicaSettings, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateItemOutput(v **UpdateItemOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateItemOutput
+ if *v == nil {
+ sv = &UpdateItemOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "Attributes":
+ if err := awsAwsjson10_deserializeDocumentAttributeMap(&sv.Attributes, value); err != nil {
+ return err
+ }
+
+ case "ConsumedCapacity":
+ if err := awsAwsjson10_deserializeDocumentConsumedCapacity(&sv.ConsumedCapacity, value); err != nil {
+ return err
+ }
+
+ case "ItemCollectionMetrics":
+ if err := awsAwsjson10_deserializeDocumentItemCollectionMetrics(&sv.ItemCollectionMetrics, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateKinesisStreamingDestinationOutput(v **UpdateKinesisStreamingDestinationOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateKinesisStreamingDestinationOutput
+ if *v == nil {
+ sv = &UpdateKinesisStreamingDestinationOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "DestinationStatus":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected DestinationStatus to be of type string, got %T instead", value)
+ }
+ sv.DestinationStatus = types.DestinationStatus(jtv)
+ }
+
+ case "StreamArn":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected StreamArn to be of type string, got %T instead", value)
+ }
+ sv.StreamArn = ptr.String(jtv)
+ }
+
+ case "TableName":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TableName to be of type string, got %T instead", value)
+ }
+ sv.TableName = ptr.String(jtv)
+ }
+
+ case "UpdateKinesisStreamingConfiguration":
+ if err := awsAwsjson10_deserializeDocumentUpdateKinesisStreamingConfiguration(&sv.UpdateKinesisStreamingConfiguration, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateTableOutput(v **UpdateTableOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateTableOutput
+ if *v == nil {
+ sv = &UpdateTableOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TableDescription":
+ if err := awsAwsjson10_deserializeDocumentTableDescription(&sv.TableDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateTableReplicaAutoScalingOutput(v **UpdateTableReplicaAutoScalingOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateTableReplicaAutoScalingOutput
+ if *v == nil {
+ sv = &UpdateTableReplicaAutoScalingOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TableAutoScalingDescription":
+ if err := awsAwsjson10_deserializeDocumentTableAutoScalingDescription(&sv.TableAutoScalingDescription, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+func awsAwsjson10_deserializeOpDocumentUpdateTimeToLiveOutput(v **UpdateTimeToLiveOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *UpdateTimeToLiveOutput
+ if *v == nil {
+ sv = &UpdateTimeToLiveOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "TimeToLiveSpecification":
+ if err := awsAwsjson10_deserializeDocumentTimeToLiveSpecification(&sv.TimeToLiveSpecification, value); err != nil {
+ return err
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
+type protocolErrorInfo struct {
+ Type string `json:"__type"`
+ Message string
+ Code any // nonstandard for awsjson but some services do present the type here
+}
+
+func getProtocolErrorInfo(decoder *json.Decoder) (protocolErrorInfo, error) {
+ var errInfo protocolErrorInfo
+ if err := decoder.Decode(&errInfo); err != nil {
+ if err == io.EOF {
+ return errInfo, nil
+ }
+ return errInfo, err
+ }
+
+ return errInfo, nil
+}
+
+func resolveProtocolErrorType(headerType string, bodyInfo protocolErrorInfo) (string, bool) {
+ if len(headerType) != 0 {
+ return headerType, true
+ } else if len(bodyInfo.Type) != 0 {
+ return bodyInfo.Type, true
+ } else if code, ok := bodyInfo.Code.(string); ok && len(code) != 0 {
+ return code, true
+ }
+ return "", false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go
new file mode 100644
index 000000000..53f36085a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/doc.go
@@ -0,0 +1,26 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+// Package dynamodb provides the API client, operations, and parameter types for
+// Amazon DynamoDB.
+//
+// # Amazon DynamoDB
+//
+// Amazon DynamoDB is a fully managed NoSQL database service that provides fast
+// and predictable performance with seamless scalability. DynamoDB lets you offload
+// the administrative burdens of operating and scaling a distributed database, so
+// that you don't have to worry about hardware provisioning, setup and
+// configuration, replication, software patching, or cluster scaling.
+//
+// With DynamoDB, you can create database tables that can store and retrieve any
+// amount of data, and serve any level of request traffic. You can scale up or
+// scale down your tables' throughput capacity without downtime or performance
+// degradation, and use the Amazon Web Services Management Console to monitor
+// resource utilization and performance metrics.
+//
+// DynamoDB automatically spreads the data and traffic for your tables over a
+// sufficient number of servers to handle your throughput and storage requirements,
+// while maintaining consistent and fast performance. All of your data is stored on
+// solid state disks (SSDs) and automatically replicated across multiple
+// Availability Zones in an Amazon Web Services Region, providing built-in high
+// availability and data durability.
+package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go
new file mode 100644
index 000000000..a859c043f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/endpoints.go
@@ -0,0 +1,813 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
+ internalendpoints "github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
+ "github.com/aws/smithy-go/endpoints/private/rulesfn"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+)
+
+// EndpointResolverOptions is the service endpoint resolver options
+type EndpointResolverOptions = internalendpoints.Options
+
+// EndpointResolver interface for resolving service endpoints.
+type EndpointResolver interface {
+ ResolveEndpoint(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+}
+
+var _ EndpointResolver = &internalendpoints.Resolver{}
+
+// NewDefaultEndpointResolver constructs a new service endpoint resolver
+func NewDefaultEndpointResolver() *internalendpoints.Resolver {
+ return internalendpoints.New()
+}
+
+// EndpointResolverFunc is a helper utility that wraps a function so it satisfies
+// the EndpointResolver interface. This is useful when you want to add additional
+// endpoint resolving logic, or stub out specific endpoints with custom values.
+type EndpointResolverFunc func(region string, options EndpointResolverOptions) (aws.Endpoint, error)
+
+func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return fn(region, options)
+}
+
+// EndpointResolverFromURL returns an EndpointResolver configured using the
+// provided endpoint url. By default, the resolved endpoint resolver uses the
+// client region as signing region, and the endpoint source is set to
+// EndpointSourceCustom.You can provide functional options to configure endpoint
+// values for the resolved endpoint.
+func EndpointResolverFromURL(url string, optFns ...func(*aws.Endpoint)) EndpointResolver {
+ e := aws.Endpoint{URL: url, Source: aws.EndpointSourceCustom}
+ for _, fn := range optFns {
+ fn(&e)
+ }
+
+ return EndpointResolverFunc(
+ func(region string, options EndpointResolverOptions) (aws.Endpoint, error) {
+ if len(e.SigningRegion) == 0 {
+ e.SigningRegion = region
+ }
+ return e, nil
+ },
+ )
+}
+
+type ResolveEndpoint struct {
+ Resolver EndpointResolver
+ Options EndpointResolverOptions
+}
+
+func (*ResolveEndpoint) ID() string {
+ return "ResolveEndpoint"
+}
+
+func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.Resolver == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ eo := m.Options
+ eo.Logger = middleware.GetLogger(ctx)
+
+ var endpoint aws.Endpoint
+ endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
+ if err != nil {
+ nf := (&aws.EndpointNotFoundError{})
+ if errors.As(err, &nf) {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+ return next.HandleSerialize(ctx, in)
+ }
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ req.URL, err = url.Parse(endpoint.URL)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to parse endpoint URL: %w", err)
+ }
+
+ if len(awsmiddleware.GetSigningName(ctx)) == 0 {
+ signingName := endpoint.SigningName
+ if len(signingName) == 0 {
+ signingName = "dynamodb"
+ }
+ ctx = awsmiddleware.SetSigningName(ctx, signingName)
+ }
+ ctx = awsmiddleware.SetEndpointSource(ctx, endpoint.Source)
+ ctx = smithyhttp.SetHostnameImmutable(ctx, endpoint.HostnameImmutable)
+ ctx = awsmiddleware.SetSigningRegion(ctx, endpoint.SigningRegion)
+ ctx = awsmiddleware.SetPartitionID(ctx, endpoint.PartitionID)
+ return next.HandleSerialize(ctx, in)
+}
+func addResolveEndpointMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Serialize.Insert(&ResolveEndpoint{
+ Resolver: o.EndpointResolver,
+ Options: o.EndpointOptions,
+ }, "OperationSerializer", middleware.Before)
+}
+
+func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
+ _, err := stack.Serialize.Remove((&ResolveEndpoint{}).ID())
+ return err
+}
+
+type wrappedEndpointResolver struct {
+ awsResolver aws.EndpointResolverWithOptions
+}
+
+func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
+ return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
+}
+
+type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
+
+func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, options ...interface{}) (aws.Endpoint, error) {
+ return a(service, region)
+}
+
+var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
+
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
+//
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
+ var resolver aws.EndpointResolverWithOptions
+
+ if awsResolverWithOptions != nil {
+ resolver = awsResolverWithOptions
+ } else if awsResolver != nil {
+ resolver = awsEndpointResolverAdaptor(awsResolver.ResolveEndpoint)
+ }
+
+ return &wrappedEndpointResolver{
+ awsResolver: resolver,
+ }
+}
+
+func finalizeClientEndpointResolverOptions(options *Options) {
+ options.EndpointOptions.LogDeprecated = options.ClientLogMode.IsDeprecatedUsage()
+
+ if len(options.EndpointOptions.ResolvedRegion) == 0 {
+ const fipsInfix = "-fips-"
+ const fipsPrefix = "fips-"
+ const fipsSuffix = "-fips"
+
+ if strings.Contains(options.Region, fipsInfix) ||
+ strings.Contains(options.Region, fipsPrefix) ||
+ strings.Contains(options.Region, fipsSuffix) {
+ options.EndpointOptions.ResolvedRegion = strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(
+ options.Region, fipsInfix, "-"), fipsPrefix, ""), fipsSuffix, "")
+ options.EndpointOptions.UseFIPSEndpoint = aws.FIPSEndpointStateEnabled
+ }
+ }
+
+}
+
+func resolveEndpointResolverV2(options *Options) {
+ if options.EndpointResolverV2 == nil {
+ options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+ }
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+ if cfg.BaseEndpoint != nil {
+ o.BaseEndpoint = cfg.BaseEndpoint
+ }
+
+ _, g := os.LookupEnv("AWS_ENDPOINT_URL")
+ _, s := os.LookupEnv("AWS_ENDPOINT_URL_DYNAMODB")
+
+ if g && !s {
+ return
+ }
+
+ value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "DynamoDB", cfg.ConfigSources)
+ if found && err == nil {
+ o.BaseEndpoint = &value
+ }
+}
+
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+ // The AWS region used to dispatch the request.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Region
+ Region *string
+
+ // When true, use the dual-stack endpoint. If the configured endpoint does not
+ // support dual-stack, dispatching the request MAY return an error.
+ //
+ // Defaults to
+ // false if no value is provided.
+ //
+ // AWS::UseDualStack
+ UseDualStack *bool
+
+ // When true, send this request to the FIPS-compliant regional endpoint. If the
+ // configured endpoint does not have a FIPS compliant endpoint, dispatching the
+ // request will return an error.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::UseFIPS
+ UseFIPS *bool
+
+ // Override the endpoint used to send this request
+ //
+ // Parameter is
+ // required.
+ //
+ // SDK::Endpoint
+ Endpoint *string
+
+ // The AWS AccountId used for the request.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Auth::AccountId
+ AccountId *string
+
+ // The AccountId Endpoint Mode.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Auth::AccountIdEndpointMode
+ AccountIdEndpointMode *string
+
+ // ResourceArn containing arn of resource
+ //
+ // Parameter is required.
+ ResourceArn *string
+
+ // ResourceArnList containing list of resource arns
+ //
+ // Parameter is required.
+ ResourceArnList []string
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+ if p.UseDualStack == nil {
+ return fmt.Errorf("parameter UseDualStack is required")
+ }
+
+ if p.UseFIPS == nil {
+ return fmt.Errorf("parameter UseFIPS is required")
+ }
+
+ return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameterswith default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+ if p.UseDualStack == nil {
+ p.UseDualStack = ptr.Bool(false)
+ }
+
+ if p.UseFIPS == nil {
+ p.UseFIPS = ptr.Bool(false)
+ }
+ return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+ // ResolveEndpoint attempts to resolve the endpoint with the provided options,
+ // returning the endpoint if found. Otherwise an error is returned.
+ ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+ smithyendpoints.Endpoint, error,
+ )
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+ return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+ ctx context.Context, params EndpointParameters,
+) (
+ endpoint smithyendpoints.Endpoint, err error,
+) {
+ params = params.WithDefaults()
+ if err = params.ValidateRequired(); err != nil {
+ return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+ }
+ _UseDualStack := *params.UseDualStack
+ _UseFIPS := *params.UseFIPS
+
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+ }
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+ }
+ uriString := _Endpoint
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _Region == "local" {
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and local endpoint are not supported")
+ }
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and local endpoint are not supported")
+ }
+ uriString := "http://localhost:8000"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "dynamodb")
+ smithyhttp.SetSigV4ASigningName(&sp, "dynamodb")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if _PartitionResult.SupportsFIPS == true {
+ if _PartitionResult.SupportsDualStack == true {
+ if exprVal := params.AccountIdEndpointMode; exprVal != nil {
+ _AccountIdEndpointMode := *exprVal
+ _ = _AccountIdEndpointMode
+ if _AccountIdEndpointMode == "required" {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported")
+ }
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://dynamodb-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+ }
+ }
+ if _UseFIPS == true {
+ if _PartitionResult.SupportsFIPS == true {
+ if _PartitionResult.Name == "aws-us-gov" {
+ if exprVal := params.AccountIdEndpointMode; exprVal != nil {
+ _AccountIdEndpointMode := *exprVal
+ _ = _AccountIdEndpointMode
+ if _AccountIdEndpointMode == "required" {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported")
+ }
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://dynamodb.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ if exprVal := params.AccountIdEndpointMode; exprVal != nil {
+ _AccountIdEndpointMode := *exprVal
+ _ = _AccountIdEndpointMode
+ if _AccountIdEndpointMode == "required" {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported")
+ }
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://dynamodb-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+ }
+ if _UseDualStack == true {
+ if _PartitionResult.SupportsDualStack == true {
+ if exprVal := params.AccountIdEndpointMode; exprVal != nil {
+ _AccountIdEndpointMode := *exprVal
+ _ = _AccountIdEndpointMode
+ if _AccountIdEndpointMode == "required" {
+ if !(_UseFIPS == true) {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and DualStack is enabled, but DualStack account endpoints are not supported")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported")
+ }
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://dynamodb.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+ }
+ if exprVal := params.AccountIdEndpointMode; exprVal != nil {
+ _AccountIdEndpointMode := *exprVal
+ _ = _AccountIdEndpointMode
+ if !(_AccountIdEndpointMode == "disabled") {
+ if _PartitionResult.Name == "aws" {
+ if !(_UseFIPS == true) {
+ if !(_UseDualStack == true) {
+ if exprVal := params.ResourceArn; exprVal != nil {
+ _ResourceArn := *exprVal
+ _ = _ResourceArn
+ if exprVal := awsrulesfn.ParseARN(_ResourceArn); exprVal != nil {
+ _ParsedArn := *exprVal
+ _ = _ParsedArn
+ if _ParsedArn.Service == "dynamodb" {
+ if rulesfn.IsValidHostLabel(_ParsedArn.Region, false) {
+ if _ParsedArn.Region == _Region {
+ if rulesfn.IsValidHostLabel(_ParsedArn.AccountId, false) {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_ParsedArn.AccountId)
+ out.WriteString(".ddb.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if exprVal := params.AccountIdEndpointMode; exprVal != nil {
+ _AccountIdEndpointMode := *exprVal
+ _ = _AccountIdEndpointMode
+ if !(_AccountIdEndpointMode == "disabled") {
+ if _PartitionResult.Name == "aws" {
+ if !(_UseFIPS == true) {
+ if !(_UseDualStack == true) {
+ if exprVal := params.ResourceArnList; exprVal != nil {
+ _ResourceArnList := stringSlice(exprVal)
+ _ = _ResourceArnList
+ if exprVal := _ResourceArnList.Get(0); exprVal != nil {
+ _FirstArn := *exprVal
+ _ = _FirstArn
+ if exprVal := awsrulesfn.ParseARN(_FirstArn); exprVal != nil {
+ _ParsedArn := *exprVal
+ _ = _ParsedArn
+ if _ParsedArn.Service == "dynamodb" {
+ if rulesfn.IsValidHostLabel(_ParsedArn.Region, false) {
+ if _ParsedArn.Region == _Region {
+ if rulesfn.IsValidHostLabel(_ParsedArn.AccountId, false) {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_ParsedArn.AccountId)
+ out.WriteString(".ddb.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if exprVal := params.AccountIdEndpointMode; exprVal != nil {
+ _AccountIdEndpointMode := *exprVal
+ _ = _AccountIdEndpointMode
+ if !(_AccountIdEndpointMode == "disabled") {
+ if _PartitionResult.Name == "aws" {
+ if !(_UseFIPS == true) {
+ if !(_UseDualStack == true) {
+ if exprVal := params.AccountId; exprVal != nil {
+ _AccountId := *exprVal
+ _ = _AccountId
+ if rulesfn.IsValidHostLabel(_AccountId, false) {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://")
+ out.WriteString(_AccountId)
+ out.WriteString(".ddb.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Credentials-sourced account ID parameter is invalid")
+ }
+ }
+ }
+ }
+ }
+ }
+ if exprVal := params.AccountIdEndpointMode; exprVal != nil {
+ _AccountIdEndpointMode := *exprVal
+ _ = _AccountIdEndpointMode
+ if _AccountIdEndpointMode == "required" {
+ if !(_UseFIPS == true) {
+ if !(_UseDualStack == true) {
+ if _PartitionResult.Name == "aws" {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "AccountIdEndpointMode is required but no AccountID was provided or able to be loaded")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required but account endpoints are not supported in this partition")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and DualStack is enabled, but DualStack account endpoints are not supported")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: AccountIdEndpointMode is required and FIPS is enabled, but FIPS account endpoints are not supported")
+ }
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://dynamodb.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+ bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+ params := &EndpointParameters{}
+
+ params.Region = bindRegion(options.Region)
+ params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+ params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+ params.Endpoint = options.BaseEndpoint
+ params.AccountId = resolveAccountID(getIdentity(ctx), options.AccountIDEndpointMode)
+ params.AccountIdEndpointMode = aws.String(string(options.AccountIDEndpointMode))
+
+ if b, ok := input.(endpointParamsBinder); ok {
+ b.bindEndpointParams(params)
+ }
+
+ return params
+}
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveEndpoint")
+ defer span.End()
+
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ if err := checkAccountID(getIdentity(ctx), m.options.AccountIDEndpointMode); err != nil {
+ return out, metadata, fmt.Errorf("invalid accountID set: %w", err)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.options.EndpointResolverV2 == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+ endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration",
+ func() (smithyendpoints.Endpoint, error) {
+ return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ span.SetProperty("client.call.resolved_endpoint", endpt.URI.String())
+
+ if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+ endpt.URI.RawPath = endpt.URI.Path
+ }
+ req.URL.Scheme = endpt.URI.Scheme
+ req.URL.Host = endpt.URI.Host
+ req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+ req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+ for k := range endpt.Headers {
+ req.Header.Set(k, endpt.Headers.Get(k))
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+ for _, o := range opts {
+ rscheme.SignerProperties.SetAll(&o.SignerProperties)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json
new file mode 100644
index 000000000..b2746adbd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/generated.json
@@ -0,0 +1,93 @@
+{
+ "dependencies": {
+ "github.com/aws/aws-sdk-go-v2": "v1.4.0",
+ "github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5",
+ "github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery": "v0.0.0-00010101000000-000000000000",
+ "github.com/aws/smithy-go": "v1.4.0"
+ },
+ "files": [
+ "api_client.go",
+ "api_client_test.go",
+ "api_op_BatchExecuteStatement.go",
+ "api_op_BatchGetItem.go",
+ "api_op_BatchWriteItem.go",
+ "api_op_CreateBackup.go",
+ "api_op_CreateGlobalTable.go",
+ "api_op_CreateTable.go",
+ "api_op_DeleteBackup.go",
+ "api_op_DeleteItem.go",
+ "api_op_DeleteResourcePolicy.go",
+ "api_op_DeleteTable.go",
+ "api_op_DescribeBackup.go",
+ "api_op_DescribeContinuousBackups.go",
+ "api_op_DescribeContributorInsights.go",
+ "api_op_DescribeEndpoints.go",
+ "api_op_DescribeExport.go",
+ "api_op_DescribeGlobalTable.go",
+ "api_op_DescribeGlobalTableSettings.go",
+ "api_op_DescribeImport.go",
+ "api_op_DescribeKinesisStreamingDestination.go",
+ "api_op_DescribeLimits.go",
+ "api_op_DescribeTable.go",
+ "api_op_DescribeTableReplicaAutoScaling.go",
+ "api_op_DescribeTimeToLive.go",
+ "api_op_DisableKinesisStreamingDestination.go",
+ "api_op_EnableKinesisStreamingDestination.go",
+ "api_op_ExecuteStatement.go",
+ "api_op_ExecuteTransaction.go",
+ "api_op_ExportTableToPointInTime.go",
+ "api_op_GetItem.go",
+ "api_op_GetResourcePolicy.go",
+ "api_op_ImportTable.go",
+ "api_op_ListBackups.go",
+ "api_op_ListContributorInsights.go",
+ "api_op_ListExports.go",
+ "api_op_ListGlobalTables.go",
+ "api_op_ListImports.go",
+ "api_op_ListTables.go",
+ "api_op_ListTagsOfResource.go",
+ "api_op_PutItem.go",
+ "api_op_PutResourcePolicy.go",
+ "api_op_Query.go",
+ "api_op_RestoreTableFromBackup.go",
+ "api_op_RestoreTableToPointInTime.go",
+ "api_op_Scan.go",
+ "api_op_TagResource.go",
+ "api_op_TransactGetItems.go",
+ "api_op_TransactWriteItems.go",
+ "api_op_UntagResource.go",
+ "api_op_UpdateContinuousBackups.go",
+ "api_op_UpdateContributorInsights.go",
+ "api_op_UpdateGlobalTable.go",
+ "api_op_UpdateGlobalTableSettings.go",
+ "api_op_UpdateItem.go",
+ "api_op_UpdateKinesisStreamingDestination.go",
+ "api_op_UpdateTable.go",
+ "api_op_UpdateTableReplicaAutoScaling.go",
+ "api_op_UpdateTimeToLive.go",
+ "auth.go",
+ "deserializers.go",
+ "doc.go",
+ "endpoints.go",
+ "endpoints_config_test.go",
+ "endpoints_test.go",
+ "generated.json",
+ "internal/endpoints/endpoints.go",
+ "internal/endpoints/endpoints_test.go",
+ "options.go",
+ "protocol_test.go",
+ "serializers.go",
+ "snapshot_test.go",
+ "sra_operation_order_test.go",
+ "types/enums.go",
+ "types/errors.go",
+ "types/types.go",
+ "types/types_exported_test.go",
+ "validators.go"
+ ],
+ "go": "1.22",
+ "module": "github.com/aws/aws-sdk-go-v2/service/dynamodb",
+ "unstable": false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go
new file mode 100644
index 000000000..6e1c54415
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package dynamodb
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.50.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/handwritten_paginators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/handwritten_paginators.go
new file mode 100644
index 000000000..399b13e7a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/handwritten_paginators.go
@@ -0,0 +1,88 @@
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+)
+
+// BatchGetItemPaginatorOptions is the paginator options for BatchGetItem
+type BatchGetItemPaginatorOptions struct {
+ // Set to true if pagination should stop if the service returns a pagination token
+ // that matches the most recent token provided to the service.
+ StopOnDuplicateToken bool
+}
+
+// BatchGetItemPaginator is a paginator for BatchGetItem
+type BatchGetItemPaginator struct {
+ options BatchGetItemPaginatorOptions
+ client BatchGetItemAPIClient
+ params *BatchGetItemInput
+ firstPage bool
+ requestItems map[string]types.KeysAndAttributes
+ isTruncated bool
+}
+
+// BatchGetItemAPIClient is a client that implements the BatchGetItem operation.
+type BatchGetItemAPIClient interface {
+ BatchGetItem(context.Context, *BatchGetItemInput, ...func(*Options)) (*BatchGetItemOutput, error)
+}
+
+// NewBatchGetItemPaginator returns a new BatchGetItemPaginator
+func NewBatchGetItemPaginator(client BatchGetItemAPIClient, params *BatchGetItemInput, optFns ...func(*BatchGetItemPaginatorOptions)) *BatchGetItemPaginator {
+ if params == nil {
+ params = &BatchGetItemInput{}
+ }
+
+ options := BatchGetItemPaginatorOptions{}
+
+ for _, fn := range optFns {
+ fn(&options)
+ }
+
+ return &BatchGetItemPaginator{
+ options: options,
+ client: client,
+ params: params,
+ firstPage: true,
+ requestItems: params.RequestItems,
+ }
+}
+
+// HasMorePages returns a boolean indicating whether more pages are available
+func (p *BatchGetItemPaginator) HasMorePages() bool {
+ return p.firstPage || p.isTruncated
+}
+
+// NextPage retrieves the next BatchGetItem page.
+func (p *BatchGetItemPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*BatchGetItemOutput, error) {
+ if !p.HasMorePages() {
+ return nil, fmt.Errorf("no more pages available")
+ }
+
+ params := *p.params
+ params.RequestItems = p.requestItems
+
+ result, err := p.client.BatchGetItem(ctx, ¶ms, optFns...)
+ if err != nil {
+ return nil, err
+ }
+ p.firstPage = false
+
+ prevToken := p.requestItems
+ p.isTruncated = len(result.UnprocessedKeys) != 0
+ p.requestItems = nil
+ if p.isTruncated {
+ p.requestItems = result.UnprocessedKeys
+ }
+
+ if p.options.StopOnDuplicateToken &&
+ prevToken != nil &&
+ p.requestItems != nil &&
+ awsutil.DeepEqual(prevToken, p.requestItems) {
+ p.isTruncated = false
+ }
+
+ return result, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/checksum.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/checksum.go
new file mode 100644
index 000000000..6b3171e70
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/checksum.go
@@ -0,0 +1,119 @@
+package customizations
+
+import (
+ "context"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ "net/http"
+ "strconv"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// AddValidateResponseChecksumOptions provides the options for the
+// AddValidateResponseChecksum middleware setup.
+type AddValidateResponseChecksumOptions struct {
+ Disable bool
+}
+
+// AddValidateResponseChecksum adds the Checksum to the middleware
+// stack if checksum is not disabled.
+func AddValidateResponseChecksum(stack *middleware.Stack, options AddValidateResponseChecksumOptions) error {
+ if options.Disable {
+ return nil
+ }
+
+ return stack.Deserialize.Add(&Checksum{}, middleware.After)
+}
+
+// Checksum provides a middleware to validate the DynamoDB response
+// body's integrity by comparing the computed CRC32 checksum with the value
+// provided in the HTTP response header.
+type Checksum struct{}
+
+// ID returns the middleware ID.
+func (*Checksum) ID() string { return "DynamoDB:ResponseChecksumValidation" }
+
+// HandleDeserialize implements the Deserialize middleware handle method.
+func (m *Checksum) HandleDeserialize(
+ ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ output, metadata, err = next.HandleDeserialize(ctx, input)
+ if err != nil {
+ return output, metadata, err
+ }
+
+ resp, ok := output.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return output, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("unknown response type %T", output.RawResponse),
+ }
+ }
+
+ expectChecksum, ok, err := getCRC32Checksum(resp.Header)
+ if err != nil {
+ return output, metadata, &smithy.DeserializationError{Err: err}
+ }
+
+ resp.Body = wrapCRC32ChecksumValidate(expectChecksum, resp.Body)
+
+ return output, metadata, err
+}
+
+const crc32ChecksumHeader = "X-Amz-Crc32"
+
+func getCRC32Checksum(header http.Header) (uint32, bool, error) {
+ v := header.Get(crc32ChecksumHeader)
+ if len(v) == 0 {
+ return 0, false, nil
+ }
+
+ c, err := strconv.ParseUint(v, 10, 32)
+ if err != nil {
+ return 0, false, fmt.Errorf("unable to parse checksum header %v, %w", v, err)
+ }
+
+ return uint32(c), true, nil
+}
+
+// crc32ChecksumValidate provides wrapping of an io.Reader to validate the CRC32
+// checksum of the bytes read against the expected checksum.
+type crc32ChecksumValidate struct {
+ io.Reader
+
+ closer io.Closer
+ expect uint32
+ hash hash.Hash32
+}
+
+// wrapCRC32ChecksumValidate constructs a new crc32ChecksumValidate that will
+// compute a running CRC32 checksum of the bytes read.
+func wrapCRC32ChecksumValidate(checksum uint32, reader io.ReadCloser) *crc32ChecksumValidate {
+ hash := crc32.NewIEEE()
+ return &crc32ChecksumValidate{
+ expect: checksum,
+ Reader: io.TeeReader(reader, hash),
+ closer: reader,
+ hash: hash,
+ }
+}
+
+// Close validates the wrapped reader's CRC32 checksum. Returns an error if
+// the read checksum does not match the expected checksum.
+//
+// May return an error if the wrapped io.Reader's close returns an error, if it
+// implements close.
+func (c *crc32ChecksumValidate) Close() error {
+ if actual := c.hash.Sum32(); actual != c.expect {
+ c.closer.Close()
+ return fmt.Errorf("response did not match expected checksum, %d, %d", c.expect, actual)
+ }
+
+ return c.closer.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/doc.go
new file mode 100644
index 000000000..b023f04be
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/customizations/doc.go
@@ -0,0 +1,42 @@
+/*
+Package customizations provides customizations for the Amazon DynamoDB API client.
+
+The DynamoDB API client uses two customizations, response checksum validation,
+and manual content-encoding: gzip support.
+
+# Middleware layering
+
+Checksum validation needs to be performed first in deserialization chain
+on top of gzip decompression. Since the behavior of Deserialization is
+in reverse order to the other stack steps its easier to consider that
+"after" means "before".
+
+ HTTP Response -> Checksum -> gzip decompress -> deserialize
+
+# Response checksum validation
+
+DynamoDB responses can include a X-Amz-Crc32 header with the CRC32 checksum
+value of the response body. If the response body is content-encoding: gzip, the
+checksum is of the gzipped response content.
+
+If the header is present, the SDK should validate that the response payload
+computed CRC32 checksum matches the value provided in the header. The checksum
+header is based on the original payload provided returned by the service. Which
+means that if the response is gzipped the checksum is of the gzipped response,
+not the decompressed response bytes.
+
+Customization option:
+
+ DisableValidateResponseChecksum (Enabled by Default)
+
+# Accept encoding gzip
+
+For customization around accept encoding, dynamodb client uses the middlewares
+defined at service/internal/accept-encoding. Please refer to the documentation for
+`accept-encoding` package for more details.
+
+Customization option:
+
+ EnableAcceptEncodingGzip (Disabled by Default)
+*/
+package customizations
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints/endpoints.go
new file mode 100644
index 000000000..09e2b0eed
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/internal/endpoints/endpoints.go
@@ -0,0 +1,596 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package endpoints
+
+import (
+ "github.com/aws/aws-sdk-go-v2/aws"
+ endpoints "github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"
+ "github.com/aws/smithy-go/logging"
+ "regexp"
+)
+
+// Options is the endpoint resolver configuration options
+type Options struct {
+ // Logger is a logging implementation that log events should be sent to.
+ Logger logging.Logger
+
+ // LogDeprecated indicates that deprecated endpoints should be logged to the
+ // provided logger.
+ LogDeprecated bool
+
+ // ResolvedRegion is used to override the region to be resolved, rather then the
+ // using the value passed to the ResolveEndpoint method. This value is used by the
+ // SDK to translate regions like fips-us-east-1 or us-east-1-fips to an alternative
+ // name. You must not set this value directly in your application.
+ ResolvedRegion string
+
+ // DisableHTTPS informs the resolver to return an endpoint that does not use the
+ // HTTPS scheme.
+ DisableHTTPS bool
+
+ // UseDualStackEndpoint specifies the resolver must resolve a dual-stack endpoint.
+ UseDualStackEndpoint aws.DualStackEndpointState
+
+ // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
+ UseFIPSEndpoint aws.FIPSEndpointState
+}
+
+func (o Options) GetResolvedRegion() string {
+ return o.ResolvedRegion
+}
+
+func (o Options) GetDisableHTTPS() bool {
+ return o.DisableHTTPS
+}
+
+func (o Options) GetUseDualStackEndpoint() aws.DualStackEndpointState {
+ return o.UseDualStackEndpoint
+}
+
+func (o Options) GetUseFIPSEndpoint() aws.FIPSEndpointState {
+ return o.UseFIPSEndpoint
+}
+
+func transformToSharedOptions(options Options) endpoints.Options {
+ return endpoints.Options{
+ Logger: options.Logger,
+ LogDeprecated: options.LogDeprecated,
+ ResolvedRegion: options.ResolvedRegion,
+ DisableHTTPS: options.DisableHTTPS,
+ UseDualStackEndpoint: options.UseDualStackEndpoint,
+ UseFIPSEndpoint: options.UseFIPSEndpoint,
+ }
+}
+
+// Resolver DynamoDB endpoint resolver
+type Resolver struct {
+ partitions endpoints.Partitions
+}
+
+// ResolveEndpoint resolves the service endpoint for the given region and options
+func (r *Resolver) ResolveEndpoint(region string, options Options) (endpoint aws.Endpoint, err error) {
+ if len(region) == 0 {
+ return endpoint, &aws.MissingRegionError{}
+ }
+
+ opt := transformToSharedOptions(options)
+ return r.partitions.ResolveEndpoint(region, opt)
+}
+
+// New returns a new Resolver
+func New() *Resolver {
+ return &Resolver{
+ partitions: defaultPartitions,
+ }
+}
+
+var partitionRegexp = struct {
+ Aws *regexp.Regexp
+ AwsCn *regexp.Regexp
+ AwsEusc *regexp.Regexp
+ AwsIso *regexp.Regexp
+ AwsIsoB *regexp.Regexp
+ AwsIsoE *regexp.Regexp
+ AwsIsoF *regexp.Regexp
+ AwsUsGov *regexp.Regexp
+}{
+
+ Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"),
+ AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"),
+ AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
+ AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+ AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
+ AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
+}
+
+var defaultPartitions = endpoints.Partitions{
+ {
+ ID: "aws",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "dynamodb.{region}.api.aws",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.api.aws",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "dynamodb.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.Aws,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "af-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-east-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-northeast-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-northeast-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-northeast-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-6",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-7",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-central-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ca-central-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-central-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "ca-west-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "eu-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-north-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-west-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-west-3",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "local",
+ }: endpoints.Endpoint{
+ Hostname: "localhost:8000",
+ Protocols: []string{"http"},
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "me-south-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "mx-central-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "sa-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-east-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-east-2",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-east-2-fips",
+ }: endpoints.Endpoint{
+ Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-east-2",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-west-2",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-west-2-fips",
+ }: endpoints.Endpoint{
+ Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-2",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ },
+ },
+ {
+ ID: "aws-cn",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "dynamodb.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.api.amazonwebservices.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "dynamodb.{region}.amazonaws.com.cn",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsCn,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "cn-north-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "cn-northwest-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-eusc",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "dynamodb.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsEusc,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "dynamodb.{region}.c2s.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIso,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-iso-east-1",
+ }: endpoints.Endpoint{
+ Protocols: []string{"http", "https"},
+ },
+ endpoints.EndpointKey{
+ Region: "us-iso-west-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso-b",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.sc2s.sgov.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "dynamodb.{region}.sc2s.sgov.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoB,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-isob-east-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso-e",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.cloud.adc-e.uk",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "dynamodb.{region}.cloud.adc-e.uk",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoE,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "eu-isoe-west-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso-f",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.csp.hci.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "dynamodb.{region}.csp.hci.ic.gov",
+ Protocols: []string{"http", "https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoF,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-isof-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-isof-south-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-us-gov",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.DualStackVariant,
+ }: {
+ Hostname: "dynamodb.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: endpoints.FIPSVariant | endpoints.DualStackVariant,
+ }: {
+ Hostname: "dynamodb-fips.{region}.api.aws",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "dynamodb.{region}.amazonaws.com",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsUsGov,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.us-gov-east-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-east-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "dynamodb-fips.us-gov-east-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-east-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1",
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "dynamodb-fips.us-gov-west-1.amazonaws.com",
+ },
+ endpoints.EndpointKey{
+ Region: "us-gov-west-1-fips",
+ }: endpoints.Endpoint{
+ Hostname: "dynamodb-fips.us-gov-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-gov-west-1",
+ },
+ Deprecated: aws.TrueTernary,
+ },
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go
new file mode 100644
index 000000000..743c048fb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/options.go
@@ -0,0 +1,257 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+)
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // Indicates how aws account ID is applied in endpoint2.0 routing
+ AccountIDEndpointMode aws.AccountIDEndpointMode
+
+ // The optional application specific identifier appended to the User-Agent header.
+ AppID string
+
+ // This endpoint will be given as input to an EndpointResolverV2. It is used for
+ // providing a custom base endpoint that is subject to modifications by the
+ // processing EndpointResolverV2.
+ BaseEndpoint *string
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+ // clients initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // Allows you to disable the client's validation of response integrity using CRC32
+ // checksum. Enabled by default.
+ DisableValidateResponseChecksum bool
+
+ // Allows you to enable the client's support for compressed gzip responses.
+ // Disabled by default.
+ EnableAcceptEncodingGzip bool
+
+ // Allows configuring endpoint discovery
+ EndpointDiscovery EndpointDiscoveryOptions
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ //
+ // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+ // value for this field will likely prevent you from using any endpoint-related
+ // service features released after the introduction of EndpointResolverV2 and
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
+ EndpointResolver EndpointResolver
+
+ // Resolves the endpoint used for a particular service operation. This should be
+ // used over the deprecated EndpointResolver.
+ EndpointResolverV2 EndpointResolverV2
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // Provides idempotency tokens values that will be automatically populated into
+ // idempotent API operations.
+ IdempotencyTokenProvider IdempotencyTokenProvider
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The client meter provider.
+ MeterProvider metrics.MeterProvider
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // RetryMaxAttempts specifies the maximum number attempts an API client will call
+ // an operation that fails with a retryable error. A value of 0 is ignored, and
+ // will not be used to configure the API client created default retryer, or modify
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
+ RetryMaxAttempts int
+
+ // RetryMode specifies the retry mode the API client will be created with, if
+ // Retryer option is not also specified.
+ //
+ // When creating a new API Clients this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ RetryMode aws.RetryMode
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer. The kind of
+ // default retry created by the API client can be changed with the RetryMode
+ // option.
+ Retryer aws.Retryer
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+ // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+ // should not populate this structure programmatically, or rely on the values here
+ // within your applications.
+ RuntimeEnvironment aws.RuntimeEnvironment
+
+ // The client tracer provider.
+ TracerProvider tracing.TracerProvider
+
+ // The initial DefaultsMode used when the client options were constructed. If the
+ // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ resolvedDefaultsMode aws.DefaultsMode
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // Client registry of operation interceptors.
+ Interceptors smithyhttp.InterceptorRegistry
+
+ // The auth scheme resolver which determines how to authenticate for each
+ // operation.
+ AuthSchemeResolver AuthSchemeResolver
+
+ // The list of auth schemes supported by the client.
+ AuthSchemes []smithyhttp.AuthScheme
+
+ // Priority list of preferred auth scheme names (e.g. sigv4a).
+ AuthSchemePreference []string
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+ to.Interceptors = o.Interceptors.Copy()
+
+ return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+ if schemeID == "aws.auth#sigv4" {
+ return getSigV4IdentityResolver(o)
+ }
+ if schemeID == "smithy.api#noAuth" {
+ return &smithyauth.AnonymousIdentityResolver{}
+ }
+ return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolverV2 = v
+ }
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+ }
+ return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+func ignoreAnonymousAuth(options *Options) {
+ if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+ options.Credentials = nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go
new file mode 100644
index 000000000..366d52966
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/serializers.go
@@ -0,0 +1,7458 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/encoding/httpbinding"
+ smithyjson "github.com/aws/smithy-go/encoding/json"
+ "github.com/aws/smithy-go/middleware"
+ smithytime "github.com/aws/smithy-go/time"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "math"
+ "path"
+)
+
+type awsAwsjson10_serializeOpBatchExecuteStatement struct {
+}
+
+func (*awsAwsjson10_serializeOpBatchExecuteStatement) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpBatchExecuteStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*BatchExecuteStatementInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.BatchExecuteStatement")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentBatchExecuteStatementInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpBatchGetItem struct {
+}
+
+func (*awsAwsjson10_serializeOpBatchGetItem) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpBatchGetItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*BatchGetItemInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.BatchGetItem")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentBatchGetItemInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpBatchWriteItem struct {
+}
+
+func (*awsAwsjson10_serializeOpBatchWriteItem) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpBatchWriteItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*BatchWriteItemInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.BatchWriteItem")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentBatchWriteItemInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpCreateBackup struct {
+}
+
+func (*awsAwsjson10_serializeOpCreateBackup) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpCreateBackup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateBackupInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.CreateBackup")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentCreateBackupInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpCreateGlobalTable struct {
+}
+
+func (*awsAwsjson10_serializeOpCreateGlobalTable) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpCreateGlobalTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateGlobalTableInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.CreateGlobalTable")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentCreateGlobalTableInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpCreateTable struct {
+}
+
+func (*awsAwsjson10_serializeOpCreateTable) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpCreateTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateTableInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.CreateTable")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentCreateTableInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDeleteBackup struct {
+}
+
+func (*awsAwsjson10_serializeOpDeleteBackup) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDeleteBackup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteBackupInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteBackup")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDeleteBackupInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDeleteItem struct {
+}
+
+func (*awsAwsjson10_serializeOpDeleteItem) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDeleteItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteItemInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteItem")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDeleteItemInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDeleteResourcePolicy struct {
+}
+
+func (*awsAwsjson10_serializeOpDeleteResourcePolicy) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDeleteResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteResourcePolicyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteResourcePolicy")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDeleteResourcePolicyInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDeleteTable struct {
+}
+
+func (*awsAwsjson10_serializeOpDeleteTable) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDeleteTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DeleteTableInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DeleteTable")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDeleteTableInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeBackup struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeBackup) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeBackup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeBackupInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeBackup")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeBackupInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeContinuousBackups struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeContinuousBackups) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeContinuousBackups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeContinuousBackupsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeContinuousBackups")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeContinuousBackupsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeContributorInsights struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeContributorInsights) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeContributorInsights) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeContributorInsightsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeContributorInsights")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeContributorInsightsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeEndpoints struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeEndpoints) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeEndpoints) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeEndpointsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeEndpoints")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeEndpointsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeExport struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeExport) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeExport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeExportInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeExport")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeExportInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeGlobalTable struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeGlobalTable) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeGlobalTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeGlobalTableInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeGlobalTable")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeGlobalTableInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeGlobalTableSettings struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeGlobalTableSettings) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeGlobalTableSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeGlobalTableSettingsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeGlobalTableSettings")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeGlobalTableSettingsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeImport struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeImport) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeImport) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeImportInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeImport")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeImportInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeKinesisStreamingDestination struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeKinesisStreamingDestination) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeKinesisStreamingDestinationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeKinesisStreamingDestination")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeLimits struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeLimits) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeLimits) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeLimitsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeLimits")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeLimitsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeTable struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeTable) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeTableInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeTable")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeTableInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeTableReplicaAutoScaling struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeTableReplicaAutoScaling) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeTableReplicaAutoScaling) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeTableReplicaAutoScalingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeTableReplicaAutoScaling")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeTableReplicaAutoScalingInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpDescribeTimeToLive struct {
+}
+
+func (*awsAwsjson10_serializeOpDescribeTimeToLive) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpDescribeTimeToLive) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*DescribeTimeToLiveInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DescribeTimeToLive")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentDescribeTimeToLiveInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpDisableKinesisStreamingDestination implements the
+// serialize middleware step for the DynamoDB DisableKinesisStreamingDestination
+// operation (awsJson1.0 protocol). Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpDisableKinesisStreamingDestination struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpDisableKinesisStreamingDestination) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for DisableKinesisStreamingDestination:
+// POST to "/", awsJson1.0 Content-Type, an X-Amz-Target header naming the
+// operation, and a JSON body produced from the input shape, then delegates to
+// the next serialize handler.
+func (m *awsAwsjson10_serializeOpDisableKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*DisableKinesisStreamingDestinationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.DisableKinesisStreamingDestination")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentDisableKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpEnableKinesisStreamingDestination implements the
+// serialize middleware step for the DynamoDB EnableKinesisStreamingDestination
+// operation (awsJson1.0 protocol). Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpEnableKinesisStreamingDestination struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpEnableKinesisStreamingDestination) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for EnableKinesisStreamingDestination:
+// POST to "/", awsJson1.0 Content-Type, an X-Amz-Target header naming the
+// operation, and a JSON body produced from the input shape, then delegates to
+// the next serialize handler.
+func (m *awsAwsjson10_serializeOpEnableKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*EnableKinesisStreamingDestinationInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.EnableKinesisStreamingDestination")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentEnableKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpExecuteStatement implements the serialize middleware
+// step for the DynamoDB ExecuteStatement operation (awsJson1.0 protocol).
+// Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpExecuteStatement struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpExecuteStatement) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ExecuteStatement: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpExecuteStatement) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ExecuteStatementInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ExecuteStatement")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentExecuteStatementInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpExecuteTransaction implements the serialize
+// middleware step for the DynamoDB ExecuteTransaction operation (awsJson1.0
+// protocol). Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpExecuteTransaction struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpExecuteTransaction) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ExecuteTransaction: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpExecuteTransaction) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ExecuteTransactionInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ExecuteTransaction")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentExecuteTransactionInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpExportTableToPointInTime implements the serialize
+// middleware step for the DynamoDB ExportTableToPointInTime operation
+// (awsJson1.0 protocol). Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpExportTableToPointInTime struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpExportTableToPointInTime) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ExportTableToPointInTime: POST
+// to "/", awsJson1.0 Content-Type, an X-Amz-Target header naming the
+// operation, and a JSON body produced from the input shape, then delegates to
+// the next serialize handler.
+func (m *awsAwsjson10_serializeOpExportTableToPointInTime) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ExportTableToPointInTimeInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ExportTableToPointInTime")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentExportTableToPointInTimeInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpGetItem implements the serialize middleware step for
+// the DynamoDB GetItem operation (awsJson1.0 protocol). Generated code — do
+// not edit by hand.
+type awsAwsjson10_serializeOpGetItem struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpGetItem) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for GetItem: POST to "/", awsJson1.0
+// Content-Type, an X-Amz-Target header naming the operation, and a JSON body
+// produced from the input shape, then delegates to the next serialize handler.
+func (m *awsAwsjson10_serializeOpGetItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetItemInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.GetItem")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentGetItemInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpGetResourcePolicy implements the serialize
+// middleware step for the DynamoDB GetResourcePolicy operation (awsJson1.0
+// protocol). Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpGetResourcePolicy struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpGetResourcePolicy) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for GetResourcePolicy: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpGetResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*GetResourcePolicyInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.GetResourcePolicy")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentGetResourcePolicyInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpImportTable implements the serialize middleware step
+// for the DynamoDB ImportTable operation (awsJson1.0 protocol). Generated
+// code — do not edit by hand.
+type awsAwsjson10_serializeOpImportTable struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpImportTable) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ImportTable: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpImportTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ImportTableInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ImportTable")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentImportTableInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpListBackups implements the serialize middleware step
+// for the DynamoDB ListBackups operation (awsJson1.0 protocol). Generated
+// code — do not edit by hand.
+type awsAwsjson10_serializeOpListBackups struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpListBackups) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ListBackups: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpListBackups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListBackupsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListBackups")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListBackupsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpListContributorInsights implements the serialize
+// middleware step for the DynamoDB ListContributorInsights operation
+// (awsJson1.0 protocol). Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpListContributorInsights struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpListContributorInsights) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ListContributorInsights: POST to
+// "/", awsJson1.0 Content-Type, an X-Amz-Target header naming the operation,
+// and a JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpListContributorInsights) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListContributorInsightsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListContributorInsights")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListContributorInsightsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpListExports implements the serialize middleware step
+// for the DynamoDB ListExports operation (awsJson1.0 protocol). Generated
+// code — do not edit by hand.
+type awsAwsjson10_serializeOpListExports struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpListExports) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ListExports: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpListExports) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListExportsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListExports")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListExportsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpListGlobalTables implements the serialize middleware
+// step for the DynamoDB ListGlobalTables operation (awsJson1.0 protocol).
+// Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpListGlobalTables struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpListGlobalTables) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ListGlobalTables: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpListGlobalTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListGlobalTablesInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListGlobalTables")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListGlobalTablesInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpListImports implements the serialize middleware step
+// for the DynamoDB ListImports operation (awsJson1.0 protocol). Generated
+// code — do not edit by hand.
+type awsAwsjson10_serializeOpListImports struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpListImports) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ListImports: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpListImports) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListImportsInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListImports")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListImportsInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpListTables implements the serialize middleware step
+// for the DynamoDB ListTables operation (awsJson1.0 protocol). Generated
+// code — do not edit by hand.
+type awsAwsjson10_serializeOpListTables struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpListTables) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ListTables: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpListTables) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListTablesInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListTables")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListTablesInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+// awsAwsjson10_serializeOpListTagsOfResource implements the serialize
+// middleware step for the DynamoDB ListTagsOfResource operation (awsJson1.0
+// protocol). Generated code — do not edit by hand.
+type awsAwsjson10_serializeOpListTagsOfResource struct {
+}
+
+// ID identifies this middleware within the serialize step.
+func (*awsAwsjson10_serializeOpListTagsOfResource) ID() string {
+	return "OperationSerializer"
+}
+
+// HandleSerialize builds the HTTP request for ListTagsOfResource: POST to "/",
+// awsJson1.0 Content-Type, an X-Amz-Target header naming the operation, and a
+// JSON body produced from the input shape, then delegates to the next
+// serialize handler.
+func (m *awsAwsjson10_serializeOpListTagsOfResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+	out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+	_, span := tracing.StartSpan(ctx, "OperationSerializer")
+	endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+	// The deferred calls cover the early error returns; the explicit calls at
+	// the bottom end timing/tracing before handing off to the next handler.
+	defer endTimer()
+	defer span.End()
+	request, ok := in.Request.(*smithyhttp.Request)
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+	}
+
+	input, ok := in.Parameters.(*ListTagsOfResourceInput)
+	_ = input
+	if !ok {
+		return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+	}
+
+	// RPC-style protocol: every operation posts to the service root path.
+	operationPath := "/"
+	if len(request.Request.URL.Path) == 0 {
+		request.Request.URL.Path = operationPath
+	} else {
+		request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+		if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+			request.Request.URL.Path += "/"
+		}
+	}
+	request.Request.Method = "POST"
+	httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+	if err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+	// X-Amz-Target selects the operation on the service side.
+	httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.ListTagsOfResource")
+
+	// Serialize the input shape into the JSON request body.
+	jsonEncoder := smithyjson.NewEncoder()
+	if err := awsAwsjson10_serializeOpDocumentListTagsOfResourceInput(input, jsonEncoder.Value); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+
+	if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+		return out, metadata, &smithy.SerializationError{Err: err}
+	}
+	in.Request = request
+
+	endTimer()
+	span.End()
+	return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpPutItem struct {
+}
+
+func (*awsAwsjson10_serializeOpPutItem) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpPutItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutItemInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.PutItem")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentPutItemInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpPutResourcePolicy struct {
+}
+
+func (*awsAwsjson10_serializeOpPutResourcePolicy) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpPutResourcePolicy) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*PutResourcePolicyInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.PutResourcePolicy")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentPutResourcePolicyInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpQuery struct {
+}
+
+func (*awsAwsjson10_serializeOpQuery) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpQuery) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*QueryInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.Query")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentQueryInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpRestoreTableFromBackup struct {
+}
+
+func (*awsAwsjson10_serializeOpRestoreTableFromBackup) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpRestoreTableFromBackup) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*RestoreTableFromBackupInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.RestoreTableFromBackup")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentRestoreTableFromBackupInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpRestoreTableToPointInTime struct {
+}
+
+func (*awsAwsjson10_serializeOpRestoreTableToPointInTime) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpRestoreTableToPointInTime) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*RestoreTableToPointInTimeInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.RestoreTableToPointInTime")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentRestoreTableToPointInTimeInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpScan struct {
+}
+
+func (*awsAwsjson10_serializeOpScan) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpScan) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*ScanInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.Scan")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentScanInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpTagResource struct {
+}
+
+func (*awsAwsjson10_serializeOpTagResource) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpTagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*TagResourceInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.TagResource")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentTagResourceInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpTransactGetItems struct {
+}
+
+func (*awsAwsjson10_serializeOpTransactGetItems) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpTransactGetItems) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*TransactGetItemsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.TransactGetItems")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentTransactGetItemsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpTransactWriteItems struct {
+}
+
+func (*awsAwsjson10_serializeOpTransactWriteItems) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpTransactWriteItems) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*TransactWriteItemsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.TransactWriteItems")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentTransactWriteItemsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUntagResource struct {
+}
+
+func (*awsAwsjson10_serializeOpUntagResource) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUntagResource) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UntagResourceInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UntagResource")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUntagResourceInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateContinuousBackups struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateContinuousBackups) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateContinuousBackups) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateContinuousBackupsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateContinuousBackups")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateContinuousBackupsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateContributorInsights struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateContributorInsights) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateContributorInsights) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateContributorInsightsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateContributorInsights")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateContributorInsightsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateGlobalTable struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateGlobalTable) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateGlobalTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateGlobalTableInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateGlobalTable")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateGlobalTableInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateGlobalTableSettings struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateGlobalTableSettings) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateGlobalTableSettings) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateGlobalTableSettingsInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateGlobalTableSettings")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateGlobalTableSettingsInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateItem struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateItem) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateItem) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateItemInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateItem")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateItemInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateKinesisStreamingDestination struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateKinesisStreamingDestination) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateKinesisStreamingDestination) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateKinesisStreamingDestinationInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateKinesisStreamingDestination")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateKinesisStreamingDestinationInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateTable struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateTable) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateTable) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateTableInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateTable")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateTableInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateTableReplicaAutoScaling struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateTableReplicaAutoScaling) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateTableReplicaAutoScaling) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateTableReplicaAutoScalingInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateTableReplicaAutoScaling")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateTableReplicaAutoScalingInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsjson10_serializeOpUpdateTimeToLive struct {
+}
+
+func (*awsAwsjson10_serializeOpUpdateTimeToLive) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsjson10_serializeOpUpdateTimeToLive) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*UpdateTimeToLiveInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-amz-json-1.0")
+ httpBindingEncoder.SetHeader("X-Amz-Target").String("DynamoDB_20120810.UpdateTimeToLive")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsAwsjson10_serializeOpDocumentUpdateTimeToLiveInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsAwsjson10_serializeDocumentAttributeDefinition(v *types.AttributeDefinition, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeName != nil {
+ ok := object.Key("AttributeName")
+ ok.String(*v.AttributeName)
+ }
+
+ if len(v.AttributeType) > 0 {
+ ok := object.Key("AttributeType")
+ ok.String(string(v.AttributeType))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeDefinitions(v []types.AttributeDefinition, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentAttributeDefinition(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeNameList(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeUpdates(v map[string]types.AttributeValueUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ mapVar := v[key]
+ if err := awsAwsjson10_serializeDocumentAttributeValueUpdate(&mapVar, om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeValue(v types.AttributeValue, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ switch uv := v.(type) {
+ case *types.AttributeValueMemberB:
+ av := object.Key("B")
+ av.Base64EncodeBytes(uv.Value)
+
+ case *types.AttributeValueMemberBOOL:
+ av := object.Key("BOOL")
+ av.Boolean(uv.Value)
+
+ case *types.AttributeValueMemberBS:
+ av := object.Key("BS")
+ if err := awsAwsjson10_serializeDocumentBinarySetAttributeValue(uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.AttributeValueMemberL:
+ av := object.Key("L")
+ if err := awsAwsjson10_serializeDocumentListAttributeValue(uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.AttributeValueMemberM:
+ av := object.Key("M")
+ if err := awsAwsjson10_serializeDocumentMapAttributeValue(uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.AttributeValueMemberN:
+ av := object.Key("N")
+ av.String(uv.Value)
+
+ case *types.AttributeValueMemberNS:
+ av := object.Key("NS")
+ if err := awsAwsjson10_serializeDocumentNumberSetAttributeValue(uv.Value, av); err != nil {
+ return err
+ }
+
+ case *types.AttributeValueMemberNULL:
+ av := object.Key("NULL")
+ av.Boolean(uv.Value)
+
+ case *types.AttributeValueMemberS:
+ av := object.Key("S")
+ av.String(uv.Value)
+
+ case *types.AttributeValueMemberSS:
+ av := object.Key("SS")
+ if err := awsAwsjson10_serializeDocumentStringSetAttributeValue(uv.Value, av); err != nil {
+ return err
+ }
+
+ default:
+ return fmt.Errorf("attempted to serialize unknown member type %T for union %T", uv, v)
+
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeValueList(v []types.AttributeValue, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if vv := v[i]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAttributeValueUpdate(v *types.AttributeValueUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.Action) > 0 {
+ ok := object.Key("Action")
+ ok.String(string(v.Action))
+ }
+
+ if v.Value != nil {
+ ok := object.Key("Value")
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v.Value, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAutoScalingPolicyUpdate(v *types.AutoScalingPolicyUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.PolicyName != nil {
+ ok := object.Key("PolicyName")
+ ok.String(*v.PolicyName)
+ }
+
+ if v.TargetTrackingScalingPolicyConfiguration != nil {
+ ok := object.Key("TargetTrackingScalingPolicyConfiguration")
+ if err := awsAwsjson10_serializeDocumentAutoScalingTargetTrackingScalingPolicyConfigurationUpdate(v.TargetTrackingScalingPolicyConfiguration, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v *types.AutoScalingSettingsUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AutoScalingDisabled != nil {
+ ok := object.Key("AutoScalingDisabled")
+ ok.Boolean(*v.AutoScalingDisabled)
+ }
+
+ if v.AutoScalingRoleArn != nil {
+ ok := object.Key("AutoScalingRoleArn")
+ ok.String(*v.AutoScalingRoleArn)
+ }
+
+ if v.MaximumUnits != nil {
+ ok := object.Key("MaximumUnits")
+ ok.Long(*v.MaximumUnits)
+ }
+
+ if v.MinimumUnits != nil {
+ ok := object.Key("MinimumUnits")
+ ok.Long(*v.MinimumUnits)
+ }
+
+ if v.ScalingPolicyUpdate != nil {
+ ok := object.Key("ScalingPolicyUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingPolicyUpdate(v.ScalingPolicyUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentAutoScalingTargetTrackingScalingPolicyConfigurationUpdate(v *types.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.DisableScaleIn != nil {
+ ok := object.Key("DisableScaleIn")
+ ok.Boolean(*v.DisableScaleIn)
+ }
+
+ if v.ScaleInCooldown != nil {
+ ok := object.Key("ScaleInCooldown")
+ ok.Integer(*v.ScaleInCooldown)
+ }
+
+ if v.ScaleOutCooldown != nil {
+ ok := object.Key("ScaleOutCooldown")
+ ok.Integer(*v.ScaleOutCooldown)
+ }
+
+ if v.TargetValue != nil {
+ ok := object.Key("TargetValue")
+ switch {
+ case math.IsNaN(*v.TargetValue):
+ ok.String("NaN")
+
+ case math.IsInf(*v.TargetValue, 1):
+ ok.String("Infinity")
+
+ case math.IsInf(*v.TargetValue, -1):
+ ok.String("-Infinity")
+
+ default:
+ ok.Double(*v.TargetValue)
+
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentBatchGetRequestMap(v map[string]types.KeysAndAttributes, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ mapVar := v[key]
+ if err := awsAwsjson10_serializeDocumentKeysAndAttributes(&mapVar, om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentBatchStatementRequest(v *types.BatchStatementRequest, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ConsistentRead != nil {
+ ok := object.Key("ConsistentRead")
+ ok.Boolean(*v.ConsistentRead)
+ }
+
+ if v.Parameters != nil {
+ ok := object.Key("Parameters")
+ if err := awsAwsjson10_serializeDocumentPreparedStatementParameters(v.Parameters, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.Statement != nil {
+ ok := object.Key("Statement")
+ ok.String(*v.Statement)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentBatchWriteItemRequestMap(v map[string][]types.WriteRequest, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ if vv := v[key]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentWriteRequests(v[key], om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentBinarySetAttributeValue(v [][]byte, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if vv := v[i]; vv == nil {
+ continue
+ }
+ av.Base64EncodeBytes(v[i])
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentCondition(v *types.Condition, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeValueList != nil {
+ ok := object.Key("AttributeValueList")
+ if err := awsAwsjson10_serializeDocumentAttributeValueList(v.AttributeValueList, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ComparisonOperator) > 0 {
+ ok := object.Key("ComparisonOperator")
+ ok.String(string(v.ComparisonOperator))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentConditionCheck(v *types.ConditionCheck, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ConditionExpression != nil {
+ ok := object.Key("ConditionExpression")
+ ok.String(*v.ConditionExpression)
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentCreateGlobalSecondaryIndexAction(v *types.CreateGlobalSecondaryIndexAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.KeySchema != nil {
+ ok := object.Key("KeySchema")
+ if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.OnDemandThroughput != nil {
+ ok := object.Key("OnDemandThroughput")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Projection != nil {
+ ok := object.Key("Projection")
+ if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughput != nil {
+ ok := object.Key("ProvisionedThroughput")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.WarmThroughput != nil {
+ ok := object.Key("WarmThroughput")
+ if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentCreateGlobalTableWitnessGroupMemberAction(v *types.CreateGlobalTableWitnessGroupMemberAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentCreateReplicaAction(v *types.CreateReplicaAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentCreateReplicationGroupMemberAction(v *types.CreateReplicationGroupMemberAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.GlobalSecondaryIndexes != nil {
+ ok := object.Key("GlobalSecondaryIndexes")
+ if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexList(v.GlobalSecondaryIndexes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.KMSMasterKeyId != nil {
+ ok := object.Key("KMSMasterKeyId")
+ ok.String(*v.KMSMasterKeyId)
+ }
+
+ if v.OnDemandThroughputOverride != nil {
+ ok := object.Key("OnDemandThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughputOverride != nil {
+ ok := object.Key("ProvisionedThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ if len(v.TableClassOverride) > 0 {
+ ok := object.Key("TableClassOverride")
+ ok.String(string(v.TableClassOverride))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentCsvHeaderList(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentCsvOptions(v *types.CsvOptions, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Delimiter != nil {
+ ok := object.Key("Delimiter")
+ ok.String(*v.Delimiter)
+ }
+
+ if v.HeaderList != nil {
+ ok := object.Key("HeaderList")
+ if err := awsAwsjson10_serializeDocumentCsvHeaderList(v.HeaderList, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentDelete(v *types.Delete, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ConditionExpression != nil {
+ ok := object.Key("ConditionExpression")
+ ok.String(*v.ConditionExpression)
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentDeleteGlobalSecondaryIndexAction(v *types.DeleteGlobalSecondaryIndexAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentDeleteGlobalTableWitnessGroupMemberAction(v *types.DeleteGlobalTableWitnessGroupMemberAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentDeleteReplicaAction(v *types.DeleteReplicaAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentDeleteReplicationGroupMemberAction(v *types.DeleteReplicationGroupMemberAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentDeleteRequest(v *types.DeleteRequest, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v *types.EnableKinesisStreamingConfiguration, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.ApproximateCreationDateTimePrecision) > 0 {
+ ok := object.Key("ApproximateCreationDateTimePrecision")
+ ok.String(string(v.ApproximateCreationDateTimePrecision))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentExpectedAttributeMap(v map[string]types.ExpectedAttributeValue, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ mapVar := v[key]
+ if err := awsAwsjson10_serializeDocumentExpectedAttributeValue(&mapVar, om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentExpectedAttributeValue(v *types.ExpectedAttributeValue, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeValueList != nil {
+ ok := object.Key("AttributeValueList")
+ if err := awsAwsjson10_serializeDocumentAttributeValueList(v.AttributeValueList, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ComparisonOperator) > 0 {
+ ok := object.Key("ComparisonOperator")
+ ok.String(string(v.ComparisonOperator))
+ }
+
+ if v.Exists != nil {
+ ok := object.Key("Exists")
+ ok.Boolean(*v.Exists)
+ }
+
+ if v.Value != nil {
+ ok := object.Key("Value")
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v.Value, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v map[string]string, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ om.String(v[key])
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v map[string]types.AttributeValue, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ if vv := v[key]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v[key], om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentFilterConditionMap(v map[string]types.Condition, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ mapVar := v[key]
+ if err := awsAwsjson10_serializeDocumentCondition(&mapVar, om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGet(v *types.Get, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProjectionExpression != nil {
+ ok := object.Key("ProjectionExpression")
+ ok.String(*v.ProjectionExpression)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalSecondaryIndex(v *types.GlobalSecondaryIndex, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.KeySchema != nil {
+ ok := object.Key("KeySchema")
+ if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.OnDemandThroughput != nil {
+ ok := object.Key("OnDemandThroughput")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Projection != nil {
+ ok := object.Key("Projection")
+ if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughput != nil {
+ ok := object.Key("ProvisionedThroughput")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.WarmThroughput != nil {
+ ok := object.Key("WarmThroughput")
+ if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalSecondaryIndexAutoScalingUpdate(v *types.GlobalSecondaryIndexAutoScalingUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.ProvisionedWriteCapacityAutoScalingUpdate != nil {
+ ok := object.Key("ProvisionedWriteCapacityAutoScalingUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalSecondaryIndexAutoScalingUpdateList(v []types.GlobalSecondaryIndexAutoScalingUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexAutoScalingUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v []types.GlobalSecondaryIndex, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndex(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalSecondaryIndexUpdate(v *types.GlobalSecondaryIndexUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Create != nil {
+ ok := object.Key("Create")
+ if err := awsAwsjson10_serializeDocumentCreateGlobalSecondaryIndexAction(v.Create, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Delete != nil {
+ ok := object.Key("Delete")
+ if err := awsAwsjson10_serializeDocumentDeleteGlobalSecondaryIndexAction(v.Delete, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Update != nil {
+ ok := object.Key("Update")
+ if err := awsAwsjson10_serializeDocumentUpdateGlobalSecondaryIndexAction(v.Update, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalSecondaryIndexUpdateList(v []types.GlobalSecondaryIndexUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalTableGlobalSecondaryIndexSettingsUpdate(v *types.GlobalTableGlobalSecondaryIndexSettingsUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
+ ok := object.Key("ProvisionedWriteCapacityAutoScalingSettingsUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingSettingsUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedWriteCapacityUnits != nil {
+ ok := object.Key("ProvisionedWriteCapacityUnits")
+ ok.Long(*v.ProvisionedWriteCapacityUnits)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalTableGlobalSecondaryIndexSettingsUpdateList(v []types.GlobalTableGlobalSecondaryIndexSettingsUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentGlobalTableGlobalSecondaryIndexSettingsUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalTableWitnessGroupUpdate(v *types.GlobalTableWitnessGroupUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Create != nil {
+ ok := object.Key("Create")
+ if err := awsAwsjson10_serializeDocumentCreateGlobalTableWitnessGroupMemberAction(v.Create, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Delete != nil {
+ ok := object.Key("Delete")
+ if err := awsAwsjson10_serializeDocumentDeleteGlobalTableWitnessGroupMemberAction(v.Delete, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentGlobalTableWitnessGroupUpdateList(v []types.GlobalTableWitnessGroupUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentGlobalTableWitnessGroupUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentIncrementalExportSpecification(v *types.IncrementalExportSpecification, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ExportFromTime != nil {
+ ok := object.Key("ExportFromTime")
+ ok.Double(smithytime.FormatEpochSeconds(*v.ExportFromTime))
+ }
+
+ if v.ExportToTime != nil {
+ ok := object.Key("ExportToTime")
+ ok.Double(smithytime.FormatEpochSeconds(*v.ExportToTime))
+ }
+
+ if len(v.ExportViewType) > 0 {
+ ok := object.Key("ExportViewType")
+ ok.String(string(v.ExportViewType))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentInputFormatOptions(v *types.InputFormatOptions, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Csv != nil {
+ ok := object.Key("Csv")
+ if err := awsAwsjson10_serializeDocumentCsvOptions(v.Csv, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentKey(v map[string]types.AttributeValue, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ if vv := v[key]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v[key], om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentKeyConditions(v map[string]types.Condition, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ mapVar := v[key]
+ if err := awsAwsjson10_serializeDocumentCondition(&mapVar, om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentKeyList(v []map[string]types.AttributeValue, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if vv := v[i]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentKey(v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentKeysAndAttributes(v *types.KeysAndAttributes, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributesToGet != nil {
+ ok := object.Key("AttributesToGet")
+ if err := awsAwsjson10_serializeDocumentAttributeNameList(v.AttributesToGet, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ConsistentRead != nil {
+ ok := object.Key("ConsistentRead")
+ ok.Boolean(*v.ConsistentRead)
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Keys != nil {
+ ok := object.Key("Keys")
+ if err := awsAwsjson10_serializeDocumentKeyList(v.Keys, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProjectionExpression != nil {
+ ok := object.Key("ProjectionExpression")
+ ok.String(*v.ProjectionExpression)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentKeySchema(v []types.KeySchemaElement, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentKeySchemaElement(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentKeySchemaElement(v *types.KeySchemaElement, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeName != nil {
+ ok := object.Key("AttributeName")
+ ok.String(*v.AttributeName)
+ }
+
+ if len(v.KeyType) > 0 {
+ ok := object.Key("KeyType")
+ ok.String(string(v.KeyType))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentListAttributeValue(v []types.AttributeValue, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if vv := v[i]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentLocalSecondaryIndex(v *types.LocalSecondaryIndex, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.KeySchema != nil {
+ ok := object.Key("KeySchema")
+ if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Projection != nil {
+ ok := object.Key("Projection")
+ if err := awsAwsjson10_serializeDocumentProjection(v.Projection, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentLocalSecondaryIndexList(v []types.LocalSecondaryIndex, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentLocalSecondaryIndex(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentMapAttributeValue(v map[string]types.AttributeValue, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ if vv := v[key]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v[key], om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentNonKeyAttributeNameList(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentNumberSetAttributeValue(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentOnDemandThroughput(v *types.OnDemandThroughput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.MaxReadRequestUnits != nil {
+ ok := object.Key("MaxReadRequestUnits")
+ ok.Long(*v.MaxReadRequestUnits)
+ }
+
+ if v.MaxWriteRequestUnits != nil {
+ ok := object.Key("MaxWriteRequestUnits")
+ ok.Long(*v.MaxWriteRequestUnits)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v *types.OnDemandThroughputOverride, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.MaxReadRequestUnits != nil {
+ ok := object.Key("MaxReadRequestUnits")
+ ok.Long(*v.MaxReadRequestUnits)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentParameterizedStatement(v *types.ParameterizedStatement, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Parameters != nil {
+ ok := object.Key("Parameters")
+ if err := awsAwsjson10_serializeDocumentPreparedStatementParameters(v.Parameters, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.Statement != nil {
+ ok := object.Key("Statement")
+ ok.String(*v.Statement)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentParameterizedStatements(v []types.ParameterizedStatement, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentParameterizedStatement(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentPartiQLBatchRequest(v []types.BatchStatementRequest, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentBatchStatementRequest(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentPointInTimeRecoverySpecification(v *types.PointInTimeRecoverySpecification, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.PointInTimeRecoveryEnabled != nil {
+ ok := object.Key("PointInTimeRecoveryEnabled")
+ ok.Boolean(*v.PointInTimeRecoveryEnabled)
+ }
+
+ if v.RecoveryPeriodInDays != nil {
+ ok := object.Key("RecoveryPeriodInDays")
+ ok.Integer(*v.RecoveryPeriodInDays)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentPreparedStatementParameters(v []types.AttributeValue, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if vv := v[i]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentProjection(v *types.Projection, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.NonKeyAttributes != nil {
+ ok := object.Key("NonKeyAttributes")
+ if err := awsAwsjson10_serializeDocumentNonKeyAttributeNameList(v.NonKeyAttributes, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ProjectionType) > 0 {
+ ok := object.Key("ProjectionType")
+ ok.String(string(v.ProjectionType))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentProvisionedThroughput(v *types.ProvisionedThroughput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ReadCapacityUnits != nil {
+ ok := object.Key("ReadCapacityUnits")
+ ok.Long(*v.ReadCapacityUnits)
+ }
+
+ if v.WriteCapacityUnits != nil {
+ ok := object.Key("WriteCapacityUnits")
+ ok.Long(*v.WriteCapacityUnits)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v *types.ProvisionedThroughputOverride, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ReadCapacityUnits != nil {
+ ok := object.Key("ReadCapacityUnits")
+ ok.Long(*v.ReadCapacityUnits)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentPut(v *types.Put, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ConditionExpression != nil {
+ ok := object.Key("ConditionExpression")
+ ok.String(*v.ConditionExpression)
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Item != nil {
+ ok := object.Key("Item")
+ if err := awsAwsjson10_serializeDocumentPutItemInputAttributeMap(v.Item, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentPutItemInputAttributeMap(v map[string]types.AttributeValue, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ for key := range v {
+ om := object.Key(key)
+ if vv := v[key]; vv == nil {
+ continue
+ }
+ if err := awsAwsjson10_serializeDocumentAttributeValue(v[key], om); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentPutRequest(v *types.PutRequest, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Item != nil {
+ ok := object.Key("Item")
+ if err := awsAwsjson10_serializeDocumentPutItemInputAttributeMap(v.Item, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplica(v *types.Replica, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaAutoScalingUpdate(v *types.ReplicaAutoScalingUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ if v.ReplicaGlobalSecondaryIndexUpdates != nil {
+ ok := object.Key("ReplicaGlobalSecondaryIndexUpdates")
+ if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexAutoScalingUpdateList(v.ReplicaGlobalSecondaryIndexUpdates, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ReplicaProvisionedReadCapacityAutoScalingUpdate != nil {
+ ok := object.Key("ReplicaProvisionedReadCapacityAutoScalingUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ReplicaProvisionedReadCapacityAutoScalingUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaAutoScalingUpdateList(v []types.ReplicaAutoScalingUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentReplicaAutoScalingUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndex(v *types.ReplicaGlobalSecondaryIndex, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.OnDemandThroughputOverride != nil {
+ ok := object.Key("OnDemandThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughputOverride != nil {
+ ok := object.Key("ProvisionedThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexAutoScalingUpdate(v *types.ReplicaGlobalSecondaryIndexAutoScalingUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.ProvisionedReadCapacityAutoScalingUpdate != nil {
+ ok := object.Key("ProvisionedReadCapacityAutoScalingUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedReadCapacityAutoScalingUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexAutoScalingUpdateList(v []types.ReplicaGlobalSecondaryIndexAutoScalingUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexAutoScalingUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexList(v []types.ReplicaGlobalSecondaryIndex, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndex(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexSettingsUpdate(v *types.ReplicaGlobalSecondaryIndexSettingsUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+ ok := object.Key("ProvisionedReadCapacityAutoScalingSettingsUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedReadCapacityAutoScalingSettingsUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedReadCapacityUnits != nil {
+ ok := object.Key("ProvisionedReadCapacityUnits")
+ ok.Long(*v.ProvisionedReadCapacityUnits)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexSettingsUpdateList(v []types.ReplicaGlobalSecondaryIndexSettingsUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexSettingsUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaList(v []types.Replica, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentReplica(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaSettingsUpdate(v *types.ReplicaSettingsUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ if v.ReplicaGlobalSecondaryIndexSettingsUpdate != nil {
+ ok := object.Key("ReplicaGlobalSecondaryIndexSettingsUpdate")
+ if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexSettingsUpdateList(v.ReplicaGlobalSecondaryIndexSettingsUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+ ok := object.Key("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ReplicaProvisionedReadCapacityUnits != nil {
+ ok := object.Key("ReplicaProvisionedReadCapacityUnits")
+ ok.Long(*v.ReplicaProvisionedReadCapacityUnits)
+ }
+
+ if len(v.ReplicaTableClass) > 0 {
+ ok := object.Key("ReplicaTableClass")
+ ok.String(string(v.ReplicaTableClass))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaSettingsUpdateList(v []types.ReplicaSettingsUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentReplicaSettingsUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicationGroupUpdate(v *types.ReplicationGroupUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Create != nil {
+ ok := object.Key("Create")
+ if err := awsAwsjson10_serializeDocumentCreateReplicationGroupMemberAction(v.Create, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Delete != nil {
+ ok := object.Key("Delete")
+ if err := awsAwsjson10_serializeDocumentDeleteReplicationGroupMemberAction(v.Delete, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Update != nil {
+ ok := object.Key("Update")
+ if err := awsAwsjson10_serializeDocumentUpdateReplicationGroupMemberAction(v.Update, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicationGroupUpdateList(v []types.ReplicationGroupUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentReplicationGroupUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaUpdate(v *types.ReplicaUpdate, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Create != nil {
+ ok := object.Key("Create")
+ if err := awsAwsjson10_serializeDocumentCreateReplicaAction(v.Create, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Delete != nil {
+ ok := object.Key("Delete")
+ if err := awsAwsjson10_serializeDocumentDeleteReplicaAction(v.Delete, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentReplicaUpdateList(v []types.ReplicaUpdate, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentReplicaUpdate(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentS3BucketSource(v *types.S3BucketSource, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.S3Bucket != nil {
+ ok := object.Key("S3Bucket")
+ ok.String(*v.S3Bucket)
+ }
+
+ if v.S3BucketOwner != nil {
+ ok := object.Key("S3BucketOwner")
+ ok.String(*v.S3BucketOwner)
+ }
+
+ if v.S3KeyPrefix != nil {
+ ok := object.Key("S3KeyPrefix")
+ ok.String(*v.S3KeyPrefix)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentSSESpecification(v *types.SSESpecification, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Enabled != nil {
+ ok := object.Key("Enabled")
+ ok.Boolean(*v.Enabled)
+ }
+
+ if v.KMSMasterKeyId != nil {
+ ok := object.Key("KMSMasterKeyId")
+ ok.String(*v.KMSMasterKeyId)
+ }
+
+ if len(v.SSEType) > 0 {
+ ok := object.Key("SSEType")
+ ok.String(string(v.SSEType))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentStreamSpecification(v *types.StreamSpecification, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.StreamEnabled != nil {
+ ok := object.Key("StreamEnabled")
+ ok.Boolean(*v.StreamEnabled)
+ }
+
+ if len(v.StreamViewType) > 0 {
+ ok := object.Key("StreamViewType")
+ ok.String(string(v.StreamViewType))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentStringSetAttributeValue(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTableCreationParameters(v *types.TableCreationParameters, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeDefinitions != nil {
+ ok := object.Key("AttributeDefinitions")
+ if err := awsAwsjson10_serializeDocumentAttributeDefinitions(v.AttributeDefinitions, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.BillingMode) > 0 {
+ ok := object.Key("BillingMode")
+ ok.String(string(v.BillingMode))
+ }
+
+ if v.GlobalSecondaryIndexes != nil {
+ ok := object.Key("GlobalSecondaryIndexes")
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v.GlobalSecondaryIndexes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.KeySchema != nil {
+ ok := object.Key("KeySchema")
+ if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.OnDemandThroughput != nil {
+ ok := object.Key("OnDemandThroughput")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughput != nil {
+ ok := object.Key("ProvisionedThroughput")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.SSESpecification != nil {
+ ok := object.Key("SSESpecification")
+ if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecification, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTag(v *types.Tag, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ ok.String(*v.Key)
+ }
+
+ if v.Value != nil {
+ ok := object.Key("Value")
+ ok.String(*v.Value)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTagKeyList(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTagList(v []types.Tag, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentTag(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTimeToLiveSpecification(v *types.TimeToLiveSpecification, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeName != nil {
+ ok := object.Key("AttributeName")
+ ok.String(*v.AttributeName)
+ }
+
+ if v.Enabled != nil {
+ ok := object.Key("Enabled")
+ ok.Boolean(*v.Enabled)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTransactGetItem(v *types.TransactGetItem, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Get != nil {
+ ok := object.Key("Get")
+ if err := awsAwsjson10_serializeDocumentGet(v.Get, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTransactGetItemList(v []types.TransactGetItem, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentTransactGetItem(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTransactWriteItem(v *types.TransactWriteItem, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ConditionCheck != nil {
+ ok := object.Key("ConditionCheck")
+ if err := awsAwsjson10_serializeDocumentConditionCheck(v.ConditionCheck, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Delete != nil {
+ ok := object.Key("Delete")
+ if err := awsAwsjson10_serializeDocumentDelete(v.Delete, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Put != nil {
+ ok := object.Key("Put")
+ if err := awsAwsjson10_serializeDocumentPut(v.Put, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Update != nil {
+ ok := object.Key("Update")
+ if err := awsAwsjson10_serializeDocumentUpdate(v.Update, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentTransactWriteItemList(v []types.TransactWriteItem, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentTransactWriteItem(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentUpdate(v *types.Update, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ConditionExpression != nil {
+ ok := object.Key("ConditionExpression")
+ ok.String(*v.ConditionExpression)
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ if v.UpdateExpression != nil {
+ ok := object.Key("UpdateExpression")
+ ok.String(*v.UpdateExpression)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentUpdateGlobalSecondaryIndexAction(v *types.UpdateGlobalSecondaryIndexAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.OnDemandThroughput != nil {
+ ok := object.Key("OnDemandThroughput")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughput != nil {
+ ok := object.Key("ProvisionedThroughput")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.WarmThroughput != nil {
+ ok := object.Key("WarmThroughput")
+ if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentUpdateKinesisStreamingConfiguration(v *types.UpdateKinesisStreamingConfiguration, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.ApproximateCreationDateTimePrecision) > 0 {
+ ok := object.Key("ApproximateCreationDateTimePrecision")
+ ok.String(string(v.ApproximateCreationDateTimePrecision))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentUpdateReplicationGroupMemberAction(v *types.UpdateReplicationGroupMemberAction, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.GlobalSecondaryIndexes != nil {
+ ok := object.Key("GlobalSecondaryIndexes")
+ if err := awsAwsjson10_serializeDocumentReplicaGlobalSecondaryIndexList(v.GlobalSecondaryIndexes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.KMSMasterKeyId != nil {
+ ok := object.Key("KMSMasterKeyId")
+ ok.String(*v.KMSMasterKeyId)
+ }
+
+ if v.OnDemandThroughputOverride != nil {
+ ok := object.Key("OnDemandThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughputOverride(v.OnDemandThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughputOverride != nil {
+ ok := object.Key("ProvisionedThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughputOverride(v.ProvisionedThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ if len(v.TableClassOverride) > 0 {
+ ok := object.Key("TableClassOverride")
+ ok.String(string(v.TableClassOverride))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentWarmThroughput(v *types.WarmThroughput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ReadUnitsPerSecond != nil {
+ ok := object.Key("ReadUnitsPerSecond")
+ ok.Long(*v.ReadUnitsPerSecond)
+ }
+
+ if v.WriteUnitsPerSecond != nil {
+ ok := object.Key("WriteUnitsPerSecond")
+ ok.Long(*v.WriteUnitsPerSecond)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentWriteRequest(v *types.WriteRequest, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.DeleteRequest != nil {
+ ok := object.Key("DeleteRequest")
+ if err := awsAwsjson10_serializeDocumentDeleteRequest(v.DeleteRequest, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.PutRequest != nil {
+ ok := object.Key("PutRequest")
+ if err := awsAwsjson10_serializeDocumentPutRequest(v.PutRequest, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeDocumentWriteRequests(v []types.WriteRequest, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsjson10_serializeDocumentWriteRequest(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentBatchExecuteStatementInput(v *BatchExecuteStatementInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if v.Statements != nil {
+ ok := object.Key("Statements")
+ if err := awsAwsjson10_serializeDocumentPartiQLBatchRequest(v.Statements, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentBatchGetItemInput(v *BatchGetItemInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RequestItems != nil {
+ ok := object.Key("RequestItems")
+ if err := awsAwsjson10_serializeDocumentBatchGetRequestMap(v.RequestItems, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentBatchWriteItemInput(v *BatchWriteItemInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.RequestItems != nil {
+ ok := object.Key("RequestItems")
+ if err := awsAwsjson10_serializeDocumentBatchWriteItemRequestMap(v.RequestItems, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if len(v.ReturnItemCollectionMetrics) > 0 {
+ ok := object.Key("ReturnItemCollectionMetrics")
+ ok.String(string(v.ReturnItemCollectionMetrics))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentCreateBackupInput(v *CreateBackupInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.BackupName != nil {
+ ok := object.Key("BackupName")
+ ok.String(*v.BackupName)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentCreateGlobalTableInput(v *CreateGlobalTableInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.GlobalTableName != nil {
+ ok := object.Key("GlobalTableName")
+ ok.String(*v.GlobalTableName)
+ }
+
+ if v.ReplicationGroup != nil {
+ ok := object.Key("ReplicationGroup")
+ if err := awsAwsjson10_serializeDocumentReplicaList(v.ReplicationGroup, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentCreateTableInput(v *CreateTableInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeDefinitions != nil {
+ ok := object.Key("AttributeDefinitions")
+ if err := awsAwsjson10_serializeDocumentAttributeDefinitions(v.AttributeDefinitions, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.BillingMode) > 0 {
+ ok := object.Key("BillingMode")
+ ok.String(string(v.BillingMode))
+ }
+
+ if v.DeletionProtectionEnabled != nil {
+ ok := object.Key("DeletionProtectionEnabled")
+ ok.Boolean(*v.DeletionProtectionEnabled)
+ }
+
+ if v.GlobalSecondaryIndexes != nil {
+ ok := object.Key("GlobalSecondaryIndexes")
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v.GlobalSecondaryIndexes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.KeySchema != nil {
+ ok := object.Key("KeySchema")
+ if err := awsAwsjson10_serializeDocumentKeySchema(v.KeySchema, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.LocalSecondaryIndexes != nil {
+ ok := object.Key("LocalSecondaryIndexes")
+ if err := awsAwsjson10_serializeDocumentLocalSecondaryIndexList(v.LocalSecondaryIndexes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.OnDemandThroughput != nil {
+ ok := object.Key("OnDemandThroughput")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughput != nil {
+ ok := object.Key("ProvisionedThroughput")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ResourcePolicy != nil {
+ ok := object.Key("ResourcePolicy")
+ ok.String(*v.ResourcePolicy)
+ }
+
+ if v.SSESpecification != nil {
+ ok := object.Key("SSESpecification")
+ if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecification, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.StreamSpecification != nil {
+ ok := object.Key("StreamSpecification")
+ if err := awsAwsjson10_serializeDocumentStreamSpecification(v.StreamSpecification, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.TableClass) > 0 {
+ ok := object.Key("TableClass")
+ ok.String(string(v.TableClass))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ if v.Tags != nil {
+ ok := object.Key("Tags")
+ if err := awsAwsjson10_serializeDocumentTagList(v.Tags, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.WarmThroughput != nil {
+ ok := object.Key("WarmThroughput")
+ if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDeleteBackupInput(v *DeleteBackupInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.BackupArn != nil {
+ ok := object.Key("BackupArn")
+ ok.String(*v.BackupArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDeleteItemInput(v *DeleteItemInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.ConditionalOperator) > 0 {
+ ok := object.Key("ConditionalOperator")
+ ok.String(string(v.ConditionalOperator))
+ }
+
+ if v.ConditionExpression != nil {
+ ok := object.Key("ConditionExpression")
+ ok.String(*v.ConditionExpression)
+ }
+
+ if v.Expected != nil {
+ ok := object.Key("Expected")
+ if err := awsAwsjson10_serializeDocumentExpectedAttributeMap(v.Expected, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if len(v.ReturnItemCollectionMetrics) > 0 {
+ ok := object.Key("ReturnItemCollectionMetrics")
+ ok.String(string(v.ReturnItemCollectionMetrics))
+ }
+
+ if len(v.ReturnValues) > 0 {
+ ok := object.Key("ReturnValues")
+ ok.String(string(v.ReturnValues))
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDeleteResourcePolicyInput(v *DeleteResourcePolicyInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ExpectedRevisionId != nil {
+ ok := object.Key("ExpectedRevisionId")
+ ok.String(*v.ExpectedRevisionId)
+ }
+
+ if v.ResourceArn != nil {
+ ok := object.Key("ResourceArn")
+ ok.String(*v.ResourceArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDeleteTableInput(v *DeleteTableInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeBackupInput(v *DescribeBackupInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.BackupArn != nil {
+ ok := object.Key("BackupArn")
+ ok.String(*v.BackupArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeContinuousBackupsInput(v *DescribeContinuousBackupsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeContributorInsightsInput(v *DescribeContributorInsightsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeEndpointsInput(v *DescribeEndpointsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeExportInput(v *DescribeExportInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ExportArn != nil {
+ ok := object.Key("ExportArn")
+ ok.String(*v.ExportArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeGlobalTableInput(v *DescribeGlobalTableInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.GlobalTableName != nil {
+ ok := object.Key("GlobalTableName")
+ ok.String(*v.GlobalTableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeGlobalTableSettingsInput(v *DescribeGlobalTableSettingsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.GlobalTableName != nil {
+ ok := object.Key("GlobalTableName")
+ ok.String(*v.GlobalTableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeImportInput(v *DescribeImportInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ImportArn != nil {
+ ok := object.Key("ImportArn")
+ ok.String(*v.ImportArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeKinesisStreamingDestinationInput(v *DescribeKinesisStreamingDestinationInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeLimitsInput(v *DescribeLimitsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeTableInput(v *DescribeTableInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeTableReplicaAutoScalingInput(v *DescribeTableReplicaAutoScalingInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDescribeTimeToLiveInput(v *DescribeTimeToLiveInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentDisableKinesisStreamingDestinationInput(v *DisableKinesisStreamingDestinationInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.EnableKinesisStreamingConfiguration != nil {
+ ok := object.Key("EnableKinesisStreamingConfiguration")
+ if err := awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v.EnableKinesisStreamingConfiguration, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.StreamArn != nil {
+ ok := object.Key("StreamArn")
+ ok.String(*v.StreamArn)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentEnableKinesisStreamingDestinationInput(v *EnableKinesisStreamingDestinationInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.EnableKinesisStreamingConfiguration != nil {
+ ok := object.Key("EnableKinesisStreamingConfiguration")
+ if err := awsAwsjson10_serializeDocumentEnableKinesisStreamingConfiguration(v.EnableKinesisStreamingConfiguration, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.StreamArn != nil {
+ ok := object.Key("StreamArn")
+ ok.String(*v.StreamArn)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentExecuteStatementInput(v *ExecuteStatementInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ConsistentRead != nil {
+ ok := object.Key("ConsistentRead")
+ ok.Boolean(*v.ConsistentRead)
+ }
+
+ if v.Limit != nil {
+ ok := object.Key("Limit")
+ ok.Integer(*v.Limit)
+ }
+
+ if v.NextToken != nil {
+ ok := object.Key("NextToken")
+ ok.String(*v.NextToken)
+ }
+
+ if v.Parameters != nil {
+ ok := object.Key("Parameters")
+ if err := awsAwsjson10_serializeDocumentPreparedStatementParameters(v.Parameters, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.Statement != nil {
+ ok := object.Key("Statement")
+ ok.String(*v.Statement)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentExecuteTransactionInput(v *ExecuteTransactionInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ClientRequestToken != nil {
+ ok := object.Key("ClientRequestToken")
+ ok.String(*v.ClientRequestToken)
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if v.TransactStatements != nil {
+ ok := object.Key("TransactStatements")
+ if err := awsAwsjson10_serializeDocumentParameterizedStatements(v.TransactStatements, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentExportTableToPointInTimeInput(v *ExportTableToPointInTimeInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ClientToken != nil {
+ ok := object.Key("ClientToken")
+ ok.String(*v.ClientToken)
+ }
+
+ if len(v.ExportFormat) > 0 {
+ ok := object.Key("ExportFormat")
+ ok.String(string(v.ExportFormat))
+ }
+
+ if v.ExportTime != nil {
+ ok := object.Key("ExportTime")
+ ok.Double(smithytime.FormatEpochSeconds(*v.ExportTime))
+ }
+
+ if len(v.ExportType) > 0 {
+ ok := object.Key("ExportType")
+ ok.String(string(v.ExportType))
+ }
+
+ if v.IncrementalExportSpecification != nil {
+ ok := object.Key("IncrementalExportSpecification")
+ if err := awsAwsjson10_serializeDocumentIncrementalExportSpecification(v.IncrementalExportSpecification, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.S3Bucket != nil {
+ ok := object.Key("S3Bucket")
+ ok.String(*v.S3Bucket)
+ }
+
+ if v.S3BucketOwner != nil {
+ ok := object.Key("S3BucketOwner")
+ ok.String(*v.S3BucketOwner)
+ }
+
+ if v.S3Prefix != nil {
+ ok := object.Key("S3Prefix")
+ ok.String(*v.S3Prefix)
+ }
+
+ if len(v.S3SseAlgorithm) > 0 {
+ ok := object.Key("S3SseAlgorithm")
+ ok.String(string(v.S3SseAlgorithm))
+ }
+
+ if v.S3SseKmsKeyId != nil {
+ ok := object.Key("S3SseKmsKeyId")
+ ok.String(*v.S3SseKmsKeyId)
+ }
+
+ if v.TableArn != nil {
+ ok := object.Key("TableArn")
+ ok.String(*v.TableArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentGetItemInput(v *GetItemInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributesToGet != nil {
+ ok := object.Key("AttributesToGet")
+ if err := awsAwsjson10_serializeDocumentAttributeNameList(v.AttributesToGet, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ConsistentRead != nil {
+ ok := object.Key("ConsistentRead")
+ ok.Boolean(*v.ConsistentRead)
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProjectionExpression != nil {
+ ok := object.Key("ProjectionExpression")
+ ok.String(*v.ProjectionExpression)
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentGetResourcePolicyInput(v *GetResourcePolicyInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ResourceArn != nil {
+ ok := object.Key("ResourceArn")
+ ok.String(*v.ResourceArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentImportTableInput(v *ImportTableInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ClientToken != nil {
+ ok := object.Key("ClientToken")
+ ok.String(*v.ClientToken)
+ }
+
+ if len(v.InputCompressionType) > 0 {
+ ok := object.Key("InputCompressionType")
+ ok.String(string(v.InputCompressionType))
+ }
+
+ if len(v.InputFormat) > 0 {
+ ok := object.Key("InputFormat")
+ ok.String(string(v.InputFormat))
+ }
+
+ if v.InputFormatOptions != nil {
+ ok := object.Key("InputFormatOptions")
+ if err := awsAwsjson10_serializeDocumentInputFormatOptions(v.InputFormatOptions, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.S3BucketSource != nil {
+ ok := object.Key("S3BucketSource")
+ if err := awsAwsjson10_serializeDocumentS3BucketSource(v.S3BucketSource, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.TableCreationParameters != nil {
+ ok := object.Key("TableCreationParameters")
+ if err := awsAwsjson10_serializeDocumentTableCreationParameters(v.TableCreationParameters, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentListBackupsInput(v *ListBackupsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.BackupType) > 0 {
+ ok := object.Key("BackupType")
+ ok.String(string(v.BackupType))
+ }
+
+ if v.ExclusiveStartBackupArn != nil {
+ ok := object.Key("ExclusiveStartBackupArn")
+ ok.String(*v.ExclusiveStartBackupArn)
+ }
+
+ if v.Limit != nil {
+ ok := object.Key("Limit")
+ ok.Integer(*v.Limit)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ if v.TimeRangeLowerBound != nil {
+ ok := object.Key("TimeRangeLowerBound")
+ ok.Double(smithytime.FormatEpochSeconds(*v.TimeRangeLowerBound))
+ }
+
+ if v.TimeRangeUpperBound != nil {
+ ok := object.Key("TimeRangeUpperBound")
+ ok.Double(smithytime.FormatEpochSeconds(*v.TimeRangeUpperBound))
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentListContributorInsightsInput(v *ListContributorInsightsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.MaxResults != 0 {
+ ok := object.Key("MaxResults")
+ ok.Integer(v.MaxResults)
+ }
+
+ if v.NextToken != nil {
+ ok := object.Key("NextToken")
+ ok.String(*v.NextToken)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentListExportsInput(v *ListExportsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.MaxResults != nil {
+ ok := object.Key("MaxResults")
+ ok.Integer(*v.MaxResults)
+ }
+
+ if v.NextToken != nil {
+ ok := object.Key("NextToken")
+ ok.String(*v.NextToken)
+ }
+
+ if v.TableArn != nil {
+ ok := object.Key("TableArn")
+ ok.String(*v.TableArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentListGlobalTablesInput(v *ListGlobalTablesInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ExclusiveStartGlobalTableName != nil {
+ ok := object.Key("ExclusiveStartGlobalTableName")
+ ok.String(*v.ExclusiveStartGlobalTableName)
+ }
+
+ if v.Limit != nil {
+ ok := object.Key("Limit")
+ ok.Integer(*v.Limit)
+ }
+
+ if v.RegionName != nil {
+ ok := object.Key("RegionName")
+ ok.String(*v.RegionName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentListImportsInput(v *ListImportsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.NextToken != nil {
+ ok := object.Key("NextToken")
+ ok.String(*v.NextToken)
+ }
+
+ if v.PageSize != nil {
+ ok := object.Key("PageSize")
+ ok.Integer(*v.PageSize)
+ }
+
+ if v.TableArn != nil {
+ ok := object.Key("TableArn")
+ ok.String(*v.TableArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentListTablesInput(v *ListTablesInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ExclusiveStartTableName != nil {
+ ok := object.Key("ExclusiveStartTableName")
+ ok.String(*v.ExclusiveStartTableName)
+ }
+
+ if v.Limit != nil {
+ ok := object.Key("Limit")
+ ok.Integer(*v.Limit)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentListTagsOfResourceInput(v *ListTagsOfResourceInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.NextToken != nil {
+ ok := object.Key("NextToken")
+ ok.String(*v.NextToken)
+ }
+
+ if v.ResourceArn != nil {
+ ok := object.Key("ResourceArn")
+ ok.String(*v.ResourceArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentPutItemInput(v *PutItemInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.ConditionalOperator) > 0 {
+ ok := object.Key("ConditionalOperator")
+ ok.String(string(v.ConditionalOperator))
+ }
+
+ if v.ConditionExpression != nil {
+ ok := object.Key("ConditionExpression")
+ ok.String(*v.ConditionExpression)
+ }
+
+ if v.Expected != nil {
+ ok := object.Key("Expected")
+ if err := awsAwsjson10_serializeDocumentExpectedAttributeMap(v.Expected, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Item != nil {
+ ok := object.Key("Item")
+ if err := awsAwsjson10_serializeDocumentPutItemInputAttributeMap(v.Item, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if len(v.ReturnItemCollectionMetrics) > 0 {
+ ok := object.Key("ReturnItemCollectionMetrics")
+ ok.String(string(v.ReturnItemCollectionMetrics))
+ }
+
+ if len(v.ReturnValues) > 0 {
+ ok := object.Key("ReturnValues")
+ ok.String(string(v.ReturnValues))
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentPutResourcePolicyInput(v *PutResourcePolicyInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ConfirmRemoveSelfResourceAccess {
+ ok := object.Key("ConfirmRemoveSelfResourceAccess")
+ ok.Boolean(v.ConfirmRemoveSelfResourceAccess)
+ }
+
+ if v.ExpectedRevisionId != nil {
+ ok := object.Key("ExpectedRevisionId")
+ ok.String(*v.ExpectedRevisionId)
+ }
+
+ if v.Policy != nil {
+ ok := object.Key("Policy")
+ ok.String(*v.Policy)
+ }
+
+ if v.ResourceArn != nil {
+ ok := object.Key("ResourceArn")
+ ok.String(*v.ResourceArn)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentQueryInput(v *QueryInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributesToGet != nil {
+ ok := object.Key("AttributesToGet")
+ if err := awsAwsjson10_serializeDocumentAttributeNameList(v.AttributesToGet, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ConditionalOperator) > 0 {
+ ok := object.Key("ConditionalOperator")
+ ok.String(string(v.ConditionalOperator))
+ }
+
+ if v.ConsistentRead != nil {
+ ok := object.Key("ConsistentRead")
+ ok.Boolean(*v.ConsistentRead)
+ }
+
+ if v.ExclusiveStartKey != nil {
+ ok := object.Key("ExclusiveStartKey")
+ if err := awsAwsjson10_serializeDocumentKey(v.ExclusiveStartKey, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.FilterExpression != nil {
+ ok := object.Key("FilterExpression")
+ ok.String(*v.FilterExpression)
+ }
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.KeyConditionExpression != nil {
+ ok := object.Key("KeyConditionExpression")
+ ok.String(*v.KeyConditionExpression)
+ }
+
+ if v.KeyConditions != nil {
+ ok := object.Key("KeyConditions")
+ if err := awsAwsjson10_serializeDocumentKeyConditions(v.KeyConditions, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Limit != nil {
+ ok := object.Key("Limit")
+ ok.Integer(*v.Limit)
+ }
+
+ if v.ProjectionExpression != nil {
+ ok := object.Key("ProjectionExpression")
+ ok.String(*v.ProjectionExpression)
+ }
+
+ if v.QueryFilter != nil {
+ ok := object.Key("QueryFilter")
+ if err := awsAwsjson10_serializeDocumentFilterConditionMap(v.QueryFilter, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if v.ScanIndexForward != nil {
+ ok := object.Key("ScanIndexForward")
+ ok.Boolean(*v.ScanIndexForward)
+ }
+
+ if len(v.Select) > 0 {
+ ok := object.Key("Select")
+ ok.String(string(v.Select))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentRestoreTableFromBackupInput(v *RestoreTableFromBackupInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.BackupArn != nil {
+ ok := object.Key("BackupArn")
+ ok.String(*v.BackupArn)
+ }
+
+ if len(v.BillingModeOverride) > 0 {
+ ok := object.Key("BillingModeOverride")
+ ok.String(string(v.BillingModeOverride))
+ }
+
+ if v.GlobalSecondaryIndexOverride != nil {
+ ok := object.Key("GlobalSecondaryIndexOverride")
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v.GlobalSecondaryIndexOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.LocalSecondaryIndexOverride != nil {
+ ok := object.Key("LocalSecondaryIndexOverride")
+ if err := awsAwsjson10_serializeDocumentLocalSecondaryIndexList(v.LocalSecondaryIndexOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.OnDemandThroughputOverride != nil {
+ ok := object.Key("OnDemandThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughputOverride != nil {
+ ok := object.Key("ProvisionedThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.SSESpecificationOverride != nil {
+ ok := object.Key("SSESpecificationOverride")
+ if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecificationOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.TargetTableName != nil {
+ ok := object.Key("TargetTableName")
+ ok.String(*v.TargetTableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentRestoreTableToPointInTimeInput(v *RestoreTableToPointInTimeInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.BillingModeOverride) > 0 {
+ ok := object.Key("BillingModeOverride")
+ ok.String(string(v.BillingModeOverride))
+ }
+
+ if v.GlobalSecondaryIndexOverride != nil {
+ ok := object.Key("GlobalSecondaryIndexOverride")
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexList(v.GlobalSecondaryIndexOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.LocalSecondaryIndexOverride != nil {
+ ok := object.Key("LocalSecondaryIndexOverride")
+ if err := awsAwsjson10_serializeDocumentLocalSecondaryIndexList(v.LocalSecondaryIndexOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.OnDemandThroughputOverride != nil {
+ ok := object.Key("OnDemandThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughputOverride != nil {
+ ok := object.Key("ProvisionedThroughputOverride")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughputOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.RestoreDateTime != nil {
+ ok := object.Key("RestoreDateTime")
+ ok.Double(smithytime.FormatEpochSeconds(*v.RestoreDateTime))
+ }
+
+ if v.SourceTableArn != nil {
+ ok := object.Key("SourceTableArn")
+ ok.String(*v.SourceTableArn)
+ }
+
+ if v.SourceTableName != nil {
+ ok := object.Key("SourceTableName")
+ ok.String(*v.SourceTableName)
+ }
+
+ if v.SSESpecificationOverride != nil {
+ ok := object.Key("SSESpecificationOverride")
+ if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecificationOverride, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.TargetTableName != nil {
+ ok := object.Key("TargetTableName")
+ ok.String(*v.TargetTableName)
+ }
+
+ if v.UseLatestRestorableTime != nil {
+ ok := object.Key("UseLatestRestorableTime")
+ ok.Boolean(*v.UseLatestRestorableTime)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentScanInput(v *ScanInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributesToGet != nil {
+ ok := object.Key("AttributesToGet")
+ if err := awsAwsjson10_serializeDocumentAttributeNameList(v.AttributesToGet, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ConditionalOperator) > 0 {
+ ok := object.Key("ConditionalOperator")
+ ok.String(string(v.ConditionalOperator))
+ }
+
+ if v.ConsistentRead != nil {
+ ok := object.Key("ConsistentRead")
+ ok.Boolean(*v.ConsistentRead)
+ }
+
+ if v.ExclusiveStartKey != nil {
+ ok := object.Key("ExclusiveStartKey")
+ if err := awsAwsjson10_serializeDocumentKey(v.ExclusiveStartKey, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.FilterExpression != nil {
+ ok := object.Key("FilterExpression")
+ ok.String(*v.FilterExpression)
+ }
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.Limit != nil {
+ ok := object.Key("Limit")
+ ok.Integer(*v.Limit)
+ }
+
+ if v.ProjectionExpression != nil {
+ ok := object.Key("ProjectionExpression")
+ ok.String(*v.ProjectionExpression)
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if v.ScanFilter != nil {
+ ok := object.Key("ScanFilter")
+ if err := awsAwsjson10_serializeDocumentFilterConditionMap(v.ScanFilter, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Segment != nil {
+ ok := object.Key("Segment")
+ ok.Integer(*v.Segment)
+ }
+
+ if len(v.Select) > 0 {
+ ok := object.Key("Select")
+ ok.String(string(v.Select))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ if v.TotalSegments != nil {
+ ok := object.Key("TotalSegments")
+ ok.Integer(*v.TotalSegments)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentTagResourceInput(v *TagResourceInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ResourceArn != nil {
+ ok := object.Key("ResourceArn")
+ ok.String(*v.ResourceArn)
+ }
+
+ if v.Tags != nil {
+ ok := object.Key("Tags")
+ if err := awsAwsjson10_serializeDocumentTagList(v.Tags, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentTransactGetItemsInput(v *TransactGetItemsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if v.TransactItems != nil {
+ ok := object.Key("TransactItems")
+ if err := awsAwsjson10_serializeDocumentTransactGetItemList(v.TransactItems, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentTransactWriteItemsInput(v *TransactWriteItemsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ClientRequestToken != nil {
+ ok := object.Key("ClientRequestToken")
+ ok.String(*v.ClientRequestToken)
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if len(v.ReturnItemCollectionMetrics) > 0 {
+ ok := object.Key("ReturnItemCollectionMetrics")
+ ok.String(string(v.ReturnItemCollectionMetrics))
+ }
+
+ if v.TransactItems != nil {
+ ok := object.Key("TransactItems")
+ if err := awsAwsjson10_serializeDocumentTransactWriteItemList(v.TransactItems, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUntagResourceInput(v *UntagResourceInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.ResourceArn != nil {
+ ok := object.Key("ResourceArn")
+ ok.String(*v.ResourceArn)
+ }
+
+ if v.TagKeys != nil {
+ ok := object.Key("TagKeys")
+ if err := awsAwsjson10_serializeDocumentTagKeyList(v.TagKeys, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateContinuousBackupsInput(v *UpdateContinuousBackupsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.PointInTimeRecoverySpecification != nil {
+ ok := object.Key("PointInTimeRecoverySpecification")
+ if err := awsAwsjson10_serializeDocumentPointInTimeRecoverySpecification(v.PointInTimeRecoverySpecification, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateContributorInsightsInput(v *UpdateContributorInsightsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.ContributorInsightsAction) > 0 {
+ ok := object.Key("ContributorInsightsAction")
+ ok.String(string(v.ContributorInsightsAction))
+ }
+
+ if len(v.ContributorInsightsMode) > 0 {
+ ok := object.Key("ContributorInsightsMode")
+ ok.String(string(v.ContributorInsightsMode))
+ }
+
+ if v.IndexName != nil {
+ ok := object.Key("IndexName")
+ ok.String(*v.IndexName)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateGlobalTableInput(v *UpdateGlobalTableInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.GlobalTableName != nil {
+ ok := object.Key("GlobalTableName")
+ ok.String(*v.GlobalTableName)
+ }
+
+ if v.ReplicaUpdates != nil {
+ ok := object.Key("ReplicaUpdates")
+ if err := awsAwsjson10_serializeDocumentReplicaUpdateList(v.ReplicaUpdates, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateGlobalTableSettingsInput(v *UpdateGlobalTableSettingsInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if len(v.GlobalTableBillingMode) > 0 {
+ ok := object.Key("GlobalTableBillingMode")
+ ok.String(string(v.GlobalTableBillingMode))
+ }
+
+ if v.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil {
+ ok := object.Key("GlobalTableGlobalSecondaryIndexSettingsUpdate")
+ if err := awsAwsjson10_serializeDocumentGlobalTableGlobalSecondaryIndexSettingsUpdateList(v.GlobalTableGlobalSecondaryIndexSettingsUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.GlobalTableName != nil {
+ ok := object.Key("GlobalTableName")
+ ok.String(*v.GlobalTableName)
+ }
+
+ if v.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
+ ok := object.Key("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.GlobalTableProvisionedWriteCapacityUnits != nil {
+ ok := object.Key("GlobalTableProvisionedWriteCapacityUnits")
+ ok.Long(*v.GlobalTableProvisionedWriteCapacityUnits)
+ }
+
+ if v.ReplicaSettingsUpdate != nil {
+ ok := object.Key("ReplicaSettingsUpdate")
+ if err := awsAwsjson10_serializeDocumentReplicaSettingsUpdateList(v.ReplicaSettingsUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateItemInput(v *UpdateItemInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeUpdates != nil {
+ ok := object.Key("AttributeUpdates")
+ if err := awsAwsjson10_serializeDocumentAttributeUpdates(v.AttributeUpdates, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ConditionalOperator) > 0 {
+ ok := object.Key("ConditionalOperator")
+ ok.String(string(v.ConditionalOperator))
+ }
+
+ if v.ConditionExpression != nil {
+ ok := object.Key("ConditionExpression")
+ ok.String(*v.ConditionExpression)
+ }
+
+ if v.Expected != nil {
+ ok := object.Key("Expected")
+ if err := awsAwsjson10_serializeDocumentExpectedAttributeMap(v.Expected, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeNames != nil {
+ ok := object.Key("ExpressionAttributeNames")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeNameMap(v.ExpressionAttributeNames, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ExpressionAttributeValues != nil {
+ ok := object.Key("ExpressionAttributeValues")
+ if err := awsAwsjson10_serializeDocumentExpressionAttributeValueMap(v.ExpressionAttributeValues, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.Key != nil {
+ ok := object.Key("Key")
+ if err := awsAwsjson10_serializeDocumentKey(v.Key, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.ReturnConsumedCapacity) > 0 {
+ ok := object.Key("ReturnConsumedCapacity")
+ ok.String(string(v.ReturnConsumedCapacity))
+ }
+
+ if len(v.ReturnItemCollectionMetrics) > 0 {
+ ok := object.Key("ReturnItemCollectionMetrics")
+ ok.String(string(v.ReturnItemCollectionMetrics))
+ }
+
+ if len(v.ReturnValues) > 0 {
+ ok := object.Key("ReturnValues")
+ ok.String(string(v.ReturnValues))
+ }
+
+ if len(v.ReturnValuesOnConditionCheckFailure) > 0 {
+ ok := object.Key("ReturnValuesOnConditionCheckFailure")
+ ok.String(string(v.ReturnValuesOnConditionCheckFailure))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ if v.UpdateExpression != nil {
+ ok := object.Key("UpdateExpression")
+ ok.String(*v.UpdateExpression)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateKinesisStreamingDestinationInput(v *UpdateKinesisStreamingDestinationInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.StreamArn != nil {
+ ok := object.Key("StreamArn")
+ ok.String(*v.StreamArn)
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ if v.UpdateKinesisStreamingConfiguration != nil {
+ ok := object.Key("UpdateKinesisStreamingConfiguration")
+ if err := awsAwsjson10_serializeDocumentUpdateKinesisStreamingConfiguration(v.UpdateKinesisStreamingConfiguration, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateTableInput(v *UpdateTableInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.AttributeDefinitions != nil {
+ ok := object.Key("AttributeDefinitions")
+ if err := awsAwsjson10_serializeDocumentAttributeDefinitions(v.AttributeDefinitions, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.BillingMode) > 0 {
+ ok := object.Key("BillingMode")
+ ok.String(string(v.BillingMode))
+ }
+
+ if v.DeletionProtectionEnabled != nil {
+ ok := object.Key("DeletionProtectionEnabled")
+ ok.Boolean(*v.DeletionProtectionEnabled)
+ }
+
+ if v.GlobalSecondaryIndexUpdates != nil {
+ ok := object.Key("GlobalSecondaryIndexUpdates")
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexUpdateList(v.GlobalSecondaryIndexUpdates, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.GlobalTableWitnessUpdates != nil {
+ ok := object.Key("GlobalTableWitnessUpdates")
+ if err := awsAwsjson10_serializeDocumentGlobalTableWitnessGroupUpdateList(v.GlobalTableWitnessUpdates, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.MultiRegionConsistency) > 0 {
+ ok := object.Key("MultiRegionConsistency")
+ ok.String(string(v.MultiRegionConsistency))
+ }
+
+ if v.OnDemandThroughput != nil {
+ ok := object.Key("OnDemandThroughput")
+ if err := awsAwsjson10_serializeDocumentOnDemandThroughput(v.OnDemandThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedThroughput != nil {
+ ok := object.Key("ProvisionedThroughput")
+ if err := awsAwsjson10_serializeDocumentProvisionedThroughput(v.ProvisionedThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ReplicaUpdates != nil {
+ ok := object.Key("ReplicaUpdates")
+ if err := awsAwsjson10_serializeDocumentReplicationGroupUpdateList(v.ReplicaUpdates, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.SSESpecification != nil {
+ ok := object.Key("SSESpecification")
+ if err := awsAwsjson10_serializeDocumentSSESpecification(v.SSESpecification, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.StreamSpecification != nil {
+ ok := object.Key("StreamSpecification")
+ if err := awsAwsjson10_serializeDocumentStreamSpecification(v.StreamSpecification, ok); err != nil {
+ return err
+ }
+ }
+
+ if len(v.TableClass) > 0 {
+ ok := object.Key("TableClass")
+ ok.String(string(v.TableClass))
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ if v.WarmThroughput != nil {
+ ok := object.Key("WarmThroughput")
+ if err := awsAwsjson10_serializeDocumentWarmThroughput(v.WarmThroughput, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateTableReplicaAutoScalingInput(v *UpdateTableReplicaAutoScalingInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.GlobalSecondaryIndexUpdates != nil {
+ ok := object.Key("GlobalSecondaryIndexUpdates")
+ if err := awsAwsjson10_serializeDocumentGlobalSecondaryIndexAutoScalingUpdateList(v.GlobalSecondaryIndexUpdates, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ProvisionedWriteCapacityAutoScalingUpdate != nil {
+ ok := object.Key("ProvisionedWriteCapacityAutoScalingUpdate")
+ if err := awsAwsjson10_serializeDocumentAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingUpdate, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.ReplicaUpdates != nil {
+ ok := object.Key("ReplicaUpdates")
+ if err := awsAwsjson10_serializeDocumentReplicaAutoScalingUpdateList(v.ReplicaUpdates, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ return nil
+}
+
+func awsAwsjson10_serializeOpDocumentUpdateTimeToLiveInput(v *UpdateTimeToLiveInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.TableName != nil {
+ ok := object.Key("TableName")
+ ok.String(*v.TableName)
+ }
+
+ if v.TimeToLiveSpecification != nil {
+ ok := object.Key("TimeToLiveSpecification")
+ if err := awsAwsjson10_serializeDocumentTimeToLiveSpecification(v.TimeToLiveSpecification, ok); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go
new file mode 100644
index 000000000..47d43d57e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/enums.go
@@ -0,0 +1,947 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+type ApproximateCreationDateTimePrecision string
+
+// Enum values for ApproximateCreationDateTimePrecision
+const (
+ ApproximateCreationDateTimePrecisionMillisecond ApproximateCreationDateTimePrecision = "MILLISECOND"
+ ApproximateCreationDateTimePrecisionMicrosecond ApproximateCreationDateTimePrecision = "MICROSECOND"
+)
+
+// Values returns all known values for ApproximateCreationDateTimePrecision. Note
+// that this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ApproximateCreationDateTimePrecision) Values() []ApproximateCreationDateTimePrecision {
+ return []ApproximateCreationDateTimePrecision{
+ "MILLISECOND",
+ "MICROSECOND",
+ }
+}
+
+type AttributeAction string
+
+// Enum values for AttributeAction
+const (
+ AttributeActionAdd AttributeAction = "ADD"
+ AttributeActionPut AttributeAction = "PUT"
+ AttributeActionDelete AttributeAction = "DELETE"
+)
+
+// Values returns all known values for AttributeAction. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (AttributeAction) Values() []AttributeAction {
+ return []AttributeAction{
+ "ADD",
+ "PUT",
+ "DELETE",
+ }
+}
+
+type BackupStatus string
+
+// Enum values for BackupStatus
+const (
+ BackupStatusCreating BackupStatus = "CREATING"
+ BackupStatusDeleted BackupStatus = "DELETED"
+ BackupStatusAvailable BackupStatus = "AVAILABLE"
+)
+
+// Values returns all known values for BackupStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BackupStatus) Values() []BackupStatus {
+ return []BackupStatus{
+ "CREATING",
+ "DELETED",
+ "AVAILABLE",
+ }
+}
+
+type BackupType string
+
+// Enum values for BackupType
+const (
+ BackupTypeUser BackupType = "USER"
+ BackupTypeSystem BackupType = "SYSTEM"
+ BackupTypeAwsBackup BackupType = "AWS_BACKUP"
+)
+
+// Values returns all known values for BackupType. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BackupType) Values() []BackupType {
+ return []BackupType{
+ "USER",
+ "SYSTEM",
+ "AWS_BACKUP",
+ }
+}
+
+type BackupTypeFilter string
+
+// Enum values for BackupTypeFilter
+const (
+ BackupTypeFilterUser BackupTypeFilter = "USER"
+ BackupTypeFilterSystem BackupTypeFilter = "SYSTEM"
+ BackupTypeFilterAwsBackup BackupTypeFilter = "AWS_BACKUP"
+ BackupTypeFilterAll BackupTypeFilter = "ALL"
+)
+
+// Values returns all known values for BackupTypeFilter. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BackupTypeFilter) Values() []BackupTypeFilter {
+ return []BackupTypeFilter{
+ "USER",
+ "SYSTEM",
+ "AWS_BACKUP",
+ "ALL",
+ }
+}
+
+type BatchStatementErrorCodeEnum string
+
+// Enum values for BatchStatementErrorCodeEnum
+const (
+ BatchStatementErrorCodeEnumConditionalCheckFailed BatchStatementErrorCodeEnum = "ConditionalCheckFailed"
+ BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded BatchStatementErrorCodeEnum = "ItemCollectionSizeLimitExceeded"
+ BatchStatementErrorCodeEnumRequestLimitExceeded BatchStatementErrorCodeEnum = "RequestLimitExceeded"
+ BatchStatementErrorCodeEnumValidationError BatchStatementErrorCodeEnum = "ValidationError"
+ BatchStatementErrorCodeEnumProvisionedThroughputExceeded BatchStatementErrorCodeEnum = "ProvisionedThroughputExceeded"
+ BatchStatementErrorCodeEnumTransactionConflict BatchStatementErrorCodeEnum = "TransactionConflict"
+ BatchStatementErrorCodeEnumThrottlingError BatchStatementErrorCodeEnum = "ThrottlingError"
+ BatchStatementErrorCodeEnumInternalServerError BatchStatementErrorCodeEnum = "InternalServerError"
+ BatchStatementErrorCodeEnumResourceNotFound BatchStatementErrorCodeEnum = "ResourceNotFound"
+ BatchStatementErrorCodeEnumAccessDenied BatchStatementErrorCodeEnum = "AccessDenied"
+ BatchStatementErrorCodeEnumDuplicateItem BatchStatementErrorCodeEnum = "DuplicateItem"
+)
+
+// Values returns all known values for BatchStatementErrorCodeEnum. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BatchStatementErrorCodeEnum) Values() []BatchStatementErrorCodeEnum {
+ return []BatchStatementErrorCodeEnum{
+ "ConditionalCheckFailed",
+ "ItemCollectionSizeLimitExceeded",
+ "RequestLimitExceeded",
+ "ValidationError",
+ "ProvisionedThroughputExceeded",
+ "TransactionConflict",
+ "ThrottlingError",
+ "InternalServerError",
+ "ResourceNotFound",
+ "AccessDenied",
+ "DuplicateItem",
+ }
+}
+
+type BillingMode string
+
+// Enum values for BillingMode
+const (
+ BillingModeProvisioned BillingMode = "PROVISIONED"
+ BillingModePayPerRequest BillingMode = "PAY_PER_REQUEST"
+)
+
+// Values returns all known values for BillingMode. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (BillingMode) Values() []BillingMode {
+ return []BillingMode{
+ "PROVISIONED",
+ "PAY_PER_REQUEST",
+ }
+}
+
+type ComparisonOperator string
+
+// Enum values for ComparisonOperator
+const (
+ ComparisonOperatorEq ComparisonOperator = "EQ"
+ ComparisonOperatorNe ComparisonOperator = "NE"
+ ComparisonOperatorIn ComparisonOperator = "IN"
+ ComparisonOperatorLe ComparisonOperator = "LE"
+ ComparisonOperatorLt ComparisonOperator = "LT"
+ ComparisonOperatorGe ComparisonOperator = "GE"
+ ComparisonOperatorGt ComparisonOperator = "GT"
+ ComparisonOperatorBetween ComparisonOperator = "BETWEEN"
+ ComparisonOperatorNotNull ComparisonOperator = "NOT_NULL"
+ ComparisonOperatorNull ComparisonOperator = "NULL"
+ ComparisonOperatorContains ComparisonOperator = "CONTAINS"
+ ComparisonOperatorNotContains ComparisonOperator = "NOT_CONTAINS"
+ ComparisonOperatorBeginsWith ComparisonOperator = "BEGINS_WITH"
+)
+
+// Values returns all known values for ComparisonOperator. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ComparisonOperator) Values() []ComparisonOperator {
+ return []ComparisonOperator{
+ "EQ",
+ "NE",
+ "IN",
+ "LE",
+ "LT",
+ "GE",
+ "GT",
+ "BETWEEN",
+ "NOT_NULL",
+ "NULL",
+ "CONTAINS",
+ "NOT_CONTAINS",
+ "BEGINS_WITH",
+ }
+}
+
+type ConditionalOperator string
+
+// Enum values for ConditionalOperator
+const (
+ ConditionalOperatorAnd ConditionalOperator = "AND"
+ ConditionalOperatorOr ConditionalOperator = "OR"
+)
+
+// Values returns all known values for ConditionalOperator. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ConditionalOperator) Values() []ConditionalOperator {
+ return []ConditionalOperator{
+ "AND",
+ "OR",
+ }
+}
+
+type ContinuousBackupsStatus string
+
+// Enum values for ContinuousBackupsStatus
+const (
+ ContinuousBackupsStatusEnabled ContinuousBackupsStatus = "ENABLED"
+ ContinuousBackupsStatusDisabled ContinuousBackupsStatus = "DISABLED"
+)
+
+// Values returns all known values for ContinuousBackupsStatus. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ContinuousBackupsStatus) Values() []ContinuousBackupsStatus {
+ return []ContinuousBackupsStatus{
+ "ENABLED",
+ "DISABLED",
+ }
+}
+
+type ContributorInsightsAction string
+
+// Enum values for ContributorInsightsAction
+const (
+ ContributorInsightsActionEnable ContributorInsightsAction = "ENABLE"
+ ContributorInsightsActionDisable ContributorInsightsAction = "DISABLE"
+)
+
+// Values returns all known values for ContributorInsightsAction. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ContributorInsightsAction) Values() []ContributorInsightsAction {
+ return []ContributorInsightsAction{
+ "ENABLE",
+ "DISABLE",
+ }
+}
+
+type ContributorInsightsMode string
+
+// Enum values for ContributorInsightsMode
+const (
+ ContributorInsightsModeAccessedAndThrottledKeys ContributorInsightsMode = "ACCESSED_AND_THROTTLED_KEYS"
+ ContributorInsightsModeThrottledKeys ContributorInsightsMode = "THROTTLED_KEYS"
+)
+
+// Values returns all known values for ContributorInsightsMode. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ContributorInsightsMode) Values() []ContributorInsightsMode {
+ return []ContributorInsightsMode{
+ "ACCESSED_AND_THROTTLED_KEYS",
+ "THROTTLED_KEYS",
+ }
+}
+
+type ContributorInsightsStatus string
+
+// Enum values for ContributorInsightsStatus
+const (
+ ContributorInsightsStatusEnabling ContributorInsightsStatus = "ENABLING"
+ ContributorInsightsStatusEnabled ContributorInsightsStatus = "ENABLED"
+ ContributorInsightsStatusDisabling ContributorInsightsStatus = "DISABLING"
+ ContributorInsightsStatusDisabled ContributorInsightsStatus = "DISABLED"
+ ContributorInsightsStatusFailed ContributorInsightsStatus = "FAILED"
+)
+
+// Values returns all known values for ContributorInsightsStatus. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ContributorInsightsStatus) Values() []ContributorInsightsStatus {
+ return []ContributorInsightsStatus{
+ "ENABLING",
+ "ENABLED",
+ "DISABLING",
+ "DISABLED",
+ "FAILED",
+ }
+}
+
+type DestinationStatus string
+
+// Enum values for DestinationStatus
+const (
+ DestinationStatusEnabling DestinationStatus = "ENABLING"
+ DestinationStatusActive DestinationStatus = "ACTIVE"
+ DestinationStatusDisabling DestinationStatus = "DISABLING"
+ DestinationStatusDisabled DestinationStatus = "DISABLED"
+ DestinationStatusEnableFailed DestinationStatus = "ENABLE_FAILED"
+ DestinationStatusUpdating DestinationStatus = "UPDATING"
+)
+
+// Values returns all known values for DestinationStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (DestinationStatus) Values() []DestinationStatus {
+ return []DestinationStatus{
+ "ENABLING",
+ "ACTIVE",
+ "DISABLING",
+ "DISABLED",
+ "ENABLE_FAILED",
+ "UPDATING",
+ }
+}
+
+type ExportFormat string
+
+// Enum values for ExportFormat
+const (
+ ExportFormatDynamodbJson ExportFormat = "DYNAMODB_JSON"
+ ExportFormatIon ExportFormat = "ION"
+)
+
+// Values returns all known values for ExportFormat. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ExportFormat) Values() []ExportFormat {
+ return []ExportFormat{
+ "DYNAMODB_JSON",
+ "ION",
+ }
+}
+
+type ExportStatus string
+
+// Enum values for ExportStatus
+const (
+ ExportStatusInProgress ExportStatus = "IN_PROGRESS"
+ ExportStatusCompleted ExportStatus = "COMPLETED"
+ ExportStatusFailed ExportStatus = "FAILED"
+)
+
+// Values returns all known values for ExportStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ExportStatus) Values() []ExportStatus {
+ return []ExportStatus{
+ "IN_PROGRESS",
+ "COMPLETED",
+ "FAILED",
+ }
+}
+
+type ExportType string
+
+// Enum values for ExportType
+const (
+ ExportTypeFullExport ExportType = "FULL_EXPORT"
+ ExportTypeIncrementalExport ExportType = "INCREMENTAL_EXPORT"
+)
+
+// Values returns all known values for ExportType. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ExportType) Values() []ExportType {
+ return []ExportType{
+ "FULL_EXPORT",
+ "INCREMENTAL_EXPORT",
+ }
+}
+
+type ExportViewType string
+
+// Enum values for ExportViewType
+const (
+ ExportViewTypeNewImage ExportViewType = "NEW_IMAGE"
+ ExportViewTypeNewAndOldImages ExportViewType = "NEW_AND_OLD_IMAGES"
+)
+
+// Values returns all known values for ExportViewType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ExportViewType) Values() []ExportViewType {
+ return []ExportViewType{
+ "NEW_IMAGE",
+ "NEW_AND_OLD_IMAGES",
+ }
+}
+
+type GlobalTableStatus string
+
+// Enum values for GlobalTableStatus
+const (
+ GlobalTableStatusCreating GlobalTableStatus = "CREATING"
+ GlobalTableStatusActive GlobalTableStatus = "ACTIVE"
+ GlobalTableStatusDeleting GlobalTableStatus = "DELETING"
+ GlobalTableStatusUpdating GlobalTableStatus = "UPDATING"
+)
+
+// Values returns all known values for GlobalTableStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (GlobalTableStatus) Values() []GlobalTableStatus {
+ return []GlobalTableStatus{
+ "CREATING",
+ "ACTIVE",
+ "DELETING",
+ "UPDATING",
+ }
+}
+
+type ImportStatus string
+
+// Enum values for ImportStatus
+const (
+ ImportStatusInProgress ImportStatus = "IN_PROGRESS"
+ ImportStatusCompleted ImportStatus = "COMPLETED"
+ ImportStatusCancelling ImportStatus = "CANCELLING"
+ ImportStatusCancelled ImportStatus = "CANCELLED"
+ ImportStatusFailed ImportStatus = "FAILED"
+)
+
+// Values returns all known values for ImportStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ImportStatus) Values() []ImportStatus {
+ return []ImportStatus{
+ "IN_PROGRESS",
+ "COMPLETED",
+ "CANCELLING",
+ "CANCELLED",
+ "FAILED",
+ }
+}
+
+type IndexStatus string
+
+// Enum values for IndexStatus
+const (
+ IndexStatusCreating IndexStatus = "CREATING"
+ IndexStatusUpdating IndexStatus = "UPDATING"
+ IndexStatusDeleting IndexStatus = "DELETING"
+ IndexStatusActive IndexStatus = "ACTIVE"
+)
+
+// Values returns all known values for IndexStatus. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (IndexStatus) Values() []IndexStatus {
+ return []IndexStatus{
+ "CREATING",
+ "UPDATING",
+ "DELETING",
+ "ACTIVE",
+ }
+}
+
+type InputCompressionType string
+
+// Enum values for InputCompressionType
+const (
+ InputCompressionTypeGzip InputCompressionType = "GZIP"
+ InputCompressionTypeZstd InputCompressionType = "ZSTD"
+ InputCompressionTypeNone InputCompressionType = "NONE"
+)
+
+// Values returns all known values for InputCompressionType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (InputCompressionType) Values() []InputCompressionType {
+ return []InputCompressionType{
+ "GZIP",
+ "ZSTD",
+ "NONE",
+ }
+}
+
+type InputFormat string
+
+// Enum values for InputFormat
+const (
+ InputFormatDynamodbJson InputFormat = "DYNAMODB_JSON"
+ InputFormatIon InputFormat = "ION"
+ InputFormatCsv InputFormat = "CSV"
+)
+
+// Values returns all known values for InputFormat. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (InputFormat) Values() []InputFormat {
+ return []InputFormat{
+ "DYNAMODB_JSON",
+ "ION",
+ "CSV",
+ }
+}
+
+type KeyType string
+
+// Enum values for KeyType
+const (
+ KeyTypeHash KeyType = "HASH"
+ KeyTypeRange KeyType = "RANGE"
+)
+
+// Values returns all known values for KeyType. Note that this can be expanded in
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (KeyType) Values() []KeyType {
+ return []KeyType{
+ "HASH",
+ "RANGE",
+ }
+}
+
+type MultiRegionConsistency string
+
+// Enum values for MultiRegionConsistency
+const (
+ MultiRegionConsistencyEventual MultiRegionConsistency = "EVENTUAL"
+ MultiRegionConsistencyStrong MultiRegionConsistency = "STRONG"
+)
+
+// Values returns all known values for MultiRegionConsistency. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (MultiRegionConsistency) Values() []MultiRegionConsistency {
+ return []MultiRegionConsistency{
+ "EVENTUAL",
+ "STRONG",
+ }
+}
+
+type PointInTimeRecoveryStatus string
+
+// Enum values for PointInTimeRecoveryStatus
+const (
+ PointInTimeRecoveryStatusEnabled PointInTimeRecoveryStatus = "ENABLED"
+ PointInTimeRecoveryStatusDisabled PointInTimeRecoveryStatus = "DISABLED"
+)
+
+// Values returns all known values for PointInTimeRecoveryStatus. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (PointInTimeRecoveryStatus) Values() []PointInTimeRecoveryStatus {
+ return []PointInTimeRecoveryStatus{
+ "ENABLED",
+ "DISABLED",
+ }
+}
+
+type ProjectionType string
+
+// Enum values for ProjectionType
+const (
+ ProjectionTypeAll ProjectionType = "ALL"
+ ProjectionTypeKeysOnly ProjectionType = "KEYS_ONLY"
+ ProjectionTypeInclude ProjectionType = "INCLUDE"
+)
+
+// Values returns all known values for ProjectionType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ProjectionType) Values() []ProjectionType {
+ return []ProjectionType{
+ "ALL",
+ "KEYS_ONLY",
+ "INCLUDE",
+ }
+}
+
+type ReplicaStatus string
+
+// Enum values for ReplicaStatus
+const (
+ ReplicaStatusCreating ReplicaStatus = "CREATING"
+ ReplicaStatusCreationFailed ReplicaStatus = "CREATION_FAILED"
+ ReplicaStatusUpdating ReplicaStatus = "UPDATING"
+ ReplicaStatusDeleting ReplicaStatus = "DELETING"
+ ReplicaStatusActive ReplicaStatus = "ACTIVE"
+ ReplicaStatusRegionDisabled ReplicaStatus = "REGION_DISABLED"
+ ReplicaStatusInaccessibleEncryptionCredentials ReplicaStatus = "INACCESSIBLE_ENCRYPTION_CREDENTIALS"
+ ReplicaStatusArchiving ReplicaStatus = "ARCHIVING"
+ ReplicaStatusArchived ReplicaStatus = "ARCHIVED"
+ ReplicaStatusReplicationNotAuthorized ReplicaStatus = "REPLICATION_NOT_AUTHORIZED"
+)
+
+// Values returns all known values for ReplicaStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReplicaStatus) Values() []ReplicaStatus {
+ return []ReplicaStatus{
+ "CREATING",
+ "CREATION_FAILED",
+ "UPDATING",
+ "DELETING",
+ "ACTIVE",
+ "REGION_DISABLED",
+ "INACCESSIBLE_ENCRYPTION_CREDENTIALS",
+ "ARCHIVING",
+ "ARCHIVED",
+ "REPLICATION_NOT_AUTHORIZED",
+ }
+}
+
+type ReturnConsumedCapacity string
+
+// Enum values for ReturnConsumedCapacity
+const (
+ ReturnConsumedCapacityIndexes ReturnConsumedCapacity = "INDEXES"
+ ReturnConsumedCapacityTotal ReturnConsumedCapacity = "TOTAL"
+ ReturnConsumedCapacityNone ReturnConsumedCapacity = "NONE"
+)
+
+// Values returns all known values for ReturnConsumedCapacity. Note that this can
+// be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReturnConsumedCapacity) Values() []ReturnConsumedCapacity {
+ return []ReturnConsumedCapacity{
+ "INDEXES",
+ "TOTAL",
+ "NONE",
+ }
+}
+
+type ReturnItemCollectionMetrics string
+
+// Enum values for ReturnItemCollectionMetrics
+const (
+ ReturnItemCollectionMetricsSize ReturnItemCollectionMetrics = "SIZE"
+ ReturnItemCollectionMetricsNone ReturnItemCollectionMetrics = "NONE"
+)
+
+// Values returns all known values for ReturnItemCollectionMetrics. Note that this
+// can be expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReturnItemCollectionMetrics) Values() []ReturnItemCollectionMetrics {
+ return []ReturnItemCollectionMetrics{
+ "SIZE",
+ "NONE",
+ }
+}
+
+type ReturnValue string
+
+// Enum values for ReturnValue
+const (
+ ReturnValueNone ReturnValue = "NONE"
+ ReturnValueAllOld ReturnValue = "ALL_OLD"
+ ReturnValueUpdatedOld ReturnValue = "UPDATED_OLD"
+ ReturnValueAllNew ReturnValue = "ALL_NEW"
+ ReturnValueUpdatedNew ReturnValue = "UPDATED_NEW"
+)
+
+// Values returns all known values for ReturnValue. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReturnValue) Values() []ReturnValue {
+ return []ReturnValue{
+ "NONE",
+ "ALL_OLD",
+ "UPDATED_OLD",
+ "ALL_NEW",
+ "UPDATED_NEW",
+ }
+}
+
+type ReturnValuesOnConditionCheckFailure string
+
+// Enum values for ReturnValuesOnConditionCheckFailure
+const (
+ ReturnValuesOnConditionCheckFailureAllOld ReturnValuesOnConditionCheckFailure = "ALL_OLD"
+ ReturnValuesOnConditionCheckFailureNone ReturnValuesOnConditionCheckFailure = "NONE"
+)
+
+// Values returns all known values for ReturnValuesOnConditionCheckFailure. Note
+// that this can be expanded in the future, and so it is only as up to date as the
+// client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ReturnValuesOnConditionCheckFailure) Values() []ReturnValuesOnConditionCheckFailure {
+ return []ReturnValuesOnConditionCheckFailure{
+ "ALL_OLD",
+ "NONE",
+ }
+}
+
+type S3SseAlgorithm string
+
+// Enum values for S3SseAlgorithm
+const (
+ S3SseAlgorithmAes256 S3SseAlgorithm = "AES256"
+ S3SseAlgorithmKms S3SseAlgorithm = "KMS"
+)
+
+// Values returns all known values for S3SseAlgorithm. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (S3SseAlgorithm) Values() []S3SseAlgorithm {
+ return []S3SseAlgorithm{
+ "AES256",
+ "KMS",
+ }
+}
+
+type ScalarAttributeType string
+
+// Enum values for ScalarAttributeType
+const (
+ ScalarAttributeTypeS ScalarAttributeType = "S"
+ ScalarAttributeTypeN ScalarAttributeType = "N"
+ ScalarAttributeTypeB ScalarAttributeType = "B"
+)
+
+// Values returns all known values for ScalarAttributeType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (ScalarAttributeType) Values() []ScalarAttributeType {
+ return []ScalarAttributeType{
+ "S",
+ "N",
+ "B",
+ }
+}
+
+type Select string
+
+// Enum values for Select
+const (
+ SelectAllAttributes Select = "ALL_ATTRIBUTES"
+ SelectAllProjectedAttributes Select = "ALL_PROJECTED_ATTRIBUTES"
+ SelectSpecificAttributes Select = "SPECIFIC_ATTRIBUTES"
+ SelectCount Select = "COUNT"
+)
+
+// Values returns all known values for Select. Note that this can be expanded in
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (Select) Values() []Select {
+ return []Select{
+ "ALL_ATTRIBUTES",
+ "ALL_PROJECTED_ATTRIBUTES",
+ "SPECIFIC_ATTRIBUTES",
+ "COUNT",
+ }
+}
+
+type SSEStatus string
+
+// Enum values for SSEStatus
+const (
+ SSEStatusEnabling SSEStatus = "ENABLING"
+ SSEStatusEnabled SSEStatus = "ENABLED"
+ SSEStatusDisabling SSEStatus = "DISABLING"
+ SSEStatusDisabled SSEStatus = "DISABLED"
+ SSEStatusUpdating SSEStatus = "UPDATING"
+)
+
+// Values returns all known values for SSEStatus. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (SSEStatus) Values() []SSEStatus {
+ return []SSEStatus{
+ "ENABLING",
+ "ENABLED",
+ "DISABLING",
+ "DISABLED",
+ "UPDATING",
+ }
+}
+
+type SSEType string
+
+// Enum values for SSEType
+const (
+ SSETypeAes256 SSEType = "AES256"
+ SSETypeKms SSEType = "KMS"
+)
+
+// Values returns all known values for SSEType. Note that this can be expanded in
+// the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (SSEType) Values() []SSEType {
+ return []SSEType{
+ "AES256",
+ "KMS",
+ }
+}
+
+type StreamViewType string
+
+// Enum values for StreamViewType
+const (
+ StreamViewTypeNewImage StreamViewType = "NEW_IMAGE"
+ StreamViewTypeOldImage StreamViewType = "OLD_IMAGE"
+ StreamViewTypeNewAndOldImages StreamViewType = "NEW_AND_OLD_IMAGES"
+ StreamViewTypeKeysOnly StreamViewType = "KEYS_ONLY"
+)
+
+// Values returns all known values for StreamViewType. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (StreamViewType) Values() []StreamViewType {
+ return []StreamViewType{
+ "NEW_IMAGE",
+ "OLD_IMAGE",
+ "NEW_AND_OLD_IMAGES",
+ "KEYS_ONLY",
+ }
+}
+
+type TableClass string
+
+// Enum values for TableClass
+const (
+ TableClassStandard TableClass = "STANDARD"
+ TableClassStandardInfrequentAccess TableClass = "STANDARD_INFREQUENT_ACCESS"
+)
+
+// Values returns all known values for TableClass. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (TableClass) Values() []TableClass {
+ return []TableClass{
+ "STANDARD",
+ "STANDARD_INFREQUENT_ACCESS",
+ }
+}
+
+type TableStatus string
+
+// Enum values for TableStatus
+const (
+ TableStatusCreating TableStatus = "CREATING"
+ TableStatusUpdating TableStatus = "UPDATING"
+ TableStatusDeleting TableStatus = "DELETING"
+ TableStatusActive TableStatus = "ACTIVE"
+ TableStatusInaccessibleEncryptionCredentials TableStatus = "INACCESSIBLE_ENCRYPTION_CREDENTIALS"
+ TableStatusArchiving TableStatus = "ARCHIVING"
+ TableStatusArchived TableStatus = "ARCHIVED"
+ TableStatusReplicationNotAuthorized TableStatus = "REPLICATION_NOT_AUTHORIZED"
+)
+
+// Values returns all known values for TableStatus. Note that this can be expanded
+// in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (TableStatus) Values() []TableStatus {
+ return []TableStatus{
+ "CREATING",
+ "UPDATING",
+ "DELETING",
+ "ACTIVE",
+ "INACCESSIBLE_ENCRYPTION_CREDENTIALS",
+ "ARCHIVING",
+ "ARCHIVED",
+ "REPLICATION_NOT_AUTHORIZED",
+ }
+}
+
+type TimeToLiveStatus string
+
+// Enum values for TimeToLiveStatus
+const (
+ TimeToLiveStatusEnabling TimeToLiveStatus = "ENABLING"
+ TimeToLiveStatusDisabling TimeToLiveStatus = "DISABLING"
+ TimeToLiveStatusEnabled TimeToLiveStatus = "ENABLED"
+ TimeToLiveStatusDisabled TimeToLiveStatus = "DISABLED"
+)
+
+// Values returns all known values for TimeToLiveStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (TimeToLiveStatus) Values() []TimeToLiveStatus {
+ return []TimeToLiveStatus{
+ "ENABLING",
+ "DISABLING",
+ "ENABLED",
+ "DISABLED",
+ }
+}
+
+type WitnessStatus string
+
+// Enum values for WitnessStatus
+const (
+ WitnessStatusCreating WitnessStatus = "CREATING"
+ WitnessStatusDeleting WitnessStatus = "DELETING"
+ WitnessStatusActive WitnessStatus = "ACTIVE"
+)
+
+// Values returns all known values for WitnessStatus. Note that this can be
+// expanded in the future, and so it is only as up to date as the client.
+//
+// The ordering of this slice is not guaranteed to be stable across updates.
+func (WitnessStatus) Values() []WitnessStatus {
+ return []WitnessStatus{
+ "CREATING",
+ "DELETING",
+ "ACTIVE",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go
new file mode 100644
index 000000000..45bf62b57
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/errors.go
@@ -0,0 +1,1186 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ "fmt"
+ smithy "github.com/aws/smithy-go"
+)
+
+// There is another ongoing conflicting backup control plane operation on the
+// table. The backup is either being created, deleted or restored to a table.
+type BackupInUseException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *BackupInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *BackupInUseException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *BackupInUseException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "BackupInUseException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *BackupInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Backup not found for the given BackupARN.
+type BackupNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *BackupNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *BackupNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *BackupNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "BackupNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *BackupNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// A condition specified in the operation failed to be evaluated.
+type ConditionalCheckFailedException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Item map[string]AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+func (e *ConditionalCheckFailedException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ConditionalCheckFailedException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ConditionalCheckFailedException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ConditionalCheckFailedException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ConditionalCheckFailedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Backups have not yet been enabled for this table.
+type ContinuousBackupsUnavailableException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ContinuousBackupsUnavailableException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ContinuousBackupsUnavailableException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ContinuousBackupsUnavailableException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ContinuousBackupsUnavailableException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ContinuousBackupsUnavailableException) ErrorFault() smithy.ErrorFault {
+ return smithy.FaultClient
+}
+
+// There was an attempt to insert an item with the same primary key as an item
+//
+// that already exists in the DynamoDB table.
+type DuplicateItemException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *DuplicateItemException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *DuplicateItemException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *DuplicateItemException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "DuplicateItemException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *DuplicateItemException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// There was a conflict when writing to the specified S3 bucket.
+type ExportConflictException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ExportConflictException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExportConflictException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ExportConflictException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ExportConflictException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ExportConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified export was not found.
+type ExportNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ExportNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ExportNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ExportNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ExportNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ExportNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified global table already exists.
+type GlobalTableAlreadyExistsException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *GlobalTableAlreadyExistsException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *GlobalTableAlreadyExistsException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *GlobalTableAlreadyExistsException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "GlobalTableAlreadyExistsException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *GlobalTableAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified global table does not exist.
+type GlobalTableNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *GlobalTableNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *GlobalTableNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *GlobalTableNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "GlobalTableNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *GlobalTableNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// DynamoDB rejected the request because you retried a request with a different
+// payload but with an idempotent token that was already used.
+type IdempotentParameterMismatchException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *IdempotentParameterMismatchException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IdempotentParameterMismatchException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *IdempotentParameterMismatchException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "IdempotentParameterMismatchException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *IdempotentParameterMismatchException) ErrorFault() smithy.ErrorFault {
+ return smithy.FaultClient
+}
+
+// There was a conflict when importing from the specified S3 source. This can
+//
+// occur when the current import conflicts with a previous import request that had
+// the same client token.
+type ImportConflictException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ImportConflictException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ImportConflictException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ImportConflictException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ImportConflictException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ImportConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified import was not found.
+type ImportNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ImportNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ImportNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ImportNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ImportNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ImportNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The operation tried to access a nonexistent index.
+type IndexNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *IndexNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *IndexNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *IndexNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "IndexNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *IndexNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// An error occurred on the server side.
+type InternalServerError struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InternalServerError) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InternalServerError) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InternalServerError) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InternalServerError"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InternalServerError) ErrorFault() smithy.ErrorFault { return smithy.FaultServer }
+
+type InvalidEndpointException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidEndpointException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidEndpointException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidEndpointException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidEndpointException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidEndpointException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified ExportTime is outside of the point in time recovery window.
+type InvalidExportTimeException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidExportTimeException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidExportTimeException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidExportTimeException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidExportTimeException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidExportTimeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// An invalid restore time was specified. RestoreDateTime must be between
+// EarliestRestorableDateTime and LatestRestorableDateTime.
+type InvalidRestoreTimeException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRestoreTimeException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRestoreTimeException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRestoreTimeException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRestoreTimeException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRestoreTimeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// An item collection is too large. This exception is only returned for tables
+// that have one or more local secondary indexes.
+type ItemCollectionSizeLimitExceededException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ItemCollectionSizeLimitExceededException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ItemCollectionSizeLimitExceededException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ItemCollectionSizeLimitExceededException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ItemCollectionSizeLimitExceededException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ItemCollectionSizeLimitExceededException) ErrorFault() smithy.ErrorFault {
+ return smithy.FaultClient
+}
+
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// For most purposes, up to 500 simultaneous table operations are allowed per
+// account. These operations include CreateTable , UpdateTable , DeleteTable ,
+// UpdateTimeToLive , RestoreTableFromBackup , and RestoreTableToPointInTime .
+//
+// When you are creating a table with one or more secondary indexes, you can have
+// up to 250 such requests running at a time. However, if the table or index
+// specifications are complex, then DynamoDB might temporarily reduce the number of
+// concurrent operations.
+//
+// When importing into DynamoDB, up to 50 simultaneous import table operations are
+// allowed per account.
+//
+// There is a soft account quota of 2,500 tables.
+//
+// GetRecords was called with a value of more than 1000 for the limit request
+// parameter.
+//
+// More than 2 processes are reading from the same streams shard at the same time.
+// Exceeding this limit may result in request throttling.
+type LimitExceededException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *LimitExceededException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *LimitExceededException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *LimitExceededException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "LimitExceededException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *LimitExceededException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Point in time recovery has not yet been enabled for this source table.
+type PointInTimeRecoveryUnavailableException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *PointInTimeRecoveryUnavailableException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *PointInTimeRecoveryUnavailableException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *PointInTimeRecoveryUnavailableException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "PointInTimeRecoveryUnavailableException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *PointInTimeRecoveryUnavailableException) ErrorFault() smithy.ErrorFault {
+ return smithy.FaultClient
+}
+
+// The operation tried to access a nonexistent resource-based policy.
+//
+// If you specified an ExpectedRevisionId , it's possible that a policy is present
+// for the resource but its revision ID didn't match the expected value.
+type PolicyNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *PolicyNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *PolicyNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *PolicyNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "PolicyNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *PolicyNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was denied due to request throttling. For detailed information
+// about why the request was throttled and the ARN of the impacted resource, find
+// the [ThrottlingReason]field in the returned exception. The Amazon Web Services SDKs for DynamoDB
+// automatically retry requests that receive this exception. Your request is
+// eventually successful, unless your retry queue is too large to finish. Reduce
+// the frequency of requests and use exponential backoff. For more information, go
+// to [Error Retries and Exponential Backoff]in the Amazon DynamoDB Developer Guide.
+//
+// [ThrottlingReason]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ThrottlingReason.html
+// [Error Retries and Exponential Backoff]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff
+type ProvisionedThroughputExceededException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ ThrottlingReasons []ThrottlingReason
+
+ noSmithyDocumentSerde
+}
+
+func (e *ProvisionedThroughputExceededException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ProvisionedThroughputExceededException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ProvisionedThroughputExceededException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ProvisionedThroughputExceededException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ProvisionedThroughputExceededException) ErrorFault() smithy.ErrorFault {
+ return smithy.FaultClient
+}
+
+// The specified replica is already part of the global table.
+type ReplicaAlreadyExistsException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ReplicaAlreadyExistsException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ReplicaAlreadyExistsException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ReplicaAlreadyExistsException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ReplicaAlreadyExistsException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ReplicaAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The specified replica is no longer part of the global table.
+type ReplicaNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ReplicaNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ReplicaNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ReplicaNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ReplicaNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ReplicaNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was rejected because one or more items in the request are being
+// modified by a request in another Region.
+type ReplicatedWriteConflictException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ReplicatedWriteConflictException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ReplicatedWriteConflictException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ReplicatedWriteConflictException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ReplicatedWriteConflictException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ReplicatedWriteConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Throughput exceeds the current throughput quota for your account. For detailed
+// information about why the request was throttled and the ARN of the impacted
+// resource, find the [ThrottlingReason]field in the returned exception. Contact [Amazon Web Services Support] to request a quota
+// increase.
+//
+// [Amazon Web Services Support]: https://aws.amazon.com/support
+// [ThrottlingReason]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ThrottlingReason.html
+type RequestLimitExceeded struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ ThrottlingReasons []ThrottlingReason
+
+ noSmithyDocumentSerde
+}
+
+func (e *RequestLimitExceeded) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *RequestLimitExceeded) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *RequestLimitExceeded) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "RequestLimitExceeded"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *RequestLimitExceeded) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The operation conflicts with the resource's availability. For example:
+//
+// - You attempted to recreate an existing table.
+//
+// - You tried to delete a table currently in the CREATING state.
+//
+// - You tried to update a resource that was already being updated.
+//
+// When appropriate, wait for the ongoing update to complete and attempt the
+// request again.
+type ResourceInUseException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ResourceInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ResourceInUseException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ResourceInUseException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ResourceInUseException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ResourceInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The operation tried to access a nonexistent table or index. The resource might
+// not be specified correctly, or its status might not be ACTIVE .
+type ResourceNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *ResourceNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ResourceNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ResourceNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ResourceNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// A target table with the specified name already exists.
+type TableAlreadyExistsException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *TableAlreadyExistsException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TableAlreadyExistsException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TableAlreadyExistsException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TableAlreadyExistsException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *TableAlreadyExistsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// A target table with the specified name is either being created or deleted.
+type TableInUseException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *TableInUseException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TableInUseException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TableInUseException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TableInUseException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *TableInUseException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// A source table with the name TableName does not currently exist within the
+// subscriber's account or the subscriber is operating in the wrong Amazon Web
+// Services Region.
+type TableNotFoundException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *TableNotFoundException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TableNotFoundException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TableNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TableNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *TableNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The request was denied due to request throttling. For detailed information
+// about why the request was throttled and the ARN of the impacted resource, find
+// the [ThrottlingReason]field in the returned exception.
+//
+// [ThrottlingReason]: https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_ThrottlingReason.html
+type ThrottlingException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ ThrottlingReasons []ThrottlingReason
+
+ noSmithyDocumentSerde
+}
+
+func (e *ThrottlingException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *ThrottlingException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *ThrottlingException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ThrottlingException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *ThrottlingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The entire transaction request was canceled.
+//
+// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+//
+// - A condition in one of the condition expressions is not met.
+//
+// - A table in the TransactWriteItems request is in a different account or
+// region.
+//
+// - More than one action in the TransactWriteItems operation targets the same
+// item.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because of
+// changes made by the transaction.
+//
+// - There is a user error, such as an invalid data format.
+//
+// - There is an ongoing TransactWriteItems operation that conflicts with a
+// concurrent TransactWriteItems request. In this case the TransactWriteItems
+// operation fails with a TransactionCanceledException .
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// - There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem , UpdateItem , DeleteItem or TransactWriteItems request. In
+// this case the TransactGetItems operation fails with a
+// TransactionCanceledException .
+//
+// - A table in the TransactGetItems request is in a different account or region.
+//
+// - There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// - There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the
+// CancellationReasons property. This property is not set for other languages.
+// Transaction cancellation reasons are ordered in the order of requested items, if
+// an item has no error it will have None code and Null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// - No Errors:
+//
+// - Code: None
+//
+// - Message: null
+//
+// - Conditional Check Failed:
+//
+// - Code: ConditionalCheckFailed
+//
+// - Message: The conditional request failed.
+//
+// - Item Collection Size Limit Exceeded:
+//
+// - Code: ItemCollectionSizeLimitExceeded
+//
+// - Message: Collection size exceeded.
+//
+// - Transaction Conflict:
+//
+// - Code: TransactionConflict
+//
+// - Message: Transaction is ongoing for the item.
+//
+// - Provisioned Throughput Exceeded:
+//
+// - Code: ProvisionedThroughputExceeded
+//
+// - Messages:
+//
+// - The level of configured provisioned throughput for the table was exceeded.
+// Consider increasing your provisioning level with the UpdateTable API.
+//
+// This Message is received when provisioned throughput is exceeded is on a
+//
+// provisioned DynamoDB table.
+//
+// - The level of configured provisioned throughput for one or more global
+// secondary indexes of the table was exceeded. Consider increasing your
+// provisioning level for the under-provisioned global secondary indexes with the
+// UpdateTable API.
+//
+// This message is returned when provisioned throughput is exceeded is on a
+//
+// provisioned GSI.
+//
+// - Throttling Error:
+//
+// - Code: ThrottlingError
+//
+// - Messages:
+//
+// - Throughput exceeds the current capacity of your table or index. DynamoDB is
+// automatically scaling your table or index so please try again shortly. If
+// exceptions persist, check if you have a hot key:
+// https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+//
+// This message is returned when writes get throttled on an On-Demand table as
+//
+// DynamoDB is automatically scaling the table.
+//
+// - Throughput exceeds the current capacity for one or more global secondary
+// indexes. DynamoDB is automatically scaling your index so please try again
+// shortly.
+//
+// This message is returned when writes get throttled on an On-Demand GSI as
+//
+// DynamoDB is automatically scaling the GSI.
+//
+// - Validation Error:
+//
+// - Code: ValidationError
+//
+// - Messages:
+//
+// - One or more parameter values were invalid.
+//
+// - The update expression attempted to update the secondary index key beyond
+// allowed size limits.
+//
+// - The update expression attempted to update the secondary index key to
+// unsupported type.
+//
+// - An operand in the update expression has an incorrect data type.
+//
+// - Item size to update has exceeded the maximum allowed size.
+//
+// - Number overflow. Attempting to store a number with magnitude larger than
+// supported range.
+//
+// - Type mismatch for attribute to update.
+//
+// - Nesting Levels have exceeded supported limits.
+//
+// - The document path provided in the update expression is invalid for update.
+//
+// - The provided expression refers to an attribute that does not exist in the
+// item.
+type TransactionCanceledException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ CancellationReasons []CancellationReason
+
+ noSmithyDocumentSerde
+}
+
+func (e *TransactionCanceledException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TransactionCanceledException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TransactionCanceledException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TransactionCanceledException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *TransactionCanceledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// Operation was rejected because there is an ongoing transaction for the item.
+type TransactionConflictException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *TransactionConflictException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TransactionConflictException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TransactionConflictException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TransactionConflictException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *TransactionConflictException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
+// The transaction with the given request token is already in progress.
+//
+// # Recommended Settings
+//
+// This is a general recommendation for handling the TransactionInProgressException
+// . These settings help ensure that the client retries will trigger completion of
+// the ongoing TransactWriteItems request.
+//
+// - Set clientExecutionTimeout to a value that allows at least one retry to be
+// processed after 5 seconds have elapsed since the first attempt for the
+// TransactWriteItems operation.
+//
+// - Set socketTimeout to a value a little lower than the requestTimeout setting.
+//
+// - requestTimeout should be set based on the time taken for the individual
+// retries of a single HTTP request for your use case, but setting it to 1 second
+// or higher should work well to reduce chances of retries and
+// TransactionInProgressException errors.
+//
+// - Use exponential backoff when retrying and tune backoff if needed.
+//
+// Assuming [default retry policy], example timeout settings based on the guidelines above are as
+// follows:
+//
+// Example timeline:
+//
+// - 0-1000 first attempt
+//
+// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base delay
+// for 4xx errors)
+//
+// - 1500-2500 second attempt
+//
+// - 2500-3500 second sleep/delay (500 * 2, exponential backoff)
+//
+// - 3500-4500 third attempt
+//
+// - 4500-6500 third sleep/delay (500 * 2^2)
+//
+// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds
+// have elapsed since the first attempt reached TC)
+//
+// [default retry policy]: https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97
+type TransactionInProgressException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *TransactionInProgressException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *TransactionInProgressException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *TransactionInProgressException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TransactionInProgressException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *TransactionInProgressException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go
new file mode 100644
index 000000000..ce3801ef9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/types/types.go
@@ -0,0 +1,3838 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package types
+
+import (
+ smithydocument "github.com/aws/smithy-go/document"
+ "time"
+)
+
+// Contains details of a table archival operation.
+type ArchivalSummary struct {
+
+ // The Amazon Resource Name (ARN) of the backup the table was archived to, when
+ // applicable in the archival reason. If you wish to restore this backup to the
+ // same table name, you will need to delete the original table.
+ ArchivalBackupArn *string
+
+ // The date and time when table archival was initiated by DynamoDB, in UNIX epoch
+ // time format.
+ ArchivalDateTime *time.Time
+
+ // The reason DynamoDB archived the table. Currently, the only possible value is:
+ //
+ // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to the
+ // table's KMS key being inaccessible for more than seven days. An On-Demand backup
+ // was created at the archival time.
+ ArchivalReason *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents an attribute for describing the schema for the table and indexes.
+type AttributeDefinition struct {
+
+ // A name for the attribute.
+ //
+ // This member is required.
+ AttributeName *string
+
+ // The data type for the attribute, where:
+ //
+ // - S - the attribute is of type String
+ //
+ // - N - the attribute is of type Number
+ //
+ // - B - the attribute is of type Binary
+ //
+ // This member is required.
+ AttributeType ScalarAttributeType
+
+ noSmithyDocumentSerde
+}
+
+// Represents the data for an attribute.
+//
+// Each attribute value is described as a name-value pair. The name is the data
+// type, and the value is the data itself.
+//
+// For more information, see [Data Types] in the Amazon DynamoDB Developer Guide.
+//
+// The following types satisfy this interface:
+//
+// AttributeValueMemberB
+// AttributeValueMemberBOOL
+// AttributeValueMemberBS
+// AttributeValueMemberL
+// AttributeValueMemberM
+// AttributeValueMemberN
+// AttributeValueMemberNS
+// AttributeValueMemberNULL
+// AttributeValueMemberS
+// AttributeValueMemberSS
+//
+// [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes
+type AttributeValue interface {
+ isAttributeValue()
+}
+
+// An attribute of type Binary. For example:
+//
+// "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"
+type AttributeValueMemberB struct {
+ Value []byte
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberB) isAttributeValue() {}
+
+// An attribute of type Boolean. For example:
+//
+// "BOOL": true
+type AttributeValueMemberBOOL struct {
+ Value bool
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberBOOL) isAttributeValue() {}
+
+// An attribute of type Binary Set. For example:
+//
+// "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]
+type AttributeValueMemberBS struct {
+ Value [][]byte
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberBS) isAttributeValue() {}
+
+// An attribute of type List. For example:
+//
+// "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}]
+type AttributeValueMemberL struct {
+ Value []AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberL) isAttributeValue() {}
+
+// An attribute of type Map. For example:
+//
+// "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}
+type AttributeValueMemberM struct {
+ Value map[string]AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberM) isAttributeValue() {}
+
+// An attribute of type Number. For example:
+//
+// "N": "123.45"
+//
+// Numbers are sent across the network to DynamoDB as strings, to maximize
+// compatibility across languages and libraries. However, DynamoDB treats them as
+// number type attributes for mathematical operations.
+type AttributeValueMemberN struct {
+ Value string
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberN) isAttributeValue() {}
+
+// An attribute of type Number Set. For example:
+//
+// "NS": ["42.2", "-19", "7.5", "3.14"]
+//
+// Numbers are sent across the network to DynamoDB as strings, to maximize
+// compatibility across languages and libraries. However, DynamoDB treats them as
+// number type attributes for mathematical operations.
+type AttributeValueMemberNS struct {
+ Value []string
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberNS) isAttributeValue() {}
+
+// An attribute of type Null. For example:
+//
+// "NULL": true
+type AttributeValueMemberNULL struct {
+ Value bool
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberNULL) isAttributeValue() {}
+
+// An attribute of type String. For example:
+//
+// "S": "Hello"
+type AttributeValueMemberS struct {
+ Value string
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberS) isAttributeValue() {}
+
+// An attribute of type String Set. For example:
+//
+// "SS": ["Giraffe", "Hippo" ,"Zebra"]
+type AttributeValueMemberSS struct {
+ Value []string
+
+ noSmithyDocumentSerde
+}
+
+func (*AttributeValueMemberSS) isAttributeValue() {}
+
+// For the UpdateItem operation, represents the attributes to be modified, the
+// action to perform on each, and the new value for each.
+//
+// You cannot use UpdateItem to update any primary key attributes. Instead, you
+// will need to delete the item, and then use PutItem to create a new item with
+// new attributes.
+//
+// Attribute values cannot be null; string and binary type attributes must have
+// lengths greater than zero; and set type attributes must not be empty. Requests
+// with empty values will be rejected with a ValidationException exception.
+type AttributeValueUpdate struct {
+
+ // Specifies how to perform the update. Valid values are PUT (default), DELETE ,
+ // and ADD . The behavior depends on whether the specified primary key already
+ // exists in the table.
+ //
+ // If an item with the specified Key is found in the table:
+ //
+ // - PUT - Adds the specified attribute to the item. If the attribute already
+ // exists, it is replaced by the new value.
+ //
+ // - DELETE - If no value is specified, the attribute and its value are removed
+ // from the item. The data type of the specified value must match the existing
+ // value's data type.
+ //
+ // If a set of values is specified, then those values are subtracted from the old
+ // set. For example, if the attribute value was the set [a,b,c] and the DELETE
+ // action specified [a,c] , then the final attribute value would be [b] .
+ // Specifying an empty set is an error.
+ //
+ // - ADD - If the attribute does not already exist, then the attribute and its
+ // values are added to the item. If the attribute does exist, then the behavior of
+ // ADD depends on the data type of the attribute:
+ //
+ // - If the existing attribute is a number, and if Value is also a number, then
+ // the Value is mathematically added to the existing attribute. If Value is a
+ // negative number, then it is subtracted from the existing attribute.
+ //
+ // If you use ADD to increment or decrement a number value for an item that doesn't
+ // exist before the update, DynamoDB uses 0 as the initial value.
+ //
+ // In addition, if you use ADD to update an existing item, and intend to increment
+ // or decrement an attribute value which does not yet exist, DynamoDB uses 0 as
+ // the initial value. For example, suppose that the item you want to update does
+ // not yet have an attribute named itemcount, but you decide to ADD the number 3
+ // to this attribute anyway, even though it currently does not exist. DynamoDB will
+ // create the itemcount attribute, set its initial value to 0 , and finally add 3
+ // to it. The result will be a new itemcount attribute in the item, with a value of
+ // 3 .
+ //
+ // - If the existing data type is a set, and if the Value is also a set, then the
+ // Value is added to the existing set. (This is a set operation, not mathematical
+ // addition.) For example, if the attribute value was the set [1,2] , and the ADD
+ // action specified [3] , then the final attribute value would be [1,2,3] . An
+ // error occurs if an Add action is specified for a set attribute and the attribute
+ // type specified does not match the existing set type.
+ //
+ // Both sets must have the same primitive data type. For example, if the existing
+ // data type is a set of strings, the Value must also be a set of strings. The
+ // same holds true for number sets and binary sets.
+ //
+ // This action is only valid for an existing attribute whose data type is number
+ // or is a set. Do not use ADD for any other data types.
+ //
+ // If no item with the specified Key is found:
+ //
+ // - PUT - DynamoDB creates a new item with the specified primary key, and then
+ // adds the attribute.
+ //
+ // - DELETE - Nothing happens; there is no attribute to delete.
+ //
+ // - ADD - DynamoDB creates a new item with the supplied primary key and number
+ // (or set) for the attribute value. The only data types allowed are number, number
+ // set, string set or binary set.
+ Action AttributeAction
+
+ // Represents the data for an attribute.
+ //
+ // Each attribute value is described as a name-value pair. The name is the data
+ // type, and the value is the data itself.
+ //
+ // For more information, see [Data Types] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes
+ Value AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of the scaling policy.
+type AutoScalingPolicyDescription struct {
+
+ // The name of the scaling policy.
+ PolicyName *string
+
+ // Represents a target tracking scaling policy configuration.
+ TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling policy to be modified.
+type AutoScalingPolicyUpdate struct {
+
+ // Represents a target tracking scaling policy configuration.
+ //
+ // This member is required.
+ TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate
+
+ // The name of the scaling policy.
+ PolicyName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling settings for a global table or global secondary
+// index.
+type AutoScalingSettingsDescription struct {
+
+ // Disabled auto scaling for this global table or global secondary index.
+ AutoScalingDisabled *bool
+
+ // Role ARN used for configuring the auto scaling policy.
+ AutoScalingRoleArn *string
+
+ // The maximum capacity units that a global table or global secondary index should
+ // be scaled up to.
+ MaximumUnits *int64
+
+ // The minimum capacity units that a global table or global secondary index should
+ // be scaled down to.
+ MinimumUnits *int64
+
+ // Information about the scaling policies.
+ ScalingPolicies []AutoScalingPolicyDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling settings to be modified for a global table or
+// global secondary index.
+type AutoScalingSettingsUpdate struct {
+
+ // Disabled auto scaling for this global table or global secondary index.
+ AutoScalingDisabled *bool
+
+ // Role ARN used for configuring auto scaling policy.
+ AutoScalingRoleArn *string
+
+ // The maximum capacity units that a global table or global secondary index should
+ // be scaled up to.
+ MaximumUnits *int64
+
+ // The minimum capacity units that a global table or global secondary index should
+ // be scaled down to.
+ MinimumUnits *int64
+
+ // The scaling policy to apply for scaling target global table or global secondary
+ // index capacity units.
+ ScalingPolicyUpdate *AutoScalingPolicyUpdate
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a target tracking scaling policy.
+type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct {
+
+ // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108
+ // (Base 10) or 2e-360 to 2e360 (Base 2).
+ //
+ // This member is required.
+ TargetValue *float64
+
+ // Indicates whether scale in by the target tracking policy is disabled. If the
+ // value is true, scale in is disabled and the target tracking policy won't remove
+ // capacity from the scalable resource. Otherwise, scale in is enabled and the
+ // target tracking policy can remove capacity from the scalable resource. The
+ // default value is false.
+ DisableScaleIn *bool
+
+ // The amount of time, in seconds, after a scale in activity completes before
+ // another scale in activity can start. The cooldown period is used to block
+ // subsequent scale in requests until it has expired. You should scale in
+ // conservatively to protect your application's availability. However, if another
+ // alarm triggers a scale out policy during the cooldown period after a scale-in,
+ // application auto scaling scales out your scalable target immediately.
+ ScaleInCooldown *int32
+
+ // The amount of time, in seconds, after a scale out activity completes before
+ // another scale out activity can start. While the cooldown period is in effect,
+ // the capacity that has been added by the previous scale out event that initiated
+ // the cooldown is calculated as part of the desired capacity for the next scale
+ // out. You should continuously (but not excessively) scale out.
+ ScaleOutCooldown *int32
+
+ noSmithyDocumentSerde
+}
+
+// Represents the settings of a target tracking scaling policy that will be
+// modified.
+type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct {
+
+ // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108
+ // (Base 10) or 2e-360 to 2e360 (Base 2).
+ //
+ // This member is required.
+ TargetValue *float64
+
+ // Indicates whether scale in by the target tracking policy is disabled. If the
+ // value is true, scale in is disabled and the target tracking policy won't remove
+ // capacity from the scalable resource. Otherwise, scale in is enabled and the
+ // target tracking policy can remove capacity from the scalable resource. The
+ // default value is false.
+ DisableScaleIn *bool
+
+ // The amount of time, in seconds, after a scale in activity completes before
+ // another scale in activity can start. The cooldown period is used to block
+ // subsequent scale in requests until it has expired. You should scale in
+ // conservatively to protect your application's availability. However, if another
+ // alarm triggers a scale out policy during the cooldown period after a scale-in,
+ // application auto scaling scales out your scalable target immediately.
+ ScaleInCooldown *int32
+
+ // The amount of time, in seconds, after a scale out activity completes before
+ // another scale out activity can start. While the cooldown period is in effect,
+ // the capacity that has been added by the previous scale out event that initiated
+ // the cooldown is calculated as part of the desired capacity for the next scale
+ // out. You should continuously (but not excessively) scale out.
+ ScaleOutCooldown *int32
+
+ noSmithyDocumentSerde
+}
+
+// Contains the description of the backup created for the table.
+type BackupDescription struct {
+
+ // Contains the details of the backup created for the table.
+ BackupDetails *BackupDetails
+
+ // Contains the details of the table when the backup was created.
+ SourceTableDetails *SourceTableDetails
+
+ // Contains the details of the features enabled on the table when the backup was
+ // created. For example, LSIs, GSIs, streams, TTL.
+ SourceTableFeatureDetails *SourceTableFeatureDetails
+
+ noSmithyDocumentSerde
+}
+
+// Contains the details of the backup created for the table.
+type BackupDetails struct {
+
+ // ARN associated with the backup.
+ //
+ // This member is required.
+ BackupArn *string
+
+ // Time at which the backup was created. This is the request time of the backup.
+ //
+ // This member is required.
+ BackupCreationDateTime *time.Time
+
+ // Name of the requested backup.
+ //
+ // This member is required.
+ BackupName *string
+
+ // Backup can be in one of the following states: CREATING, ACTIVE, DELETED.
+ //
+ // This member is required.
+ BackupStatus BackupStatus
+
+ // BackupType:
+ //
+ // - USER - You create and manage these using the on-demand backup feature.
+ //
+ // - SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM
+ // backup is automatically created and is retained for 35 days (at no additional
+ // cost). System backups allow you to restore the deleted table to the state it was
+ // in just before the point of deletion.
+ //
+ // - AWS_BACKUP - On-demand backup created by you from Backup service.
+ //
+ // This member is required.
+ BackupType BackupType
+
+ // Time at which the automatic on-demand backup created by DynamoDB will expire.
+ // This SYSTEM on-demand backup expires automatically 35 days after its creation.
+ BackupExpiryDateTime *time.Time
+
+ // Size of the backup in bytes. DynamoDB updates this value approximately every
+ // six hours. Recent changes might not be reflected in this value.
+ BackupSizeBytes *int64
+
+ noSmithyDocumentSerde
+}
+
+// Contains details for the backup.
+type BackupSummary struct {
+
+ // ARN associated with the backup.
+ BackupArn *string
+
+ // Time at which the backup was created.
+ BackupCreationDateTime *time.Time
+
+ // Time at which the automatic on-demand backup created by DynamoDB will expire.
+ // This SYSTEM on-demand backup expires automatically 35 days after its creation.
+ BackupExpiryDateTime *time.Time
+
+ // Name of the specified backup.
+ BackupName *string
+
+ // Size of the backup in bytes.
+ BackupSizeBytes *int64
+
+ // Backup can be in one of the following states: CREATING, ACTIVE, DELETED.
+ BackupStatus BackupStatus
+
+ // BackupType:
+ //
+ // - USER - You create and manage these using the on-demand backup feature.
+ //
+ // - SYSTEM - If you delete a table with point-in-time recovery enabled, a SYSTEM
+ // backup is automatically created and is retained for 35 days (at no additional
+ // cost). System backups allow you to restore the deleted table to the state it was
+ // in just before the point of deletion.
+ //
+ // - AWS_BACKUP - On-demand backup created by you from Backup service.
+ BackupType BackupType
+
+ // ARN associated with the table.
+ TableArn *string
+
+ // Unique identifier for the table.
+ TableId *string
+
+ // Name of the table.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+// An error associated with a statement in a PartiQL batch that was run.
+type BatchStatementError struct {
+
+ // The error code associated with the failed PartiQL batch statement.
+ Code BatchStatementErrorCodeEnum
+
+ // The item which caused the condition check to fail. This will be set if
+ // ReturnValuesOnConditionCheckFailure is specified as ALL_OLD .
+ Item map[string]AttributeValue
+
+ // The error message associated with the PartiQL batch response.
+ Message *string
+
+ noSmithyDocumentSerde
+}
+
+// A PartiQL batch statement request.
+type BatchStatementRequest struct {
+
+ // A valid PartiQL statement.
+ //
+ // This member is required.
+ Statement *string
+
+ // The read consistency of the PartiQL batch request.
+ ConsistentRead *bool
+
+ // The parameters associated with a PartiQL statement in the batch request.
+ Parameters []AttributeValue
+
+ // An optional parameter that returns the item attributes for a PartiQL batch
+ // request operation that failed a condition check.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+// A PartiQL batch statement response..
+type BatchStatementResponse struct {
+
+ // The error associated with a failed PartiQL batch statement.
+ Error *BatchStatementError
+
+ // A DynamoDB item associated with a BatchStatementResponse
+ Item map[string]AttributeValue
+
+ // The table name associated with a failed PartiQL batch statement.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+// Contains the details for the read/write capacity mode. This page talks about
+// PROVISIONED and PAY_PER_REQUEST billing modes. For more information about these
+// modes, see [Read/write capacity mode].
+//
+// You may need to switch to on-demand mode at least once in order to return a
+// BillingModeSummary response.
+//
+// [Read/write capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html
+type BillingModeSummary struct {
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. This setting can be changed later.
+ //
+ // - PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We
+ // recommend using PROVISIONED for predictable workloads.
+ //
+ // - PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We
+ // recommend using PAY_PER_REQUEST for unpredictable workloads.
+ BillingMode BillingMode
+
+ // Represents the time when PAY_PER_REQUEST was last set as the read/write
+ // capacity mode.
+ LastUpdateToPayPerRequestDateTime *time.Time
+
+ noSmithyDocumentSerde
+}
+
+// An ordered list of errors for each item in the request which caused the
+// transaction to get cancelled. The values of the list are ordered according to
+// the ordering of the TransactWriteItems request parameter. If no error occurred
+// for the associated item an error with a Null code and Null message will be
+// present.
+type CancellationReason struct {
+
+ // Status code for the result of the cancelled transaction.
+ Code *string
+
+ // Item in the request which caused the transaction to get cancelled.
+ Item map[string]AttributeValue
+
+ // Cancellation reason message description.
+ Message *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the amount of provisioned throughput capacity consumed on a table or
+// an index.
+type Capacity struct {
+
+ // The total number of capacity units consumed on a table or an index.
+ CapacityUnits *float64
+
+ // The total number of read capacity units consumed on a table or an index.
+ ReadCapacityUnits *float64
+
+ // The total number of write capacity units consumed on a table or an index.
+ WriteCapacityUnits *float64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the selection criteria for a Query or Scan operation:
+//
+// - For a Query operation, Condition is used for specifying the KeyConditions to
+// use when querying a table or an index. For KeyConditions , only the following
+// comparison operators are supported:
+//
+// EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN
+//
+// Condition is also used in a QueryFilter , which evaluates the query results and
+//
+// returns only the desired values.
+//
+// - For a Scan operation, Condition is used in a ScanFilter , which evaluates
+// the scan results and returns only the desired values.
+type Condition struct {
+
+ // A comparator for evaluating attributes. For example, equals, greater than, less
+ // than, etc.
+ //
+ // The following comparison operators are available:
+ //
+ // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+ // BEGINS_WITH | IN | BETWEEN
+ //
+ // The following are descriptions of each comparison operator.
+ //
+ // - EQ : Equal. EQ is supported for all data types, including lists and maps.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an
+ // AttributeValue element of a different type than the one provided in the
+ // request, the value does not match. For example, {"S":"6"} does not equal
+ // {"N":"6"} . Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]} .
+ //
+ // - NE : Not equal. NE is supported for all data types, including lists and maps.
+ //
+ // AttributeValueList can contain only one AttributeValue of type String, Number,
+ // Binary, String Set, Number Set, or Binary Set. If an item contains an
+ // AttributeValue of a different type than the one provided in the request, the
+ // value does not match. For example, {"S":"6"} does not equal {"N":"6"} . Also,
+ // {"N":"6"} does not equal {"NS":["6", "2", "1"]} .
+ //
+ // - LE : Less than or equal.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value does
+ // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]} .
+ //
+ // - LT : Less than.
+ //
+ // AttributeValueList can contain only one AttributeValue of type String, Number,
+ // or Binary (not a set type). If an item contains an AttributeValue element of a
+ // different type than the one provided in the request, the value does not match.
+ // For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} does not
+ // compare to {"NS":["6", "2", "1"]} .
+ //
+ // - GE : Greater than or equal.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value does
+ // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]} .
+ //
+ // - GT : Greater than.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value does
+ // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]} .
+ //
+ // - NOT_NULL : The attribute exists. NOT_NULL is supported for all data types,
+ // including lists and maps.
+ //
+ // This operator tests for the existence of an attribute, not its data type. If
+ // the data type of attribute " a " is null, and you evaluate it using NOT_NULL ,
+ // the result is a Boolean true . This result is because the attribute " a "
+ // exists; its data type is not relevant to the NOT_NULL comparison operator.
+ //
+ // - NULL : The attribute does not exist. NULL is supported for all data types,
+ // including lists and maps.
+ //
+ // This operator tests for the nonexistence of an attribute, not its data type. If
+ // the data type of attribute " a " is null, and you evaluate it using NULL , the
+ // result is a Boolean false . This is because the attribute " a " exists; its
+ // data type is not relevant to the NULL comparison operator.
+ //
+ // - CONTAINS : Checks for a subsequence, or value in a set.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If the target attribute of the comparison is
+ // of type String, then the operator checks for a substring match. If the target
+ // attribute of the comparison is of type Binary, then the operator looks for a
+ // subsequence of the target that matches the input. If the target attribute of the
+ // comparison is a set (" SS ", " NS ", or " BS "), then the operator evaluates
+ // to true if it finds an exact match with any member of the set.
+ //
+ // CONTAINS is supported for lists: When evaluating " a CONTAINS b ", " a " can be
+ // a list; however, " b " cannot be a set, a map, or a list.
+ //
+ // - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in
+ // a set.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If the target attribute of the comparison is
+ // a String, then the operator checks for the absence of a substring match. If the
+ // target attribute of the comparison is Binary, then the operator checks for the
+ // absence of a subsequence of the target that matches the input. If the target
+ // attribute of the comparison is a set (" SS ", " NS ", or " BS "), then the
+ // operator evaluates to true if it does not find an exact match with any member of
+ // the set.
+ //
+ // NOT_CONTAINS is supported for lists: When evaluating " a NOT CONTAINS b ", " a "
+ // can be a list; however, " b " cannot be a set, a map, or a list.
+ //
+ // - BEGINS_WITH : Checks for a prefix.
+ //
+ // AttributeValueList can contain only one AttributeValue of type String or Binary
+ // (not a Number or a set type). The target attribute of the comparison must be of
+ // type String or Binary (not a Number or a set type).
+ //
+ // - IN : Checks for matching elements in a list.
+ //
+ // AttributeValueList can contain one or more AttributeValue elements of type
+ // String, Number, or Binary. These attributes are compared against an existing
+ // attribute of an item. If any elements of the input are equal to the item
+ // attribute, the expression evaluates to true.
+ //
+ // - BETWEEN : Greater than or equal to the first value, and less than or equal
+ // to the second value.
+ //
+ // AttributeValueList must contain two AttributeValue elements of the same type,
+ // either String, Number, or Binary (not a set type). A target attribute matches if
+ // the target value is greater than, or equal to, the first element and less than,
+ // or equal to, the second element. If an item contains an AttributeValue element
+ // of a different type than the one provided in the request, the value does not
+ // match. For example, {"S":"6"} does not compare to {"N":"6"} . Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]}
+ //
+ // For usage examples of AttributeValueList and ComparisonOperator , see [Legacy Conditional Parameters] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Legacy Conditional Parameters]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html
+ //
+ // This member is required.
+ ComparisonOperator ComparisonOperator
+
+ // One or more values to evaluate against the supplied attribute. The number of
+ // values in the list depends on the ComparisonOperator being used.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based on
+ // ASCII character code values. For example, a is greater than A , and a is
+ // greater than B . For a list of code values, see [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters].
+ //
+ // For Binary, DynamoDB treats each byte of the binary data as unsigned when it
+ // compares binary values.
+ //
+ // [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
+ AttributeValueList []AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+// Represents a request to perform a check that an item exists or to check the
+// condition of specific attributes of the item.
+type ConditionCheck struct {
+
+ // A condition that must be satisfied in order for a conditional update to
+ // succeed. For more information, see [Condition expressions]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Condition expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html
+ //
+ // This member is required.
+ ConditionExpression *string
+
+ // The primary key of the item to be checked. Each element consists of an
+ // attribute name and a value for that attribute.
+ //
+ // This member is required.
+ Key map[string]AttributeValue
+
+ // Name of the table for the check item request. You can also provide the Amazon
+ // Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // One or more substitution tokens for attribute names in an expression. For more
+ // information, see [Expression attribute names]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Expression attribute names]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression. For more
+ // information, see [Condition expressions]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Condition expressions]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html
+ ExpressionAttributeValues map[string]AttributeValue
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
+ // ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure , the
+ // valid values are: NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+// The capacity units consumed by an operation. The data returned includes the
+// total provisioned throughput consumed, along with statistics for the table and
+// any indexes involved in the operation. ConsumedCapacity is only returned if the
+// request asked for it. For more information, see [Provisioned capacity mode]in the Amazon DynamoDB
+// Developer Guide.
+//
+// [Provisioned capacity mode]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+type ConsumedCapacity struct {
+
+ // The total number of capacity units consumed by the operation.
+ CapacityUnits *float64
+
+ // The amount of throughput consumed on each global index affected by the
+ // operation.
+ GlobalSecondaryIndexes map[string]Capacity
+
+ // The amount of throughput consumed on each local index affected by the operation.
+ LocalSecondaryIndexes map[string]Capacity
+
+ // The total number of read capacity units consumed by the operation.
+ ReadCapacityUnits *float64
+
+ // The amount of throughput consumed on the table affected by the operation.
+ Table *Capacity
+
+ // The name of the table that was affected by the operation. If you had specified
+ // the Amazon Resource Name (ARN) of a table in the input, you'll see the table ARN
+ // in the response.
+ TableName *string
+
+ // The total number of write capacity units consumed by the operation.
+ WriteCapacityUnits *float64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the continuous backups and point in time recovery settings on the
+// table.
+type ContinuousBackupsDescription struct {
+
+ // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED
+ //
+ // This member is required.
+ ContinuousBackupsStatus ContinuousBackupsStatus
+
+ // The description of the point in time recovery settings applied to the table.
+ PointInTimeRecoveryDescription *PointInTimeRecoveryDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents a Contributor Insights summary entry.
+type ContributorInsightsSummary struct {
+
+ // Indicates the current mode of CloudWatch Contributor Insights, specifying
+ // whether it tracks all access and throttled events or throttled events only for
+ // the DynamoDB table or index.
+ ContributorInsightsMode ContributorInsightsMode
+
+ // Describes the current status for contributor insights for the given table and
+ // index, if applicable.
+ ContributorInsightsStatus ContributorInsightsStatus
+
+ // Name of the index associated with the summary, if any.
+ IndexName *string
+
+ // Name of the table associated with the summary.
+ TableName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a new global secondary index to be added to an existing table.
+type CreateGlobalSecondaryIndexAction struct {
+
+ // The name of the global secondary index to be created.
+ //
+ // This member is required.
+ IndexName *string
+
+ // The key schema for the global secondary index.
+ //
+ // This member is required.
+ KeySchema []KeySchemaElement
+
+ // Represents attributes that are copied (projected) from the table into an index.
+ // These are in addition to the primary key attributes and index key attributes,
+ // which are automatically projected.
+ //
+ // This member is required.
+ Projection *Projection
+
+ // The maximum number of read and write units for the global secondary index being
+ // created. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both. You must use either OnDemand Throughput or
+ // ProvisionedThroughput based on your table's capacity mode.
+ OnDemandThroughput *OnDemandThroughput
+
+ // Represents the provisioned throughput settings for the specified global
+ // secondary index.
+ //
+ // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+ ProvisionedThroughput *ProvisionedThroughput
+
+ // Represents the warm throughput value (in read units per second and write units
+ // per second) when creating a secondary index.
+ WarmThroughput *WarmThroughput
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the action to add a new witness Region to a MRSC global table. A MRSC
+// global table can be configured with either three replicas, or with two replicas
+// and one witness.
+type CreateGlobalTableWitnessGroupMemberAction struct {
+
+ // The Amazon Web Services Region name to be added as a witness Region for the
+ // MRSC global table. The witness must be in a different Region than the replicas
+ // and within the same Region set:
+ //
+ // - US Region set: US East (N. Virginia), US East (Ohio), US West (Oregon)
+ //
+ // - EU Region set: Europe (Ireland), Europe (London), Europe (Paris), Europe
+ // (Frankfurt)
+ //
+ // - AP Region set: Asia Pacific (Tokyo), Asia Pacific (Seoul), Asia Pacific
+ // (Osaka)
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be added.
+type CreateReplicaAction struct {
+
+ // The Region of the replica to be added.
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be created.
+type CreateReplicationGroupMemberAction struct {
+
+ // The Region where the new replica will be created.
+ //
+ // This member is required.
+ RegionName *string
+
+ // Replica-specific global secondary index settings.
+ GlobalSecondaryIndexes []ReplicaGlobalSecondaryIndex
+
+ // The KMS key that should be used for KMS encryption in the new replica. To
+ // specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias
+ // ARN. Note that you should only provide this parameter if the key is different
+ // from the default DynamoDB KMS key alias/aws/dynamodb .
+ KMSMasterKeyId *string
+
+ // The maximum on-demand throughput settings for the specified replica table being
+ // created. You can only modify MaxReadRequestUnits , because you can't modify
+ // MaxWriteRequestUnits for individual replica tables.
+ OnDemandThroughputOverride *OnDemandThroughputOverride
+
+ // Replica-specific provisioned throughput. If not specified, uses the source
+ // table's provisioned throughput settings.
+ ProvisionedThroughputOverride *ProvisionedThroughputOverride
+
+ // Replica-specific table class. If not specified, uses the source table's table
+ // class.
+ TableClassOverride TableClass
+
+ noSmithyDocumentSerde
+}
+
+// Processing options for the CSV file being imported.
+type CsvOptions struct {
+
+ // The delimiter used for separating items in the CSV file being imported.
+ Delimiter *string
+
+ // List of the headers used to specify a common header for all source CSV files
+ // being imported. If this field is specified then the first line of each CSV file
+ // is treated as data instead of the header. If this field is not specified the the
+ // first line of each CSV file is treated as the header.
+ HeaderList []string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a request to perform a DeleteItem operation.
+type Delete struct {
+
+ // The primary key of the item to be deleted. Each element consists of an
+ // attribute name and a value for that attribute.
+ //
+ // This member is required.
+ Key map[string]AttributeValue
+
+ // Name of the table in which the item to be deleted resides. You can also provide
+ // the Amazon Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // A condition that must be satisfied in order for a conditional delete to succeed.
+ ConditionExpression *string
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]AttributeValue
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Delete
+ // condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are:
+ // NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+// Represents a global secondary index to be deleted from an existing table.
+type DeleteGlobalSecondaryIndexAction struct {
+
+ // The name of the global secondary index to be deleted.
+ //
+ // This member is required.
+ IndexName *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies the action to remove a witness Region from a MRSC global table. You
+// cannot delete a single witness from a MRSC global table - you must delete both a
+// replica and the witness together. The deletion of both a witness and replica
+// converts the remaining replica to a single-Region DynamoDB table.
+type DeleteGlobalTableWitnessGroupMemberAction struct {
+
+ // The witness Region name to be removed from the MRSC global table.
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be removed.
+type DeleteReplicaAction struct {
+
+ // The Region of the replica to be removed.
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be deleted.
+type DeleteReplicationGroupMemberAction struct {
+
+ // The Region where the replica exists.
+ //
+ // This member is required.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a request to perform a DeleteItem operation on an item.
+type DeleteRequest struct {
+
+ // A map of attribute name to attribute values, representing the primary key of
+ // the item to delete. All of the table's primary key attributes must be specified,
+ // and their data types must match those of the table's key schema.
+ //
+ // This member is required.
+ Key map[string]AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+// Enables setting the configuration for Kinesis Streaming.
+type EnableKinesisStreamingConfiguration struct {
+
+ // Toggle for the precision of Kinesis data stream timestamp. The values are
+ // either MILLISECOND or MICROSECOND .
+ ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision
+
+ noSmithyDocumentSerde
+}
+
+// An endpoint information details.
+type Endpoint struct {
+
+ // IP address of the endpoint.
+ //
+ // This member is required.
+ Address *string
+
+ // Endpoint cache time to live (TTL) value.
+ //
+ // This member is required.
+ CachePeriodInMinutes int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents a condition to be compared with an attribute value. This condition
+// can be used with DeleteItem , PutItem , or UpdateItem operations; if the
+// comparison evaluates to true, the operation succeeds; if not, the operation
+// fails. You can use ExpectedAttributeValue in one of two different ways:
+//
+//   - Use AttributeValueList to specify one or more values to compare against an
+//     attribute. Use ComparisonOperator to specify how you want to perform the
+//     comparison. If the comparison evaluates to true, then the conditional operation
+//     succeeds.
+//
+//   - Use Value to specify a value that DynamoDB will compare against an
+//     attribute. If the values match, then ExpectedAttributeValue evaluates to true
+//     and the conditional operation succeeds. Optionally, you can also set Exists to
+//     false, indicating that you do not expect to find the attribute value in the
+//     table. In this case, the conditional operation succeeds only if the comparison
+//     evaluates to false.
+//
+// Value and Exists are incompatible with AttributeValueList and ComparisonOperator
+// . Note that if you use both sets of parameters at once, DynamoDB will return a
+// ValidationException exception.
+type ExpectedAttributeValue struct {
+
+ // One or more values to evaluate against the supplied attribute. The number of
+ // values in the list depends on the ComparisonOperator being used.
+ //
+ // For type Number, value comparisons are numeric.
+ //
+ // String value comparisons for greater than, equals, or less than are based on
+ // ASCII character code values. For example, a is greater than A , and a is
+ // greater than B . For a list of code values, see [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters].
+ //
+ // For Binary, DynamoDB treats each byte of the binary data as unsigned when it
+ // compares binary values.
+ //
+ // For information on specifying data types in JSON, see [JSON Data Format] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters]: http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
+ // [JSON Data Format]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html
+ AttributeValueList []AttributeValue
+
+ // A comparator for evaluating attributes in the AttributeValueList . For example,
+ // equals, greater than, less than, etc.
+ //
+ // The following comparison operators are available:
+ //
+ // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
+ // BEGINS_WITH | IN | BETWEEN
+ //
+ // The following are descriptions of each comparison operator.
+ //
+ //   - EQ : Equal. EQ is supported for all data types, including lists and maps.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, Binary, String Set, Number Set, or Binary Set. If an item contains an
+ // AttributeValue element of a different type than the one provided in the
+ // request, the value does not match. For example, {"S":"6"} does not equal
+ // {"N":"6"} . Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]} .
+ //
+ //   - NE : Not equal. NE is supported for all data types, including lists and maps.
+ //
+ // AttributeValueList can contain only one AttributeValue of type String, Number,
+ // Binary, String Set, Number Set, or Binary Set. If an item contains an
+ // AttributeValue of a different type than the one provided in the request, the
+ // value does not match. For example, {"S":"6"} does not equal {"N":"6"} . Also,
+ // {"N":"6"} does not equal {"NS":["6", "2", "1"]} .
+ //
+ //   - LE : Less than or equal.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value does
+ // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]} .
+ //
+ //   - LT : Less than.
+ //
+ // AttributeValueList can contain only one AttributeValue of type String, Number,
+ // or Binary (not a set type). If an item contains an AttributeValue element of a
+ // different type than the one provided in the request, the value does not match.
+ // For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"} does not
+ // compare to {"NS":["6", "2", "1"]} .
+ //
+ //   - GE : Greater than or equal.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value does
+ // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]} .
+ //
+ //   - GT : Greater than.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If an item contains an AttributeValue
+ // element of a different type than the one provided in the request, the value does
+ // not match. For example, {"S":"6"} does not equal {"N":"6"} . Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]} .
+ //
+ //   - NOT_NULL : The attribute exists. NOT_NULL is supported for all data types,
+ //     including lists and maps.
+ //
+ // This operator tests for the existence of an attribute, not its data type. If
+ // the data type of attribute " a " is null, and you evaluate it using NOT_NULL ,
+ // the result is a Boolean true . This result is because the attribute " a "
+ // exists; its data type is not relevant to the NOT_NULL comparison operator.
+ //
+ //   - NULL : The attribute does not exist. NULL is supported for all data types,
+ //     including lists and maps.
+ //
+ // This operator tests for the nonexistence of an attribute, not its data type. If
+ // the data type of attribute " a " is null, and you evaluate it using NULL , the
+ // result is a Boolean false . This is because the attribute " a " exists; its
+ // data type is not relevant to the NULL comparison operator.
+ //
+ //   - CONTAINS : Checks for a subsequence, or value in a set.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If the target attribute of the comparison is
+ // of type String, then the operator checks for a substring match. If the target
+ // attribute of the comparison is of type Binary, then the operator looks for a
+ // subsequence of the target that matches the input. If the target attribute of the
+ // comparison is a set (" SS ", " NS ", or " BS "), then the operator evaluates
+ // to true if it finds an exact match with any member of the set.
+ //
+ // CONTAINS is supported for lists: When evaluating " a CONTAINS b ", " a " can be
+ // a list; however, " b " cannot be a set, a map, or a list.
+ //
+ //   - NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value in
+ //     a set.
+ //
+ // AttributeValueList can contain only one AttributeValue element of type String,
+ // Number, or Binary (not a set type). If the target attribute of the comparison is
+ // a String, then the operator checks for the absence of a substring match. If the
+ // target attribute of the comparison is Binary, then the operator checks for the
+ // absence of a subsequence of the target that matches the input. If the target
+ // attribute of the comparison is a set (" SS ", " NS ", or " BS "), then the
+ // operator evaluates to true if it does not find an exact match with any member of
+ // the set.
+ //
+ // NOT_CONTAINS is supported for lists: When evaluating " a NOT CONTAINS b ", " a "
+ // can be a list; however, " b " cannot be a set, a map, or a list.
+ //
+ //   - BEGINS_WITH : Checks for a prefix.
+ //
+ // AttributeValueList can contain only one AttributeValue of type String or Binary
+ // (not a Number or a set type). The target attribute of the comparison must be of
+ // type String or Binary (not a Number or a set type).
+ //
+ //   - IN : Checks for matching elements in a list.
+ //
+ // AttributeValueList can contain one or more AttributeValue elements of type
+ // String, Number, or Binary. These attributes are compared against an existing
+ // attribute of an item. If any elements of the input are equal to the item
+ // attribute, the expression evaluates to true.
+ //
+ //   - BETWEEN : Greater than or equal to the first value, and less than or equal
+ //     to the second value.
+ //
+ // AttributeValueList must contain two AttributeValue elements of the same type,
+ // either String, Number, or Binary (not a set type). A target attribute matches if
+ // the target value is greater than, or equal to, the first element and less than,
+ // or equal to, the second element. If an item contains an AttributeValue element
+ // of a different type than the one provided in the request, the value does not
+ // match. For example, {"S":"6"} does not compare to {"N":"6"} . Also, {"N":"6"}
+ // does not compare to {"NS":["6", "2", "1"]}
+ ComparisonOperator ComparisonOperator
+
+ // Causes DynamoDB to evaluate the value before attempting a conditional operation:
+ //
+ //   - If Exists is true , DynamoDB will check to see if that attribute value
+ //     already exists in the table. If it is found, then the operation succeeds. If it
+ //     is not found, the operation fails with a ConditionCheckFailedException .
+ //
+ //   - If Exists is false , DynamoDB assumes that the attribute value does not
+ //     exist in the table. If in fact the value does not exist, then the assumption is
+ //     valid and the operation succeeds. If the value is found, despite the assumption
+ //     that it does not exist, the operation fails with a
+ //     ConditionCheckFailedException .
+ //
+ // The default setting for Exists is true . If you supply a Value all by itself,
+ // DynamoDB assumes the attribute exists: You don't have to set Exists to true ,
+ // because it is implied.
+ //
+ // DynamoDB returns a ValidationException if:
+ //
+ //   - Exists is true but there is no Value to check. (You expect a value to exist,
+ //     but don't specify what that value is.)
+ //
+ //   - Exists is false but you also provide a Value . (You cannot expect an
+ //     attribute to have a value, while also expecting it not to exist.)
+ Exists *bool
+
+ // Represents the data for the expected attribute.
+ //
+ // Each attribute value is described as a name-value pair. The name is the data
+ // type, and the value is the data itself.
+ //
+ // For more information, see [Data Types] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Data Types]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes
+ Value AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of the exported table.
+type ExportDescription struct {
+
+ // The billable size of the table export.
+ BilledSizeBytes *int64
+
+ // The client token that was provided for the export task. A client token makes
+ // calls to ExportTableToPointInTimeInput idempotent, meaning that multiple
+ // identical calls have the same effect as one single call.
+ ClientToken *string
+
+ // The time at which the export task completed.
+ EndTime *time.Time
+
+ // The Amazon Resource Name (ARN) of the table export.
+ ExportArn *string
+
+ // The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON
+ // or ION .
+ ExportFormat ExportFormat
+
+ // The name of the manifest file for the export task.
+ ExportManifest *string
+
+ // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.
+ ExportStatus ExportStatus
+
+ // Point in time from which table data was exported.
+ ExportTime *time.Time
+
+ // The type of export that was performed. Valid values are FULL_EXPORT or
+ // INCREMENTAL_EXPORT .
+ ExportType ExportType
+
+ // Status code for the result of the failed export.
+ FailureCode *string
+
+ // Export failure reason description.
+ FailureMessage *string
+
+ // Optional object containing the parameters specific to an incremental export.
+ IncrementalExportSpecification *IncrementalExportSpecification
+
+ // The number of items exported.
+ ItemCount *int64
+
+ // The name of the Amazon S3 bucket containing the export.
+ S3Bucket *string
+
+ // The ID of the Amazon Web Services account that owns the bucket containing the
+ // export.
+ S3BucketOwner *string
+
+ // The Amazon S3 bucket prefix used as the file name and path of the exported
+ // snapshot.
+ S3Prefix *string
+
+ // Type of encryption used on the bucket where export data is stored. Valid values
+ // for S3SseAlgorithm are:
+ //
+ //   - AES256 - server-side encryption with Amazon S3 managed keys
+ //
+ //   - KMS - server-side encryption with KMS managed keys
+ S3SseAlgorithm S3SseAlgorithm
+
+ // The ID of the KMS managed key used to encrypt the S3 bucket where export data
+ // is stored (if applicable).
+ S3SseKmsKeyId *string
+
+ // The time at which the export task began.
+ StartTime *time.Time
+
+ // The Amazon Resource Name (ARN) of the table that was exported.
+ TableArn *string
+
+ // Unique ID of the table that was exported.
+ TableId *string
+
+ noSmithyDocumentSerde
+}
+
+// Summary information about an export task.
+type ExportSummary struct {
+
+ // The Amazon Resource Name (ARN) of the export.
+ ExportArn *string
+
+ // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or FAILED.
+ ExportStatus ExportStatus
+
+ // The type of export that was performed. Valid values are FULL_EXPORT or
+ // INCREMENTAL_EXPORT .
+ ExportType ExportType
+
+ noSmithyDocumentSerde
+}
+
+// Represents a failure of a contributor insights operation.
+type FailureException struct {
+
+ // Description of the failure.
+ ExceptionDescription *string
+
+ // Exception name.
+ ExceptionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Specifies an item and related attribute values to retrieve in a TransactGetItem
+// object.
+type Get struct {
+
+ // A map of attribute names to AttributeValue objects that specifies the primary
+ // key of the item to retrieve.
+ //
+ // This member is required.
+ Key map[string]AttributeValue
+
+ // The name of the table from which to retrieve the specified item. You can also
+ // provide the Amazon Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // One or more substitution tokens for attribute names in the ProjectionExpression
+ // parameter.
+ ExpressionAttributeNames map[string]string
+
+ // A string that identifies one or more attributes of the specified item to
+ // retrieve from the table. The attributes in the expression must be separated by
+ // commas. If no attribute names are specified, then all attributes of the
+ // specified item are returned. If any of the requested attributes are not found,
+ // they do not appear in the result.
+ ProjectionExpression *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a global secondary index.
+type GlobalSecondaryIndex struct {
+
+ // The name of the global secondary index. The name must be unique among all other
+ // indexes on this table.
+ //
+ // This member is required.
+ IndexName *string
+
+ // The complete key schema for a global secondary index, which consists of one or
+ // more pairs of attribute names and key types:
+ //
+ //   - HASH - partition key
+ //
+ //   - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // This member is required.
+ KeySchema []KeySchemaElement
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes and
+ // index key attributes, which are automatically projected.
+ //
+ // This member is required.
+ Projection *Projection
+
+ // The maximum number of read and write units for the specified global secondary
+ // index. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both. You must use either OnDemandThroughput or
+ // ProvisionedThroughput based on your table's capacity mode.
+ OnDemandThroughput *OnDemandThroughput
+
+ // Represents the provisioned throughput settings for the specified global
+ // secondary index. You must use either OnDemandThroughput or ProvisionedThroughput
+ // based on your table's capacity mode.
+ //
+ // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+ ProvisionedThroughput *ProvisionedThroughput
+
+ // Represents the warm throughput value (in read units per second and write units
+ // per second) for the specified secondary index. If you use this parameter, you
+ // must specify ReadUnitsPerSecond , WriteUnitsPerSecond , or both.
+ WarmThroughput *WarmThroughput
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling settings of a global secondary index for a global
+// table that will be modified.
+type GlobalSecondaryIndexAutoScalingUpdate struct {
+
+ // The name of the global secondary index.
+ IndexName *string
+
+ // Represents the auto scaling settings to be modified for a global table or
+ // global secondary index.
+ ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a global secondary index.
+type GlobalSecondaryIndexDescription struct {
+
+ // Indicates whether the index is currently backfilling. Backfilling is the
+ // process of reading items from the table and determining whether they can be
+ // added to the index. (Not all items will qualify: For example, a partition key
+ // cannot have any duplicate values.) If an item can be added to the index,
+ // DynamoDB will do so. After all items have been processed, the backfilling
+ // operation is complete and Backfilling is false.
+ //
+ // You can delete an index that is being created during the Backfilling phase when
+ // IndexStatus is set to CREATING and Backfilling is true. You can't delete the
+ // index that is being created when IndexStatus is set to CREATING and Backfilling
+ // is false.
+ //
+ // For indexes that were created during a CreateTable operation, the Backfilling
+ // attribute does not appear in the DescribeTable output.
+ Backfilling *bool
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the index.
+ IndexArn *string
+
+ // The name of the global secondary index.
+ IndexName *string
+
+ // The total size of the specified index, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ IndexSizeBytes *int64
+
+ // The current state of the global secondary index:
+ //
+ //   - CREATING - The index is being created.
+ //
+ //   - UPDATING - The index is being updated.
+ //
+ //   - DELETING - The index is being deleted.
+ //
+ //   - ACTIVE - The index is ready for use.
+ IndexStatus IndexStatus
+
+ // The number of items in the specified index. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ ItemCount *int64
+
+ // The complete key schema for a global secondary index, which consists of one or
+ // more pairs of attribute names and key types:
+ //
+ //   - HASH - partition key
+ //
+ //   - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []KeySchemaElement
+
+ // The maximum number of read and write units for the specified global secondary
+ // index. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both.
+ OnDemandThroughput *OnDemandThroughput
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes and
+ // index key attributes, which are automatically projected.
+ Projection *Projection
+
+ // Represents the provisioned throughput settings for the specified global
+ // secondary index.
+ //
+ // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+ ProvisionedThroughput *ProvisionedThroughputDescription
+
+ // Represents the warm throughput value (in read units per second and write units
+ // per second) for the specified secondary index.
+ WarmThroughput *GlobalSecondaryIndexWarmThroughputDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a global secondary index for the table when the
+// backup was created.
+type GlobalSecondaryIndexInfo struct {
+
+ // The name of the global secondary index.
+ IndexName *string
+
+ // The complete key schema for a global secondary index, which consists of one or
+ // more pairs of attribute names and key types:
+ //
+ //   - HASH - partition key
+ //
+ //   - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []KeySchemaElement
+
+ // Sets the maximum number of read and write units for the specified on-demand
+ // table. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both.
+ OnDemandThroughput *OnDemandThroughput
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes and
+ // index key attributes, which are automatically projected.
+ Projection *Projection
+
+ // Represents the provisioned throughput settings for the specified global
+ // secondary index.
+ ProvisionedThroughput *ProvisionedThroughput
+
+ noSmithyDocumentSerde
+}
+
+// Represents one of the following:
+//
+//   - A new global secondary index to be added to an existing table.
+//
+//   - New provisioned throughput parameters for an existing global secondary
+//     index.
+//
+//   - An existing global secondary index to be removed from an existing table.
+type GlobalSecondaryIndexUpdate struct {
+
+ // The parameters required for creating a global secondary index on an existing
+ // table:
+ //
+ //   - IndexName
+ //
+ //   - KeySchema
+ //
+ //   - AttributeDefinitions
+ //
+ //   - Projection
+ //
+ //   - ProvisionedThroughput
+ Create *CreateGlobalSecondaryIndexAction
+
+ // The name of an existing global secondary index to be removed.
+ Delete *DeleteGlobalSecondaryIndexAction
+
+ // The name of an existing global secondary index, along with new provisioned
+ // throughput settings to be applied to that index.
+ Update *UpdateGlobalSecondaryIndexAction
+
+ noSmithyDocumentSerde
+}
+
+// The description of the warm throughput value on a global secondary index.
+type GlobalSecondaryIndexWarmThroughputDescription struct {
+
+ // Represents warm throughput read units per second value for a global secondary
+ // index.
+ ReadUnitsPerSecond *int64
+
+ // Represents the warm throughput status being created or updated on a global
+ // secondary index. The status can only be UPDATING or ACTIVE .
+ Status IndexStatus
+
+ // Represents warm throughput write units per second value for a global secondary
+ // index.
+ WriteUnitsPerSecond *int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a global table.
+type GlobalTable struct {
+
+ // The global table name.
+ GlobalTableName *string
+
+ // The Regions where the global table has replicas.
+ ReplicationGroup []Replica
+
+ noSmithyDocumentSerde
+}
+
+// Contains details about the global table.
+type GlobalTableDescription struct {
+
+ // The creation time of the global table.
+ CreationDateTime *time.Time
+
+ // The unique identifier of the global table.
+ GlobalTableArn *string
+
+ // The global table name.
+ GlobalTableName *string
+
+ // The current state of the global table:
+ //
+ //   - CREATING - The global table is being created.
+ //
+ //   - UPDATING - The global table is being updated.
+ //
+ //   - DELETING - The global table is being deleted.
+ //
+ //   - ACTIVE - The global table is ready for use.
+ GlobalTableStatus GlobalTableStatus
+
+ // The Regions where the global table has replicas.
+ ReplicationGroup []ReplicaDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the settings of a global secondary index for a global table that
+// will be modified.
+type GlobalTableGlobalSecondaryIndexSettingsUpdate struct {
+
+ // The name of the global secondary index. The name must be unique among all other
+ // indexes on this table.
+ //
+ // This member is required.
+ IndexName *string
+
+ // Auto scaling settings for managing a global secondary index's write capacity
+ // units.
+ ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate
+
+ // The maximum number of writes consumed per second before DynamoDB returns a
+ // ThrottlingException.
+ ProvisionedWriteCapacityUnits *int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a witness Region in a MRSC global table.
+type GlobalTableWitnessDescription struct {
+
+ // The name of the Amazon Web Services Region that serves as a witness for the
+ // MRSC global table.
+ RegionName *string
+
+ // The current status of the witness Region in the MRSC global table.
+ WitnessStatus WitnessStatus
+
+ noSmithyDocumentSerde
+}
+
+// Represents one of the following:
+//
+//   - A new witness to be added to a new global table.
+//
+//   - An existing witness to be removed from an existing global table.
+//
+// You can configure one witness per MRSC global table.
+type GlobalTableWitnessGroupUpdate struct {
+
+ // Specifies a witness Region to be added to a new MRSC global table. The witness
+ // must be added when creating the MRSC global table.
+ Create *CreateGlobalTableWitnessGroupMemberAction
+
+ // Specifies a witness Region to be removed from an existing global table. Must be
+ // done in conjunction with removing a replica. The deletion of both a witness and
+ // replica converts the remaining replica to a single-Region DynamoDB table.
+ Delete *DeleteGlobalTableWitnessGroupMemberAction
+
+ noSmithyDocumentSerde
+}
+
+// Summary information about the source file for the import.
+type ImportSummary struct {
+
+ // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with
+ // this import task.
+ CloudWatchLogGroupArn *string
+
+ // The time at which this import task ended. (Does this include the successful
+ // complete creation of the table it was imported to?)
+ EndTime *time.Time
+
+ // The Amazon Resource Number (ARN) corresponding to the import request.
+ ImportArn *string
+
+ // The status of the import operation.
+ ImportStatus ImportStatus
+
+ // The format of the source data. Valid values are CSV , DYNAMODB_JSON or ION .
+ InputFormat InputFormat
+
+ // The path and S3 bucket of the source file that is being imported. This
+ // includes the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner
+ // (optional if the bucket is owned by the requester).
+ S3BucketSource *S3BucketSource
+
+ // The time at which this import task began.
+ StartTime *time.Time
+
+ // The Amazon Resource Number (ARN) of the table being imported into.
+ TableArn *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of the table being imported into.
+type ImportTableDescription struct {
+
+ // The client token that was provided for the import task. Reusing the client
+ // token on retry makes a call to ImportTable idempotent.
+ ClientToken *string
+
+ // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with
+ // the target table.
+ CloudWatchLogGroupArn *string
+
+ // The time at which the creation of the table associated with this import task
+ // completed.
+ EndTime *time.Time
+
+ // The number of errors occurred on importing the source file into the target
+ // table.
+ ErrorCount int64
+
+ // The error code corresponding to the failure that the import job ran into
+ // during execution.
+ FailureCode *string
+
+ // The error message corresponding to the failure that the import job ran into
+ // during execution.
+ FailureMessage *string
+
+ // The Amazon Resource Number (ARN) corresponding to the import request.
+ ImportArn *string
+
+ // The status of the import.
+ ImportStatus ImportStatus
+
+ // The number of items successfully imported into the new table.
+ ImportedItemCount int64
+
+ // The compression options for the data that has been imported into the target
+ // table. The values are NONE, GZIP, or ZSTD.
+ InputCompressionType InputCompressionType
+
+ // The format of the source data going into the target table.
+ InputFormat InputFormat
+
+ // The format options for the data that was imported into the target table. There
+ // is one value, CsvOption.
+ InputFormatOptions *InputFormatOptions
+
+ // The total number of items processed from the source file.
+ ProcessedItemCount int64
+
+ // The total size of data processed from the source file, in Bytes.
+ ProcessedSizeBytes *int64
+
+ // Values for the S3 bucket the source file is imported from. Includes bucket
+ // name (required), key prefix (optional) and bucket account owner ID (optional).
+ S3BucketSource *S3BucketSource
+
+ // The time when this import task started.
+ StartTime *time.Time
+
+ // The Amazon Resource Number (ARN) of the table being imported into.
+ TableArn *string
+
+ // The parameters for the new table that is being imported into.
+ TableCreationParameters *TableCreationParameters
+
+ // The table id corresponding to the table created by import table process.
+ TableId *string
+
+ noSmithyDocumentSerde
+}
+
+// Optional object containing the parameters specific to an incremental export.
+type IncrementalExportSpecification struct {
+
+ // Time in the past which provides the inclusive start range for the export
+ // table's data, counted in seconds from the start of the Unix epoch. The
+ // incremental export will reflect the table's state including and after this point
+ // in time.
+ ExportFromTime *time.Time
+
+ // Time in the past which provides the exclusive end range for the export table's
+ // data, counted in seconds from the start of the Unix epoch. The incremental
+ // export will reflect the table's state just prior to this point in time. If this
+ // is not provided, the latest time with data available will be used.
+ ExportToTime *time.Time
+
+ // The view type that was chosen for the export. Valid values are
+ // NEW_AND_OLD_IMAGES and NEW_IMAGES . The default value is NEW_AND_OLD_IMAGES .
+ ExportViewType ExportViewType
+
+ noSmithyDocumentSerde
+}
+
+// The format options for the data that was imported into the target table. There
+//
+// is one value, CsvOption.
+type InputFormatOptions struct {
+
+ // The options for imported source files in CSV format. The values are Delimiter
+ // and HeaderList.
+ Csv *CsvOptions
+
+ noSmithyDocumentSerde
+}
+
+// Information about item collections, if any, that were affected by the
+// operation. ItemCollectionMetrics is only returned if the request asked for it.
+// If the table does not have any local secondary indexes, this information is not
+// returned in the response.
+type ItemCollectionMetrics struct {
+
+ // The partition key value of the item collection. This value is the same as the
+ // partition key value of the item.
+ ItemCollectionKey map[string]AttributeValue
+
+ // An estimate of item collection size, in gigabytes. This value is a two-element
+ // array containing a lower bound and an upper bound for the estimate. The estimate
+ // includes the size of all the items in the table, plus the size of all attributes
+ // projected into all of the local secondary indexes on that table. Use this
+ // estimate to measure whether a local secondary index is approaching its size
+ // limit.
+ //
+ // The estimate is subject to change over time; therefore, do not rely on the
+ // precision or accuracy of the estimate.
+ SizeEstimateRangeGB []float64
+
+ noSmithyDocumentSerde
+}
+
+// Details for the requested item.
+type ItemResponse struct {
+
+ // Map of attribute data consisting of the data type and attribute value.
+ Item map[string]AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+// Represents a set of primary keys and, for each key, the attributes to retrieve
+// from the table.
+//
+// For each primary key, you must provide all of the key attributes. For example,
+// with a simple primary key, you only need to provide the partition key. For a
+// composite primary key, you must provide both the partition key and the sort key.
+type KeysAndAttributes struct {
+
+ // The primary key attribute values that define the items and the attributes
+ // associated with the items.
+ //
+ // This member is required.
+ Keys []map[string]AttributeValue
+
+ // This is a legacy parameter. Use ProjectionExpression instead. For more
+ // information, see [Legacy Conditional Parameters]in the Amazon DynamoDB Developer Guide.
+ //
+ // [Legacy Conditional Parameters]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html
+ AttributesToGet []string
+
+ // The consistency of a read operation. If set to true , then a strongly consistent
+ // read is used; otherwise, an eventually consistent read is used.
+ ConsistentRead *bool
+
+ // One or more substitution tokens for attribute names in an expression. The
+ // following are some use cases for using ExpressionAttributeNames :
+ //
+ // - To access an attribute whose name conflicts with a DynamoDB reserved word.
+ //
+ // - To create a placeholder for repeating occurrences of an attribute name in
+ // an expression.
+ //
+ // - To prevent special characters in an attribute name from being
+ // misinterpreted in an expression.
+ //
+ // Use the # character in an expression to dereference an attribute name. For
+ // example, consider the following attribute name:
+ //
+ // - Percentile
+ //
+ // The name of this attribute conflicts with a reserved word, so it cannot be used
+ // directly in an expression. (For the complete list of reserved words, see [Reserved Words]in the
+ // Amazon DynamoDB Developer Guide). To work around this, you could specify the
+ // following for ExpressionAttributeNames :
+ //
+ // - {"#P":"Percentile"}
+ //
+ // You could then use this substitution in an expression, as in this example:
+ //
+ // - #P = :val
+ //
+ // Tokens that begin with the : character are expression attribute values, which
+ // are placeholders for the actual value at runtime.
+ //
+ // For more information on expression attribute names, see [Accessing Item Attributes] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Reserved Words]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html
+ // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ExpressionAttributeNames map[string]string
+
+ // A string that identifies one or more attributes to retrieve from the table.
+ // These attributes can include scalars, sets, or elements of a JSON document. The
+ // attributes in the ProjectionExpression must be separated by commas.
+ //
+ // If no attribute names are specified, then all attributes will be returned. If
+ // any of the requested attributes are not found, they will not appear in the
+ // result.
+ //
+ // For more information, see [Accessing Item Attributes] in the Amazon DynamoDB Developer Guide.
+ //
+ // [Accessing Item Attributes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html
+ ProjectionExpression *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents a single element of a key schema. A key schema specifies the
+// attributes that make up the primary key of a table, or the key attributes of an
+// index.
+//
+// A KeySchemaElement represents exactly one attribute of the primary key. For
+// example, a simple primary key would be represented by one KeySchemaElement (for
+// the partition key). A composite primary key would require one KeySchemaElement
+// for the partition key, and another KeySchemaElement for the sort key.
+//
+// A KeySchemaElement must be a scalar, top-level attribute (not a nested
+// attribute). The data type must be one of String, Number, or Binary. The
+// attribute cannot be nested within a List or a Map.
+type KeySchemaElement struct {
+
+ // The name of a key attribute.
+ //
+ // This member is required.
+ AttributeName *string
+
+ // The role that this key attribute will assume:
+ //
+ // - HASH - partition key
+ //
+ // - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // This member is required.
+ KeyType KeyType
+
+ noSmithyDocumentSerde
+}
+
+// Describes a Kinesis data stream destination.
+type KinesisDataStreamDestination struct {
+
+ // The precision of the Kinesis data stream timestamp. The values are either
+ // MILLISECOND or MICROSECOND .
+ ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision
+
+ // The current status of replication.
+ DestinationStatus DestinationStatus
+
+ // The human-readable string that corresponds to the replica status.
+ DestinationStatusDescription *string
+
+ // The ARN for a specific Kinesis data stream.
+ StreamArn *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a local secondary index.
+type LocalSecondaryIndex struct {
+
+ // The name of the local secondary index. The name must be unique among all other
+ // indexes on this table.
+ //
+ // This member is required.
+ IndexName *string
+
+ // The complete key schema for the local secondary index, consisting of one or
+ // more pairs of attribute names and key types:
+ //
+ // - HASH - partition key
+ //
+ // - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // This member is required.
+ KeySchema []KeySchemaElement
+
+ // Represents attributes that are copied (projected) from the table into the local
+ // secondary index. These are in addition to the primary key attributes and index
+ // key attributes, which are automatically projected.
+ //
+ // This member is required.
+ Projection *Projection
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a local secondary index.
+type LocalSecondaryIndexDescription struct {
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the index.
+ IndexArn *string
+
+ // Represents the name of the local secondary index.
+ IndexName *string
+
+ // The total size of the specified index, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ IndexSizeBytes *int64
+
+ // The number of items in the specified index. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ ItemCount *int64
+
+ // The complete key schema for the local secondary index, consisting of one or
+ // more pairs of attribute names and key types:
+ //
+ // - HASH - partition key
+ //
+ // - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []KeySchemaElement
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes and
+ // index key attributes, which are automatically projected.
+ Projection *Projection
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a local secondary index for the table when the
+// backup was created.
+type LocalSecondaryIndexInfo struct {
+
+ // Represents the name of the local secondary index.
+ IndexName *string
+
+ // The complete key schema for a local secondary index, which consists of one or
+ // more pairs of attribute names and key types:
+ //
+ // - HASH - partition key
+ //
+ // - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ KeySchema []KeySchemaElement
+
+ // Represents attributes that are copied (projected) from the table into the
+ // global secondary index. These are in addition to the primary key attributes and
+ // index key attributes, which are automatically projected.
+ Projection *Projection
+
+ noSmithyDocumentSerde
+}
+
+// Sets the maximum number of read and write units for the specified on-demand
+// table. If you use this parameter, you must specify MaxReadRequestUnits ,
+// MaxWriteRequestUnits , or both.
+type OnDemandThroughput struct {
+
+ // Maximum number of read request units for the specified table.
+ //
+ // To specify a maximum OnDemandThroughput on your table, set the value of
+ // MaxReadRequestUnits as greater than or equal to 1. To remove the maximum
+ // OnDemandThroughput that is currently set on your table, set the value of
+ // MaxReadRequestUnits to -1.
+ MaxReadRequestUnits *int64
+
+ // Maximum number of write request units for the specified table.
+ //
+ // To specify a maximum OnDemandThroughput on your table, set the value of
+ // MaxWriteRequestUnits as greater than or equal to 1. To remove the maximum
+ // OnDemandThroughput that is currently set on your table, set the value of
+ // MaxWriteRequestUnits to -1.
+ MaxWriteRequestUnits *int64
+
+ noSmithyDocumentSerde
+}
+
+// Overrides the on-demand throughput settings for this replica table. If you
+// don't specify a value for this parameter, it uses the source table's on-demand
+// throughput settings.
+type OnDemandThroughputOverride struct {
+
+ // Maximum number of read request units for the specified replica table.
+ MaxReadRequestUnits *int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents a PartiQL statement that uses parameters.
+type ParameterizedStatement struct {
+
+ // A PartiQL statement that uses parameters.
+ //
+ // This member is required.
+ Statement *string
+
+ // The parameter values.
+ Parameters []AttributeValue
+
+ // An optional parameter that returns the item attributes for a PartiQL
+ // ParameterizedStatement operation that failed a condition check.
+ //
+ // There is no additional cost associated with requesting a return value aside
+ // from the small network and processing overhead of receiving a larger response.
+ // No read capacity units are consumed.
+ ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+// The description of the point in time settings applied to the table.
+type PointInTimeRecoveryDescription struct {
+
+ // Specifies the earliest point in time you can restore your table to. You can
+ // restore your table to any point in time during the last 35 days.
+ EarliestRestorableDateTime *time.Time
+
+ // LatestRestorableDateTime is typically 5 minutes before the current time.
+ LatestRestorableDateTime *time.Time
+
+ // The current state of point in time recovery:
+ //
+ // - ENABLED - Point in time recovery is enabled.
+ //
+ // - DISABLED - Point in time recovery is disabled.
+ PointInTimeRecoveryStatus PointInTimeRecoveryStatus
+
+ // The number of preceding days for which continuous backups are taken and
+ // maintained. Your table data is only recoverable to any point-in-time from within
+ // the configured recovery period. This parameter is optional.
+ RecoveryPeriodInDays *int32
+
+ noSmithyDocumentSerde
+}
+
+// Represents the settings used to enable point in time recovery.
+type PointInTimeRecoverySpecification struct {
+
+ // Indicates whether point in time recovery is enabled (true) or disabled (false)
+ // on the table.
+ //
+ // This member is required.
+ PointInTimeRecoveryEnabled *bool
+
+ // The number of preceding days for which continuous backups are taken and
+ // maintained. Your table data is only recoverable to any point-in-time from within
+ // the configured recovery period. This parameter is optional. If no value is
+ // provided, the value will default to 35.
+ RecoveryPeriodInDays *int32
+
+ noSmithyDocumentSerde
+}
+
+// Represents attributes that are copied (projected) from the table into an index.
+// These are in addition to the primary key attributes and index key attributes,
+// which are automatically projected.
+type Projection struct {
+
+ // Represents the non-key attribute names which will be projected into the index.
+ //
+ // For global and local secondary indexes, the total count of NonKeyAttributes
+ // summed across all of the secondary indexes, must not exceed 100. If you project
+ // the same attribute into two different indexes, this counts as two distinct
+ // attributes when determining the total. This limit only applies when you specify
+ // the ProjectionType of INCLUDE . You still can specify the ProjectionType of ALL
+ // to project all attributes from the source table, even if the table has more than
+ // 100 attributes.
+ NonKeyAttributes []string
+
+ // The set of attributes that are projected into the index:
+ //
+ // - KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // - INCLUDE - In addition to the attributes described in KEYS_ONLY , the
+ // secondary index will include other non-key attributes that you specify.
+ //
+ // - ALL - All of the table attributes are projected into the index.
+ //
+ // When using the DynamoDB console, ALL is selected by default.
+ ProjectionType ProjectionType
+
+ noSmithyDocumentSerde
+}
+
+// Represents the provisioned throughput settings for the specified global
+// secondary index. You must use ProvisionedThroughput or OnDemandThroughput based
+// on your table’s capacity mode.
+//
+// For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+// Amazon DynamoDB Developer Guide.
+//
+// [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+type ProvisionedThroughput struct {
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
+ //
+ // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html
+ //
+ // This member is required.
+ ReadCapacityUnits *int64
+
+ // The maximum number of writes consumed per second before DynamoDB returns a
+ // ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
+ //
+ // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html
+ //
+ // This member is required.
+ WriteCapacityUnits *int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the provisioned throughput settings for the table, consisting of
+// read and write capacity units, along with data about increases and decreases.
+type ProvisionedThroughputDescription struct {
+
+ // The date and time of the last provisioned throughput decrease for this table.
+ LastDecreaseDateTime *time.Time
+
+ // The date and time of the last provisioned throughput increase for this table.
+ LastIncreaseDateTime *time.Time
+
+ // The number of provisioned throughput decreases for this table during this UTC
+ // calendar day. For current maximums on provisioned throughput decreases, see [Service, Account, and Table Quotas]in
+ // the Amazon DynamoDB Developer Guide.
+ //
+ // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+ NumberOfDecreasesToday *int64
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException . Eventually consistent reads require
+ // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits
+ // per second provides 100 eventually consistent ReadCapacityUnits per second.
+ ReadCapacityUnits *int64
+
+ // The maximum number of writes consumed per second before DynamoDB returns a
+ // ThrottlingException .
+ WriteCapacityUnits *int64
+
+ noSmithyDocumentSerde
+}
+
+// Replica-specific provisioned throughput settings. If not specified, uses the
+// source table's provisioned throughput settings.
+type ProvisionedThroughputOverride struct {
+
+ // Replica-specific read capacity units. If not specified, uses the source table's
+ // read capacity settings.
+ ReadCapacityUnits *int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents a request to perform a PutItem operation.
+type Put struct {
+
+ // A map of attribute name to attribute values, representing the primary key of
+ // the item to be written by PutItem . All of the table's primary key attributes
+ // must be specified, and their data types must match those of the table's key
+ // schema. If any attributes are present in the item that are part of an index key
+ // schema for the table, their types must match the index key schema.
+ //
+ // This member is required.
+ Item map[string]AttributeValue
+
+ // Name of the table in which to write the item. You can also provide the Amazon
+ // Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ ConditionExpression *string
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]AttributeValue
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Put
+ // condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are:
+ // NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+// Represents a request to perform a PutItem operation on an item.
+type PutRequest struct {
+
+ // A map of attribute name to attribute values, representing the primary key of an
+ // item to be processed by PutItem . All of the table's primary key attributes must
+ // be specified, and their data types must match those of the table's key schema.
+ // If any attributes are present in the item that are part of an index key schema
+ // for the table, their types must match the index key schema.
+ //
+ // This member is required.
+ Item map[string]AttributeValue
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a replica.
+type Replica struct {
+
+ // The Region where the replica needs to be created.
+ RegionName *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling settings of the replica.
+type ReplicaAutoScalingDescription struct {
+
+ // Replica-specific global secondary index auto scaling settings.
+ GlobalSecondaryIndexes []ReplicaGlobalSecondaryIndexAutoScalingDescription
+
+ // The Region where the replica exists.
+ RegionName *string
+
+ // Represents the auto scaling settings for a global table or global secondary
+ // index.
+ ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription
+
+ // Represents the auto scaling settings for a global table or global secondary
+ // index.
+ ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription
+
+ // The current state of the replica:
+ //
+ // - CREATING - The replica is being created.
+ //
+ // - UPDATING - The replica is being updated.
+ //
+ // - DELETING - The replica is being deleted.
+ //
+ // - ACTIVE - The replica is ready for use.
+ ReplicaStatus ReplicaStatus
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling settings of a replica that will be modified.
+type ReplicaAutoScalingUpdate struct {
+
+ // The Region where the replica exists.
+ //
+ // This member is required.
+ RegionName *string
+
+ // Represents the auto scaling settings of global secondary indexes that will be
+ // modified.
+ ReplicaGlobalSecondaryIndexUpdates []ReplicaGlobalSecondaryIndexAutoScalingUpdate
+
+ // Represents the auto scaling settings to be modified for a global table or
+ // global secondary index.
+ ReplicaProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate
+
+ noSmithyDocumentSerde
+}
+
+// Contains the details of the replica.
+type ReplicaDescription struct {
+
+ // Replica-specific global secondary index settings.
+ GlobalSecondaryIndexes []ReplicaGlobalSecondaryIndexDescription
+
+ // The KMS key of the replica that will be used for KMS encryption.
+ KMSMasterKeyId *string
+
+ // Overrides the maximum on-demand throughput settings for the specified replica
+ // table.
+ OnDemandThroughputOverride *OnDemandThroughputOverride
+
+ // Replica-specific provisioned throughput. If not described, uses the source
+ // table's provisioned throughput settings.
+ ProvisionedThroughputOverride *ProvisionedThroughputOverride
+
+ // The name of the Region.
+ RegionName *string
+
+ // The time at which the replica was first detected as inaccessible. To determine
+ // cause of inaccessibility check the ReplicaStatus property.
+ ReplicaInaccessibleDateTime *time.Time
+
+ // The current state of the replica:
+ //
+ // - CREATING - The replica is being created.
+ //
+ // - UPDATING - The replica is being updated.
+ //
+ // - DELETING - The replica is being deleted.
+ //
+ // - ACTIVE - The replica is ready for use.
+ //
+ // - REGION_DISABLED - The replica is inaccessible because the Amazon Web
+ // Services Region has been disabled.
+ //
+ // If the Amazon Web Services Region remains inaccessible for more than 20 hours,
+ // DynamoDB will remove this replica from the replication group. The replica will
+ // not be deleted and replication will stop from and to this region.
+ //
+ // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table
+ // is inaccessible.
+ //
+ // If the KMS key remains inaccessible for more than 20 hours, DynamoDB will
+ // remove this replica from the replication group. The replica will not be deleted
+ // and replication will stop from and to this region.
+ ReplicaStatus ReplicaStatus
+
+ // Detailed information about the replica status.
+ ReplicaStatusDescription *string
+
+ // Specifies the progress of a Create, Update, or Delete action on the replica as
+ // a percentage.
+ ReplicaStatusPercentProgress *string
+
+ // Contains details of the table class.
+ ReplicaTableClassSummary *TableClassSummary
+
+ // Represents the warm throughput value for this replica.
+ WarmThroughput *TableWarmThroughputDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a replica global secondary index.
+type ReplicaGlobalSecondaryIndex struct {
+
+ // The name of the global secondary index.
+ //
+ // This member is required.
+ IndexName *string
+
+ // Overrides the maximum on-demand throughput settings for the specified global
+ // secondary index in the specified replica table.
+ OnDemandThroughputOverride *OnDemandThroughputOverride
+
+ // Replica table GSI-specific provisioned throughput. If not specified, uses the
+ // source table GSI's read capacity settings.
+ ProvisionedThroughputOverride *ProvisionedThroughputOverride
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling configuration for a replica global secondary index.
+type ReplicaGlobalSecondaryIndexAutoScalingDescription struct {
+
+ // The name of the global secondary index.
+ IndexName *string
+
+ // The current state of the replica global secondary index:
+ //
+ // - CREATING - The index is being created.
+ //
+ // - UPDATING - The table/index configuration is being updated. The table/index
+ // remains available for data operations when UPDATING
+ //
+ // - DELETING - The index is being deleted.
+ //
+ // - ACTIVE - The index is ready for use.
+ IndexStatus IndexStatus
+
+ // Represents the auto scaling settings for a global table or global secondary
+ // index.
+ ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription
+
+ // Represents the auto scaling settings for a global table or global secondary
+ // index.
+ ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling settings of a global secondary index for a replica
+// that will be modified.
+type ReplicaGlobalSecondaryIndexAutoScalingUpdate struct {
+
+ // The name of the global secondary index.
+ IndexName *string
+
+ // Represents the auto scaling settings to be modified for a global table or
+ // global secondary index.
+ ProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a replica global secondary index.
+type ReplicaGlobalSecondaryIndexDescription struct {
+
+ // The name of the global secondary index.
+ IndexName *string
+
+ // Overrides the maximum on-demand throughput for the specified global secondary
+ // index in the specified replica table.
+ OnDemandThroughputOverride *OnDemandThroughputOverride
+
+ // If not described, uses the source table GSI's read capacity settings.
+ ProvisionedThroughputOverride *ProvisionedThroughputOverride
+
+ // Represents the warm throughput of the global secondary index for this replica.
+ WarmThroughput *GlobalSecondaryIndexWarmThroughputDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a global secondary index.
+type ReplicaGlobalSecondaryIndexSettingsDescription struct {
+
+ // The name of the global secondary index. The name must be unique among all other
+ // indexes on this table.
+ //
+ // This member is required.
+ IndexName *string
+
+ // The current status of the global secondary index:
+ //
+ // - CREATING - The global secondary index is being created.
+ //
+ // - UPDATING - The global secondary index is being updated.
+ //
+ // - DELETING - The global secondary index is being deleted.
+ //
+ // - ACTIVE - The global secondary index is ready for use.
+ IndexStatus IndexStatus
+
+ // Auto scaling settings for a global secondary index replica's read capacity
+ // units.
+ ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException .
+ ProvisionedReadCapacityUnits *int64
+
+ // Auto scaling settings for a global secondary index replica's write capacity
+ // units.
+ ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription
+
+ // The maximum number of writes consumed per second before DynamoDB returns a
+ // ThrottlingException .
+ ProvisionedWriteCapacityUnits *int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the settings of a global secondary index for a global table that
+// will be modified.
+type ReplicaGlobalSecondaryIndexSettingsUpdate struct {
+
+ // The name of the global secondary index. The name must be unique among all other
+ // indexes on this table.
+ //
+ // This member is required.
+ IndexName *string
+
+ // Auto scaling settings for managing a global secondary index replica's read
+ // capacity units.
+ ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException .
+ ProvisionedReadCapacityUnits *int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a replica.
+type ReplicaSettingsDescription struct {
+
+ // The Region name of the replica.
+ //
+ // This member is required.
+ RegionName *string
+
+ // The read/write capacity mode of the replica.
+ ReplicaBillingModeSummary *BillingModeSummary
+
+ // Replica global secondary index settings for the global table.
+ ReplicaGlobalSecondaryIndexSettings []ReplicaGlobalSecondaryIndexSettingsDescription
+
+ // Auto scaling settings for a global table replica's read capacity units.
+ ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput
+ ReplicaProvisionedReadCapacityUnits *int64
+
+ // Auto scaling settings for a global table replica's write capacity units.
+ ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription
+
+ // The maximum number of writes consumed per second before DynamoDB returns a
+ // ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the Amazon DynamoDB
+ // Developer Guide.
+ //
+ // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput
+ ReplicaProvisionedWriteCapacityUnits *int64
+
+ // The current state of the Region:
+ //
+ // - CREATING - The Region is being created.
+ //
+ // - UPDATING - The Region is being updated.
+ //
+ // - DELETING - The Region is being deleted.
+ //
+ // - ACTIVE - The Region is ready for use.
+ ReplicaStatus ReplicaStatus
+
+ // Contains details of the table class.
+ ReplicaTableClassSummary *TableClassSummary
+
+ noSmithyDocumentSerde
+}
+
+// Represents the settings for a global table in a Region that will be modified.
+type ReplicaSettingsUpdate struct {
+
+ // The Region of the replica to be added.
+ //
+ // This member is required.
+ RegionName *string
+
+ // Represents the settings of a global secondary index for a global table that
+ // will be modified.
+ ReplicaGlobalSecondaryIndexSettingsUpdate []ReplicaGlobalSecondaryIndexSettingsUpdate
+
+ // Auto scaling settings for managing a global table replica's read capacity units.
+ ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate
+
+ // The maximum number of strongly consistent reads consumed per second before
+ // DynamoDB returns a ThrottlingException . For more information, see [Specifying Read and Write Requirements] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Specifying Read and Write Requirements]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput
+ ReplicaProvisionedReadCapacityUnits *int64
+
+ // Replica-specific table class. If not specified, uses the source table's table
+ // class.
+ ReplicaTableClass TableClass
+
+ noSmithyDocumentSerde
+}
+
+// Represents one of the following:
+//
+// - A new replica to be added to an existing regional table or global table.
+// This request invokes the CreateTableReplica action in the destination Region.
+//
+// - New parameters for an existing replica. This request invokes the UpdateTable
+// action in the destination Region.
+//
+// - An existing replica to be deleted. The request invokes the
+// DeleteTableReplica action in the destination Region, deleting the replica and
+// all of its items in the destination Region.
+//
+// When you manually remove a table or global table replica, you do not
+// automatically remove any associated scalable targets, scaling policies, or
+// CloudWatch alarms.
+//
+// NOTE(review): presumably exactly one of Create, Delete, or Update should be
+// set per element — confirm against the DynamoDB UpdateTable API reference.
+type ReplicationGroupUpdate struct {
+
+ // The parameters required for creating a replica for the table.
+ Create *CreateReplicationGroupMemberAction
+
+ // The parameters required for deleting a replica for the table.
+ Delete *DeleteReplicationGroupMemberAction
+
+ // The parameters required for updating a replica for the table.
+ Update *UpdateReplicationGroupMemberAction
+
+ noSmithyDocumentSerde
+}
+
+// Represents one of the following:
+//
+// - A new replica to be added to an existing global table.
+//
+// - New parameters for an existing replica.
+//
+// - An existing replica to be removed from an existing global table.
+type ReplicaUpdate struct {
+
+ // The parameters required for creating a replica on an existing global table.
+ Create *CreateReplicaAction
+
+ // The name of the existing replica to be removed.
+ Delete *DeleteReplicaAction
+
+ noSmithyDocumentSerde
+}
+
+// Contains details for the restore.
+type RestoreSummary struct {
+
+ // Point in time or source backup time.
+ //
+ // This member is required.
+ RestoreDateTime *time.Time
+
+ // Indicates if a restore is in progress or not.
+ //
+ // This member is required.
+ RestoreInProgress *bool
+
+ // The Amazon Resource Name (ARN) of the backup from which the table was restored.
+ SourceBackupArn *string
+
+ // The ARN of the source table of the backup that is being restored.
+ SourceTableArn *string
+
+ noSmithyDocumentSerde
+}
+
+// The S3 bucket that is being imported from.
+type S3BucketSource struct {
+
+ // The S3 bucket that is being imported from.
+ //
+ // This member is required.
+ S3Bucket *string
+
+ // The account number of the S3 bucket that is being imported from. If the bucket
+ // is owned by the requester this is optional.
+ S3BucketOwner *string
+
+ // The key prefix shared by all S3 Objects that are being imported.
+ S3KeyPrefix *string
+
+ noSmithyDocumentSerde
+}
+
+// Contains the details of the table when the backup was created.
+type SourceTableDetails struct {
+
+ // Schema of the table.
+ //
+ // This member is required.
+ KeySchema []KeySchemaElement
+
+ // Read IOPs and Write IOPS on the table when the backup was created.
+ //
+ // This member is required.
+ ProvisionedThroughput *ProvisionedThroughput
+
+ // Time when the source table was created.
+ //
+ // This member is required.
+ TableCreationDateTime *time.Time
+
+ // Unique identifier for the table for which the backup was created.
+ //
+ // This member is required.
+ TableId *string
+
+ // The name of the table for which the backup was created.
+ //
+ // This member is required.
+ TableName *string
+
+ // Controls how you are charged for read and write throughput and how you manage
+ // capacity. This setting can be changed later.
+ //
+ // - PROVISIONED - Sets the read/write capacity mode to PROVISIONED . We
+ // recommend using PROVISIONED for predictable workloads.
+ //
+ // - PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST . We
+ // recommend using PAY_PER_REQUEST for unpredictable workloads.
+ BillingMode BillingMode
+
+ // Number of items in the table. Note that this is an approximate value.
+ ItemCount *int64
+
+ // Sets the maximum number of read and write units for the specified on-demand
+ // table. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both.
+ OnDemandThroughput *OnDemandThroughput
+
+ // ARN of the table for which backup was created.
+ TableArn *string
+
+ // Size of the table in bytes. Note that this is an approximate value.
+ TableSizeBytes *int64
+
+ noSmithyDocumentSerde
+}
+
+// Contains the details of the features enabled on the table when the backup was
+// created. For example, LSIs, GSIs, streams, TTL.
+type SourceTableFeatureDetails struct {
+
+ // Represents the GSI properties for the table when the backup was created. It
+ // includes the IndexName, KeySchema, Projection, and ProvisionedThroughput for the
+ // GSIs on the table at the time of backup.
+ GlobalSecondaryIndexes []GlobalSecondaryIndexInfo
+
+ // Represents the LSI properties for the table when the backup was created. It
+ // includes the IndexName, KeySchema and Projection for the LSIs on the table at
+ // the time of backup.
+ LocalSecondaryIndexes []LocalSecondaryIndexInfo
+
+ // The description of the server-side encryption status on the table when the
+ // backup was created.
+ SSEDescription *SSEDescription
+
+ // Stream settings on the table when the backup was created.
+ StreamDescription *StreamSpecification
+
+ // Time to Live settings on the table when the backup was created.
+ TimeToLiveDescription *TimeToLiveDescription
+
+ noSmithyDocumentSerde
+}
+
+// The description of the server-side encryption status on the specified table.
+type SSEDescription struct {
+
+ // Indicates the time, in UNIX epoch date format, when DynamoDB detected that the
+ // table's KMS key was inaccessible. This attribute will automatically be cleared
+ // when DynamoDB detects that the table's KMS key is accessible again. DynamoDB
+ // will initiate the table archival process when table's KMS key remains
+ // inaccessible for more than seven days from this date.
+ InaccessibleEncryptionDateTime *time.Time
+
+ // The KMS key ARN used for the KMS encryption.
+ KMSMasterKeyArn *string
+
+ // Server-side encryption type. The only supported value is:
+ //
+ // - KMS - Server-side encryption that uses Key Management Service. The key is
+ // stored in your account and is managed by KMS (KMS charges apply).
+ SSEType SSEType
+
+ // Represents the current state of server-side encryption. The only supported
+ // values are:
+ //
+ // - ENABLED - Server-side encryption is enabled.
+ //
+ // - UPDATING - Server-side encryption is being updated.
+ Status SSEStatus
+
+ noSmithyDocumentSerde
+}
+
+// Represents the settings used to enable server-side encryption.
+type SSESpecification struct {
+
+ // Indicates whether server-side encryption is done using an Amazon Web Services
+ // managed key or an Amazon Web Services owned key. If enabled (true), server-side
+ // encryption type is set to KMS and an Amazon Web Services managed key is used
+ // (KMS charges apply). If disabled (false) or not specified, server-side
+ // encryption is set to Amazon Web Services owned key.
+ Enabled *bool
+
+ // The KMS key that should be used for the KMS encryption. To specify a key, use
+ // its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note that you
+ // should only provide this parameter if the key is different from the default
+ // DynamoDB key alias/aws/dynamodb .
+ KMSMasterKeyId *string
+
+ // Server-side encryption type. The only supported value is:
+ //
+ // - KMS - Server-side encryption that uses Key Management Service. The key is
+ // stored in your account and is managed by KMS (KMS charges apply).
+ SSEType SSEType
+
+ noSmithyDocumentSerde
+}
+
+// Represents the DynamoDB Streams configuration for a table in DynamoDB.
+type StreamSpecification struct {
+
+ // Indicates whether DynamoDB Streams is enabled (true) or disabled (false) on the
+ // table.
+ //
+ // This member is required.
+ StreamEnabled *bool
+
+ // When an item in the table is modified, StreamViewType determines what
+ // information is written to the stream for this table. Valid values for
+ // StreamViewType are:
+ //
+ // - KEYS_ONLY - Only the key attributes of the modified item are written to the
+ // stream.
+ //
+ // - NEW_IMAGE - The entire item, as it appears after it was modified, is written
+ // to the stream.
+ //
+ // - OLD_IMAGE - The entire item, as it appeared before it was modified, is
+ // written to the stream.
+ //
+ // - NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are
+ // written to the stream.
+ StreamViewType StreamViewType
+
+ noSmithyDocumentSerde
+}
+
+// Represents the auto scaling configuration for a global table.
+type TableAutoScalingDescription struct {
+
+ // Represents replicas of the global table.
+ Replicas []ReplicaAutoScalingDescription
+
+ // The name of the table.
+ TableName *string
+
+ // The current state of the table:
+ //
+ // - CREATING - The table is being created.
+ //
+ // - UPDATING - The table is being updated.
+ //
+ // - DELETING - The table is being deleted.
+ //
+ // - ACTIVE - The table is ready for use.
+ TableStatus TableStatus
+
+ noSmithyDocumentSerde
+}
+
+// Contains details of the table class.
+type TableClassSummary struct {
+
+ // The date and time at which the table class was last updated.
+ LastUpdateDateTime *time.Time
+
+ // The table class of the specified table. Valid values are STANDARD and
+ // STANDARD_INFREQUENT_ACCESS .
+ TableClass TableClass
+
+ noSmithyDocumentSerde
+}
+
+// The parameters for the table created as part of the import operation.
+type TableCreationParameters struct {
+
+ // The attributes of the table created as part of the import operation.
+ //
+ // This member is required.
+ AttributeDefinitions []AttributeDefinition
+
+ // The primary key and optional sort key of the table created as part of the import
+ // operation.
+ //
+ // This member is required.
+ KeySchema []KeySchemaElement
+
+ // The name of the table created as part of the import operation.
+ //
+ // This member is required.
+ TableName *string
+
+ // The billing mode for provisioning the table created as part of the import
+ // operation.
+ BillingMode BillingMode
+
+ // The Global Secondary Indexes (GSI) of the table to be created as part of the
+ // import operation.
+ GlobalSecondaryIndexes []GlobalSecondaryIndex
+
+ // Sets the maximum number of read and write units for the specified on-demand
+ // table. If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both.
+ OnDemandThroughput *OnDemandThroughput
+
+ // Represents the provisioned throughput settings for the specified global
+ // secondary index. You must use ProvisionedThroughput or OnDemandThroughput based
+ // on your table’s capacity mode.
+ //
+ // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+ ProvisionedThroughput *ProvisionedThroughput
+
+ // Represents the settings used to enable server-side encryption.
+ SSESpecification *SSESpecification
+
+ noSmithyDocumentSerde
+}
+
+// Represents the properties of a table.
+type TableDescription struct {
+
+ // Contains information about the table archive.
+ ArchivalSummary *ArchivalSummary
+
+ // An array of AttributeDefinition objects. Each of these objects describes one
+ // attribute in the table and index key schema.
+ //
+ // Each AttributeDefinition object in this array is composed of:
+ //
+ // - AttributeName - The name of the attribute.
+ //
+ // - AttributeType - The data type for the attribute.
+ AttributeDefinitions []AttributeDefinition
+
+ // Contains the details for the read/write capacity mode.
+ BillingModeSummary *BillingModeSummary
+
+ // The date and time when the table was created, in [UNIX epoch time] format.
+ //
+ // [UNIX epoch time]: http://www.epochconverter.com/
+ CreationDateTime *time.Time
+
+ // Indicates whether deletion protection is enabled (true) or disabled (false) on
+ // the table.
+ DeletionProtectionEnabled *bool
+
+ // The global secondary indexes, if any, on the table. Each index is scoped to a
+ // given partition key value. Each element is composed of:
+ //
+ // - Backfilling - If true, then the index is currently in the backfilling phase.
+ // Backfilling occurs only when a new global secondary index is added to the table.
+ // It is the process by which DynamoDB populates the new index with data from the
+ // table. (This attribute does not appear for indexes that were created during a
+ // CreateTable operation.)
+ //
+ // You can delete an index that is being created during the Backfilling phase when
+ // IndexStatus is set to CREATING and Backfilling is true. You can't delete the
+ // index that is being created when IndexStatus is set to CREATING and
+ // Backfilling is false. (This attribute does not appear for indexes that were
+ // created during a CreateTable operation.)
+ //
+ // - IndexName - The name of the global secondary index.
+ //
+ // - IndexSizeBytes - The total size of the global secondary index, in bytes.
+ // DynamoDB updates this value approximately every six hours. Recent changes might
+ // not be reflected in this value.
+ //
+ // - IndexStatus - The current status of the global secondary index:
+ //
+ // - CREATING - The index is being created.
+ //
+ // - UPDATING - The index is being updated.
+ //
+ // - DELETING - The index is being deleted.
+ //
+ // - ACTIVE - The index is ready for use.
+ //
+ // - ItemCount - The number of items in the global secondary index. DynamoDB
+ // updates this value approximately every six hours. Recent changes might not be
+ // reflected in this value.
+ //
+ // - KeySchema - Specifies the complete index key schema. The attribute names in
+ // the key schema must be between 1 and 255 characters (inclusive). The key schema
+ // must begin with the same partition key as the table.
+ //
+ // - Projection - Specifies attributes that are copied (projected) from the table
+ // into the index. These are in addition to the primary key attributes and index
+ // key attributes, which are automatically projected. Each attribute specification
+ // is composed of:
+ //
+ // - ProjectionType - One of the following:
+ //
+ // - KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // - INCLUDE - In addition to the attributes described in KEYS_ONLY , the
+ // secondary index will include other non-key attributes that you specify.
+ //
+ // - ALL - All of the table attributes are projected into the index.
+ //
+ // - NonKeyAttributes - A list of one or more non-key attribute names that are
+ // projected into the secondary index. The total count of attributes provided in
+ // NonKeyAttributes , summed across all of the secondary indexes, must not exceed
+ // 100. If you project the same attribute into two different indexes, this counts
+ // as two distinct attributes when determining the total. This limit only applies
+ // when you specify the ProjectionType of INCLUDE . You still can specify the
+ // ProjectionType of ALL to project all attributes from the source table, even if
+ // the table has more than 100 attributes.
+ //
+ // - ProvisionedThroughput - The provisioned throughput settings for the global
+ // secondary index, consisting of read and write capacity units, along with data
+ // about increases and decreases.
+ //
+ // If the table is in the DELETING state, no information about indexes will be
+ // returned.
+ GlobalSecondaryIndexes []GlobalSecondaryIndexDescription
+
+ // Represents the version of [global tables] in use, if the table is replicated across Amazon Web
+ // Services Regions.
+ //
+ // [global tables]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html
+ GlobalTableVersion *string
+
+ // The witness Region and its current status in the MRSC global table. Only one
+ // witness Region can be configured per MRSC global table.
+ GlobalTableWitnesses []GlobalTableWitnessDescription
+
+ // The number of items in the specified table. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ ItemCount *int64
+
+ // The primary key structure for the table. Each KeySchemaElement consists of:
+ //
+ // - AttributeName - The name of the attribute.
+ //
+ // - KeyType - The role of the attribute:
+ //
+ // - HASH - partition key
+ //
+ // - RANGE - sort key
+ //
+ // The partition key of an item is also known as its hash attribute. The term
+ // "hash attribute" derives from DynamoDB's usage of an internal hash function to
+ // evenly distribute data items across partitions, based on their partition key
+ // values.
+ //
+ // The sort key of an item is also known as its range attribute. The term "range
+ // attribute" derives from the way DynamoDB stores items with the same partition
+ // key physically close together, in sorted order by the sort key value.
+ //
+ // For more information about primary keys, see [Primary Key] in the Amazon DynamoDB Developer
+ // Guide.
+ //
+ // [Primary Key]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey
+ KeySchema []KeySchemaElement
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the latest stream for
+ // this table.
+ LatestStreamArn *string
+
+ // A timestamp, in ISO 8601 format, for this stream.
+ //
+ // Note that LatestStreamLabel is not a unique identifier for the stream, because
+ // it is possible that a stream from another table might have the same timestamp.
+ // However, the combination of the following three elements is guaranteed to be
+ // unique:
+ //
+ // - Amazon Web Services customer ID
+ //
+ // - Table name
+ //
+ // - StreamLabel
+ LatestStreamLabel *string
+
+ // Represents one or more local secondary indexes on the table. Each index is
+ // scoped to a given partition key value. Tables with one or more local secondary
+ // indexes are subject to an item collection size limit, where the amount of data
+ // within a given item collection cannot exceed 10 GB. Each element is composed of:
+ //
+ // - IndexName - The name of the local secondary index.
+ //
+ // - KeySchema - Specifies the complete index key schema. The attribute names in
+ // the key schema must be between 1 and 255 characters (inclusive). The key schema
+ // must begin with the same partition key as the table.
+ //
+ // - Projection - Specifies attributes that are copied (projected) from the table
+ // into the index. These are in addition to the primary key attributes and index
+ // key attributes, which are automatically projected. Each attribute specification
+ // is composed of:
+ //
+ // - ProjectionType - One of the following:
+ //
+ // - KEYS_ONLY - Only the index and primary keys are projected into the index.
+ //
+ // - INCLUDE - Only the specified table attributes are projected into the index.
+ // The list of projected attributes is in NonKeyAttributes .
+ //
+ // - ALL - All of the table attributes are projected into the index.
+ //
+ // - NonKeyAttributes - A list of one or more non-key attribute names that are
+ // projected into the secondary index. The total count of attributes provided in
+ // NonKeyAttributes , summed across all of the secondary indexes, must not exceed
+ // 100. If you project the same attribute into two different indexes, this counts
+ // as two distinct attributes when determining the total. This limit only applies
+ // when you specify the ProjectionType of INCLUDE . You still can specify the
+ // ProjectionType of ALL to project all attributes from the source table, even if
+ // the table has more than 100 attributes.
+ //
+ // - IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB
+ // updates this value approximately every six hours. Recent changes might not be
+ // reflected in this value.
+ //
+ // - ItemCount - Represents the number of items in the index. DynamoDB updates
+ // this value approximately every six hours. Recent changes might not be reflected
+ // in this value.
+ //
+ // If the table is in the DELETING state, no information about indexes will be
+ // returned.
+ LocalSecondaryIndexes []LocalSecondaryIndexDescription
+
+ // Indicates one of the following consistency modes for a global table:
+ //
+ // - EVENTUAL : Indicates that the global table is configured for multi-Region
+ // eventual consistency (MREC).
+ //
+ // - STRONG : Indicates that the global table is configured for multi-Region
+ // strong consistency (MRSC).
+ //
+ // If you don't specify this field, the global table consistency mode defaults to
+ // EVENTUAL . For more information about global tables consistency modes, see [Consistency modes] in
+ // DynamoDB developer guide.
+ //
+ // [Consistency modes]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_HowItWorks.html#V2globaltables_HowItWorks.consistency-modes
+ MultiRegionConsistency MultiRegionConsistency
+
+ // The maximum number of read and write units for the specified on-demand table.
+ // If you use this parameter, you must specify MaxReadRequestUnits ,
+ // MaxWriteRequestUnits , or both.
+ OnDemandThroughput *OnDemandThroughput
+
+ // The provisioned throughput settings for the table, consisting of read and write
+ // capacity units, along with data about increases and decreases.
+ ProvisionedThroughput *ProvisionedThroughputDescription
+
+ // Represents replicas of the table.
+ Replicas []ReplicaDescription
+
+ // Contains details for the restore.
+ RestoreSummary *RestoreSummary
+
+ // The description of the server-side encryption status on the specified table.
+ SSEDescription *SSEDescription
+
+ // The current DynamoDB Streams configuration for the table.
+ StreamSpecification *StreamSpecification
+
+ // The Amazon Resource Name (ARN) that uniquely identifies the table.
+ TableArn *string
+
+ // Contains details of the table class.
+ TableClassSummary *TableClassSummary
+
+ // Unique identifier for the table for which the backup was created.
+ TableId *string
+
+ // The name of the table.
+ TableName *string
+
+ // The total size of the specified table, in bytes. DynamoDB updates this value
+ // approximately every six hours. Recent changes might not be reflected in this
+ // value.
+ TableSizeBytes *int64
+
+ // The current state of the table:
+ //
+ // - CREATING - The table is being created.
+ //
+ // - UPDATING - The table/index configuration is being updated. The table/index
+ // remains available for data operations when UPDATING .
+ //
+ // - DELETING - The table is being deleted.
+ //
+ // - ACTIVE - The table is ready for use.
+ //
+ // - INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the table
+ // is inaccessible. Table operations may fail due to failure to use the KMS key.
+ // DynamoDB will initiate the table archival process when a table's KMS key remains
+ // inaccessible for more than seven days.
+ //
+ // - ARCHIVING - The table is being archived. Operations are not allowed until
+ // archival is complete.
+ //
+ // - ARCHIVED - The table has been archived. See the ArchivalReason for more
+ // information.
+ TableStatus TableStatus
+
+ // Describes the warm throughput value of the base table.
+ WarmThroughput *TableWarmThroughputDescription
+
+ noSmithyDocumentSerde
+}
+
+// Represents the warm throughput value (in read units per second and write units
+// per second) of the table. Warm throughput is applicable for DynamoDB Standard-IA
+// tables and specifies the minimum provisioned capacity maintained for immediate
+// data access.
+type TableWarmThroughputDescription struct {
+
+ // Represents the base table's warm throughput value in read units per second.
+ ReadUnitsPerSecond *int64
+
+ // Represents warm throughput value of the base table.
+ Status TableStatus
+
+ // Represents the base table's warm throughput value in write units per second.
+ WriteUnitsPerSecond *int64
+
+ noSmithyDocumentSerde
+}
+
+// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to a
+// single DynamoDB table.
+//
+// Amazon Web Services-assigned tag names and values are automatically assigned
+// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned tag
+// names do not count towards the tag limit of 50. User-assigned tag names have the
+// prefix user: in the Cost Allocation Report. You cannot backdate the application
+// of a tag.
+//
+// For an overview on tagging DynamoDB resources, see [Tagging for DynamoDB] in the Amazon DynamoDB
+// Developer Guide.
+//
+// [Tagging for DynamoDB]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html
+type Tag struct {
+
+ // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can only
+ // have up to one tag with the same key. If you try to add an existing tag (same
+ // key), the existing tag value will be updated to the new value.
+ //
+ // This member is required.
+ Key *string
+
+ // The value of the tag. Tag values are case-sensitive and can be null.
+ //
+ // This member is required.
+ Value *string
+
+ noSmithyDocumentSerde
+}
+
+// Represents the specific reason why a DynamoDB request was throttled and the ARN
+// of the impacted resource. This helps identify exactly what resource is being
+// throttled, what type of operation caused it, and why the throttling occurred.
+type ThrottlingReason struct {
+
+ // The reason for throttling. The throttling reason follows a specific format:
+ // ResourceType+OperationType+LimitType :
+ //
+ // - Resource Type (What is being throttled): Table or Index
+ //
+ // - Operation Type (What kind of operation): Read or Write
+ //
+ // - Limit Type (Why the throttling occurred):
+ //
+ // - ProvisionedThroughputExceeded : The request rate is exceeding the [provisioned throughput capacity] (read or
+ // write capacity units) configured for a table or a global secondary index (GSI)
+ // in provisioned capacity mode.
+ //
+ // - AccountLimitExceeded : The request rate has caused a table or global
+ // secondary index (GSI) in on-demand mode to exceed the [per-table account-level service quotas]for read/write
+ // throughput in the current Amazon Web Services Region.
+ //
+ // - KeyRangeThroughputExceeded : The request rate directed at a specific
+ // partition key value has exceeded the [internal partition-level throughput limits], indicating uneven access patterns
+ // across the table's or GSI's key space.
+ //
+ // - MaxOnDemandThroughputExceeded : The request rate has exceeded the [configured maximum throughput limits]set for a
+ // table or index in on-demand capacity mode.
+ //
+ // Examples of complete throttling reasons:
+ //
+ // - TableReadProvisionedThroughputExceeded
+ //
+ // - IndexWriteAccountLimitExceeded
+ //
+ // This helps identify exactly what resource is being throttled, what type of
+ // operation caused it, and why the throttling occurred.
+ //
+ // [provisioned throughput capacity]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html
+ // [per-table account-level service quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ServiceQuotas.html#default-limits-throughput
+ // [configured maximum throughput limits]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode-max-throughput.html
+ // [internal partition-level throughput limits]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html
+ Reason *string
+
+ // The Amazon Resource Name (ARN) of the DynamoDB table or index that experienced
+ // the throttling event.
+ Resource *string
+
+ noSmithyDocumentSerde
+}
+
+// The description of the Time to Live (TTL) status on the specified table.
+type TimeToLiveDescription struct {
+
+ // The name of the TTL attribute for items in the table.
+ AttributeName *string
+
+ // The TTL status for the table.
+ TimeToLiveStatus TimeToLiveStatus
+
+ noSmithyDocumentSerde
+}
+
+// Represents the settings used to enable or disable Time to Live (TTL) for the
+// specified table.
+type TimeToLiveSpecification struct {
+
+ // The name of the TTL attribute used to store the expiration time for items in
+ // the table.
+ //
+ // This member is required.
+ AttributeName *string
+
+ // Indicates whether TTL is to be enabled (true) or disabled (false) on the table.
+ //
+ // This member is required.
+ Enabled *bool
+
+ noSmithyDocumentSerde
+}
+
+// Specifies an item to be retrieved as part of the transaction.
+type TransactGetItem struct {
+
+ // Contains the primary key that identifies the item to get, together with the
+ // name of the table that contains the item, and optionally the specific attributes
+ // of the item to retrieve.
+ //
+ // This member is required.
+ Get *Get
+
+ noSmithyDocumentSerde
+}
+
+// A list of requests that can perform update, put, delete, or check operations on
+// multiple items in one or more tables atomically.
+type TransactWriteItem struct {
+
+ // A request to perform a check item operation.
+ ConditionCheck *ConditionCheck
+
+ // A request to perform a DeleteItem operation.
+ Delete *Delete
+
+ // A request to perform a PutItem operation.
+ Put *Put
+
+ // A request to perform an UpdateItem operation.
+ Update *Update
+
+ noSmithyDocumentSerde
+}
+
+// Represents a request to perform an UpdateItem operation.
+type Update struct {
+
+ // The primary key of the item to be updated. Each element consists of an
+ // attribute name and a value for that attribute.
+ //
+ // This member is required.
+ Key map[string]AttributeValue
+
+ // Name of the table for the UpdateItem request. You can also provide the Amazon
+ // Resource Name (ARN) of the table in this parameter.
+ //
+ // This member is required.
+ TableName *string
+
+ // An expression that defines one or more attributes to be updated, the action to
+ // be performed on them, and new value(s) for them.
+ //
+ // This member is required.
+ UpdateExpression *string
+
+ // A condition that must be satisfied in order for a conditional update to succeed.
+ ConditionExpression *string
+
+ // One or more substitution tokens for attribute names in an expression.
+ ExpressionAttributeNames map[string]string
+
+ // One or more values that can be substituted in an expression.
+ ExpressionAttributeValues map[string]AttributeValue
+
+ // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the Update
+ // condition fails. For ReturnValuesOnConditionCheckFailure , the valid values are:
+ // NONE and ALL_OLD.
+ ReturnValuesOnConditionCheckFailure ReturnValuesOnConditionCheckFailure
+
+ noSmithyDocumentSerde
+}
+
+// Represents the new provisioned throughput settings to be applied to a global
+// secondary index.
+type UpdateGlobalSecondaryIndexAction struct {
+
+ // The name of the global secondary index to be updated.
+ //
+ // This member is required.
+ IndexName *string
+
+ // Updates the maximum number of read and write units for the specified global
+ // secondary index. If you use this parameter, you must specify MaxReadRequestUnits
+ // , MaxWriteRequestUnits , or both.
+ OnDemandThroughput *OnDemandThroughput
+
+ // Represents the provisioned throughput settings for the specified global
+ // secondary index.
+ //
+ // For current minimum and maximum provisioned throughput values, see [Service, Account, and Table Quotas] in the
+ // Amazon DynamoDB Developer Guide.
+ //
+ // [Service, Account, and Table Quotas]: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html
+ ProvisionedThroughput *ProvisionedThroughput
+
+ // Represents the warm throughput value of the new provisioned throughput settings
+ // to be applied to a global secondary index.
+ WarmThroughput *WarmThroughput
+
+ noSmithyDocumentSerde
+}
+
+// Enables updating the configuration for Kinesis Streaming.
+type UpdateKinesisStreamingConfiguration struct {
+
+ // Enables updating the precision of Kinesis data stream timestamp.
+ ApproximateCreationDateTimePrecision ApproximateCreationDateTimePrecision
+
+ noSmithyDocumentSerde
+}
+
+// Represents a replica to be modified.
+type UpdateReplicationGroupMemberAction struct {
+
+ // The Region where the replica exists.
+ //
+ // This member is required.
+ RegionName *string
+
+ // Replica-specific global secondary index settings.
+ GlobalSecondaryIndexes []ReplicaGlobalSecondaryIndex
+
+ // The KMS key of the replica that should be used for KMS encryption. To specify a
+ // key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note
+ // that you should only provide this parameter if the key is different from the
+ // default DynamoDB KMS key alias/aws/dynamodb .
+ KMSMasterKeyId *string
+
+ // Overrides the maximum on-demand throughput for the replica table.
+ OnDemandThroughputOverride *OnDemandThroughputOverride
+
+ // Replica-specific provisioned throughput. If not specified, uses the source
+ // table's provisioned throughput settings.
+ ProvisionedThroughputOverride *ProvisionedThroughputOverride
+
+ // Replica-specific table class. If not specified, uses the source table's table
+ // class.
+ TableClassOverride TableClass
+
+ noSmithyDocumentSerde
+}
+
+// Provides visibility into the number of read and write operations your table or
+// secondary index can instantaneously support. The settings can be modified using
+// the UpdateTable operation to meet the throughput requirements of an upcoming
+// peak event.
+type WarmThroughput struct {
+
+ // Represents the number of read operations your base table can instantaneously
+ // support.
+ ReadUnitsPerSecond *int64
+
+ // Represents the number of write operations your base table can instantaneously
+ // support.
+ WriteUnitsPerSecond *int64
+
+ noSmithyDocumentSerde
+}
+
+// Represents an operation to perform - either DeleteItem or PutItem . You can only
+// request one of these operations, not both, in a single WriteRequest . If you do
+// need to perform both of these operations, you need to provide two separate
+// WriteRequest objects.
+type WriteRequest struct {
+
+ // A request to perform a DeleteItem operation.
+ DeleteRequest *DeleteRequest
+
+ // A request to perform a PutItem operation.
+ PutRequest *PutRequest
+
+ noSmithyDocumentSerde
+}
+
+type noSmithyDocumentSerde = smithydocument.NoSerde
+
+// UnknownUnionMember is returned when a union member is returned over the wire,
+// but has an unknown tag.
+type UnknownUnionMember struct {
+ Tag string
+ Value []byte
+
+ noSmithyDocumentSerde
+}
+
+func (*UnknownUnionMember) isAttributeValue() {}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go
new file mode 100644
index 000000000..781ef41cf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/dynamodb/validators.go
@@ -0,0 +1,3558 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package dynamodb
+
+import (
+ "context"
+ "fmt"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+)
+
+type validateOpBatchExecuteStatement struct {
+}
+
+func (*validateOpBatchExecuteStatement) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpBatchExecuteStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*BatchExecuteStatementInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpBatchExecuteStatementInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpBatchGetItem struct {
+}
+
+func (*validateOpBatchGetItem) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpBatchGetItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*BatchGetItemInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpBatchGetItemInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpBatchWriteItem struct {
+}
+
+func (*validateOpBatchWriteItem) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpBatchWriteItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*BatchWriteItemInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpBatchWriteItemInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateBackup struct {
+}
+
+func (*validateOpCreateBackup) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateBackup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateBackupInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateBackupInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateGlobalTable struct {
+}
+
+func (*validateOpCreateGlobalTable) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateGlobalTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateGlobalTableInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateGlobalTableInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpCreateTable struct {
+}
+
+func (*validateOpCreateTable) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateTableInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateTableInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteBackup struct {
+}
+
+func (*validateOpDeleteBackup) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteBackup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteBackupInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteBackupInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteItem struct {
+}
+
+func (*validateOpDeleteItem) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteItemInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteItemInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteResourcePolicy struct {
+}
+
+func (*validateOpDeleteResourcePolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteResourcePolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteResourcePolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDeleteTable struct {
+}
+
+func (*validateOpDeleteTable) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDeleteTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DeleteTableInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDeleteTableInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeBackup struct {
+}
+
+func (*validateOpDescribeBackup) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeBackup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeBackupInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeBackupInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeContinuousBackups struct {
+}
+
+func (*validateOpDescribeContinuousBackups) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeContinuousBackups) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeContinuousBackupsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeContinuousBackupsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeContributorInsights struct {
+}
+
+func (*validateOpDescribeContributorInsights) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeContributorInsights) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeContributorInsightsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeContributorInsightsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeExport struct {
+}
+
+func (*validateOpDescribeExport) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeExport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeExportInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeExportInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeGlobalTable struct {
+}
+
+func (*validateOpDescribeGlobalTable) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeGlobalTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeGlobalTableInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeGlobalTableInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeGlobalTableSettings struct {
+}
+
+func (*validateOpDescribeGlobalTableSettings) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeGlobalTableSettings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeGlobalTableSettingsInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeGlobalTableSettingsInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeImport struct {
+}
+
+func (*validateOpDescribeImport) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeImport) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeImportInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeImportInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeKinesisStreamingDestination struct {
+}
+
+func (*validateOpDescribeKinesisStreamingDestination) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeKinesisStreamingDestinationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeKinesisStreamingDestinationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeTable struct {
+}
+
+func (*validateOpDescribeTable) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeTableInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeTableInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeTableReplicaAutoScaling struct {
+}
+
+func (*validateOpDescribeTableReplicaAutoScaling) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeTableReplicaAutoScaling) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeTableReplicaAutoScalingInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeTableReplicaAutoScalingInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDescribeTimeToLive struct {
+}
+
+func (*validateOpDescribeTimeToLive) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDescribeTimeToLive) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DescribeTimeToLiveInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDescribeTimeToLiveInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpDisableKinesisStreamingDestination struct {
+}
+
+func (*validateOpDisableKinesisStreamingDestination) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpDisableKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*DisableKinesisStreamingDestinationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpDisableKinesisStreamingDestinationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpEnableKinesisStreamingDestination struct {
+}
+
+func (*validateOpEnableKinesisStreamingDestination) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpEnableKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*EnableKinesisStreamingDestinationInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpEnableKinesisStreamingDestinationInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpExecuteStatement struct {
+}
+
+func (*validateOpExecuteStatement) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpExecuteStatement) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ExecuteStatementInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpExecuteStatementInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpExecuteTransaction struct {
+}
+
+func (*validateOpExecuteTransaction) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpExecuteTransaction) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ExecuteTransactionInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpExecuteTransactionInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpExportTableToPointInTime struct {
+}
+
+func (*validateOpExportTableToPointInTime) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpExportTableToPointInTime) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ExportTableToPointInTimeInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpExportTableToPointInTimeInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetItem struct {
+}
+
+func (*validateOpGetItem) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetItemInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetItemInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpGetResourcePolicy struct {
+}
+
+func (*validateOpGetResourcePolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpGetResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*GetResourcePolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpGetResourcePolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpImportTable struct {
+}
+
+func (*validateOpImportTable) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpImportTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ImportTableInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpImportTableInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpListTagsOfResource struct {
+}
+
+func (*validateOpListTagsOfResource) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpListTagsOfResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ListTagsOfResourceInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpListTagsOfResourceInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutItem struct {
+}
+
+func (*validateOpPutItem) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutItemInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutItemInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpPutResourcePolicy struct {
+}
+
+func (*validateOpPutResourcePolicy) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpPutResourcePolicy) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*PutResourcePolicyInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpPutResourcePolicyInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpQuery struct {
+}
+
+func (*validateOpQuery) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpQuery) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*QueryInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpQueryInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpRestoreTableFromBackup struct {
+}
+
+func (*validateOpRestoreTableFromBackup) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpRestoreTableFromBackup) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*RestoreTableFromBackupInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpRestoreTableFromBackupInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpRestoreTableToPointInTime struct {
+}
+
+func (*validateOpRestoreTableToPointInTime) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpRestoreTableToPointInTime) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*RestoreTableToPointInTimeInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpRestoreTableToPointInTimeInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+type validateOpScan struct {
+}
+
+func (*validateOpScan) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpScan) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*ScanInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpScanInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
+// NOTE(review): this section has the shape of smithy-go generated AWS SDK v2
+// validators ("Code generated ... DO NOT EDIT" style) — confirm against the
+// file header before hand-editing, since regeneration would discard changes.
+//
+// Each validateOp<Operation> type below is an initialize-step middleware with
+// the shared stack ID "OperationInputValidation". HandleInitialize
+// type-asserts in.Parameters to the operation's concrete *<Operation>Input,
+// fails fast with an "unknown input parameters type" error on a mismatch,
+// runs the matching validateOp<Operation>Input check, and only then delegates
+// to the next handler in the stack.
+
+// validateOpTagResource validates *TagResourceInput before the call proceeds.
+type validateOpTagResource struct {
+}
+
+func (*validateOpTagResource) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpTagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*TagResourceInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpTagResourceInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpTransactGetItems validates *TransactGetItemsInput before the call proceeds.
+type validateOpTransactGetItems struct {
+}
+
+func (*validateOpTransactGetItems) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpTransactGetItems) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*TransactGetItemsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpTransactGetItemsInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpTransactWriteItems validates *TransactWriteItemsInput before the call proceeds.
+type validateOpTransactWriteItems struct {
+}
+
+func (*validateOpTransactWriteItems) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpTransactWriteItems) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*TransactWriteItemsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpTransactWriteItemsInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUntagResource validates *UntagResourceInput before the call proceeds.
+type validateOpUntagResource struct {
+}
+
+func (*validateOpUntagResource) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUntagResource) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UntagResourceInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUntagResourceInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateContinuousBackups validates *UpdateContinuousBackupsInput before the call proceeds.
+type validateOpUpdateContinuousBackups struct {
+}
+
+func (*validateOpUpdateContinuousBackups) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateContinuousBackups) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateContinuousBackupsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateContinuousBackupsInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateContributorInsights validates *UpdateContributorInsightsInput before the call proceeds.
+type validateOpUpdateContributorInsights struct {
+}
+
+func (*validateOpUpdateContributorInsights) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateContributorInsights) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateContributorInsightsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateContributorInsightsInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateGlobalTable validates *UpdateGlobalTableInput before the call proceeds.
+type validateOpUpdateGlobalTable struct {
+}
+
+func (*validateOpUpdateGlobalTable) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateGlobalTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateGlobalTableInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateGlobalTableInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateGlobalTableSettings validates *UpdateGlobalTableSettingsInput before the call proceeds.
+type validateOpUpdateGlobalTableSettings struct {
+}
+
+func (*validateOpUpdateGlobalTableSettings) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateGlobalTableSettings) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateGlobalTableSettingsInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateGlobalTableSettingsInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateItem validates *UpdateItemInput before the call proceeds.
+type validateOpUpdateItem struct {
+}
+
+func (*validateOpUpdateItem) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateItem) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateItemInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateItemInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateKinesisStreamingDestination validates *UpdateKinesisStreamingDestinationInput before the call proceeds.
+type validateOpUpdateKinesisStreamingDestination struct {
+}
+
+func (*validateOpUpdateKinesisStreamingDestination) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateKinesisStreamingDestination) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateKinesisStreamingDestinationInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateKinesisStreamingDestinationInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateTable validates *UpdateTableInput before the call proceeds.
+type validateOpUpdateTable struct {
+}
+
+func (*validateOpUpdateTable) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateTable) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateTableInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateTableInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateTableReplicaAutoScaling validates *UpdateTableReplicaAutoScalingInput before the call proceeds.
+type validateOpUpdateTableReplicaAutoScaling struct {
+}
+
+func (*validateOpUpdateTableReplicaAutoScaling) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateTableReplicaAutoScaling) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateTableReplicaAutoScalingInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateTableReplicaAutoScalingInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// validateOpUpdateTimeToLive validates *UpdateTimeToLiveInput before the call proceeds.
+type validateOpUpdateTimeToLive struct {
+}
+
+func (*validateOpUpdateTimeToLive) ID() string {
+	return "OperationInputValidation"
+}
+
+func (m *validateOpUpdateTimeToLive) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+	input, ok := in.Parameters.(*UpdateTimeToLiveInput)
+	if !ok {
+		return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+	}
+	if err := validateOpUpdateTimeToLiveInput(input); err != nil {
+		return out, metadata, err
+	}
+	return next.HandleInitialize(ctx, in)
+}
+
+// The addOp<Operation>ValidationMiddleware helpers below each register the
+// corresponding validateOp<Operation> middleware on the Initialize step of
+// the given operation stack, positioned with middleware.After (i.e. after
+// the middleware already present at that step). They only propagate the
+// error from stack.Initialize.Add. Same generated-code caveat as above:
+// do not hand-edit without confirming this file is not regenerated.
+
+func addOpBatchExecuteStatementValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpBatchExecuteStatement{}, middleware.After)
+}
+
+func addOpBatchGetItemValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpBatchGetItem{}, middleware.After)
+}
+
+func addOpBatchWriteItemValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpBatchWriteItem{}, middleware.After)
+}
+
+func addOpCreateBackupValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpCreateBackup{}, middleware.After)
+}
+
+func addOpCreateGlobalTableValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpCreateGlobalTable{}, middleware.After)
+}
+
+func addOpCreateTableValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpCreateTable{}, middleware.After)
+}
+
+func addOpDeleteBackupValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDeleteBackup{}, middleware.After)
+}
+
+func addOpDeleteItemValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDeleteItem{}, middleware.After)
+}
+
+func addOpDeleteResourcePolicyValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDeleteResourcePolicy{}, middleware.After)
+}
+
+func addOpDeleteTableValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDeleteTable{}, middleware.After)
+}
+
+func addOpDescribeBackupValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeBackup{}, middleware.After)
+}
+
+func addOpDescribeContinuousBackupsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeContinuousBackups{}, middleware.After)
+}
+
+func addOpDescribeContributorInsightsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeContributorInsights{}, middleware.After)
+}
+
+func addOpDescribeExportValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeExport{}, middleware.After)
+}
+
+func addOpDescribeGlobalTableValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeGlobalTable{}, middleware.After)
+}
+
+func addOpDescribeGlobalTableSettingsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeGlobalTableSettings{}, middleware.After)
+}
+
+func addOpDescribeImportValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeImport{}, middleware.After)
+}
+
+func addOpDescribeKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeKinesisStreamingDestination{}, middleware.After)
+}
+
+func addOpDescribeTableValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeTable{}, middleware.After)
+}
+
+func addOpDescribeTableReplicaAutoScalingValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeTableReplicaAutoScaling{}, middleware.After)
+}
+
+func addOpDescribeTimeToLiveValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDescribeTimeToLive{}, middleware.After)
+}
+
+func addOpDisableKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpDisableKinesisStreamingDestination{}, middleware.After)
+}
+
+func addOpEnableKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpEnableKinesisStreamingDestination{}, middleware.After)
+}
+
+func addOpExecuteStatementValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpExecuteStatement{}, middleware.After)
+}
+
+func addOpExecuteTransactionValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpExecuteTransaction{}, middleware.After)
+}
+
+func addOpExportTableToPointInTimeValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpExportTableToPointInTime{}, middleware.After)
+}
+
+func addOpGetItemValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpGetItem{}, middleware.After)
+}
+
+func addOpGetResourcePolicyValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpGetResourcePolicy{}, middleware.After)
+}
+
+func addOpImportTableValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpImportTable{}, middleware.After)
+}
+
+func addOpListTagsOfResourceValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpListTagsOfResource{}, middleware.After)
+}
+
+func addOpPutItemValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpPutItem{}, middleware.After)
+}
+
+func addOpPutResourcePolicyValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpPutResourcePolicy{}, middleware.After)
+}
+
+func addOpQueryValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpQuery{}, middleware.After)
+}
+
+func addOpRestoreTableFromBackupValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpRestoreTableFromBackup{}, middleware.After)
+}
+
+func addOpRestoreTableToPointInTimeValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpRestoreTableToPointInTime{}, middleware.After)
+}
+
+func addOpScanValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpScan{}, middleware.After)
+}
+
+func addOpTagResourceValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpTagResource{}, middleware.After)
+}
+
+func addOpTransactGetItemsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpTransactGetItems{}, middleware.After)
+}
+
+func addOpTransactWriteItemsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpTransactWriteItems{}, middleware.After)
+}
+
+func addOpUntagResourceValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUntagResource{}, middleware.After)
+}
+
+func addOpUpdateContinuousBackupsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateContinuousBackups{}, middleware.After)
+}
+
+func addOpUpdateContributorInsightsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateContributorInsights{}, middleware.After)
+}
+
+func addOpUpdateGlobalTableValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateGlobalTable{}, middleware.After)
+}
+
+func addOpUpdateGlobalTableSettingsValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateGlobalTableSettings{}, middleware.After)
+}
+
+func addOpUpdateItemValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateItem{}, middleware.After)
+}
+
+func addOpUpdateKinesisStreamingDestinationValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateKinesisStreamingDestination{}, middleware.After)
+}
+
+func addOpUpdateTableValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateTable{}, middleware.After)
+}
+
+func addOpUpdateTableReplicaAutoScalingValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateTableReplicaAutoScaling{}, middleware.After)
+}
+
+func addOpUpdateTimeToLiveValidationMiddleware(stack *middleware.Stack) error {
+	return stack.Initialize.Add(&validateOpUpdateTimeToLive{}, middleware.After)
+}
+
+// The validate* helpers below check required fields on service shape types.
+// Shared conventions (generated pattern):
+//   - a nil value is treated as valid (nil => return nil), since presence is
+//     checked by the caller when the field itself is required;
+//   - problems accumulate in a smithy.InvalidParamsError, returned only when
+//     non-empty; the `if Len() > 0 { return } else { return nil }` shape is
+//     what the generator emits (a hand-written version would early-return);
+//   - nested errors are re-asserted via err.(smithy.InvalidParamsError),
+//     which is safe here because every validate* helper in this file returns
+//     either nil or a smithy.InvalidParamsError value;
+//   - pointer fields are required-checked with == nil, enum-like string
+//     fields with len(...) == 0.
+
+func validateAttributeDefinition(v *types.AttributeDefinition) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "AttributeDefinition"}
+	if v.AttributeName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AttributeName"))
+	}
+	if len(v.AttributeType) == 0 {
+		invalidParams.Add(smithy.NewErrParamRequired("AttributeType"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateAttributeDefinitions(v []types.AttributeDefinition) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "AttributeDefinitions"}
+	for i := range v {
+		// Index into the slice so a pointer to the element can be taken
+		// without copying it.
+		if err := validateAttributeDefinition(&v[i]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateAutoScalingPolicyUpdate(v *types.AutoScalingPolicyUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "AutoScalingPolicyUpdate"}
+	if v.TargetTrackingScalingPolicyConfiguration == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration"))
+	} else if v.TargetTrackingScalingPolicyConfiguration != nil {
+		// NOTE(review): the `else if ... != nil` re-check is redundant after
+		// the nil branch above — generated pattern, harmless.
+		if err := validateAutoScalingTargetTrackingScalingPolicyConfigurationUpdate(v.TargetTrackingScalingPolicyConfiguration); err != nil {
+			invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateAutoScalingSettingsUpdate(v *types.AutoScalingSettingsUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "AutoScalingSettingsUpdate"}
+	if v.ScalingPolicyUpdate != nil {
+		if err := validateAutoScalingPolicyUpdate(v.ScalingPolicyUpdate); err != nil {
+			invalidParams.AddNested("ScalingPolicyUpdate", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateAutoScalingTargetTrackingScalingPolicyConfigurationUpdate(v *types.AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"}
+	if v.TargetValue == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("TargetValue"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateBatchGetRequestMap(v map[string]types.KeysAndAttributes) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "BatchGetRequestMap"}
+	for key := range v {
+		// Copy the entry: Go map values are not addressable, and the nested
+		// validator takes a pointer.
+		value := v[key]
+		if err := validateKeysAndAttributes(&value); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%q]", key), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateBatchStatementRequest(v *types.BatchStatementRequest) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "BatchStatementRequest"}
+	if v.Statement == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Statement"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateBatchWriteItemRequestMap(v map[string][]types.WriteRequest) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "BatchWriteItemRequestMap"}
+	for key := range v {
+		if err := validateWriteRequests(v[key]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%q]", key), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateCondition(v *types.Condition) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "Condition"}
+	if len(v.ComparisonOperator) == 0 {
+		invalidParams.Add(smithy.NewErrParamRequired("ComparisonOperator"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateConditionCheck(v *types.ConditionCheck) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "ConditionCheck"}
+	if v.Key == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Key"))
+	}
+	if v.TableName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+	}
+	if v.ConditionExpression == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("ConditionExpression"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateCreateGlobalSecondaryIndexAction(v *types.CreateGlobalSecondaryIndexAction) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "CreateGlobalSecondaryIndexAction"}
+	if v.IndexName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("IndexName"))
+	}
+	if v.KeySchema == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("KeySchema"))
+	} else if v.KeySchema != nil {
+		if err := validateKeySchema(v.KeySchema); err != nil {
+			invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError))
+		}
+	}
+	if v.Projection == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Projection"))
+	}
+	if v.ProvisionedThroughput != nil {
+		if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil {
+			invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateCreateGlobalTableWitnessGroupMemberAction(v *types.CreateGlobalTableWitnessGroupMemberAction) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "CreateGlobalTableWitnessGroupMemberAction"}
+	if v.RegionName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateCreateReplicaAction(v *types.CreateReplicaAction) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "CreateReplicaAction"}
+	if v.RegionName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateCreateReplicationGroupMemberAction(v *types.CreateReplicationGroupMemberAction) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "CreateReplicationGroupMemberAction"}
+	if v.RegionName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+	}
+	if v.GlobalSecondaryIndexes != nil {
+		if err := validateReplicaGlobalSecondaryIndexList(v.GlobalSecondaryIndexes); err != nil {
+			invalidParams.AddNested("GlobalSecondaryIndexes", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateDelete(v *types.Delete) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "Delete"}
+	if v.Key == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Key"))
+	}
+	if v.TableName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateDeleteGlobalSecondaryIndexAction(v *types.DeleteGlobalSecondaryIndexAction) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "DeleteGlobalSecondaryIndexAction"}
+	if v.IndexName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("IndexName"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateDeleteGlobalTableWitnessGroupMemberAction(v *types.DeleteGlobalTableWitnessGroupMemberAction) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "DeleteGlobalTableWitnessGroupMemberAction"}
+	if v.RegionName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateDeleteReplicaAction(v *types.DeleteReplicaAction) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "DeleteReplicaAction"}
+	if v.RegionName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateDeleteReplicationGroupMemberAction(v *types.DeleteReplicationGroupMemberAction) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "DeleteReplicationGroupMemberAction"}
+	if v.RegionName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateDeleteRequest(v *types.DeleteRequest) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "DeleteRequest"}
+	if v.Key == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Key"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateFilterConditionMap(v map[string]types.Condition) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "FilterConditionMap"}
+	for key := range v {
+		// Copy the entry so its address can be taken (map values are not
+		// addressable).
+		value := v[key]
+		if err := validateCondition(&value); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%q]", key), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGet(v *types.Get) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "Get"}
+	if v.Key == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Key"))
+	}
+	if v.TableName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalSecondaryIndex(v *types.GlobalSecondaryIndex) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndex"}
+	if v.IndexName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("IndexName"))
+	}
+	if v.KeySchema == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("KeySchema"))
+	} else if v.KeySchema != nil {
+		if err := validateKeySchema(v.KeySchema); err != nil {
+			invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError))
+		}
+	}
+	if v.Projection == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Projection"))
+	}
+	if v.ProvisionedThroughput != nil {
+		if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil {
+			invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalSecondaryIndexAutoScalingUpdate(v *types.GlobalSecondaryIndexAutoScalingUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexAutoScalingUpdate"}
+	if v.ProvisionedWriteCapacityAutoScalingUpdate != nil {
+		if err := validateAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingUpdate); err != nil {
+			invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalSecondaryIndexAutoScalingUpdateList(v []types.GlobalSecondaryIndexAutoScalingUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexAutoScalingUpdateList"}
+	for i := range v {
+		if err := validateGlobalSecondaryIndexAutoScalingUpdate(&v[i]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalSecondaryIndexList(v []types.GlobalSecondaryIndex) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexList"}
+	for i := range v {
+		if err := validateGlobalSecondaryIndex(&v[i]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalSecondaryIndexUpdate(v *types.GlobalSecondaryIndexUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexUpdate"}
+	if v.Update != nil {
+		if err := validateUpdateGlobalSecondaryIndexAction(v.Update); err != nil {
+			invalidParams.AddNested("Update", err.(smithy.InvalidParamsError))
+		}
+	}
+	if v.Create != nil {
+		if err := validateCreateGlobalSecondaryIndexAction(v.Create); err != nil {
+			invalidParams.AddNested("Create", err.(smithy.InvalidParamsError))
+		}
+	}
+	if v.Delete != nil {
+		if err := validateDeleteGlobalSecondaryIndexAction(v.Delete); err != nil {
+			invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalSecondaryIndexUpdateList(v []types.GlobalSecondaryIndexUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalSecondaryIndexUpdateList"}
+	for i := range v {
+		if err := validateGlobalSecondaryIndexUpdate(&v[i]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalTableGlobalSecondaryIndexSettingsUpdate(v *types.GlobalTableGlobalSecondaryIndexSettingsUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"}
+	if v.IndexName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("IndexName"))
+	}
+	if v.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
+		if err := validateAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingSettingsUpdate); err != nil {
+			invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalTableGlobalSecondaryIndexSettingsUpdateList(v []types.GlobalTableGlobalSecondaryIndexSettingsUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdateList"}
+	for i := range v {
+		if err := validateGlobalTableGlobalSecondaryIndexSettingsUpdate(&v[i]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalTableWitnessGroupUpdate(v *types.GlobalTableWitnessGroupUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalTableWitnessGroupUpdate"}
+	if v.Create != nil {
+		if err := validateCreateGlobalTableWitnessGroupMemberAction(v.Create); err != nil {
+			invalidParams.AddNested("Create", err.(smithy.InvalidParamsError))
+		}
+	}
+	if v.Delete != nil {
+		if err := validateDeleteGlobalTableWitnessGroupMemberAction(v.Delete); err != nil {
+			invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateGlobalTableWitnessGroupUpdateList(v []types.GlobalTableWitnessGroupUpdate) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "GlobalTableWitnessGroupUpdateList"}
+	for i := range v {
+		if err := validateGlobalTableWitnessGroupUpdate(&v[i]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateKeyConditions(v map[string]types.Condition) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "KeyConditions"}
+	for key := range v {
+		// Copy the entry so its address can be taken (map values are not
+		// addressable).
+		value := v[key]
+		if err := validateCondition(&value); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%q]", key), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateKeysAndAttributes(v *types.KeysAndAttributes) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "KeysAndAttributes"}
+	if v.Keys == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("Keys"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateKeySchema(v []types.KeySchemaElement) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "KeySchema"}
+	for i := range v {
+		if err := validateKeySchemaElement(&v[i]); err != nil {
+			invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+		}
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateKeySchemaElement(v *types.KeySchemaElement) error {
+	if v == nil {
+		return nil
+	}
+	invalidParams := smithy.InvalidParamsError{Context: "KeySchemaElement"}
+	if v.AttributeName == nil {
+		invalidParams.Add(smithy.NewErrParamRequired("AttributeName"))
+	}
+	if len(v.KeyType) == 0 {
+		invalidParams.Add(smithy.NewErrParamRequired("KeyType"))
+	}
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	} else {
+		return nil
+	}
+}
+
+func validateLocalSecondaryIndex(v *types.LocalSecondaryIndex) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LocalSecondaryIndex"}
+ if v.IndexName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("IndexName"))
+ }
+ if v.KeySchema == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("KeySchema"))
+ } else if v.KeySchema != nil {
+ if err := validateKeySchema(v.KeySchema); err != nil {
+ invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Projection == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Projection"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateLocalSecondaryIndexList(v []types.LocalSecondaryIndex) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "LocalSecondaryIndexList"}
+ for i := range v {
+ if err := validateLocalSecondaryIndex(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateParameterizedStatement(v *types.ParameterizedStatement) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ParameterizedStatement"}
+ if v.Statement == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Statement"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateParameterizedStatements(v []types.ParameterizedStatement) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ParameterizedStatements"}
+ for i := range v {
+ if err := validateParameterizedStatement(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validatePartiQLBatchRequest(v []types.BatchStatementRequest) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PartiQLBatchRequest"}
+ for i := range v {
+ if err := validateBatchStatementRequest(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validatePointInTimeRecoverySpecification(v *types.PointInTimeRecoverySpecification) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PointInTimeRecoverySpecification"}
+ if v.PointInTimeRecoveryEnabled == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("PointInTimeRecoveryEnabled"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateProvisionedThroughput(v *types.ProvisionedThroughput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ProvisionedThroughput"}
+ if v.ReadCapacityUnits == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ReadCapacityUnits"))
+ }
+ if v.WriteCapacityUnits == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("WriteCapacityUnits"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validatePut(v *types.Put) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Put"}
+ if v.Item == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Item"))
+ }
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validatePutRequest(v *types.PutRequest) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutRequest"}
+ if v.Item == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Item"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaAutoScalingUpdate(v *types.ReplicaAutoScalingUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaAutoScalingUpdate"}
+ if v.RegionName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+ }
+ if v.ReplicaGlobalSecondaryIndexUpdates != nil {
+ if err := validateReplicaGlobalSecondaryIndexAutoScalingUpdateList(v.ReplicaGlobalSecondaryIndexUpdates); err != nil {
+ invalidParams.AddNested("ReplicaGlobalSecondaryIndexUpdates", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicaProvisionedReadCapacityAutoScalingUpdate != nil {
+ if err := validateAutoScalingSettingsUpdate(v.ReplicaProvisionedReadCapacityAutoScalingUpdate); err != nil {
+ invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaAutoScalingUpdateList(v []types.ReplicaAutoScalingUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaAutoScalingUpdateList"}
+ for i := range v {
+ if err := validateReplicaAutoScalingUpdate(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaGlobalSecondaryIndex(v *types.ReplicaGlobalSecondaryIndex) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndex"}
+ if v.IndexName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("IndexName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaGlobalSecondaryIndexAutoScalingUpdate(v *types.ReplicaGlobalSecondaryIndexAutoScalingUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexAutoScalingUpdate"}
+ if v.ProvisionedReadCapacityAutoScalingUpdate != nil {
+ if err := validateAutoScalingSettingsUpdate(v.ProvisionedReadCapacityAutoScalingUpdate); err != nil {
+ invalidParams.AddNested("ProvisionedReadCapacityAutoScalingUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaGlobalSecondaryIndexAutoScalingUpdateList(v []types.ReplicaGlobalSecondaryIndexAutoScalingUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexAutoScalingUpdateList"}
+ for i := range v {
+ if err := validateReplicaGlobalSecondaryIndexAutoScalingUpdate(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaGlobalSecondaryIndexList(v []types.ReplicaGlobalSecondaryIndex) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexList"}
+ for i := range v {
+ if err := validateReplicaGlobalSecondaryIndex(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaGlobalSecondaryIndexSettingsUpdate(v *types.ReplicaGlobalSecondaryIndexSettingsUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"}
+ if v.IndexName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("IndexName"))
+ }
+ if v.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+ if err := validateAutoScalingSettingsUpdate(v.ProvisionedReadCapacityAutoScalingSettingsUpdate); err != nil {
+ invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaGlobalSecondaryIndexSettingsUpdateList(v []types.ReplicaGlobalSecondaryIndexSettingsUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaGlobalSecondaryIndexSettingsUpdateList"}
+ for i := range v {
+ if err := validateReplicaGlobalSecondaryIndexSettingsUpdate(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaSettingsUpdate(v *types.ReplicaSettingsUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaSettingsUpdate"}
+ if v.RegionName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+ }
+ if v.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
+ if err := validateAutoScalingSettingsUpdate(v.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate); err != nil {
+ invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicaGlobalSecondaryIndexSettingsUpdate != nil {
+ if err := validateReplicaGlobalSecondaryIndexSettingsUpdateList(v.ReplicaGlobalSecondaryIndexSettingsUpdate); err != nil {
+ invalidParams.AddNested("ReplicaGlobalSecondaryIndexSettingsUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaSettingsUpdateList(v []types.ReplicaSettingsUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaSettingsUpdateList"}
+ for i := range v {
+ if err := validateReplicaSettingsUpdate(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationGroupUpdate(v *types.ReplicationGroupUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationGroupUpdate"}
+ if v.Create != nil {
+ if err := validateCreateReplicationGroupMemberAction(v.Create); err != nil {
+ invalidParams.AddNested("Create", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Update != nil {
+ if err := validateUpdateReplicationGroupMemberAction(v.Update); err != nil {
+ invalidParams.AddNested("Update", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Delete != nil {
+ if err := validateDeleteReplicationGroupMemberAction(v.Delete); err != nil {
+ invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicationGroupUpdateList(v []types.ReplicationGroupUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicationGroupUpdateList"}
+ for i := range v {
+ if err := validateReplicationGroupUpdate(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaUpdate(v *types.ReplicaUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaUpdate"}
+ if v.Create != nil {
+ if err := validateCreateReplicaAction(v.Create); err != nil {
+ invalidParams.AddNested("Create", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Delete != nil {
+ if err := validateDeleteReplicaAction(v.Delete); err != nil {
+ invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateReplicaUpdateList(v []types.ReplicaUpdate) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ReplicaUpdateList"}
+ for i := range v {
+ if err := validateReplicaUpdate(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateS3BucketSource(v *types.S3BucketSource) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "S3BucketSource"}
+ if v.S3Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("S3Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateStreamSpecification(v *types.StreamSpecification) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "StreamSpecification"}
+ if v.StreamEnabled == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("StreamEnabled"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTableCreationParameters(v *types.TableCreationParameters) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TableCreationParameters"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.AttributeDefinitions == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AttributeDefinitions"))
+ } else if v.AttributeDefinitions != nil {
+ if err := validateAttributeDefinitions(v.AttributeDefinitions); err != nil {
+ invalidParams.AddNested("AttributeDefinitions", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.KeySchema == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("KeySchema"))
+ } else if v.KeySchema != nil {
+ if err := validateKeySchema(v.KeySchema); err != nil {
+ invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ProvisionedThroughput != nil {
+ if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.GlobalSecondaryIndexes != nil {
+ if err := validateGlobalSecondaryIndexList(v.GlobalSecondaryIndexes); err != nil {
+ invalidParams.AddNested("GlobalSecondaryIndexes", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTag(v *types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Tag"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.Value == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Value"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTagList(v []types.Tag) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TagList"}
+ for i := range v {
+ if err := validateTag(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTimeToLiveSpecification(v *types.TimeToLiveSpecification) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TimeToLiveSpecification"}
+ if v.Enabled == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Enabled"))
+ }
+ if v.AttributeName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AttributeName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTransactGetItem(v *types.TransactGetItem) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TransactGetItem"}
+ if v.Get == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Get"))
+ } else if v.Get != nil {
+ if err := validateGet(v.Get); err != nil {
+ invalidParams.AddNested("Get", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTransactGetItemList(v []types.TransactGetItem) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TransactGetItemList"}
+ for i := range v {
+ if err := validateTransactGetItem(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTransactWriteItem(v *types.TransactWriteItem) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TransactWriteItem"}
+ if v.ConditionCheck != nil {
+ if err := validateConditionCheck(v.ConditionCheck); err != nil {
+ invalidParams.AddNested("ConditionCheck", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Put != nil {
+ if err := validatePut(v.Put); err != nil {
+ invalidParams.AddNested("Put", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Delete != nil {
+ if err := validateDelete(v.Delete); err != nil {
+ invalidParams.AddNested("Delete", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Update != nil {
+ if err := validateUpdate(v.Update); err != nil {
+ invalidParams.AddNested("Update", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateTransactWriteItemList(v []types.TransactWriteItem) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TransactWriteItemList"}
+ for i := range v {
+ if err := validateTransactWriteItem(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateUpdate(v *types.Update) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "Update"}
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if v.UpdateExpression == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("UpdateExpression"))
+ }
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateUpdateGlobalSecondaryIndexAction(v *types.UpdateGlobalSecondaryIndexAction) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateGlobalSecondaryIndexAction"}
+ if v.IndexName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("IndexName"))
+ }
+ if v.ProvisionedThroughput != nil {
+ if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateUpdateReplicationGroupMemberAction(v *types.UpdateReplicationGroupMemberAction) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateReplicationGroupMemberAction"}
+ if v.RegionName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RegionName"))
+ }
+ if v.GlobalSecondaryIndexes != nil {
+ if err := validateReplicaGlobalSecondaryIndexList(v.GlobalSecondaryIndexes); err != nil {
+ invalidParams.AddNested("GlobalSecondaryIndexes", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateWriteRequest(v *types.WriteRequest) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "WriteRequest"}
+ if v.PutRequest != nil {
+ if err := validatePutRequest(v.PutRequest); err != nil {
+ invalidParams.AddNested("PutRequest", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.DeleteRequest != nil {
+ if err := validateDeleteRequest(v.DeleteRequest); err != nil {
+ invalidParams.AddNested("DeleteRequest", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateWriteRequests(v []types.WriteRequest) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "WriteRequests"}
+ for i := range v {
+ if err := validateWriteRequest(&v[i]); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("[%d]", i), err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpBatchExecuteStatementInput(v *BatchExecuteStatementInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "BatchExecuteStatementInput"}
+ if v.Statements == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Statements"))
+ } else if v.Statements != nil {
+ if err := validatePartiQLBatchRequest(v.Statements); err != nil {
+ invalidParams.AddNested("Statements", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpBatchGetItemInput(v *BatchGetItemInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "BatchGetItemInput"}
+ if v.RequestItems == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RequestItems"))
+ } else if v.RequestItems != nil {
+ if err := validateBatchGetRequestMap(v.RequestItems); err != nil {
+ invalidParams.AddNested("RequestItems", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpBatchWriteItemInput(v *BatchWriteItemInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "BatchWriteItemInput"}
+ if v.RequestItems == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("RequestItems"))
+ } else if v.RequestItems != nil {
+ if err := validateBatchWriteItemRequestMap(v.RequestItems); err != nil {
+ invalidParams.AddNested("RequestItems", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateBackupInput(v *CreateBackupInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateBackupInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.BackupName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("BackupName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateGlobalTableInput(v *CreateGlobalTableInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateGlobalTableInput"}
+ if v.GlobalTableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName"))
+ }
+ if v.ReplicationGroup == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ReplicationGroup"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpCreateTableInput(v *CreateTableInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateTableInput"}
+ if v.AttributeDefinitions == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("AttributeDefinitions"))
+ } else if v.AttributeDefinitions != nil {
+ if err := validateAttributeDefinitions(v.AttributeDefinitions); err != nil {
+ invalidParams.AddNested("AttributeDefinitions", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.KeySchema == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("KeySchema"))
+ } else if v.KeySchema != nil {
+ if err := validateKeySchema(v.KeySchema); err != nil {
+ invalidParams.AddNested("KeySchema", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.LocalSecondaryIndexes != nil {
+ if err := validateLocalSecondaryIndexList(v.LocalSecondaryIndexes); err != nil {
+ invalidParams.AddNested("LocalSecondaryIndexes", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.GlobalSecondaryIndexes != nil {
+ if err := validateGlobalSecondaryIndexList(v.GlobalSecondaryIndexes); err != nil {
+ invalidParams.AddNested("GlobalSecondaryIndexes", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ProvisionedThroughput != nil {
+ if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.StreamSpecification != nil {
+ if err := validateStreamSpecification(v.StreamSpecification); err != nil {
+ invalidParams.AddNested("StreamSpecification", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.Tags != nil {
+ if err := validateTagList(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteBackupInput(v *DeleteBackupInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteBackupInput"}
+ if v.BackupArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("BackupArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteItemInput(v *DeleteItemInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteItemInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteResourcePolicyInput(v *DeleteResourcePolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteResourcePolicyInput"}
+ if v.ResourceArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDeleteTableInput(v *DeleteTableInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DeleteTableInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeBackupInput(v *DescribeBackupInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeBackupInput"}
+ if v.BackupArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("BackupArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeContinuousBackupsInput(v *DescribeContinuousBackupsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeContinuousBackupsInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeContributorInsightsInput(v *DescribeContributorInsightsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeContributorInsightsInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeExportInput(v *DescribeExportInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeExportInput"}
+ if v.ExportArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ExportArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeGlobalTableInput(v *DescribeGlobalTableInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeGlobalTableInput"}
+ if v.GlobalTableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeGlobalTableSettingsInput(v *DescribeGlobalTableSettingsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeGlobalTableSettingsInput"}
+ if v.GlobalTableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeImportInput(v *DescribeImportInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeImportInput"}
+ if v.ImportArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ImportArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeKinesisStreamingDestinationInput(v *DescribeKinesisStreamingDestinationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeKinesisStreamingDestinationInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeTableInput(v *DescribeTableInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeTableInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeTableReplicaAutoScalingInput(v *DescribeTableReplicaAutoScalingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeTableReplicaAutoScalingInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDescribeTimeToLiveInput(v *DescribeTimeToLiveInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DescribeTimeToLiveInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpDisableKinesisStreamingDestinationInput(v *DisableKinesisStreamingDestinationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "DisableKinesisStreamingDestinationInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.StreamArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("StreamArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpEnableKinesisStreamingDestinationInput(v *EnableKinesisStreamingDestinationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "EnableKinesisStreamingDestinationInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.StreamArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("StreamArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpExecuteStatementInput(v *ExecuteStatementInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ExecuteStatementInput"}
+ if v.Statement == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Statement"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpExecuteTransactionInput(v *ExecuteTransactionInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ExecuteTransactionInput"}
+ if v.TransactStatements == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TransactStatements"))
+ } else if v.TransactStatements != nil {
+ if err := validateParameterizedStatements(v.TransactStatements); err != nil {
+ invalidParams.AddNested("TransactStatements", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpExportTableToPointInTimeInput(v *ExportTableToPointInTimeInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ExportTableToPointInTimeInput"}
+ if v.TableArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableArn"))
+ }
+ if v.S3Bucket == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("S3Bucket"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetItemInput(v *GetItemInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetItemInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpGetResourcePolicyInput(v *GetResourcePolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "GetResourcePolicyInput"}
+ if v.ResourceArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpImportTableInput(v *ImportTableInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ImportTableInput"}
+ if v.S3BucketSource == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("S3BucketSource"))
+ } else if v.S3BucketSource != nil {
+ if err := validateS3BucketSource(v.S3BucketSource); err != nil {
+ invalidParams.AddNested("S3BucketSource", err.(smithy.InvalidParamsError))
+ }
+ }
+ if len(v.InputFormat) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("InputFormat"))
+ }
+ if v.TableCreationParameters == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableCreationParameters"))
+ } else if v.TableCreationParameters != nil {
+ if err := validateTableCreationParameters(v.TableCreationParameters); err != nil {
+ invalidParams.AddNested("TableCreationParameters", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpListTagsOfResourceInput(v *ListTagsOfResourceInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ListTagsOfResourceInput"}
+ if v.ResourceArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutItemInput(v *PutItemInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutItemInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.Item == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Item"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpPutResourcePolicyInput(v *PutResourcePolicyInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "PutResourcePolicyInput"}
+ if v.ResourceArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
+ }
+ if v.Policy == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Policy"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpQueryInput(v *QueryInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "QueryInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.KeyConditions != nil {
+ if err := validateKeyConditions(v.KeyConditions); err != nil {
+ invalidParams.AddNested("KeyConditions", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.QueryFilter != nil {
+ if err := validateFilterConditionMap(v.QueryFilter); err != nil {
+ invalidParams.AddNested("QueryFilter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpRestoreTableFromBackupInput(v *RestoreTableFromBackupInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RestoreTableFromBackupInput"}
+ if v.TargetTableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TargetTableName"))
+ }
+ if v.BackupArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("BackupArn"))
+ }
+ if v.GlobalSecondaryIndexOverride != nil {
+ if err := validateGlobalSecondaryIndexList(v.GlobalSecondaryIndexOverride); err != nil {
+ invalidParams.AddNested("GlobalSecondaryIndexOverride", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.LocalSecondaryIndexOverride != nil {
+ if err := validateLocalSecondaryIndexList(v.LocalSecondaryIndexOverride); err != nil {
+ invalidParams.AddNested("LocalSecondaryIndexOverride", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ProvisionedThroughputOverride != nil {
+ if err := validateProvisionedThroughput(v.ProvisionedThroughputOverride); err != nil {
+ invalidParams.AddNested("ProvisionedThroughputOverride", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpRestoreTableToPointInTimeInput(v *RestoreTableToPointInTimeInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "RestoreTableToPointInTimeInput"}
+ if v.TargetTableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TargetTableName"))
+ }
+ if v.GlobalSecondaryIndexOverride != nil {
+ if err := validateGlobalSecondaryIndexList(v.GlobalSecondaryIndexOverride); err != nil {
+ invalidParams.AddNested("GlobalSecondaryIndexOverride", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.LocalSecondaryIndexOverride != nil {
+ if err := validateLocalSecondaryIndexList(v.LocalSecondaryIndexOverride); err != nil {
+ invalidParams.AddNested("LocalSecondaryIndexOverride", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ProvisionedThroughputOverride != nil {
+ if err := validateProvisionedThroughput(v.ProvisionedThroughputOverride); err != nil {
+ invalidParams.AddNested("ProvisionedThroughputOverride", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpScanInput(v *ScanInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "ScanInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.ScanFilter != nil {
+ if err := validateFilterConditionMap(v.ScanFilter); err != nil {
+ invalidParams.AddNested("ScanFilter", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpTagResourceInput(v *TagResourceInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TagResourceInput"}
+ if v.ResourceArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
+ }
+ if v.Tags == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Tags"))
+ } else if v.Tags != nil {
+ if err := validateTagList(v.Tags); err != nil {
+ invalidParams.AddNested("Tags", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpTransactGetItemsInput(v *TransactGetItemsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TransactGetItemsInput"}
+ if v.TransactItems == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TransactItems"))
+ } else if v.TransactItems != nil {
+ if err := validateTransactGetItemList(v.TransactItems); err != nil {
+ invalidParams.AddNested("TransactItems", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpTransactWriteItemsInput(v *TransactWriteItemsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "TransactWriteItemsInput"}
+ if v.TransactItems == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TransactItems"))
+ } else if v.TransactItems != nil {
+ if err := validateTransactWriteItemList(v.TransactItems); err != nil {
+ invalidParams.AddNested("TransactItems", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUntagResourceInput(v *UntagResourceInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UntagResourceInput"}
+ if v.ResourceArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ResourceArn"))
+ }
+ if v.TagKeys == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TagKeys"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateContinuousBackupsInput(v *UpdateContinuousBackupsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateContinuousBackupsInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.PointInTimeRecoverySpecification == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("PointInTimeRecoverySpecification"))
+ } else if v.PointInTimeRecoverySpecification != nil {
+ if err := validatePointInTimeRecoverySpecification(v.PointInTimeRecoverySpecification); err != nil {
+ invalidParams.AddNested("PointInTimeRecoverySpecification", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateContributorInsightsInput(v *UpdateContributorInsightsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateContributorInsightsInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if len(v.ContributorInsightsAction) == 0 {
+ invalidParams.Add(smithy.NewErrParamRequired("ContributorInsightsAction"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateGlobalTableInput(v *UpdateGlobalTableInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateGlobalTableInput"}
+ if v.GlobalTableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName"))
+ }
+ if v.ReplicaUpdates == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ReplicaUpdates"))
+ } else if v.ReplicaUpdates != nil {
+ if err := validateReplicaUpdateList(v.ReplicaUpdates); err != nil {
+ invalidParams.AddNested("ReplicaUpdates", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateGlobalTableSettingsInput(v *UpdateGlobalTableSettingsInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateGlobalTableSettingsInput"}
+ if v.GlobalTableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("GlobalTableName"))
+ }
+ if v.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
+ if err := validateAutoScalingSettingsUpdate(v.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate); err != nil {
+ invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil {
+ if err := validateGlobalTableGlobalSecondaryIndexSettingsUpdateList(v.GlobalTableGlobalSecondaryIndexSettingsUpdate); err != nil {
+ invalidParams.AddNested("GlobalTableGlobalSecondaryIndexSettingsUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicaSettingsUpdate != nil {
+ if err := validateReplicaSettingsUpdateList(v.ReplicaSettingsUpdate); err != nil {
+ invalidParams.AddNested("ReplicaSettingsUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateItemInput(v *UpdateItemInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateItemInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.Key == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("Key"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateKinesisStreamingDestinationInput(v *UpdateKinesisStreamingDestinationInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateKinesisStreamingDestinationInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.StreamArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("StreamArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateTableInput(v *UpdateTableInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateTableInput"}
+ if v.AttributeDefinitions != nil {
+ if err := validateAttributeDefinitions(v.AttributeDefinitions); err != nil {
+ invalidParams.AddNested("AttributeDefinitions", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.ProvisionedThroughput != nil {
+ if err := validateProvisionedThroughput(v.ProvisionedThroughput); err != nil {
+ invalidParams.AddNested("ProvisionedThroughput", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.GlobalSecondaryIndexUpdates != nil {
+ if err := validateGlobalSecondaryIndexUpdateList(v.GlobalSecondaryIndexUpdates); err != nil {
+ invalidParams.AddNested("GlobalSecondaryIndexUpdates", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.StreamSpecification != nil {
+ if err := validateStreamSpecification(v.StreamSpecification); err != nil {
+ invalidParams.AddNested("StreamSpecification", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicaUpdates != nil {
+ if err := validateReplicationGroupUpdateList(v.ReplicaUpdates); err != nil {
+ invalidParams.AddNested("ReplicaUpdates", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.GlobalTableWitnessUpdates != nil {
+ if err := validateGlobalTableWitnessGroupUpdateList(v.GlobalTableWitnessUpdates); err != nil {
+ invalidParams.AddNested("GlobalTableWitnessUpdates", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateTableReplicaAutoScalingInput(v *UpdateTableReplicaAutoScalingInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateTableReplicaAutoScalingInput"}
+ if v.GlobalSecondaryIndexUpdates != nil {
+ if err := validateGlobalSecondaryIndexAutoScalingUpdateList(v.GlobalSecondaryIndexUpdates); err != nil {
+ invalidParams.AddNested("GlobalSecondaryIndexUpdates", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.ProvisionedWriteCapacityAutoScalingUpdate != nil {
+ if err := validateAutoScalingSettingsUpdate(v.ProvisionedWriteCapacityAutoScalingUpdate); err != nil {
+ invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(smithy.InvalidParamsError))
+ }
+ }
+ if v.ReplicaUpdates != nil {
+ if err := validateReplicaAutoScalingUpdateList(v.ReplicaUpdates); err != nil {
+ invalidParams.AddNested("ReplicaUpdates", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
+func validateOpUpdateTimeToLiveInput(v *UpdateTimeToLiveInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "UpdateTimeToLiveInput"}
+ if v.TableName == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TableName"))
+ }
+ if v.TimeToLiveSpecification == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TimeToLiveSpecification"))
+ } else if v.TimeToLiveSpecification != nil {
+ if err := validateTimeToLiveSpecification(v.TimeToLiveSpecification); err != nil {
+ invalidParams.AddNested("TimeToLiveSpecification", err.(smithy.InvalidParamsError))
+ }
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
new file mode 100644
index 000000000..607fc0922
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/CHANGELOG.md
@@ -0,0 +1,176 @@
+# v1.13.1 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+
+# v1.13.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+
+# v1.12.4 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+
+# v1.12.3 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+
+# v1.12.2 (2025-01-24)
+
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.12.1 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+
+# v1.12.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+
+# v1.11.5 (2024-09-20)
+
+* No change notes available for this release.
+
+# v1.11.4 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+
+# v1.11.3 (2024-06-28)
+
+* No change notes available for this release.
+
+# v1.11.2 (2024-03-29)
+
+* No change notes available for this release.
+
+# v1.11.1 (2024-02-21)
+
+* No change notes available for this release.
+
+# v1.11.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# v1.10.4 (2023-12-07)
+
+* No change notes available for this release.
+
+# v1.10.3 (2023-11-30)
+
+* No change notes available for this release.
+
+# v1.10.2 (2023-11-29)
+
+* No change notes available for this release.
+
+# v1.10.1 (2023-11-15)
+
+* No change notes available for this release.
+
+# v1.10.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+
+# v1.9.15 (2023-10-06)
+
+* No change notes available for this release.
+
+# v1.9.14 (2023-08-18)
+
+* No change notes available for this release.
+
+# v1.9.13 (2023-08-07)
+
+* No change notes available for this release.
+
+# v1.9.12 (2023-07-31)
+
+* No change notes available for this release.
+
+# v1.9.11 (2022-12-02)
+
+* No change notes available for this release.
+
+# v1.9.10 (2022-10-24)
+
+* No change notes available for this release.
+
+# v1.9.9 (2022-09-14)
+
+* No change notes available for this release.
+
+# v1.9.8 (2022-09-02)
+
+* No change notes available for this release.
+
+# v1.9.7 (2022-08-31)
+
+* No change notes available for this release.
+
+# v1.9.6 (2022-08-29)
+
+* No change notes available for this release.
+
+# v1.9.5 (2022-08-11)
+
+* No change notes available for this release.
+
+# v1.9.4 (2022-08-09)
+
+* No change notes available for this release.
+
+# v1.9.3 (2022-06-29)
+
+* No change notes available for this release.
+
+# v1.9.2 (2022-06-07)
+
+* No change notes available for this release.
+
+# v1.9.1 (2022-03-24)
+
+* No change notes available for this release.
+
+# v1.9.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.8.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.7.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.6.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.5.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.4.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+
+# v1.3.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.2.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+
+# v1.2.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.2.0 (2021-06-25)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+
+# v1.1.0 (2021-05-14)
+
+* **Feature**: Constant has been added to modules to enable runtime version inspection for reporting.
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
new file mode 100644
index 000000000..3f451fc9b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/accept_encoding_gzip.go
@@ -0,0 +1,176 @@
+package acceptencoding
+
+import (
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Header keys used to negotiate (request) and detect (response) gzip
+// content encoding.
+const acceptEncodingHeaderKey = "Accept-Encoding"
+const contentEncodingHeaderKey = "Content-Encoding"
+
+// AddAcceptEncodingGzipOptions provides the options for the
+// AddAcceptEncodingGzip middleware setup.
+type AddAcceptEncodingGzipOptions struct {
+	// Enable controls whether gzip responses are requested and manually
+	// decompressed by the SDK (true), or gzip is disabled entirely (false).
+	Enable bool
+}
+
+// AddAcceptEncodingGzip explicitly adds handling for accept-encoding GZIP
+// middleware to the operation stack. This allows checksums to be correctly
+// computed without disabling GZIP support.
+//
+// When options.Enable is true the request is sent with Accept-Encoding: gzip
+// and the response body is decompressed by the SDK; otherwise Accept-Encoding
+// is forced to identity so the HTTP client never receives gzip content.
+func AddAcceptEncodingGzip(stack *middleware.Stack, options AddAcceptEncodingGzipOptions) error {
+	if options.Enable {
+		// Request gzip on the way out...
+		if err := stack.Finalize.Add(&EnableGzip{}, middleware.Before); err != nil {
+			return err
+		}
+		// ...and decompress the response after the operation deserializer runs.
+		if err := stack.Deserialize.Insert(&DecompressGzip{}, "OperationDeserializer", middleware.After); err != nil {
+			return err
+		}
+		return nil
+	}
+
+	return stack.Finalize.Add(&DisableGzip{}, middleware.Before)
+}
+
+// DisableGzip provides the middleware that disables the underlying HTTP
+// client's automatic gzip content-encoding decompression by forcing the
+// Accept-Encoding header to "identity".
+type DisableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DisableGzip) ID() string {
+	return "DisableAcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*DisableGzip) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, &smithy.SerializationError{
+			Err: fmt.Errorf("unknown request type %T", input.Request),
+		}
+	}
+
+	// Explicitly request the identity encoding. Setting the header ourselves
+	// prevents the http client from transparently requesting gzip and
+	// auto extracting the zipped content.
+	req.Header.Set(acceptEncodingHeaderKey, "identity")
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// EnableGzip provides a middleware to enable support for
+// gzip responses, with manual decompression. This prevents the underlying HTTP
+// client from performing the gzip decompression automatically.
+type EnableGzip struct{}
+
+// ID returns the id for the middleware.
+func (*EnableGzip) ID() string {
+	return "AcceptEncodingGzip"
+}
+
+// HandleFinalize implements the FinalizeMiddleware interface.
+func (*EnableGzip) HandleFinalize(
+	ctx context.Context, input middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+	output middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+	req, ok := input.Request.(*smithyhttp.Request)
+	if !ok {
+		return output, metadata, &smithy.SerializationError{
+			Err: fmt.Errorf("unknown request type %T", input.Request),
+		}
+	}
+
+	// Explicitly request gzip; because the header is set by the SDK rather
+	// than the http client, the client will not auto extract the zipped
+	// content, leaving decompression to the DecompressGzip middleware.
+	req.Header.Set(acceptEncodingHeaderKey, "gzip")
+
+	return next.HandleFinalize(ctx, input)
+}
+
+// DecompressGzip provides the middleware for decompressing a gzip
+// response from the service.
+type DecompressGzip struct{}
+
+// ID returns the id for the middleware.
+func (*DecompressGzip) ID() string {
+	return "DecompressGzip"
+}
+
+// HandleDeserialize implements the DeserializeMiddleware interface.
+func (*DecompressGzip) HandleDeserialize(
+	ctx context.Context, input middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+	output middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+	output, metadata, err = next.HandleDeserialize(ctx, input)
+	if err != nil {
+		return output, metadata, err
+	}
+
+	resp, ok := output.RawResponse.(*smithyhttp.Response)
+	if !ok {
+		return output, metadata, &smithy.DeserializationError{
+			Err: fmt.Errorf("unknown response type %T", output.RawResponse),
+		}
+	}
+	// Pass the response through untouched unless it is actually gzipped.
+	if v := resp.Header.Get(contentEncodingHeaderKey); v != "gzip" {
+		return output, metadata, err
+	}
+
+	// Clear content length since it will no longer be valid once the response
+	// body is decompressed.
+	resp.Header.Del("Content-Length")
+	resp.ContentLength = -1
+
+	resp.Body = wrapGzipReader(resp.Body)
+
+	return output, metadata, err
+}
+
+// gzipReader lazily wraps an underlying response body with a gzip.Reader,
+// decompressing bytes as they are read.
+type gzipReader struct {
+	reader io.ReadCloser // compressed source (the original response body)
+	gzip   *gzip.Reader  // initialized on first Read; nil until then
+}
+
+// wrapGzipReader returns a gzipReader over reader. The gzip stream is not
+// opened until the first call to Read.
+func wrapGzipReader(reader io.ReadCloser) *gzipReader {
+	return &gzipReader{
+		reader: reader,
+	}
+}
+
+// Read wraps the gzip reader around the underlying io.Reader to extract the
+// response bytes on the fly.
+func (g *gzipReader) Read(b []byte) (n int, err error) {
+	if g.gzip == nil {
+		// Lazy initialization: the gzip header is consumed from the
+		// underlying reader on the first Read.
+		g.gzip, err = gzip.NewReader(g.reader)
+		if err != nil {
+			g.gzip = nil // ensure uninitialized gzip value isn't used in close.
+			return 0, fmt.Errorf("failed to decompress gzip response, %w", err)
+		}
+	}
+
+	return g.gzip.Read(b)
+}
+
+// Close closes the gzip stream (if it was ever initialized) and then the
+// underlying response body.
+func (g *gzipReader) Close() error {
+	if g.gzip == nil {
+		// NOTE(review): when Read was never called, the underlying reader is
+		// NOT closed here — presumably the caller closes the original body
+		// elsewhere; verify against upstream before changing vendored code.
+		return nil
+	}
+
+	if err := g.gzip.Close(); err != nil {
+		g.reader.Close()
+		return fmt.Errorf("failed to decompress gzip response, %w", err)
+	}
+
+	return g.reader.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
new file mode 100644
index 000000000..7056d9bf6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/doc.go
@@ -0,0 +1,22 @@
+/*
+Package acceptencoding provides customizations associated with the Accept-Encoding header.
+
+# Accept encoding gzip
+
+The Go HTTP client automatically supports accept-encoding and content-encoding
+gzip by default. This default behavior is not desired by the SDK, and prevents
+validating the response body's checksum. To prevent this, the SDK must manually
+control usage of content-encoding gzip.
+
+To control content-encoding, the SDK must always set the `Accept-Encoding`
+header to a value. This prevents the HTTP client from using gzip automatically.
+When gzip is enabled on the API client, the SDK's customization will control
+decompressing the gzip data in order to not break the checksum validation. When
+gzip is disabled, the API client will disable gzip, preventing the HTTP
+client's default behavior.
+
+An `EnableAcceptEncodingGzip` option may or may not be present depending on the
+client using the below middleware. The option, if present, can be used to enable
+auto decompressing gzip by the SDK.
+*/
+package acceptencoding
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
new file mode 100644
index 000000000..7a0b6aae2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package acceptencoding
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.13.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md
new file mode 100644
index 000000000..6214ff291
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/CHANGELOG.md
@@ -0,0 +1,466 @@
+# v1.11.6 (2025-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.5 (2025-08-27)
+
+* **Dependency Update**: Update to smithy-go v1.23.0.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2025-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.3 (2025-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.2 (2025-08-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2025-07-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.18 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.17 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.16 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.15 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.14 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.13 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.12 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.11 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.10 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.10.9 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.8 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.7 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.19 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.18 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.17 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.16 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.15 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.14 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.13 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.12 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.11 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.10 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.9 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.8 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.7 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.6 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.5 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.4 (2024-03-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.3 (2024-03-04)
+
+* **Bug Fix**: Fix misaligned struct member used in atomic operation. This fixes a panic caused by attempting to atomically access a struct member which is not 64-bit aligned when running on 32-bit arch, due to the smaller sync.Map struct.
+
+# v1.9.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.11 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.10 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.9 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.8 (2023-11-30.2)
+
+* **Bug Fix**: Respect caller region overrides in endpoint discovery.
+
+# v1.8.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.8.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.37 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.36 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.29 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.28 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.27 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.26 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.25 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.19 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.17 (2022-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.16 (2022-09-14)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.15 (2022-09-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.14 (2022-08-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.13 (2022-08-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.12 (2022-08-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.11 (2022-08-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.10 (2022-08-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.9 (2022-08-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.8 (2022-07-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.7 (2022-06-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.6 (2022-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.5 (2022-05-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.4 (2022-04-25)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.3 (2022-03-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.2 (2022-03-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.1 (2022-03-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.7.0 (2022-03-08)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.6.0 (2022-02-24)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.5.0 (2022-01-14)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.4.0 (2022-01-07)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.3 (2021-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.2 (2021-11-30)
+
+* **Bug Fix**: Fixed a race condition that caused concurrent calls relying on endpoint discovery to share the same `url.URL` reference in their operation's http.Request.
+
+# v1.3.1 (2021-11-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.3.0 (2021-11-06)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.2.0 (2021-10-21)
+
+* **Feature**: Updated to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.2 (2021-10-11)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.1 (2021-09-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.1.0 (2021-08-27)
+
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.3 (2021-08-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.2 (2021-08-04)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.1 (2021-07-15)
+
+* **Dependency Update**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.0.0 (2021-06-25)
+
+* **Release**: Release new modules
+* **Feature**: Module supporting endpoint-discovery across all service clients.
+* **Feature**: Updated `github.com/aws/smithy-go` to latest version
+* **Dependency Update**: Updated to the latest SDK module versions
+
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/LICENSE.txt
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/cache.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/cache.go
new file mode 100644
index 000000000..6abd3029c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/cache.go
@@ -0,0 +1,98 @@
+package endpointdiscovery
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+// EndpointCache is an LRU cache that holds a series of endpoints
+// based on some key. The data structure makes use of a read write
+// mutex to enable asynchronous use.
+type EndpointCache struct {
+ // size is used to count the number elements in the cache.
+ // The atomic package is used to ensure this size is accurate when
+ // using multiple goroutines.
+ size int64
+ endpoints sync.Map
+ endpointLimit int64
+}
+
+// NewEndpointCache will return a newly initialized cache with a limit
+// of endpointLimit entries.
+func NewEndpointCache(endpointLimit int64) *EndpointCache {
+ return &EndpointCache{
+ endpointLimit: endpointLimit,
+ endpoints: sync.Map{},
+ }
+}
+
+// get is a concurrent safe get operation that will retrieve an endpoint
+// based on endpointKey. A boolean will also be returned to illustrate whether
+// or not the endpoint had been found.
+func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) {
+ endpoint, ok := c.endpoints.Load(endpointKey)
+ if !ok {
+ return Endpoint{}, false
+ }
+
+ ev := endpoint.(Endpoint)
+ ev.Prune()
+
+ c.endpoints.Store(endpointKey, ev)
+ return endpoint.(Endpoint), true
+}
+
+// Has returns if the endpoint cache contains a valid entry for the endpoint key
+// provided.
+func (c *EndpointCache) Has(endpointKey string) bool {
+ _, found := c.Get(endpointKey)
+ return found
+}
+
+// Get will retrieve a weighted address based off of the endpoint key. If an endpoint
+// should be retrieved, due to not existing or the current endpoint has expired
+// the Discoverer object that was passed in will attempt to discover a new endpoint
+// and add that to the cache.
+func (c *EndpointCache) Get(endpointKey string) (WeightedAddress, bool) {
+ endpoint, ok := c.get(endpointKey)
+ if !ok {
+ return WeightedAddress{}, false
+ }
+ return endpoint.GetValidAddress()
+}
+
+// Add is a concurrent safe operation that will allow new endpoints to be added
+// to the cache. If the cache is full, the number of endpoints equal endpointLimit,
+// then this will remove the oldest entry before adding the new endpoint.
+func (c *EndpointCache) Add(endpoint Endpoint) {
+ // de-dups multiple adds of an endpoint with a pre-existing key
+ if iface, ok := c.endpoints.Load(endpoint.Key); ok {
+ e := iface.(Endpoint)
+ if e.Len() > 0 {
+ return
+ }
+ }
+
+ size := atomic.AddInt64(&c.size, 1)
+ if size > 0 && size > c.endpointLimit {
+ c.deleteRandomKey()
+ }
+
+ c.endpoints.Store(endpoint.Key, endpoint)
+}
+
+// deleteRandomKey will delete a random key from the cache. If
+// no key was deleted false will be returned.
+func (c *EndpointCache) deleteRandomKey() bool {
+ atomic.AddInt64(&c.size, -1)
+ found := false
+
+ c.endpoints.Range(func(key, value interface{}) bool {
+ found = true
+ c.endpoints.Delete(key)
+
+ return false
+ })
+
+ return found
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/doc.go
new file mode 100644
index 000000000..36a16a755
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/doc.go
@@ -0,0 +1,33 @@
+/*
+Package endpointdiscovery provides a feature implemented in the AWS SDK for Go V2 that
+allows client to fetch a valid endpoint to serve an API request. Discovered
+endpoints are stored in an internal thread-safe cache to reduce the number
+of calls made to fetch the endpoint.
+
+Endpoint discovery stores endpoint by associating to a generated cache key.
+Cache key is built using service-modeled sdkId and any service-defined input
+identifiers provided by the customer.
+
+Endpoint cache keys follow the grammar:
+
+ key = sdkId.identifiers
+
+ identifiers = map[string]string
+
+The endpoint discovery cache implementation is internal. Clients resolves the
+cache size to 10 entries. Each entry may contain multiple host addresses as
+returned by the service.
+
+Each discovered endpoint has a TTL associated to it, and is evicted from
+cache lazily i.e. when client tries to retrieve an endpoint but finds an
+expired entry instead.
+
+Endpoint discovery feature can be turned on by setting the
+`AWS_ENABLE_ENDPOINT_DISCOVERY` env variable to TRUE.
+
+By default, the feature is set to AUTO - indicating operations that require
+endpoint discovery always use it. To completely turn off the feature, one
+should set the value as FALSE. Similar configuration rules apply for shared
+config file where key is `endpoint_discovery_enabled`.
+*/
+package endpointdiscovery
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/endpoint.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/endpoint.go
new file mode 100644
index 000000000..5fa06f2ae
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/endpoint.go
@@ -0,0 +1,94 @@
+package endpointdiscovery
+
+import (
+ "net/url"
+ "time"
+)
+
+// Endpoint represents an endpoint used in endpoint discovery.
+type Endpoint struct {
+ Key string
+ Addresses WeightedAddresses
+}
+
+// WeightedAddresses represents a list of WeightedAddress.
+type WeightedAddresses []WeightedAddress
+
+// WeightedAddress represents an address with a given weight.
+type WeightedAddress struct {
+ URL *url.URL
+ Expired time.Time
+}
+
+// HasExpired will return whether or not the endpoint has expired with
+// the exception of a zero expiry meaning does not expire.
+func (e WeightedAddress) HasExpired() bool {
+ return e.Expired.Before(time.Now())
+}
+
+// Add will add a given WeightedAddress to the address list of Endpoint.
+func (e *Endpoint) Add(addr WeightedAddress) {
+ e.Addresses = append(e.Addresses, addr)
+}
+
+// Len returns the number of valid endpoints where valid means the endpoint
+// has not expired.
+func (e *Endpoint) Len() int {
+ validEndpoints := 0
+ for _, endpoint := range e.Addresses {
+ if endpoint.HasExpired() {
+ continue
+ }
+
+ validEndpoints++
+ }
+ return validEndpoints
+}
+
+// GetValidAddress will return a non-expired weight endpoint
+func (e *Endpoint) GetValidAddress() (WeightedAddress, bool) {
+ for i := 0; i < len(e.Addresses); i++ {
+ we := e.Addresses[i]
+
+ if we.HasExpired() {
+ continue
+ }
+
+ we.URL = cloneURL(we.URL)
+
+ return we, true
+ }
+
+ return WeightedAddress{}, false
+}
+
+// Prune will prune the expired addresses from the endpoint by allocating a new []WeightAddress.
+// This is not concurrent safe, and should be called from a single owning thread.
+func (e *Endpoint) Prune() bool {
+ validLen := e.Len()
+ if validLen == len(e.Addresses) {
+ return false
+ }
+ wa := make([]WeightedAddress, 0, validLen)
+ for i := range e.Addresses {
+ if e.Addresses[i].HasExpired() {
+ continue
+ }
+ wa = append(wa, e.Addresses[i])
+ }
+ e.Addresses = wa
+ return true
+}
+
+func cloneURL(u *url.URL) (clone *url.URL) {
+ clone = &url.URL{}
+
+ *clone = *u
+
+ if u.User != nil {
+ user := *u.User
+ clone.User = &user
+ }
+
+ return clone
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go
new file mode 100644
index 000000000..b83027a6e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/go_module_metadata.go
@@ -0,0 +1,6 @@
+// Code generated by internal/repotools/cmd/updatemodulemeta DO NOT EDIT.
+
+package endpointdiscovery
+
+// goModuleVersion is the tagged release for this module
+const goModuleVersion = "1.11.6"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/middleware.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/middleware.go
new file mode 100644
index 000000000..c6b073d21
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery/middleware.go
@@ -0,0 +1,102 @@
+package endpointdiscovery
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// DiscoverEndpointOptions are options used with the DiscoverEndpoint operation.
+type DiscoverEndpointOptions struct {
+
+ // EndpointResolverUsedForDiscovery is the endpoint resolver used to
+ // resolve an endpoint for discovery api call.
+ EndpointResolverUsedForDiscovery interface{}
+
+ // DisableHTTPS will disable tls for endpoint discovery call and
+ // subsequent discovered endpoint if service did not return an
+ // endpoint scheme.
+ DisableHTTPS bool
+
+ // Logger to log warnings or debug statements.
+ Logger logging.Logger
+}
+
+// DiscoverEndpoint is a finalize step middleware used to discover endpoint
+// for an API operation.
+type DiscoverEndpoint struct {
+
+ // Options provides optional settings used with
+ // Discover Endpoint operation.
+ Options []func(*DiscoverEndpointOptions)
+
+ // DiscoverOperation represents the endpoint discovery operation that
+ // returns an Endpoint or error.
+ DiscoverOperation func(ctx context.Context, region string, options ...func(*DiscoverEndpointOptions)) (WeightedAddress, error)
+
+ // EndpointDiscoveryEnableState represents the customer configuration for endpoint
+ // discovery feature.
+ EndpointDiscoveryEnableState aws.EndpointDiscoveryEnableState
+
+ // EndpointDiscoveryRequired states if an operation requires to perform
+ // endpoint discovery.
+ EndpointDiscoveryRequired bool
+
+ // The client region
+ Region string
+}
+
+// ID represents the middleware identifier
+func (*DiscoverEndpoint) ID() string {
+ return "DiscoverEndpoint"
+}
+
+// HandleFinalize performs endpoint discovery and updates the request host with
+// the result.
+//
+// The resolved host from this procedure MUST override that of modeled endpoint
+// resolution and middleware should be ordered accordingly.
+func (d *DiscoverEndpoint) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (out middleware.FinalizeOutput, metadata middleware.Metadata, err error) {
+ if d.EndpointDiscoveryEnableState == aws.EndpointDiscoveryDisabled {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ if !d.EndpointDiscoveryRequired && d.EndpointDiscoveryEnableState != aws.EndpointDiscoveryEnabled {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ if es := awsmiddleware.GetEndpointSource(ctx); es == aws.EndpointSourceCustom {
+ if d.EndpointDiscoveryEnableState == aws.EndpointDiscoveryEnabled {
+ return middleware.FinalizeOutput{}, middleware.Metadata{},
+ fmt.Errorf("Invalid configuration: endpoint discovery is enabled, but a custom endpoint is provided")
+ }
+
+ return next.HandleFinalize(ctx, in)
+ }
+
+ weightedAddress, err := d.DiscoverOperation(ctx, d.Region, d.Options...)
+ if err != nil {
+ return middleware.FinalizeOutput{}, middleware.Metadata{}, err
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return middleware.FinalizeOutput{}, middleware.Metadata{},
+ fmt.Errorf("expected request to be of type *smithyhttp.Request, got %T", in.Request)
+ }
+
+ if weightedAddress.URL != nil {
+ // we only want the host, normal endpoint resolution can include path/query
+ req.URL.Host = weightedAddress.URL.Host
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
index f19fdf9c3..869246098 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/CHANGELOG.md
@@ -1,3 +1,301 @@
+# v1.13.0 (2025-07-28)
+
+* **Feature**: Add support for HTTP interceptors.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.18 (2025-07-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.17 (2025-06-17)
+
+* **Dependency Update**: Update to smithy-go v1.22.4.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.16 (2025-06-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.15 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.11 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.12.9 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.7 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.20 (2024-09-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.19 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.18 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.17 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.16 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.15 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.14 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.13 (2024-06-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.12 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.11 (2024-06-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.10 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.7 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.6 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.5 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.4 (2024-03-05)
+
+* **Bug Fix**: Restore typo'd API `AddAsIsInternalPresigingMiddleware` as an alias for backwards compatibility.
+
+# v1.11.3 (2024-03-04)
+
+* **Bug Fix**: Correct a typo in internal AddAsIsPresigningMiddleware API.
+
+# v1.11.2 (2024-02-23)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.1 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.10 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.9 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.8 (2023-12-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.7 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.6 (2023-11-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.3 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.2 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.1 (2023-11-01)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.10.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.37 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.36 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.35 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.34 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.33 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.32 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.31 (2023-07-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.30 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.29 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.28 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.27 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.26 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.25 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.24 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.23 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.22 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.21 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.20 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.19 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.9.18 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.9.17 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
index cc919701a..5d5286f92 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/context.go
@@ -27,13 +27,21 @@ func GetIsPresigning(ctx context.Context) bool {
type isPresigningKey struct{}
-// AddAsIsPresigingMiddleware adds a middleware to the head of the stack that
+// AddAsIsPresigningMiddleware adds a middleware to the head of the stack that
// will update the stack's context to be flagged as being invoked for the
// purpose of presigning.
-func AddAsIsPresigingMiddleware(stack *middleware.Stack) error {
+func AddAsIsPresigningMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(asIsPresigningMiddleware{}, middleware.Before)
}
+// AddAsIsPresigingMiddleware is an alias for backwards compatibility.
+//
+// Deprecated: This API was released with a typo. Use
+// [AddAsIsPresigningMiddleware] instead.
+func AddAsIsPresigingMiddleware(stack *middleware.Stack) error {
+ return AddAsIsPresigningMiddleware(stack)
+}
+
type asIsPresigningMiddleware struct{}
func (asIsPresigningMiddleware) ID() string { return "AsIsPresigningMiddleware" }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
index da09a149e..beae329a8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/internal/presigned-url/go_module_metadata.go
@@ -3,4 +3,4 @@
package presignedurl
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.9.17"
+const goModuleVersion = "1.13.0"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
index f58d0adb9..3be25b8be 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/CHANGELOG.md
@@ -1,3 +1,423 @@
+# v1.25.3 (2025-04-03)
+
+* No change notes available for this release.
+
+# v1.25.2 (2025-03-25)
+
+* No change notes available for this release.
+
+# v1.25.1 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.25.0 (2025-02-27)
+
+* **Feature**: Track credential providers via User-Agent Feature ids
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.16 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.15 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.14 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.13 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.12 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.24.11 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+
+# v1.24.10 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.9 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.8 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.7 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.6 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.5 (2024-11-07)
+
+* **Bug Fix**: Adds case-insensitive handling of error message fields in service responses
+
+# v1.24.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.4 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.23.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.23.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.23.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.23.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.8 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.22.7 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.22.6 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.5 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.4 (2024-07-18)
+
+* No change notes available for this release.
+
+# v1.22.3 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.2 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.21.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.12 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.11 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.10 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.9 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.20.8 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.7 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.6 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.20.5 (2024-04-05)
+
+* No change notes available for this release.
+
+# v1.20.4 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.3 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.2 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.19.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.19.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2024-01-18)
+
+* No change notes available for this release.
+
+# v1.18.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.18.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.18.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.4 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.17.3 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.16.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.0 (2023-10-02)
+
+* **Feature**: Fix FIPS Endpoints in aws-us-gov.
+
+# v1.14.1 (2023-09-22)
+
+* No change notes available for this release.
+
+# v1.14.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
+
+# v1.13.6 (2023-08-31)
+
+* No change notes available for this release.
+
+# v1.13.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.13.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.14 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.13 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.12 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.12.11 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.10 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.12.9 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.8 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.12.7 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.6 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.5 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.4 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.12.3 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.2 (2023-02-15)
+
+* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+* **Bug Fix**: Correct error type parsing for restJson services.
+
+# v1.12.1 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.12.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.11.28 (2022-12-20)
+
+* No change notes available for this release.
+
+# v1.11.27 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.26 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.25 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.11.24 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.11.23 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
index 7bb069844..9f10e65ad 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_client.go
@@ -4,169 +4,241 @@ package sso
import (
"context"
+ "errors"
+ "fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
smithydocument "github.com/aws/smithy-go/document"
"github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
"net"
"net/http"
+ "sync/atomic"
"time"
)
const ServiceID = "SSO"
const ServiceAPIVersion = "2019-06-10"
-// Client provides the API client to make operations call for AWS Single Sign-On.
-type Client struct {
- options Options
+type operationMetrics struct {
+ Duration metrics.Float64Histogram
+ SerializeDuration metrics.Float64Histogram
+ ResolveIdentityDuration metrics.Float64Histogram
+ ResolveEndpointDuration metrics.Float64Histogram
+ SignRequestDuration metrics.Float64Histogram
+ DeserializeDuration metrics.Float64Histogram
}
-// New returns an initialized Client based on the functional options. Provide
-// additional functional options to further configure the behavior of the client,
-// such as changing the client's endpoint or adding custom middleware behavior.
-func New(options Options, optFns ...func(*Options)) *Client {
- options = options.Copy()
+func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram {
+ switch name {
+ case "client.call.duration":
+ return m.Duration
+ case "client.call.serialization_duration":
+ return m.SerializeDuration
+ case "client.call.resolve_identity_duration":
+ return m.ResolveIdentityDuration
+ case "client.call.resolve_endpoint_duration":
+ return m.ResolveEndpointDuration
+ case "client.call.signing_duration":
+ return m.SignRequestDuration
+ case "client.call.deserialization_duration":
+ return m.DeserializeDuration
+ default:
+ panic("unrecognized operation metric")
+ }
+}
- resolveDefaultLogger(&options)
+func timeOperationMetric[T any](
+ ctx context.Context, metric string, fn func() (T, error),
+ opts ...metrics.RecordMetricOption,
+) (T, error) {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
- setResolvedDefaultsMode(&options)
+ start := time.Now()
+ v, err := fn()
+ end := time.Now()
- resolveRetryer(&options)
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ return v, err
+}
- resolveHTTPClient(&options)
+func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
- resolveHTTPSignerV4(&options)
+ var ended bool
+ start := time.Now()
+ return func() {
+ if ended {
+ return
+ }
+ ended = true
- resolveDefaultEndpointConfiguration(&options)
+ end := time.Now()
- for _, fn := range optFns {
- fn(&options)
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
}
+}
- client := &Client{
- options: options,
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
}
+}
- return client
+type operationMetricsKey struct{}
+
+func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) {
+ meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sso")
+ om := &operationMetrics{}
+
+ var err error
+
+ om.Duration, err = operationMetricTimer(meter, "client.call.duration",
+ "Overall call duration (including retries and time to send or receive request and response body)")
+ if err != nil {
+ return nil, err
+ }
+ om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration",
+ "The time it takes to serialize a message body")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration",
+ "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration",
+ "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request")
+ if err != nil {
+ return nil, err
+ }
+ om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration",
+ "The time it takes to sign a request")
+ if err != nil {
+ return nil, err
+ }
+ om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration",
+ "The time it takes to deserialize a message body")
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(parent, operationMetricsKey{}, om), nil
}
-type Options struct {
- // Set of options to modify how an operation is invoked. These apply to all
- // operations invoked for this client. Use functional options on operation call to
- // modify this list for per operation behavior.
- APIOptions []func(*middleware.Stack) error
+func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) {
+ return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = desc
+ })
+}
- // Configures the events that will be sent to the configured logger.
- ClientLogMode aws.ClientLogMode
+func getOperationMetrics(ctx context.Context) *operationMetrics {
+ return ctx.Value(operationMetricsKey{}).(*operationMetrics)
+}
- // The credentials object to use when signing requests.
- Credentials aws.CredentialsProvider
+func operationTracer(p tracing.TracerProvider) tracing.Tracer {
+ return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sso")
+}
- // The configuration DefaultsMode that the SDK should use when constructing the
- // clients initial default settings.
- DefaultsMode aws.DefaultsMode
+// Client provides the API client to make operations call for AWS Single Sign-On.
+type Client struct {
+ options Options
- // The endpoint options to be used when attempting to resolve an endpoint.
- EndpointOptions EndpointResolverOptions
+ // Difference between the time reported by the server and the client
+ timeOffset *atomic.Int64
+}
- // The service endpoint resolver.
- EndpointResolver EndpointResolver
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
- // Signature Version 4 (SigV4) Signer
- HTTPSignerV4 HTTPSignerV4
+ resolveDefaultLogger(&options)
- // The logger writer interface to write logging messages to.
- Logger logging.Logger
+ setResolvedDefaultsMode(&options)
- // The region to send requests to. (Required)
- Region string
+ resolveRetryer(&options)
- // RetryMaxAttempts specifies the maximum number attempts an API client will call
- // an operation that fails with a retryable error. A value of 0 is ignored, and
- // will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. If specified in an operation call's functional
- // options with a value that is different than the constructed client's Options,
- // the Client's Retryer will be wrapped to use the operation's specific
- // RetryMaxAttempts value.
- RetryMaxAttempts int
+ resolveHTTPClient(&options)
- // RetryMode specifies the retry mode the API client will be created with, if
- // Retryer option is not also specified. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. Currently does not support per operation call
- // overrides, may in the future.
- RetryMode aws.RetryMode
+ resolveHTTPSignerV4(&options)
- // Retryer guides how HTTP requests should be retried in case of recoverable
- // failures. When nil the API client will use a default retryer. The kind of
- // default retry created by the API client can be changed with the RetryMode
- // option.
- Retryer aws.Retryer
+ resolveEndpointResolverV2(&options)
- // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
- // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
- // should not populate this structure programmatically, or rely on the values here
- // within your applications.
- RuntimeEnvironment aws.RuntimeEnvironment
+ resolveTracerProvider(&options)
- // The initial DefaultsMode used when the client options were constructed. If the
- // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
- // value was at that point in time. Currently does not support per operation call
- // overrides, may in the future.
- resolvedDefaultsMode aws.DefaultsMode
+ resolveMeterProvider(&options)
- // The HTTP client to invoke API calls with. Defaults to client's default HTTP
- // implementation if nil.
- HTTPClient HTTPClient
-}
+ resolveAuthSchemeResolver(&options)
-// WithAPIOptions returns a functional option for setting the Client's APIOptions
-// option.
-func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
- return func(o *Options) {
- o.APIOptions = append(o.APIOptions, optFns...)
+ for _, fn := range optFns {
+ fn(&options)
}
-}
-// WithEndpointResolver returns a functional option for setting the Client's
-// EndpointResolver option.
-func WithEndpointResolver(v EndpointResolver) func(*Options) {
- return func(o *Options) {
- o.EndpointResolver = v
+ finalizeRetryMaxAttempts(&options)
+
+ ignoreAnonymousAuth(&options)
+
+ wrapWithAnonymousAuth(&options)
+
+ resolveAuthSchemes(&options)
+
+ client := &Client{
+ options: options,
}
-}
-type HTTPClient interface {
- Do(*http.Request) (*http.Response, error)
-}
+ initializeTimeOffsetResolver(client)
-// Copy creates a clone where the APIOptions list is deep copied.
-func (o Options) Copy() Options {
- to := o
- to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
- copy(to.APIOptions, o.APIOptions)
+ return client
+}
- return to
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+ return c.options.Copy()
}
-func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
ctx = middleware.ClearStackValues(ctx)
+ ctx = middleware.WithServiceID(ctx, ServiceID)
+ ctx = middleware.WithOperationName(ctx, opID)
+
stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
options := c.options.Copy()
+
for _, fn := range optFns {
fn(&options)
}
- finalizeRetryMaxAttemptOptions(&options, *c)
+ finalizeOperationRetryMaxAttempts(&options, *c)
finalizeClientEndpointResolverOptions(&options)
@@ -182,20 +254,142 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
}
}
- handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
- result, metadata, err = handler.Handle(ctx, params)
+ ctx, err = withOperationMetrics(ctx, options.MeterProvider)
+ if err != nil {
+ return nil, metadata, err
+ }
+
+ tracer := operationTracer(options.TracerProvider)
+ spanName := fmt.Sprintf("%s.%s", ServiceID, opID)
+
+ ctx = tracing.WithOperationTracer(ctx, tracer)
+
+ ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) {
+ o.Kind = tracing.SpanKindClient
+ o.Properties.Set("rpc.system", "aws-api")
+ o.Properties.Set("rpc.method", opID)
+ o.Properties.Set("rpc.service", ServiceID)
+ })
+ endTimer := startMetricTimer(ctx, "client.call.duration")
+ defer endTimer()
+ defer span.End()
+
+ handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) {
+ o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso")
+ })
+ decorated := middleware.DecorateHandler(handler, stack)
+ result, metadata, err = decorated.Handle(ctx, params)
if err != nil {
+ span.SetProperty("exception.type", fmt.Sprintf("%T", err))
+ span.SetProperty("exception.message", err.Error())
+
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ span.SetProperty("api.error_code", aerr.ErrorCode())
+ span.SetProperty("api.error_message", aerr.ErrorMessage())
+ span.SetProperty("api.error_fault", aerr.ErrorFault().String())
+ }
+
err = &smithy.OperationError{
ServiceID: ServiceID,
OperationName: opID,
Err: err,
}
}
+
+ span.SetProperty("error", err != nil)
+ if err == nil {
+ span.SetStatus(tracing.SpanStatusOK)
+ } else {
+ span.SetStatus(tracing.SpanStatusError)
+ }
+
return result, metadata, err
}
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+ return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+ return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+ return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = setOperationInput(ctx, in.Parameters)
+ return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %v", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %v", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+ if options.AuthSchemeResolver == nil {
+ options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+ }
+}
+
+func resolveAuthSchemes(options *Options) {
+ if options.AuthSchemes == nil {
+ options.AuthSchemes = []smithyhttp.AuthScheme{
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ }
+ }
+}
+
type noSmithyDocumentSerde = smithydocument.NoSerde
+type legacyEndpointContextSetter struct {
+ LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+ return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.LegacyResolver != nil {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+ }
+
+ return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+ return stack.Initialize.Add(&legacyEndpointContextSetter{
+ LegacyResolver: o.EndpointResolver,
+ }, middleware.Before)
+}
+
func resolveDefaultLogger(o *Options) {
if o.Logger != nil {
return
@@ -233,6 +427,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
APIOptions: cfg.APIOptions,
Logger: cfg.Logger,
ClientLogMode: cfg.ClientLogMode,
+ AppID: cfg.AppID,
}
resolveAWSRetryerProvider(cfg, &opts)
resolveAWSRetryMaxAttempts(cfg, &opts)
@@ -240,6 +435,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
resolveAWSEndpointResolver(cfg, &opts)
resolveUseDualStackEndpoint(cfg, &opts)
resolveUseFIPSEndpoint(cfg, &opts)
+ resolveBaseEndpoint(cfg, &opts)
return New(opts, optFns...)
}
@@ -331,7 +527,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
-func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
@@ -343,20 +547,39 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
return
}
- o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver())
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
}
-func addClientUserAgent(stack *middleware.Stack) error {
- return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)(stack)
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sso", goModuleVersion)
+ if len(options.AppID) > 0 {
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+ }
+
+ return nil
}
-func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
- mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
- CredentialsProvider: o.Credentials,
- Signer: o.HTTPSignerV4,
- LogSigning: o.ClientLogMode.IsSigning(),
- })
- return stack.Finalize.Add(mw, middleware.After)
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
}
type HTTPSignerV4 interface {
@@ -377,12 +600,97 @@ func newDefaultV4Signer(o Options) *v4.Signer {
})
}
-func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
- mo := retry.AddRetryMiddlewaresOptions{
- Retryer: o.Retryer,
- LogRetryAttempts: o.ClientLogMode.IsRetries(),
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+
+func addSpanRetryLoop(stack *middleware.Stack, options Options) error {
+ return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before)
+}
+
+type spanRetryLoop struct {
+ options Options
+}
+
+func (*spanRetryLoop) ID() string {
+ return "spanRetryLoop"
+}
+
+func (m *spanRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ middleware.FinalizeOutput, middleware.Metadata, error,
+) {
+ tracer := operationTracer(m.options.TracerProvider)
+ ctx, span := tracer.StartSpan(ctx, "RetryLoop")
+ defer span.End()
+
+ return next.HandleFinalize(ctx, in)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+ return nil
+ })
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+ return nil
+ })
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sso")
+ })
+ if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
+ return err
}
- return retry.AddRetryMiddlewares(stack, mo)
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
+ }
+ return nil
}
// resolves dual-stack endpoint configuration
@@ -415,12 +723,99 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
return nil
}
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+ if mode == aws.AccountIDEndpointModeDisabled {
+ return nil
+ }
+
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+ return aws.String(ca.Credentials.AccountID)
+ }
+
+ return nil
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+ mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+ if err := stack.Build.Add(&mw, middleware.After); err != nil {
+ return err
+ }
+ return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+ c.timeOffset = new(atomic.Int64)
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.Retryer.(type) {
+ case *retry.Standard:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+ case *retry.AdaptiveMode:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+ }
+ return nil
+}
+
+type setCredentialSourceMiddleware struct {
+ ua *awsmiddleware.RequestUserAgent
+ options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+ if !ok {
+ return next.HandleBuild(ctx, in)
+ }
+ providerSources := asProviderSource.ProviderSources()
+ for _, source := range providerSources {
+ m.ua.AddCredentialsSource(source)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ mw := setCredentialSourceMiddleware{ua: ua, options: options}
+ return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
+func resolveTracerProvider(options *Options) {
+ if options.TracerProvider == nil {
+ options.TracerProvider = &tracing.NopTracerProvider{}
+ }
+}
+
+func resolveMeterProvider(options *Options) {
+ if options.MeterProvider == nil {
+ options.MeterProvider = metrics.NopMeterProvider{}
+ }
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
- return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
- return awshttp.AddResponseErrorMiddleware(stack)
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
}
func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
@@ -431,3 +826,118 @@ func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
}, middleware.After)
}
+
+type disableHTTPSMiddleware struct {
+ DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+ return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+ req.URL.Scheme = "http"
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Finalize.Insert(&disableHTTPSMiddleware{
+ DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+type spanInitializeStart struct {
+}
+
+func (*spanInitializeStart) ID() string {
+ return "spanInitializeStart"
+}
+
+func (m *spanInitializeStart) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "Initialize")
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanInitializeEnd struct {
+}
+
+func (*spanInitializeEnd) ID() string {
+ return "spanInitializeEnd"
+}
+
+func (m *spanInitializeEnd) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanBuildRequestStart struct {
+}
+
+func (*spanBuildRequestStart) ID() string {
+ return "spanBuildRequestStart"
+}
+
+func (m *spanBuildRequestStart) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ middleware.SerializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "BuildRequest")
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type spanBuildRequestEnd struct {
+}
+
+func (*spanBuildRequestEnd) ID() string {
+ return "spanBuildRequestEnd"
+}
+
+func (m *spanBuildRequestEnd) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ middleware.BuildOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleBuild(ctx, in)
+}
+
+func addSpanInitializeStart(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before)
+}
+
+func addSpanInitializeEnd(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After)
+}
+
+func addSpanBuildRequestStart(stack *middleware.Stack) error {
+ return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before)
+}
+
+func addSpanBuildRequestEnd(stack *middleware.Stack) error {
+ return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
index 1c2b7499d..b8031eeea 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_GetRoleCredentials.go
@@ -4,14 +4,15 @@ package sso
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/service/sso/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Returns the STS short-term credentials for a given role name that is assigned to
-// the user.
+// Returns the STS short-term credentials for a given role name that is assigned
+// to the user.
func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredentialsInput, optFns ...func(*Options)) (*GetRoleCredentialsOutput, error) {
if params == nil {
params = &GetRoleCredentialsInput{}
@@ -29,10 +30,10 @@ func (c *Client) GetRoleCredentials(ctx context.Context, params *GetRoleCredenti
type GetRoleCredentialsInput struct {
- // The token issued by the CreateToken API call. For more information, see
- // CreateToken
- // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
//
// This member is required.
AccessToken *string
@@ -62,6 +63,9 @@ type GetRoleCredentialsOutput struct {
}
func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsRestjson1_serializeOpGetRoleCredentials{}, middleware.After)
if err != nil {
return err
@@ -70,28 +74,38 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetRoleCredentials"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -100,12 +114,27 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpGetRoleCredentialsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetRoleCredentials(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -115,6 +144,21 @@ func (c *Client) addOperationGetRoleCredentialsMiddlewares(stack *middleware.Sta
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
index 4fffc77af..4294e4d3c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccountRoles.go
@@ -29,10 +29,10 @@ func (c *Client) ListAccountRoles(ctx context.Context, params *ListAccountRolesI
type ListAccountRolesInput struct {
- // The token issued by the CreateToken API call. For more information, see
- // CreateToken
- // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
//
// This member is required.
AccessToken *string
@@ -68,6 +68,9 @@ type ListAccountRolesOutput struct {
}
func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccountRoles{}, middleware.After)
if err != nil {
return err
@@ -76,28 +79,38 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccountRoles"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -106,12 +119,27 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpListAccountRolesValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccountRoles(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -121,17 +149,24 @@ func (c *Client) addOperationListAccountRolesMiddlewares(stack *middleware.Stack
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
-// ListAccountRolesAPIClient is a client that implements the ListAccountRoles
-// operation.
-type ListAccountRolesAPIClient interface {
- ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error)
-}
-
-var _ ListAccountRolesAPIClient = (*Client)(nil)
-
// ListAccountRolesPaginatorOptions is the paginator options for ListAccountRoles
type ListAccountRolesPaginatorOptions struct {
// The number of items that clients can request per page.
@@ -195,6 +230,9 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func
}
params.MaxResults = limit
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
result, err := p.client.ListAccountRoles(ctx, ¶ms, optFns...)
if err != nil {
return nil, err
@@ -214,6 +252,14 @@ func (p *ListAccountRolesPaginator) NextPage(ctx context.Context, optFns ...func
return result, nil
}
+// ListAccountRolesAPIClient is a client that implements the ListAccountRoles
+// operation.
+type ListAccountRolesAPIClient interface {
+ ListAccountRoles(context.Context, *ListAccountRolesInput, ...func(*Options)) (*ListAccountRolesOutput, error)
+}
+
+var _ ListAccountRolesAPIClient = (*Client)(nil)
+
func newServiceMetadataMiddleware_opListAccountRoles(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
index e717a426c..1db72a995 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_ListAccounts.go
@@ -12,10 +12,10 @@ import (
)
// Lists all AWS accounts assigned to the user. These AWS accounts are assigned by
-// the administrator of the account. For more information, see Assign User Access
-// (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
-// in the IAM Identity Center User Guide. This operation returns a paginated
-// response.
+// the administrator of the account. For more information, see [Assign User Access]in the IAM Identity
+// Center User Guide. This operation returns a paginated response.
+//
+// [Assign User Access]: https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers
func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, optFns ...func(*Options)) (*ListAccountsOutput, error) {
if params == nil {
params = &ListAccountsInput{}
@@ -33,10 +33,10 @@ func (c *Client) ListAccounts(ctx context.Context, params *ListAccountsInput, op
type ListAccountsInput struct {
- // The token issued by the CreateToken API call. For more information, see
- // CreateToken
- // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
//
// This member is required.
AccessToken *string
@@ -67,6 +67,9 @@ type ListAccountsOutput struct {
}
func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsRestjson1_serializeOpListAccounts{}, middleware.After)
if err != nil {
return err
@@ -75,28 +78,38 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "ListAccounts"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -105,12 +118,27 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpListAccountsValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opListAccounts(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -120,16 +148,24 @@ func (c *Client) addOperationListAccountsMiddlewares(stack *middleware.Stack, op
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
-// ListAccountsAPIClient is a client that implements the ListAccounts operation.
-type ListAccountsAPIClient interface {
- ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error)
-}
-
-var _ ListAccountsAPIClient = (*Client)(nil)
-
// ListAccountsPaginatorOptions is the paginator options for ListAccounts
type ListAccountsPaginatorOptions struct {
// This is the number of items clients can request per page.
@@ -193,6 +229,9 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op
}
params.MaxResults = limit
+ optFns = append([]func(*Options){
+ addIsPaginatorUserAgent,
+ }, optFns...)
result, err := p.client.ListAccounts(ctx, ¶ms, optFns...)
if err != nil {
return nil, err
@@ -212,6 +251,13 @@ func (p *ListAccountsPaginator) NextPage(ctx context.Context, optFns ...func(*Op
return result, nil
}
+// ListAccountsAPIClient is a client that implements the ListAccounts operation.
+type ListAccountsAPIClient interface {
+ ListAccounts(context.Context, *ListAccountsInput, ...func(*Options)) (*ListAccountsOutput, error)
+}
+
+var _ ListAccountsAPIClient = (*Client)(nil)
+
func newServiceMetadataMiddleware_opListAccounts(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
index 8b9b44745..2ca66ca50 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/api_op_Logout.go
@@ -4,6 +4,7 @@ package sso
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -11,17 +12,20 @@ import (
// Removes the locally stored SSO tokens from the client-side cache and sends an
// API call to the IAM Identity Center service to invalidate the corresponding
-// server-side IAM Identity Center sign in session. If a user uses IAM Identity
-// Center to access the AWS CLI, the user’s IAM Identity Center sign in session is
-// used to obtain an IAM session, as specified in the corresponding IAM Identity
-// Center permission set. More specifically, IAM Identity Center assumes an IAM
-// role in the target account on behalf of the user, and the corresponding
-// temporary AWS credentials are returned to the client. After user logout, any
-// existing IAM role sessions that were created by using IAM Identity Center
-// permission sets continue based on the duration configured in the permission set.
-// For more information, see User authentications
-// (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html) in
-// the IAM Identity Center User Guide.
+// server-side IAM Identity Center sign in session.
+//
+// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM
+// Identity Center sign in session is used to obtain an IAM session, as specified
+// in the corresponding IAM Identity Center permission set. More specifically, IAM
+// Identity Center assumes an IAM role in the target account on behalf of the user,
+// and the corresponding temporary AWS credentials are returned to the client.
+//
+// After user logout, any existing IAM role sessions that were created by using
+// IAM Identity Center permission sets continue based on the duration configured in
+// the permission set. For more information, see [User authentications]in the IAM Identity Center User
+// Guide.
+//
+// [User authentications]: https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html
func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func(*Options)) (*LogoutOutput, error) {
if params == nil {
params = &LogoutInput{}
@@ -39,10 +43,10 @@ func (c *Client) Logout(ctx context.Context, params *LogoutInput, optFns ...func
type LogoutInput struct {
- // The token issued by the CreateToken API call. For more information, see
- // CreateToken
- // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
+ // The token issued by the CreateToken API call. For more information, see [CreateToken] in the
+ // IAM Identity Center OIDC API Reference Guide.
+ //
+ // [CreateToken]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html
//
// This member is required.
AccessToken *string
@@ -58,6 +62,9 @@ type LogoutOutput struct {
}
func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsRestjson1_serializeOpLogout{}, middleware.After)
if err != nil {
return err
@@ -66,28 +73,38 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "Logout"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -96,12 +113,27 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpLogoutValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opLogout(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -111,6 +143,21 @@ func (c *Client) addOperationLogoutMiddlewares(stack *middleware.Stack, options
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go
new file mode 100644
index 000000000..366963b49
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/auth.go
@@ -0,0 +1,337 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+ params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+ return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+ }
+ }
+
+ if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+ // The name of the operation being invoked.
+ Operation string
+
+ // The region in which the operation is being invoked.
+ Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+ params := &AuthResolverParameters{
+ Operation: operation,
+ }
+
+ bindAuthParamsRegion(ctx, params, input, options)
+
+ return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+ ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ if overrides, ok := operationAuthOptions[params.Operation]; ok {
+ return overrides(params), nil
+ }
+ return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+ "GetRoleCredentials": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "ListAccountRoles": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "ListAccounts": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "Logout": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "awsssoportal")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ return props
+ }(),
+ },
+ }
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
+ params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+ options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+ }
+
+ scheme, ok := m.selectScheme(options)
+ if !ok {
+ return out, metadata, fmt.Errorf("could not select an auth scheme")
+ }
+
+ ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+ for _, option := range options {
+ if option.SchemeID == smithyauth.SchemeIDAnonymous {
+ return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+ }
+
+ for _, scheme := range m.options.AuthSchemes {
+ if scheme.SchemeID() != option.SchemeID {
+ continue
+ }
+
+ if scheme.IdentityResolver(m.options) != nil {
+ return newResolvedAuthScheme(scheme, option), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+ Scheme smithyhttp.AuthScheme
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+ return &resolvedAuthScheme{
+ Scheme: scheme,
+ IdentityProperties: option.IdentityProperties,
+ SignerProperties: option.SignerProperties,
+ }
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+ return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+ v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+ return v
+}
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ resolver := rscheme.Scheme.IdentityResolver(m.options)
+ if resolver == nil {
+ return out, metadata, fmt.Errorf("no identity resolver")
+ }
+
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("get identity: %w", err)
+ }
+
+ ctx = setIdentity(ctx, identity)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+ return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+ v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+ return v
+}
+
+type signRequestMiddleware struct {
+ options Options
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ identity := getIdentity(ctx)
+ if identity == nil {
+ return out, metadata, fmt.Errorf("no identity")
+ }
+
+ signer := rscheme.Scheme.Signer()
+ if signer == nil {
+ return out, metadata, fmt.Errorf("no signer")
+ }
+
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) {
+ return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties)
+ }, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("sign request: %w", err)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
index 6a1851da2..ec23c36f5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/deserializers.go
@@ -13,12 +13,23 @@ import (
smithyio "github.com/aws/smithy-go/io"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io"
"io/ioutil"
"strings"
+ "time"
)
+func deserializeS3Expires(v string) (*time.Time, error) {
+ t, err := smithytime.ParseHTTPDate(v)
+ if err != nil {
+ return nil, nil
+ }
+ return &t, nil
+}
+
type awsRestjson1_deserializeOpGetRoleCredentials struct {
}
@@ -34,6 +45,10 @@ func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx con
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -73,6 +88,7 @@ func (m *awsRestjson1_deserializeOpGetRoleCredentials) HandleDeserialize(ctx con
}
}
+ span.End()
return out, metadata, err
}
@@ -86,9 +102,9 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp
errorCode := "UnknownError"
errorMessage := errorCode
- code := response.Header.Get("X-Amzn-ErrorType")
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
}
var buff [1024]byte
@@ -97,7 +113,7 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- code, message, err := restjson.GetErrorInfo(decoder)
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -109,8 +125,8 @@ func awsRestjson1_deserializeOpErrorGetRoleCredentials(response *smithyhttp.Resp
}
errorBody.Seek(0, io.SeekStart)
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
}
if len(message) != 0 {
errorMessage = message
@@ -190,6 +206,10 @@ func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx conte
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -229,6 +249,7 @@ func (m *awsRestjson1_deserializeOpListAccountRoles) HandleDeserialize(ctx conte
}
}
+ span.End()
return out, metadata, err
}
@@ -242,9 +263,9 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon
errorCode := "UnknownError"
errorMessage := errorCode
- code := response.Header.Get("X-Amzn-ErrorType")
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
}
var buff [1024]byte
@@ -253,7 +274,7 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- code, message, err := restjson.GetErrorInfo(decoder)
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -265,8 +286,8 @@ func awsRestjson1_deserializeOpErrorListAccountRoles(response *smithyhttp.Respon
}
errorBody.Seek(0, io.SeekStart)
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
}
if len(message) != 0 {
errorMessage = message
@@ -355,6 +376,10 @@ func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.C
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -394,6 +419,7 @@ func (m *awsRestjson1_deserializeOpListAccounts) HandleDeserialize(ctx context.C
}
}
+ span.End()
return out, metadata, err
}
@@ -407,9 +433,9 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response,
errorCode := "UnknownError"
errorMessage := errorCode
- code := response.Header.Get("X-Amzn-ErrorType")
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
}
var buff [1024]byte
@@ -418,7 +444,7 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response,
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- code, message, err := restjson.GetErrorInfo(decoder)
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -430,8 +456,8 @@ func awsRestjson1_deserializeOpErrorListAccounts(response *smithyhttp.Response,
}
errorBody.Seek(0, io.SeekStart)
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
}
if len(message) != 0 {
errorMessage = message
@@ -520,6 +546,10 @@ func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -537,6 +567,7 @@ func (m *awsRestjson1_deserializeOpLogout) HandleDeserialize(ctx context.Context
}
}
+ span.End()
return out, metadata, err
}
@@ -550,9 +581,9 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada
errorCode := "UnknownError"
errorMessage := errorCode
- code := response.Header.Get("X-Amzn-ErrorType")
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
}
var buff [1024]byte
@@ -561,7 +592,7 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- code, message, err := restjson.GetErrorInfo(decoder)
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -573,8 +604,8 @@ func awsRestjson1_deserializeOpErrorLogout(response *smithyhttp.Response, metada
}
errorBody.Seek(0, io.SeekStart)
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
}
if len(message) != 0 {
errorMessage = message
@@ -858,7 +889,7 @@ func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRe
for key, value := range shape {
switch key {
- case "message":
+ case "message", "Message":
if value != nil {
jtv, ok := value.(string)
if !ok {
@@ -898,7 +929,7 @@ func awsRestjson1_deserializeDocumentResourceNotFoundException(v **types.Resourc
for key, value := range shape {
switch key {
- case "message":
+ case "message", "Message":
if value != nil {
jtv, ok := value.(string)
if !ok {
@@ -1092,7 +1123,7 @@ func awsRestjson1_deserializeDocumentTooManyRequestsException(v **types.TooManyR
for key, value := range shape {
switch key {
- case "message":
+ case "message", "Message":
if value != nil {
jtv, ok := value.(string)
if !ok {
@@ -1132,7 +1163,7 @@ func awsRestjson1_deserializeDocumentUnauthorizedException(v **types.Unauthorize
for key, value := range shape {
switch key {
- case "message":
+ case "message", "Message":
if value != nil {
jtv, ok := value.(string)
if !ok {
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
index f981b154f..7f6e429fd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/doc.go
@@ -6,17 +6,22 @@
// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
// service that makes it easy for you to assign user access to IAM Identity Center
// resources such as the AWS access portal. Users can get AWS account applications
-// and roles assigned to them and get federated into the application. Although AWS
-// Single Sign-On was renamed, the sso and identitystore API namespaces will
-// continue to retain their original name for backward compatibility purposes. For
-// more information, see IAM Identity Center rename
-// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
+// and roles assigned to them and get federated into the application.
+//
+// Although AWS Single Sign-On was renamed, the sso and identitystore API
+// namespaces will continue to retain their original name for backward
+// compatibility purposes. For more information, see [IAM Identity Center rename].
+//
// This reference guide describes the IAM Identity Center Portal operations that
// you can call programatically and includes detailed information on data types and
-// errors. AWS provides SDKs that consist of libraries and sample code for various
+// errors.
+//
+// AWS provides SDKs that consist of libraries and sample code for various
// programming languages and platforms, such as Java, Ruby, .Net, iOS, or Android.
// The SDKs provide a convenient way to create programmatic access to IAM Identity
// Center and other AWS services. For more information about the AWS SDKs,
-// including how to download and install them, see Tools for Amazon Web Services
-// (http://aws.amazon.com/tools/).
+// including how to download and install them, see [Tools for Amazon Web Services].
+//
+// [Tools for Amazon Web Services]: http://aws.amazon.com/tools/
+// [IAM Identity Center rename]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed
package sso
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
index 43c06f11a..53c6bc756 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/endpoints.go
@@ -8,10 +8,19 @@ import (
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
internalendpoints "github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
"net/url"
+ "os"
"strings"
)
@@ -39,13 +48,6 @@ func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointRe
return fn(region, options)
}
-func resolveDefaultEndpointConfiguration(o *Options) {
- if o.EndpointResolver != nil {
- return
- }
- o.EndpointResolver = NewDefaultEndpointResolver()
-}
-
// EndpointResolverFromURL returns an EndpointResolver configured using the
// provided endpoint url. By default, the resolved endpoint resolver uses the
// client region as signing region, and the endpoint source is set to
@@ -79,6 +81,10 @@ func (*ResolveEndpoint) ID() string {
func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
@@ -94,6 +100,11 @@ func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.Ser
var endpoint aws.Endpoint
endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
if err != nil {
+ nf := (&aws.EndpointNotFoundError{})
+ if errors.As(err, &nf) {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+ return next.HandleSerialize(ctx, in)
+ }
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
@@ -129,27 +140,10 @@ func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
type wrappedEndpointResolver struct {
awsResolver aws.EndpointResolverWithOptions
- resolver EndpointResolver
}
func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
- if w.awsResolver == nil {
- goto fallback
- }
- endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options)
- if err == nil {
- return endpoint, nil
- }
-
- if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) {
- return endpoint, err
- }
-
-fallback:
- if w.resolver == nil {
- return endpoint, fmt.Errorf("default endpoint resolver provided was nil")
- }
- return w.resolver.ResolveEndpoint(region, options)
+ return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
}
type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
@@ -160,12 +154,13 @@ func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, opti
var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
-// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
-// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the the provided
-// fallbackResolver for resolution.
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
//
-// fallbackResolver must not be nil
-func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver {
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
var resolver aws.EndpointResolverWithOptions
if awsResolverWithOptions != nil {
@@ -176,7 +171,6 @@ func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptio
return &wrappedEndpointResolver{
awsResolver: resolver,
- resolver: fallbackResolver,
}
}
@@ -198,3 +192,365 @@ func finalizeClientEndpointResolverOptions(options *Options) {
}
}
+
+func resolveEndpointResolverV2(options *Options) {
+ if options.EndpointResolverV2 == nil {
+ options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+ }
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+ if cfg.BaseEndpoint != nil {
+ o.BaseEndpoint = cfg.BaseEndpoint
+ }
+
+ _, g := os.LookupEnv("AWS_ENDPOINT_URL")
+ _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO")
+
+ if g && !s {
+ return
+ }
+
+ value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO", cfg.ConfigSources)
+ if found && err == nil {
+ o.BaseEndpoint = &value
+ }
+}
+
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+ // The AWS region used to dispatch the request.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Region
+ Region *string
+
+ // When true, use the dual-stack endpoint. If the configured endpoint does not
+ // support dual-stack, dispatching the request MAY return an error.
+ //
+ // Defaults to
+ // false if no value is provided.
+ //
+ // AWS::UseDualStack
+ UseDualStack *bool
+
+ // When true, send this request to the FIPS-compliant regional endpoint. If the
+ // configured endpoint does not have a FIPS compliant endpoint, dispatching the
+ // request will return an error.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::UseFIPS
+ UseFIPS *bool
+
+ // Override the endpoint used to send this request
+ //
+ // Parameter is
+ // required.
+ //
+ // SDK::Endpoint
+ Endpoint *string
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+ if p.UseDualStack == nil {
+ return fmt.Errorf("parameter UseDualStack is required")
+ }
+
+ if p.UseFIPS == nil {
+ return fmt.Errorf("parameter UseFIPS is required")
+ }
+
+ return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameterswith default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+ if p.UseDualStack == nil {
+ p.UseDualStack = ptr.Bool(false)
+ }
+
+ if p.UseFIPS == nil {
+ p.UseFIPS = ptr.Bool(false)
+ }
+ return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+ // ResolveEndpoint attempts to resolve the endpoint with the provided options,
+ // returning the endpoint if found. Otherwise an error is returned.
+ ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+ smithyendpoints.Endpoint, error,
+ )
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+ return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+ ctx context.Context, params EndpointParameters,
+) (
+ endpoint smithyendpoints.Endpoint, err error,
+) {
+ params = params.WithDefaults()
+ if err = params.ValidateRequired(); err != nil {
+ return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+ }
+ _UseDualStack := *params.UseDualStack
+ _UseFIPS := *params.UseFIPS
+
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+ }
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+ }
+ uriString := _Endpoint
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsFIPS {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+ }
+ }
+ if _UseFIPS == true {
+ if true == _PartitionResult.SupportsFIPS {
+ if "aws-us-gov" == _PartitionResult.Name {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+ }
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://portal.sso.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+ bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+ params := &EndpointParameters{}
+
+ params.Region = bindRegion(options.Region)
+ params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+ params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+ params.Endpoint = options.BaseEndpoint
+
+ if b, ok := input.(endpointParamsBinder); ok {
+ b.bindEndpointParams(params)
+ }
+
+ return params
+}
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveEndpoint")
+ defer span.End()
+
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.options.EndpointResolverV2 == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+ endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration",
+ func() (smithyendpoints.Endpoint, error) {
+ return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ span.SetProperty("client.call.resolved_endpoint", endpt.URI.String())
+
+ if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+ endpt.URI.RawPath = endpt.URI.Path
+ }
+ req.URL.Scheme = endpt.URI.Scheme
+ req.URL.Host = endpt.URI.Host
+ req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+ req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+ for k := range endpt.Headers {
+ req.Header.Set(k, endpt.Headers.Get(k))
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+ for _, o := range opts {
+ rscheme.SignerProperties.SetAll(&o.SignerProperties)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
index 5be0e34cd..1a88fe4df 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/generated.json
@@ -12,19 +12,25 @@
"api_op_ListAccountRoles.go",
"api_op_ListAccounts.go",
"api_op_Logout.go",
+ "auth.go",
"deserializers.go",
"doc.go",
"endpoints.go",
+ "endpoints_config_test.go",
+ "endpoints_test.go",
"generated.json",
"internal/endpoints/endpoints.go",
"internal/endpoints/endpoints_test.go",
+ "options.go",
"protocol_test.go",
"serializers.go",
+ "snapshot_test.go",
+ "sra_operation_order_test.go",
"types/errors.go",
"types/types.go",
"validators.go"
],
- "go": "1.15",
+ "go": "1.22",
"module": "github.com/aws/aws-sdk-go-v2/service/sso",
"unstable": false
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
index 268b841f6..59aa2aebd 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/go_module_metadata.go
@@ -3,4 +3,4 @@
package sso
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.11.23"
+const goModuleVersion = "1.25.3"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
index aeac293ea..04416606b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/internal/endpoints/endpoints.go
@@ -87,15 +87,21 @@ func New() *Resolver {
var partitionRegexp = struct {
Aws *regexp.Regexp
AwsCn *regexp.Regexp
+ AwsEusc *regexp.Regexp
AwsIso *regexp.Regexp
AwsIsoB *regexp.Regexp
+ AwsIsoE *regexp.Regexp
+ AwsIsoF *regexp.Regexp
AwsUsGov *regexp.Regexp
}{
- Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
+ Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"),
AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"),
AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+ AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
}
@@ -135,6 +141,14 @@ var defaultPartitions = endpoints.Partitions{
RegionRegex: partitionRegexp.Aws,
IsRegionalized: true,
Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "af-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.af-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "af-south-1",
+ },
+ },
endpoints.EndpointKey{
Region: "ap-east-1",
}: endpoints.Endpoint{
@@ -175,6 +189,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "ap-south-1",
},
},
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-south-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpoints.EndpointKey{
Region: "ap-southeast-1",
}: endpoints.Endpoint{
@@ -191,6 +213,30 @@ var defaultPartitions = endpoints.Partitions{
Region: "ap-southeast-2",
},
},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-3",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-3.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-4.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ap-southeast-5.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-5",
+ },
+ },
endpoints.EndpointKey{
Region: "ca-central-1",
}: endpoints.Endpoint{
@@ -199,6 +245,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "ca-central-1",
},
},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.ca-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpoints.EndpointKey{
Region: "eu-central-1",
}: endpoints.Endpoint{
@@ -207,6 +261,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "eu-central-1",
},
},
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-central-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpoints.EndpointKey{
Region: "eu-north-1",
}: endpoints.Endpoint{
@@ -223,6 +285,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "eu-south-1",
},
},
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.eu-south-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpoints.EndpointKey{
Region: "eu-west-1",
}: endpoints.Endpoint{
@@ -247,6 +317,22 @@ var defaultPartitions = endpoints.Partitions{
Region: "eu-west-3",
},
},
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.il-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.me-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "me-central-1",
+ },
+ },
endpoints.EndpointKey{
Region: "me-south-1",
}: endpoints.Endpoint{
@@ -279,6 +365,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "us-east-2",
},
},
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.us-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-1",
+ },
+ },
endpoints.EndpointKey{
Region: "us-west-2",
}: endpoints.Endpoint{
@@ -323,6 +417,45 @@ var defaultPartitions = endpoints.Partitions{
},
RegionRegex: partitionRegexp.AwsCn,
IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "cn-north-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.cn-north-1.amazonaws.com.cn",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "cn-northwest-1",
+ }: endpoints.Endpoint{
+ Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ {
+ ID: "aws-eusc",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsEusc,
+ IsRegionalized: true,
},
{
ID: "aws-iso",
@@ -366,6 +499,48 @@ var defaultPartitions = endpoints.Partitions{
RegionRegex: partitionRegexp.AwsIsoB,
IsRegionalized: true,
},
+ {
+ ID: "aws-iso-e",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoE,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-f",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "portal.sso-fips.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "portal.sso.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoF,
+ IsRegionalized: true,
+ },
{
ID: "aws-us-gov",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
new file mode 100644
index 000000000..aa744f159
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/options.go
@@ -0,0 +1,232 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sso
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+)
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The optional application specific identifier appended to the User-Agent header.
+ AppID string
+
+ // This endpoint will be given as input to an EndpointResolverV2. It is used for
+ // providing a custom base endpoint that is subject to modifications by the
+ // processing EndpointResolverV2.
+ BaseEndpoint *string
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+ // clients initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ //
+ // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+ // value for this field will likely prevent you from using any endpoint-related
+ // service features released after the introduction of EndpointResolverV2 and
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
+ EndpointResolver EndpointResolver
+
+ // Resolves the endpoint used for a particular service operation. This should be
+ // used over the deprecated EndpointResolver.
+ EndpointResolverV2 EndpointResolverV2
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The client meter provider.
+ MeterProvider metrics.MeterProvider
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // RetryMaxAttempts specifies the maximum number attempts an API client will call
+ // an operation that fails with a retryable error. A value of 0 is ignored, and
+ // will not be used to configure the API client created default retryer, or modify
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
+ RetryMaxAttempts int
+
+ // RetryMode specifies the retry mode the API client will be created with, if
+ // Retryer option is not also specified.
+ //
+ // When creating a new API Clients this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ RetryMode aws.RetryMode
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer. The kind of
+ // default retry created by the API client can be changed with the RetryMode
+ // option.
+ Retryer aws.Retryer
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+ // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+ // should not populate this structure programmatically, or rely on the values here
+ // within your applications.
+ RuntimeEnvironment aws.RuntimeEnvironment
+
+ // The client tracer provider.
+ TracerProvider tracing.TracerProvider
+
+ // The initial DefaultsMode used when the client options were constructed. If the
+ // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ resolvedDefaultsMode aws.DefaultsMode
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // The auth scheme resolver which determines how to authenticate for each
+ // operation.
+ AuthSchemeResolver AuthSchemeResolver
+
+ // The list of auth schemes supported by the client.
+ AuthSchemes []smithyhttp.AuthScheme
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+
+ return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+ if schemeID == "aws.auth#sigv4" {
+ return getSigV4IdentityResolver(o)
+ }
+ if schemeID == "smithy.api#noAuth" {
+ return &smithyauth.AnonymousIdentityResolver{}
+ }
+ return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolverV2 = v
+ }
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+ }
+ return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+func ignoreAnonymousAuth(options *Options) {
+ if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+ options.Credentials = nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
index 29e320811..a7a5b57de 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/serializers.go
@@ -8,6 +8,7 @@ import (
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/encoding/httpbinding"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -21,6 +22,10 @@ func (*awsRestjson1_serializeOpGetRoleCredentials) ID() string {
func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -36,7 +41,14 @@ func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
- restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
@@ -50,6 +62,8 @@ func (m *awsRestjson1_serializeOpGetRoleCredentials) HandleSerialize(ctx context
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCredentialsInput, encoder *httpbinding.Encoder) error {
@@ -57,7 +71,7 @@ func awsRestjson1_serializeOpHttpBindingsGetRoleCredentialsInput(v *GetRoleCrede
return fmt.Errorf("unsupported serialization of nil %T", v)
}
- if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+ if v.AccessToken != nil {
locationName := "X-Amz-Sso_bearer_token"
encoder.SetHeader(locationName).String(*v.AccessToken)
}
@@ -83,6 +97,10 @@ func (*awsRestjson1_serializeOpListAccountRoles) ID() string {
func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -98,7 +116,14 @@ func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.C
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
- restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
@@ -112,6 +137,8 @@ func (m *awsRestjson1_serializeOpListAccountRoles) HandleSerialize(ctx context.C
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRolesInput, encoder *httpbinding.Encoder) error {
@@ -119,7 +146,7 @@ func awsRestjson1_serializeOpHttpBindingsListAccountRolesInput(v *ListAccountRol
return fmt.Errorf("unsupported serialization of nil %T", v)
}
- if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+ if v.AccessToken != nil {
locationName := "X-Amz-Sso_bearer_token"
encoder.SetHeader(locationName).String(*v.AccessToken)
}
@@ -149,6 +176,10 @@ func (*awsRestjson1_serializeOpListAccounts) ID() string {
func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -164,7 +195,14 @@ func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Conte
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "GET"
- restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
@@ -178,6 +216,8 @@ func (m *awsRestjson1_serializeOpListAccounts) HandleSerialize(ctx context.Conte
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput, encoder *httpbinding.Encoder) error {
@@ -185,7 +225,7 @@ func awsRestjson1_serializeOpHttpBindingsListAccountsInput(v *ListAccountsInput,
return fmt.Errorf("unsupported serialization of nil %T", v)
}
- if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+ if v.AccessToken != nil {
locationName := "X-Amz-Sso_bearer_token"
encoder.SetHeader(locationName).String(*v.AccessToken)
}
@@ -211,6 +251,10 @@ func (*awsRestjson1_serializeOpLogout) ID() string {
func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -226,7 +270,14 @@ func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
- restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
@@ -240,6 +291,8 @@ func (m *awsRestjson1_serializeOpLogout) HandleSerialize(ctx context.Context, in
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *httpbinding.Encoder) error {
@@ -247,7 +300,7 @@ func awsRestjson1_serializeOpHttpBindingsLogoutInput(v *LogoutInput, encoder *ht
return fmt.Errorf("unsupported serialization of nil %T", v)
}
- if v.AccessToken != nil && len(*v.AccessToken) > 0 {
+ if v.AccessToken != nil {
locationName := "X-Amz-Sso_bearer_token"
encoder.SetHeader(locationName).String(*v.AccessToken)
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
index 1401d585c..e97a126e8 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/errors.go
@@ -12,6 +12,8 @@ import (
type InvalidRequestException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -24,13 +26,20 @@ func (e *InvalidRequestException) ErrorMessage() string {
}
return *e.Message
}
-func (e *InvalidRequestException) ErrorCode() string { return "InvalidRequestException" }
+func (e *InvalidRequestException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRequestException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The specified resource doesn't exist.
type ResourceNotFoundException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -43,7 +52,12 @@ func (e *ResourceNotFoundException) ErrorMessage() string {
}
return *e.Message
}
-func (e *ResourceNotFoundException) ErrorCode() string { return "ResourceNotFoundException" }
+func (e *ResourceNotFoundException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ResourceNotFoundException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that the request is being made too frequently and is more than what
@@ -51,6 +65,8 @@ func (e *ResourceNotFoundException) ErrorFault() smithy.ErrorFault { return smit
type TooManyRequestsException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -63,7 +79,12 @@ func (e *TooManyRequestsException) ErrorMessage() string {
}
return *e.Message
}
-func (e *TooManyRequestsException) ErrorCode() string { return "TooManyRequestsException" }
+func (e *TooManyRequestsException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "TooManyRequestsException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that the request is not authorized. This can happen due to an invalid
@@ -71,6 +92,8 @@ func (e *TooManyRequestsException) ErrorFault() smithy.ErrorFault { return smith
type UnauthorizedException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -83,5 +106,10 @@ func (e *UnauthorizedException) ErrorMessage() string {
}
return *e.Message
}
-func (e *UnauthorizedException) ErrorCode() string { return "UnauthorizedException" }
+func (e *UnauthorizedException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "UnauthorizedException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *UnauthorizedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
index 051056b75..07ac468e3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sso/types/types.go
@@ -25,25 +25,24 @@ type AccountInfo struct {
type RoleCredentials struct {
// The identifier used for the temporary security credentials. For more
- // information, see Using Temporary Security Credentials to Request Access to AWS
- // Resources
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
+ // information, see [Using Temporary Security Credentials to Request Access to AWS Resources]in the AWS IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
AccessKeyId *string
// The date on which temporary security credentials expire.
Expiration int64
- // The key that is used to sign the request. For more information, see Using
- // Temporary Security Credentials to Request Access to AWS Resources
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
+ // The key that is used to sign the request. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS
+ // IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
SecretAccessKey *string
- // The token used for temporary credentials. For more information, see Using
- // Temporary Security Credentials to Request Access to AWS Resources
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
+ // The token used for temporary credentials. For more information, see [Using Temporary Security Credentials to Request Access to AWS Resources] in the AWS
+ // IAM User Guide.
+ //
+ // [Using Temporary Security Credentials to Request Access to AWS Resources]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html
SessionToken *string
noSmithyDocumentSerde
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
index 25eabe16c..b4cdac6b3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/CHANGELOG.md
@@ -1,3 +1,432 @@
+# v1.30.1 (2025-04-03)
+
+* No change notes available for this release.
+
+# v1.30.0 (2025-03-27)
+
+* **Feature**: This release adds AwsAdditionalDetails in the CreateTokenWithIAM API response.
+
+# v1.29.2 (2025-03-24)
+
+* No change notes available for this release.
+
+# v1.29.1 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.29.0 (2025-02-27)
+
+* **Feature**: Track credential providers via User-Agent Feature ids
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.15 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.14 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.13 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.12 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.11 (2025-01-24)
+
+* **Documentation**: Fixed typos in the descriptions.
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.28.10 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+
+# v1.28.9 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.8 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.7 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.6 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.5 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.4 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.27.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.27.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.27.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.27.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.8 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.26.7 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.26.6 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.5 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.4 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.2 (2024-07-03)
+
+* No change notes available for this release.
+
+# v1.26.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.25.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.6 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.5 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.4 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.3 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.24.2 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.1 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2024-05-10)
+
+* **Feature**: Updated request parameters for PKCE support.
+
+# v1.23.5 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.23.4 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.3 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.2 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.22.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.22.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.22.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.7 (2024-01-16)
+
+* No change notes available for this release.
+
+# v1.21.6 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.21.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+
+# v1.21.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.3 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.2 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.20.1 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-11-17)
+
+* **Feature**: Adding support for `sso-oauth:CreateTokenWithIAM`.
+
+# v1.19.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.3 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.2 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.1 (2023-09-22)
+
+* No change notes available for this release.
+
+# v1.17.0 (2023-09-20)
+
+* **Feature**: Update FIPS endpoints in aws-us-gov.
+
+# v1.16.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
+
+# v1.15.6 (2023-09-05)
+
+* No change notes available for this release.
+
+# v1.15.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.15.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.15.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.14 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.13 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.12 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.14.11 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.10 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.14.9 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.8 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.14.7 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.6 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.5 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.4 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.14.3 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.2 (2023-02-15)
+
+* **Announcement**: When receiving an error response in restJson-based services, an incorrect error type may have been returned based on the content of the response. This has been fixed via PR #2012 tracked in issue #1910.
+* **Bug Fix**: Correct error type parsing for restJson services.
+
+# v1.14.1 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.14.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.13.11 (2022-12-19)
+
+* No change notes available for this release.
+
+# v1.13.10 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.9 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.8 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.7 (2022-10-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.13.6 (2022-09-30)
+
+* **Documentation**: Documentation updates for the IAM Identity Center OIDC CLI Reference.
+
# v1.13.5 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
index 5e0a85a2c..57440b1fa 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_client.go
@@ -4,169 +4,241 @@ package ssooidc
import (
"context"
+ "errors"
+ "fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
smithydocument "github.com/aws/smithy-go/document"
"github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
"net"
"net/http"
+ "sync/atomic"
"time"
)
const ServiceID = "SSO OIDC"
const ServiceAPIVersion = "2019-06-10"
-// Client provides the API client to make operations call for AWS SSO OIDC.
-type Client struct {
- options Options
+type operationMetrics struct {
+ Duration metrics.Float64Histogram
+ SerializeDuration metrics.Float64Histogram
+ ResolveIdentityDuration metrics.Float64Histogram
+ ResolveEndpointDuration metrics.Float64Histogram
+ SignRequestDuration metrics.Float64Histogram
+ DeserializeDuration metrics.Float64Histogram
}
-// New returns an initialized Client based on the functional options. Provide
-// additional functional options to further configure the behavior of the client,
-// such as changing the client's endpoint or adding custom middleware behavior.
-func New(options Options, optFns ...func(*Options)) *Client {
- options = options.Copy()
+func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram {
+ switch name {
+ case "client.call.duration":
+ return m.Duration
+ case "client.call.serialization_duration":
+ return m.SerializeDuration
+ case "client.call.resolve_identity_duration":
+ return m.ResolveIdentityDuration
+ case "client.call.resolve_endpoint_duration":
+ return m.ResolveEndpointDuration
+ case "client.call.signing_duration":
+ return m.SignRequestDuration
+ case "client.call.deserialization_duration":
+ return m.DeserializeDuration
+ default:
+ panic("unrecognized operation metric")
+ }
+}
- resolveDefaultLogger(&options)
+func timeOperationMetric[T any](
+ ctx context.Context, metric string, fn func() (T, error),
+ opts ...metrics.RecordMetricOption,
+) (T, error) {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
- setResolvedDefaultsMode(&options)
+ start := time.Now()
+ v, err := fn()
+ end := time.Now()
- resolveRetryer(&options)
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ return v, err
+}
- resolveHTTPClient(&options)
+func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
- resolveHTTPSignerV4(&options)
+ var ended bool
+ start := time.Now()
+ return func() {
+ if ended {
+ return
+ }
+ ended = true
- resolveDefaultEndpointConfiguration(&options)
+ end := time.Now()
- for _, fn := range optFns {
- fn(&options)
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
}
+}
- client := &Client{
- options: options,
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
}
+}
- return client
+type operationMetricsKey struct{}
+
+func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) {
+ meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc")
+ om := &operationMetrics{}
+
+ var err error
+
+ om.Duration, err = operationMetricTimer(meter, "client.call.duration",
+ "Overall call duration (including retries and time to send or receive request and response body)")
+ if err != nil {
+ return nil, err
+ }
+ om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration",
+ "The time it takes to serialize a message body")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration",
+ "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration",
+ "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request")
+ if err != nil {
+ return nil, err
+ }
+ om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration",
+ "The time it takes to sign a request")
+ if err != nil {
+ return nil, err
+ }
+ om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration",
+ "The time it takes to deserialize a message body")
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(parent, operationMetricsKey{}, om), nil
}
-type Options struct {
- // Set of options to modify how an operation is invoked. These apply to all
- // operations invoked for this client. Use functional options on operation call to
- // modify this list for per operation behavior.
- APIOptions []func(*middleware.Stack) error
+func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) {
+ return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = desc
+ })
+}
- // Configures the events that will be sent to the configured logger.
- ClientLogMode aws.ClientLogMode
+func getOperationMetrics(ctx context.Context) *operationMetrics {
+ return ctx.Value(operationMetricsKey{}).(*operationMetrics)
+}
- // The credentials object to use when signing requests.
- Credentials aws.CredentialsProvider
+func operationTracer(p tracing.TracerProvider) tracing.Tracer {
+ return p.Tracer("github.com/aws/aws-sdk-go-v2/service/ssooidc")
+}
- // The configuration DefaultsMode that the SDK should use when constructing the
- // clients initial default settings.
- DefaultsMode aws.DefaultsMode
+// Client provides the API client to make operations call for AWS SSO OIDC.
+type Client struct {
+ options Options
- // The endpoint options to be used when attempting to resolve an endpoint.
- EndpointOptions EndpointResolverOptions
+ // Difference between the time reported by the server and the client
+ timeOffset *atomic.Int64
+}
- // The service endpoint resolver.
- EndpointResolver EndpointResolver
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
- // Signature Version 4 (SigV4) Signer
- HTTPSignerV4 HTTPSignerV4
+ resolveDefaultLogger(&options)
- // The logger writer interface to write logging messages to.
- Logger logging.Logger
+ setResolvedDefaultsMode(&options)
- // The region to send requests to. (Required)
- Region string
+ resolveRetryer(&options)
- // RetryMaxAttempts specifies the maximum number attempts an API client will call
- // an operation that fails with a retryable error. A value of 0 is ignored, and
- // will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. If specified in an operation call's functional
- // options with a value that is different than the constructed client's Options,
- // the Client's Retryer will be wrapped to use the operation's specific
- // RetryMaxAttempts value.
- RetryMaxAttempts int
+ resolveHTTPClient(&options)
- // RetryMode specifies the retry mode the API client will be created with, if
- // Retryer option is not also specified. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. Currently does not support per operation call
- // overrides, may in the future.
- RetryMode aws.RetryMode
+ resolveHTTPSignerV4(&options)
- // Retryer guides how HTTP requests should be retried in case of recoverable
- // failures. When nil the API client will use a default retryer. The kind of
- // default retry created by the API client can be changed with the RetryMode
- // option.
- Retryer aws.Retryer
+ resolveEndpointResolverV2(&options)
- // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
- // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
- // should not populate this structure programmatically, or rely on the values here
- // within your applications.
- RuntimeEnvironment aws.RuntimeEnvironment
+ resolveTracerProvider(&options)
- // The initial DefaultsMode used when the client options were constructed. If the
- // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
- // value was at that point in time. Currently does not support per operation call
- // overrides, may in the future.
- resolvedDefaultsMode aws.DefaultsMode
+ resolveMeterProvider(&options)
- // The HTTP client to invoke API calls with. Defaults to client's default HTTP
- // implementation if nil.
- HTTPClient HTTPClient
-}
+ resolveAuthSchemeResolver(&options)
-// WithAPIOptions returns a functional option for setting the Client's APIOptions
-// option.
-func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
- return func(o *Options) {
- o.APIOptions = append(o.APIOptions, optFns...)
+ for _, fn := range optFns {
+ fn(&options)
}
-}
-// WithEndpointResolver returns a functional option for setting the Client's
-// EndpointResolver option.
-func WithEndpointResolver(v EndpointResolver) func(*Options) {
- return func(o *Options) {
- o.EndpointResolver = v
+ finalizeRetryMaxAttempts(&options)
+
+ ignoreAnonymousAuth(&options)
+
+ wrapWithAnonymousAuth(&options)
+
+ resolveAuthSchemes(&options)
+
+ client := &Client{
+ options: options,
}
-}
-type HTTPClient interface {
- Do(*http.Request) (*http.Response, error)
-}
+ initializeTimeOffsetResolver(client)
-// Copy creates a clone where the APIOptions list is deep copied.
-func (o Options) Copy() Options {
- to := o
- to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
- copy(to.APIOptions, o.APIOptions)
+ return client
+}
- return to
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+ return c.options.Copy()
}
-func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
ctx = middleware.ClearStackValues(ctx)
+ ctx = middleware.WithServiceID(ctx, ServiceID)
+ ctx = middleware.WithOperationName(ctx, opID)
+
stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
options := c.options.Copy()
+
for _, fn := range optFns {
fn(&options)
}
- finalizeRetryMaxAttemptOptions(&options, *c)
+ finalizeOperationRetryMaxAttempts(&options, *c)
finalizeClientEndpointResolverOptions(&options)
@@ -182,20 +254,142 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
}
}
- handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
- result, metadata, err = handler.Handle(ctx, params)
+ ctx, err = withOperationMetrics(ctx, options.MeterProvider)
+ if err != nil {
+ return nil, metadata, err
+ }
+
+ tracer := operationTracer(options.TracerProvider)
+ spanName := fmt.Sprintf("%s.%s", ServiceID, opID)
+
+ ctx = tracing.WithOperationTracer(ctx, tracer)
+
+ ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) {
+ o.Kind = tracing.SpanKindClient
+ o.Properties.Set("rpc.system", "aws-api")
+ o.Properties.Set("rpc.method", opID)
+ o.Properties.Set("rpc.service", ServiceID)
+ })
+ endTimer := startMetricTimer(ctx, "client.call.duration")
+ defer endTimer()
+ defer span.End()
+
+ handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) {
+ o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc")
+ })
+ decorated := middleware.DecorateHandler(handler, stack)
+ result, metadata, err = decorated.Handle(ctx, params)
if err != nil {
+ span.SetProperty("exception.type", fmt.Sprintf("%T", err))
+ span.SetProperty("exception.message", err.Error())
+
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ span.SetProperty("api.error_code", aerr.ErrorCode())
+ span.SetProperty("api.error_message", aerr.ErrorMessage())
+ span.SetProperty("api.error_fault", aerr.ErrorFault().String())
+ }
+
err = &smithy.OperationError{
ServiceID: ServiceID,
OperationName: opID,
Err: err,
}
}
+
+ span.SetProperty("error", err != nil)
+ if err == nil {
+ span.SetStatus(tracing.SpanStatusOK)
+ } else {
+ span.SetStatus(tracing.SpanStatusError)
+ }
+
return result, metadata, err
}
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+ return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+ return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+ return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = setOperationInput(ctx, in.Parameters)
+ return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %v", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %v", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+ if options.AuthSchemeResolver == nil {
+ options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+ }
+}
+
+func resolveAuthSchemes(options *Options) {
+ if options.AuthSchemes == nil {
+ options.AuthSchemes = []smithyhttp.AuthScheme{
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ }
+ }
+}
+
type noSmithyDocumentSerde = smithydocument.NoSerde
+type legacyEndpointContextSetter struct {
+ LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+ return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.LegacyResolver != nil {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+ }
+
+ return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+ return stack.Initialize.Add(&legacyEndpointContextSetter{
+ LegacyResolver: o.EndpointResolver,
+ }, middleware.Before)
+}
+
func resolveDefaultLogger(o *Options) {
if o.Logger != nil {
return
@@ -233,6 +427,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
APIOptions: cfg.APIOptions,
Logger: cfg.Logger,
ClientLogMode: cfg.ClientLogMode,
+ AppID: cfg.AppID,
}
resolveAWSRetryerProvider(cfg, &opts)
resolveAWSRetryMaxAttempts(cfg, &opts)
@@ -240,6 +435,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
resolveAWSEndpointResolver(cfg, &opts)
resolveUseDualStackEndpoint(cfg, &opts)
resolveUseFIPSEndpoint(cfg, &opts)
+ resolveBaseEndpoint(cfg, &opts)
return New(opts, optFns...)
}
@@ -331,7 +527,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
-func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
@@ -343,20 +547,39 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
return
}
- o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver())
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
}
-func addClientUserAgent(stack *middleware.Stack) error {
- return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)(stack)
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "ssooidc", goModuleVersion)
+ if len(options.AppID) > 0 {
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+ }
+
+ return nil
}
-func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
- mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
- CredentialsProvider: o.Credentials,
- Signer: o.HTTPSignerV4,
- LogSigning: o.ClientLogMode.IsSigning(),
- })
- return stack.Finalize.Add(mw, middleware.After)
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
}
type HTTPSignerV4 interface {
@@ -377,12 +600,97 @@ func newDefaultV4Signer(o Options) *v4.Signer {
})
}
-func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
- mo := retry.AddRetryMiddlewaresOptions{
- Retryer: o.Retryer,
- LogRetryAttempts: o.ClientLogMode.IsRetries(),
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+
+func addSpanRetryLoop(stack *middleware.Stack, options Options) error {
+ return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before)
+}
+
+type spanRetryLoop struct {
+ options Options
+}
+
+func (*spanRetryLoop) ID() string {
+ return "spanRetryLoop"
+}
+
+func (m *spanRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ middleware.FinalizeOutput, middleware.Metadata, error,
+) {
+ tracer := operationTracer(m.options.TracerProvider)
+ ctx, span := tracer.StartSpan(ctx, "RetryLoop")
+ defer span.End()
+
+ return next.HandleFinalize(ctx, in)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+ return nil
+ })
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+ return nil
+ })
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/ssooidc")
+ })
+ if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
+ return err
}
- return retry.AddRetryMiddlewares(stack, mo)
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
+ }
+ return nil
}
// resolves dual-stack endpoint configuration
@@ -415,12 +723,99 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
return nil
}
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+ if mode == aws.AccountIDEndpointModeDisabled {
+ return nil
+ }
+
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+ return aws.String(ca.Credentials.AccountID)
+ }
+
+ return nil
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+ mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+ if err := stack.Build.Add(&mw, middleware.After); err != nil {
+ return err
+ }
+ return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+ c.timeOffset = new(atomic.Int64)
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.Retryer.(type) {
+ case *retry.Standard:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+ case *retry.AdaptiveMode:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+ }
+ return nil
+}
+
+type setCredentialSourceMiddleware struct {
+ ua *awsmiddleware.RequestUserAgent
+ options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+ if !ok {
+ return next.HandleBuild(ctx, in)
+ }
+ providerSources := asProviderSource.ProviderSources()
+ for _, source := range providerSources {
+ m.ua.AddCredentialsSource(source)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ mw := setCredentialSourceMiddleware{ua: ua, options: options}
+ return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
+func resolveTracerProvider(options *Options) {
+ if options.TracerProvider == nil {
+ options.TracerProvider = &tracing.NopTracerProvider{}
+ }
+}
+
+func resolveMeterProvider(options *Options) {
+ if options.MeterProvider == nil {
+ options.MeterProvider = metrics.NopMeterProvider{}
+ }
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
- return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
- return awshttp.AddResponseErrorMiddleware(stack)
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
}
func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
@@ -431,3 +826,118 @@ func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
}, middleware.After)
}
+
+type disableHTTPSMiddleware struct {
+ DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+ return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+ req.URL.Scheme = "http"
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Finalize.Insert(&disableHTTPSMiddleware{
+ DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+type spanInitializeStart struct {
+}
+
+func (*spanInitializeStart) ID() string {
+ return "spanInitializeStart"
+}
+
+func (m *spanInitializeStart) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "Initialize")
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanInitializeEnd struct {
+}
+
+func (*spanInitializeEnd) ID() string {
+ return "spanInitializeEnd"
+}
+
+func (m *spanInitializeEnd) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanBuildRequestStart struct {
+}
+
+func (*spanBuildRequestStart) ID() string {
+ return "spanBuildRequestStart"
+}
+
+func (m *spanBuildRequestStart) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ middleware.SerializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "BuildRequest")
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type spanBuildRequestEnd struct {
+}
+
+func (*spanBuildRequestEnd) ID() string {
+ return "spanBuildRequestEnd"
+}
+
+func (m *spanBuildRequestEnd) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ middleware.BuildOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleBuild(ctx, in)
+}
+
+func addSpanInitializeStart(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before)
+}
+
+func addSpanInitializeEnd(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After)
+}
+
+func addSpanBuildRequestStart(stack *middleware.Stack) error {
+ return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before)
+}
+
+func addSpanBuildRequestEnd(stack *middleware.Stack) error {
+ return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
index c6e64a13d..493878338 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateToken.go
@@ -4,14 +4,16 @@ package ssooidc
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Creates and returns an access token for the authorized client. The access token
-// issued will be used to fetch short-term credentials for the assigned roles in
-// the AWS account.
+// Creates and returns access and refresh tokens for clients that are
+// authenticated using client secrets. The access token can be used to fetch
+// short-lived credentials for the assigned AWS accounts or to access application
+// APIs using bearer authentication.
func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optFns ...func(*Options)) (*CreateTokenOutput, error) {
if params == nil {
params = &CreateTokenInput{}
@@ -29,43 +31,64 @@ func (c *Client) CreateToken(ctx context.Context, params *CreateTokenInput, optF
type CreateTokenInput struct {
- // The unique identifier string for each client. This value should come from the
- // persisted result of the RegisterClient API.
+ // The unique identifier string for the client or application. This value comes
+ // from the result of the RegisterClientAPI.
//
// This member is required.
ClientId *string
// A secret string generated for the client. This value should come from the
- // persisted result of the RegisterClient API.
+ // persisted result of the RegisterClientAPI.
//
// This member is required.
ClientSecret *string
- // Supports grant types for authorization code, refresh token, and device code
- // request.
+ // Supports the following OAuth grant types: Authorization Code, Device Code, and
+ // Refresh Token. Specify one of the following values, depending on the grant type
+ // that you want:
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Device Code - urn:ietf:params:oauth:grant-type:device_code
+ //
+ // * Refresh Token - refresh_token
//
// This member is required.
GrantType *string
- // The authorization code received from the authorization service. This parameter
- // is required to perform an authorization grant request to get access to a token.
+ // Used only when calling this API for the Authorization Code grant type. The
+ // short-lived code is used to identify this authorization request.
Code *string
- // Used only when calling this API for the device code grant type. This short-term
- // code is used to identify this authentication attempt. This should come from an
- // in-memory reference to the result of the StartDeviceAuthorization API.
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ CodeVerifier *string
+
+ // Used only when calling this API for the Device Code grant type. This
+ // short-lived code is used to identify this authorization request. This comes from
+ // the result of the StartDeviceAuthorizationAPI.
DeviceCode *string
- // The location of the application that will receive the authorization code. Users
- // authorize the service to send the request to this location.
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value specifies the location of the client or application that has registered to
+ // receive the authorization code.
RedirectUri *string
- // The token used to obtain an access token in the event that the access token is
- // invalid or expired. This token is not issued by the service.
+ // Used only when calling this API for the Refresh Token grant type. This token is
+ // used to refresh short-lived tokens, such as the access token, that might expire.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
RefreshToken *string
- // The list of scopes that is defined by the client. Upon authorization, this list
- // is used to restrict permissions when granting an access token.
+ // The list of scopes for which authorization is requested. The access token that
+ // is issued is limited to the scopes that are granted. If this value is not
+ // specified, IAM Identity Center authorizes all scopes that are configured for the
+ // client during the call to RegisterClient.
Scope []string
noSmithyDocumentSerde
@@ -73,21 +96,35 @@ type CreateTokenInput struct {
type CreateTokenOutput struct {
- // An opaque token to access AWS SSO resources assigned to a user.
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
AccessToken *string
// Indicates the time in seconds when an access token will expire.
ExpiresIn int32
- // The identifier of the user that associated with the access token, if present.
+ // The idToken is not implemented or supported. For more information about the
+ // features and limitations of the current IAM Identity Center OIDC implementation,
+ // see Considerations for Using this Guide in the [IAM Identity Center OIDC API Reference].
+ //
+ // A JSON Web Token (JWT) that identifies who is associated with the issued access
+ // token.
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
IdToken *string
// A token that, if present, can be used to refresh a previously issued access
// token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
RefreshToken *string
// Used to notify the client that the returned token is an access token. The
- // supported type is BearerToken.
+ // supported token type is Bearer .
TokenType *string
// Metadata pertaining to the operation's result.
@@ -97,6 +134,9 @@ type CreateTokenOutput struct {
}
func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateToken{}, middleware.After)
if err != nil {
return err
@@ -105,28 +145,38 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateToken"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -135,12 +185,27 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpCreateTokenValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateToken(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -150,6 +215,21 @@ func (c *Client) addOperationCreateTokenMiddlewares(stack *middleware.Stack, opt
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
new file mode 100644
index 000000000..09f3647e8
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_CreateTokenWithIAM.go
@@ -0,0 +1,280 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/ssooidc/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Creates and returns access and refresh tokens for clients and applications that
+// are authenticated using IAM entities. The access token can be used to fetch
+// short-lived credentials for the assigned Amazon Web Services accounts or to
+// access application APIs using bearer authentication.
+func (c *Client) CreateTokenWithIAM(ctx context.Context, params *CreateTokenWithIAMInput, optFns ...func(*Options)) (*CreateTokenWithIAMOutput, error) {
+ if params == nil {
+ params = &CreateTokenWithIAMInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "CreateTokenWithIAM", params, optFns, c.addOperationCreateTokenWithIAMMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*CreateTokenWithIAMOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type CreateTokenWithIAMInput struct {
+
+ // The unique identifier string for the client or application. This value is an
+ // application ARN that has OAuth grants configured.
+ //
+ // This member is required.
+ ClientId *string
+
+ // Supports the following OAuth grant types: Authorization Code, Refresh Token,
+ // JWT Bearer, and Token Exchange. Specify one of the following values, depending
+ // on the grant type that you want:
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Refresh Token - refresh_token
+ //
+ // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer
+ //
+ // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange
+ //
+ // This member is required.
+ GrantType *string
+
+ // Used only when calling this API for the JWT Bearer grant type. This value
+ // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To
+ // authorize a trusted token issuer, configure the JWT Bearer GrantOptions for the
+ // application.
+ Assertion *string
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // short-lived code is used to identify this authorization request. The code is
+ // obtained through a redirect from IAM Identity Center to a redirect URI persisted
+ // in the Authorization Code GrantOptions for the application.
+ Code *string
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value is generated by the client and presented to validate the original code
+ // challenge value the client passed at authorization time.
+ CodeVerifier *string
+
+ // Used only when calling this API for the Authorization Code grant type. This
+ // value specifies the location of the client or application that has registered to
+ // receive the authorization code.
+ RedirectUri *string
+
+ // Used only when calling this API for the Refresh Token grant type. This token is
+ // used to refresh short-lived tokens, such as the access token, that might expire.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+ RefreshToken *string
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the type of token that the requester can receive. The following values
+ // are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ RequestedTokenType *string
+
+ // The list of scopes for which authorization is requested. The access token that
+ // is issued is limited to the scopes that are granted. If the value is not
+ // specified, IAM Identity Center authorizes all scopes configured for the
+ // application, including the following default scopes: openid , aws ,
+ // sts:identity_context .
+ Scope []string
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the subject of the exchange. The value of the subject token must be an
+ // access token issued by IAM Identity Center to a different client or application.
+ // The access token must have authorized scopes that indicate the requested
+ // application as a target audience.
+ SubjectToken *string
+
+ // Used only when calling this API for the Token Exchange grant type. This value
+ // specifies the type of token that is passed as the subject of the exchange. The
+ // following value is supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ SubjectTokenType *string
+
+ noSmithyDocumentSerde
+}
+
+type CreateTokenWithIAMOutput struct {
+
+ // A bearer token to access Amazon Web Services accounts and applications assigned
+ // to a user.
+ AccessToken *string
+
+ // A structure containing information from the idToken . Only the identityContext
+ // is in it, which is a value extracted from the idToken . This provides direct
+ // access to identity information without requiring JWT parsing.
+ AwsAdditionalDetails *types.AwsAdditionalDetails
+
+ // Indicates the time in seconds when an access token will expire.
+ ExpiresIn int32
+
+ // A JSON Web Token (JWT) that identifies the user associated with the issued
+ // access token.
+ IdToken *string
+
+ // Indicates the type of tokens that are issued by IAM Identity Center. The
+ // following values are supported:
+ //
+ // * Access Token - urn:ietf:params:oauth:token-type:access_token
+ //
+ // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
+ IssuedTokenType *string
+
+ // A token that, if present, can be used to refresh a previously issued access
+ // token that might have expired.
+ //
+ // For more information about the features and limitations of the current IAM
+ // Identity Center OIDC implementation, see Considerations for Using this Guide in
+ // the [IAM Identity Center OIDC API Reference].
+ //
+ // [IAM Identity Center OIDC API Reference]: https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html
+ RefreshToken *string
+
+ // The list of scopes for which authorization is granted. The access token that is
+ // issued is limited to the scopes that are granted.
+ Scope []string
+
+ // Used to notify the requester that the returned token is an access token. The
+ // supported token type is Bearer .
+ TokenType *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationCreateTokenWithIAMMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsRestjson1_serializeOpCreateTokenWithIAM{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsRestjson1_deserializeOpCreateTokenWithIAM{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "CreateTokenWithIAM"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpCreateTokenWithIAMValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateTokenWithIAM(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opCreateTokenWithIAM(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "CreateTokenWithIAM",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
index 096b35df2..1e2d3828f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_RegisterClient.go
@@ -4,14 +4,15 @@ package ssooidc
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Registers a client with AWS SSO. This allows clients to initiate device
-// authorization. The output should be persisted for reuse through many
-// authentication requests.
+// Registers a public client with IAM Identity Center. This allows clients to
+// perform authorization using the authorization code grant with Proof Key for Code
+// Exchange (PKCE) or the device code grant.
func (c *Client) RegisterClient(ctx context.Context, params *RegisterClientInput, optFns ...func(*Options)) (*RegisterClientOutput, error) {
if params == nil {
params = &RegisterClientInput{}
@@ -40,8 +41,35 @@ type RegisterClientInput struct {
// This member is required.
ClientType *string
- // The list of scopes that are defined by the client. Upon authorization, this list
- // is used to restrict permissions when granting an access token.
+ // This IAM Identity Center application ARN is used to define
+ // administrator-managed configuration for public client access to resources. At
+ // authorization, the scopes, grants, and redirect URI available to this client
+ // will be restricted by this application resource.
+ EntitledApplicationArn *string
+
+ // The list of OAuth 2.0 grant types that are defined by the client. This list is
+ // used to restrict the token granting flows available to the client. Supports the
+ // following OAuth 2.0 grant types: Authorization Code, Device Code, and Refresh
+ // Token.
+ //
+ // * Authorization Code - authorization_code
+ //
+ // * Device Code - urn:ietf:params:oauth:grant-type:device_code
+ //
+ // * Refresh Token - refresh_token
+ GrantTypes []string
+
+ // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
+ // Center. This value is needed for user access to resources through the client.
+ IssuerUrl *string
+
+ // The list of redirect URI that are defined by the client. At completion of
+ // authorization, this list is used to restrict what locations the user agent can
+ // be redirected back to.
+ RedirectUris []string
+
+ // The list of scopes that are defined by the client. Upon authorization, this
+ // list is used to restrict permissions when granting an access token.
Scopes []string
noSmithyDocumentSerde
@@ -49,7 +77,7 @@ type RegisterClientInput struct {
type RegisterClientOutput struct {
- // The endpoint where the client can request authorization.
+ // An endpoint that the client can use to request authorization.
AuthorizationEndpoint *string
// The unique identifier string for each client. This client uses this identifier
@@ -59,14 +87,14 @@ type RegisterClientOutput struct {
// Indicates the time at which the clientId and clientSecret were issued.
ClientIdIssuedAt int64
- // A secret string generated for the client. The client will use this string to get
- // authenticated by the service in subsequent calls.
+ // A secret string generated for the client. The client will use this string to
+ // get authenticated by the service in subsequent calls.
ClientSecret *string
// Indicates the time at which the clientId and clientSecret will become invalid.
ClientSecretExpiresAt int64
- // The endpoint where the client can get an access token.
+ // An endpoint that the client can use to create tokens.
TokenEndpoint *string
// Metadata pertaining to the operation's result.
@@ -76,6 +104,9 @@ type RegisterClientOutput struct {
}
func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsRestjson1_serializeOpRegisterClient{}, middleware.After)
if err != nil {
return err
@@ -84,28 +115,38 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack,
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "RegisterClient"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -114,12 +155,27 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack,
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpRegisterClientValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opRegisterClient(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -129,6 +185,21 @@ func (c *Client) addOperationRegisterClientMiddlewares(stack *middleware.Stack,
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
index 0d893b431..de0108f1f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/api_op_StartDeviceAuthorization.go
@@ -4,6 +4,7 @@ package ssooidc
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
@@ -28,23 +29,23 @@ func (c *Client) StartDeviceAuthorization(ctx context.Context, params *StartDevi
type StartDeviceAuthorizationInput struct {
- // The unique identifier string for the client that is registered with AWS SSO.
- // This value should come from the persisted result of the RegisterClient API
+ // The unique identifier string for the client that is registered with IAM
+ // Identity Center. This value should come from the persisted result of the RegisterClientAPI
// operation.
//
// This member is required.
ClientId *string
// A secret string that is generated for the client. This value should come from
- // the persisted result of the RegisterClient API operation.
+ // the persisted result of the RegisterClientAPI operation.
//
// This member is required.
ClientSecret *string
- // The URL for the AWS SSO user portal. For more information, see Using the User
- // Portal
- // (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html)
- // in the AWS Single Sign-On User Guide.
+ // The URL for the Amazon Web Services access portal. For more information, see [Using the Amazon Web Services access portal]
+ // in the IAM Identity Center User Guide.
+ //
+ // [Using the Amazon Web Services access portal]: https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html
//
// This member is required.
StartUrl *string
@@ -73,9 +74,9 @@ type StartDeviceAuthorizationOutput struct {
// device.
VerificationUri *string
- // An alternate URL that the client can use to automatically launch a browser. This
- // process skips the manual step in which the user visits the verification page and
- // enters their code.
+ // An alternate URL that the client can use to automatically launch a browser.
+ // This process skips the manual step in which the user visits the verification
+ // page and enters their code.
VerificationUriComplete *string
// Metadata pertaining to the operation's result.
@@ -85,6 +86,9 @@ type StartDeviceAuthorizationOutput struct {
}
func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsRestjson1_serializeOpStartDeviceAuthorization{}, middleware.After)
if err != nil {
return err
@@ -93,28 +97,38 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "StartDeviceAuthorization"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -123,12 +137,27 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpStartDeviceAuthorizationValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opStartDeviceAuthorization(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -138,6 +167,21 @@ func (c *Client) addOperationStartDeviceAuthorizationMiddlewares(stack *middlewa
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go
new file mode 100644
index 000000000..e4b87f5bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/auth.go
@@ -0,0 +1,331 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+ params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+ return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+ }
+ }
+
+ if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+ // The name of the operation being invoked.
+ Operation string
+
+ // The region in which the operation is being invoked.
+ Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+ params := &AuthResolverParameters{
+ Operation: operation,
+ }
+
+ bindAuthParamsRegion(ctx, params, input, options)
+
+ return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+ ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ if overrides, ok := operationAuthOptions[params.Operation]; ok {
+ return overrides(params), nil
+ }
+ return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+ "CreateToken": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "RegisterClient": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "StartDeviceAuthorization": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "sso-oauth")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ return props
+ }(),
+ },
+ }
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
+ params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+ options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+ }
+
+ scheme, ok := m.selectScheme(options)
+ if !ok {
+ return out, metadata, fmt.Errorf("could not select an auth scheme")
+ }
+
+ ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+ for _, option := range options {
+ if option.SchemeID == smithyauth.SchemeIDAnonymous {
+ return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+ }
+
+ for _, scheme := range m.options.AuthSchemes {
+ if scheme.SchemeID() != option.SchemeID {
+ continue
+ }
+
+ if scheme.IdentityResolver(m.options) != nil {
+ return newResolvedAuthScheme(scheme, option), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+ Scheme smithyhttp.AuthScheme
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+ return &resolvedAuthScheme{
+ Scheme: scheme,
+ IdentityProperties: option.IdentityProperties,
+ SignerProperties: option.SignerProperties,
+ }
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+ return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+ v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+ return v
+}
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ resolver := rscheme.Scheme.IdentityResolver(m.options)
+ if resolver == nil {
+ return out, metadata, fmt.Errorf("no identity resolver")
+ }
+
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("get identity: %w", err)
+ }
+
+ ctx = setIdentity(ctx, identity)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+ return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+ v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+ return v
+}
+
+type signRequestMiddleware struct {
+ options Options
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ identity := getIdentity(ctx)
+ if identity == nil {
+ return out, metadata, fmt.Errorf("no identity")
+ }
+
+ signer := rscheme.Scheme.Signer()
+ if signer == nil {
+ return out, metadata, fmt.Errorf("no signer")
+ }
+
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) {
+ return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties)
+ }, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("sign request: %w", err)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
index e9939aff0..93f3653d5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/deserializers.go
@@ -13,11 +13,22 @@ import (
smithyio "github.com/aws/smithy-go/io"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
+ smithytime "github.com/aws/smithy-go/time"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io"
"strings"
+ "time"
)
+func deserializeS3Expires(v string) (*time.Time, error) {
+ t, err := smithytime.ParseHTTPDate(v)
+ if err != nil {
+ return nil, nil
+ }
+ return &t, nil
+}
+
type awsRestjson1_deserializeOpCreateToken struct {
}
@@ -33,6 +44,10 @@ func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Co
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -72,6 +87,7 @@ func (m *awsRestjson1_deserializeOpCreateToken) HandleDeserialize(ctx context.Co
}
}
+ span.End()
return out, metadata, err
}
@@ -85,9 +101,9 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m
errorCode := "UnknownError"
errorMessage := errorCode
- code := response.Header.Get("X-Amzn-ErrorType")
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
}
var buff [1024]byte
@@ -96,7 +112,7 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- code, message, err := restjson.GetErrorInfo(decoder)
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -108,8 +124,8 @@ func awsRestjson1_deserializeOpErrorCreateToken(response *smithyhttp.Response, m
}
errorBody.Seek(0, io.SeekStart)
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
}
if len(message) != 0 {
errorMessage = message
@@ -239,6 +255,254 @@ func awsRestjson1_deserializeOpDocumentCreateTokenOutput(v **CreateTokenOutput,
return nil
}
+type awsRestjson1_deserializeOpCreateTokenWithIAM struct {
+}
+
+func (*awsRestjson1_deserializeOpCreateTokenWithIAM) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsRestjson1_deserializeOpCreateTokenWithIAM) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response, &metadata)
+ }
+ output := &CreateTokenWithIAMOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(response.Body, ringBuffer)
+
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ err = awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(&output, shape)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body with invalid JSON, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ span.End()
+ return out, metadata, err
+}
+
+func awsRestjson1_deserializeOpErrorCreateTokenWithIAM(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
+ }
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
+ }
+ if len(message) != 0 {
+ errorMessage = message
+ }
+
+ switch {
+ case strings.EqualFold("AccessDeniedException", errorCode):
+ return awsRestjson1_deserializeErrorAccessDeniedException(response, errorBody)
+
+ case strings.EqualFold("AuthorizationPendingException", errorCode):
+ return awsRestjson1_deserializeErrorAuthorizationPendingException(response, errorBody)
+
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsRestjson1_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("InternalServerException", errorCode):
+ return awsRestjson1_deserializeErrorInternalServerException(response, errorBody)
+
+ case strings.EqualFold("InvalidClientException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidClientException(response, errorBody)
+
+ case strings.EqualFold("InvalidGrantException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidGrantException(response, errorBody)
+
+ case strings.EqualFold("InvalidRequestException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
+
+ case strings.EqualFold("InvalidRequestRegionException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRequestRegionException(response, errorBody)
+
+ case strings.EqualFold("InvalidScopeException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+
+ case strings.EqualFold("SlowDownException", errorCode):
+ return awsRestjson1_deserializeErrorSlowDownException(response, errorBody)
+
+ case strings.EqualFold("UnauthorizedClientException", errorCode):
+ return awsRestjson1_deserializeErrorUnauthorizedClientException(response, errorBody)
+
+ case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+ return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
+func awsRestjson1_deserializeOpDocumentCreateTokenWithIAMOutput(v **CreateTokenWithIAMOutput, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *CreateTokenWithIAMOutput
+ if *v == nil {
+ sv = &CreateTokenWithIAMOutput{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "accessToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected AccessToken to be of type string, got %T instead", value)
+ }
+ sv.AccessToken = ptr.String(jtv)
+ }
+
+ case "awsAdditionalDetails":
+ if err := awsRestjson1_deserializeDocumentAwsAdditionalDetails(&sv.AwsAdditionalDetails, value); err != nil {
+ return err
+ }
+
+ case "expiresIn":
+ if value != nil {
+ jtv, ok := value.(json.Number)
+ if !ok {
+ return fmt.Errorf("expected ExpirationInSeconds to be json.Number, got %T instead", value)
+ }
+ i64, err := jtv.Int64()
+ if err != nil {
+ return err
+ }
+ sv.ExpiresIn = int32(i64)
+ }
+
+ case "idToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IdToken to be of type string, got %T instead", value)
+ }
+ sv.IdToken = ptr.String(jtv)
+ }
+
+ case "issuedTokenType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TokenTypeURI to be of type string, got %T instead", value)
+ }
+ sv.IssuedTokenType = ptr.String(jtv)
+ }
+
+ case "refreshToken":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected RefreshToken to be of type string, got %T instead", value)
+ }
+ sv.RefreshToken = ptr.String(jtv)
+ }
+
+ case "scope":
+ if err := awsRestjson1_deserializeDocumentScopes(&sv.Scope, value); err != nil {
+ return err
+ }
+
+ case "tokenType":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected TokenType to be of type string, got %T instead", value)
+ }
+ sv.TokenType = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
type awsRestjson1_deserializeOpRegisterClient struct {
}
@@ -254,6 +518,10 @@ func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -293,6 +561,7 @@ func (m *awsRestjson1_deserializeOpRegisterClient) HandleDeserialize(ctx context
}
}
+ span.End()
return out, metadata, err
}
@@ -306,9 +575,9 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response
errorCode := "UnknownError"
errorMessage := errorCode
- code := response.Header.Get("X-Amzn-ErrorType")
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
}
var buff [1024]byte
@@ -317,7 +586,7 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- code, message, err := restjson.GetErrorInfo(decoder)
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -329,8 +598,8 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response
}
errorBody.Seek(0, io.SeekStart)
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
}
if len(message) != 0 {
errorMessage = message
@@ -343,12 +612,18 @@ func awsRestjson1_deserializeOpErrorRegisterClient(response *smithyhttp.Response
case strings.EqualFold("InvalidClientMetadataException", errorCode):
return awsRestjson1_deserializeErrorInvalidClientMetadataException(response, errorBody)
+ case strings.EqualFold("InvalidRedirectUriException", errorCode):
+ return awsRestjson1_deserializeErrorInvalidRedirectUriException(response, errorBody)
+
case strings.EqualFold("InvalidRequestException", errorCode):
return awsRestjson1_deserializeErrorInvalidRequestException(response, errorBody)
case strings.EqualFold("InvalidScopeException", errorCode):
return awsRestjson1_deserializeErrorInvalidScopeException(response, errorBody)
+ case strings.EqualFold("UnsupportedGrantTypeException", errorCode):
+ return awsRestjson1_deserializeErrorUnsupportedGrantTypeException(response, errorBody)
+
default:
genericError := &smithy.GenericAPIError{
Code: errorCode,
@@ -467,6 +742,10 @@ func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(c
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -506,6 +785,7 @@ func (m *awsRestjson1_deserializeOpStartDeviceAuthorization) HandleDeserialize(c
}
}
+ span.End()
return out, metadata, err
}
@@ -519,9 +799,9 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt
errorCode := "UnknownError"
errorMessage := errorCode
- code := response.Header.Get("X-Amzn-ErrorType")
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ headerCode := response.Header.Get("X-Amzn-ErrorType")
+ if len(headerCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(headerCode)
}
var buff [1024]byte
@@ -530,7 +810,7 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt
body := io.TeeReader(errorBody, ringBuffer)
decoder := json.NewDecoder(body)
decoder.UseNumber()
- code, message, err := restjson.GetErrorInfo(decoder)
+ jsonCode, message, err := restjson.GetErrorInfo(decoder)
if err != nil {
var snapshot bytes.Buffer
io.Copy(&snapshot, ringBuffer)
@@ -542,8 +822,8 @@ func awsRestjson1_deserializeOpErrorStartDeviceAuthorization(response *smithyhtt
}
errorBody.Seek(0, io.SeekStart)
- if len(code) != 0 {
- errorCode = restjson.SanitizeErrorCode(code)
+ if len(headerCode) == 0 && len(jsonCode) != 0 {
+ errorCode = restjson.SanitizeErrorCode(jsonCode)
}
if len(message) != 0 {
errorMessage = message
@@ -920,6 +1200,42 @@ func awsRestjson1_deserializeErrorInvalidGrantException(response *smithyhttp.Res
return output
}
+func awsRestjson1_deserializeErrorInvalidRedirectUriException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRedirectUriException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidRedirectUriException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
output := &types.InvalidRequestException{}
var buff [1024]byte
@@ -956,6 +1272,42 @@ func awsRestjson1_deserializeErrorInvalidRequestException(response *smithyhttp.R
return output
}
+func awsRestjson1_deserializeErrorInvalidRequestRegionException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
+ output := &types.InvalidRequestRegionException{}
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+
+ body := io.TeeReader(errorBody, ringBuffer)
+ decoder := json.NewDecoder(body)
+ decoder.UseNumber()
+ var shape interface{}
+ if err := decoder.Decode(&shape); err != nil && err != io.EOF {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ err := awsRestjson1_deserializeDocumentInvalidRequestRegionException(&output, shape)
+
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return err
+ }
+
+ errorBody.Seek(0, io.SeekStart)
+
+ return output
+}
+
func awsRestjson1_deserializeErrorInvalidScopeException(response *smithyhttp.Response, errorBody *bytes.Reader) error {
output := &types.InvalidScopeException{}
var buff [1024]byte
@@ -1198,6 +1550,46 @@ func awsRestjson1_deserializeDocumentAuthorizationPendingException(v **types.Aut
return nil
}
+func awsRestjson1_deserializeDocumentAwsAdditionalDetails(v **types.AwsAdditionalDetails, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.AwsAdditionalDetails
+ if *v == nil {
+ sv = &types.AwsAdditionalDetails{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "identityContext":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected IdentityContext to be of type string, got %T instead", value)
+ }
+ sv.IdentityContext = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
func awsRestjson1_deserializeDocumentExpiredTokenException(v **types.ExpiredTokenException, value interface{}) error {
if v == nil {
return fmt.Errorf("unexpected nil of type %T", v)
@@ -1443,6 +1835,55 @@ func awsRestjson1_deserializeDocumentInvalidGrantException(v **types.InvalidGran
return nil
}
+func awsRestjson1_deserializeDocumentInvalidRedirectUriException(v **types.InvalidRedirectUriException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRedirectUriException
+ if *v == nil {
+ sv = &types.InvalidRedirectUriException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRequestException, value interface{}) error {
if v == nil {
return fmt.Errorf("unexpected nil of type %T", v)
@@ -1492,6 +1933,73 @@ func awsRestjson1_deserializeDocumentInvalidRequestException(v **types.InvalidRe
return nil
}
+func awsRestjson1_deserializeDocumentInvalidRequestRegionException(v **types.InvalidRequestRegionException, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var sv *types.InvalidRequestRegionException
+ if *v == nil {
+ sv = &types.InvalidRequestRegionException{}
+ } else {
+ sv = *v
+ }
+
+ for key, value := range shape {
+ switch key {
+ case "endpoint":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Location to be of type string, got %T instead", value)
+ }
+ sv.Endpoint = ptr.String(jtv)
+ }
+
+ case "error":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Error to be of type string, got %T instead", value)
+ }
+ sv.Error_ = ptr.String(jtv)
+ }
+
+ case "error_description":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected ErrorDescription to be of type string, got %T instead", value)
+ }
+ sv.Error_description = ptr.String(jtv)
+ }
+
+ case "region":
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Region to be of type string, got %T instead", value)
+ }
+ sv.Region = ptr.String(jtv)
+ }
+
+ default:
+ _, _ = key, value
+
+ }
+ }
+ *v = sv
+ return nil
+}
+
func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScopeException, value interface{}) error {
if v == nil {
return fmt.Errorf("unexpected nil of type %T", v)
@@ -1541,6 +2049,42 @@ func awsRestjson1_deserializeDocumentInvalidScopeException(v **types.InvalidScop
return nil
}
+func awsRestjson1_deserializeDocumentScopes(v *[]string, value interface{}) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ if value == nil {
+ return nil
+ }
+
+ shape, ok := value.([]interface{})
+ if !ok {
+ return fmt.Errorf("unexpected JSON type %v", value)
+ }
+
+ var cv []string
+ if *v == nil {
+ cv = []string{}
+ } else {
+ cv = *v
+ }
+
+ for _, value := range shape {
+ var col string
+ if value != nil {
+ jtv, ok := value.(string)
+ if !ok {
+ return fmt.Errorf("expected Scope to be of type string, got %T instead", value)
+ }
+ col = jtv
+ }
+ cv = append(cv, col)
+
+ }
+ *v = cv
+ return nil
+}
+
func awsRestjson1_deserializeDocumentSlowDownException(v **types.SlowDownException, value interface{}) error {
if v == nil {
return fmt.Errorf("unexpected nil of type %T", v)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
index 79c458291..f3510b18c 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/doc.go
@@ -1,22 +1,49 @@
// Code generated by smithy-go-codegen DO NOT EDIT.
-// Package ssooidc provides the API client, operations, and parameter types for AWS
-// SSO OIDC.
-//
-// AWS Single Sign-On (SSO) OpenID Connect (OIDC) is a web service that enables a
-// client (such as AWS CLI or a native application) to register with AWS SSO. The
-// service also enables the client to fetch the user’s access token upon successful
-// authentication and authorization with AWS SSO. This service conforms with the
-// OAuth 2.0 based implementation of the device authorization grant standard
-// (https://tools.ietf.org/html/rfc8628 (https://tools.ietf.org/html/rfc8628)). For
-// general information about AWS SSO, see What is AWS Single Sign-On?
-// (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html) in the
-// AWS SSO User Guide. This API reference guide describes the AWS SSO OIDC
-// operations that you can call programatically and includes detailed information
-// on data types and errors. AWS provides SDKs that consist of libraries and sample
-// code for various programming languages and platforms such as Java, Ruby, .Net,
-// iOS, and Android. The SDKs provide a convenient way to create programmatic
-// access to AWS SSO and other AWS services. For more information about the AWS
-// SDKs, including how to download and install them, see Tools for Amazon Web
-// Services (http://aws.amazon.com/tools/).
+// Package ssooidc provides the API client, operations, and parameter types for
+// AWS SSO OIDC.
+//
+// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
+// client (such as CLI or a native application) to register with IAM Identity
+// Center. The service also enables the client to fetch the user’s access token
+// upon successful authentication and authorization with IAM Identity Center.
+//
+// # API namespaces
+//
+// IAM Identity Center uses the sso and identitystore API namespaces. IAM Identity
+// Center OpenID Connect uses the sso-oidc namespace.
+//
+// # Considerations for using this guide
+//
+// Before you begin using this guide, we recommend that you first review the
+// following important information about how the IAM Identity Center OIDC service
+// works.
+//
+// - The IAM Identity Center OIDC service currently implements only the portions
+// of the OAuth 2.0 Device Authorization Grant standard ([https://tools.ietf.org/html/rfc8628] ) that are necessary to
+// enable single sign-on authentication with the CLI.
+//
+// - With older versions of the CLI, the service only emits OIDC access tokens,
+// so to obtain a new token, users must explicitly re-authenticate. To access the
+// OIDC flow that supports token refresh and doesn’t require re-authentication,
+// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI V2) with
+// support for OIDC token refresh and configurable IAM Identity Center session
+// durations. For more information, see [Configure Amazon Web Services access portal session duration].
+//
+// - The access tokens provided by this service grant access to all Amazon Web
+// Services account entitlements assigned to an IAM Identity Center user, not just
+// a particular application.
+//
+// - The documentation in this guide does not describe the mechanism to convert
+// the access token into Amazon Web Services Auth (“sigv4”) credentials for use
+// with IAM-protected Amazon Web Services service endpoints. For more information,
+// see [GetRoleCredentials]in the IAM Identity Center Portal API Reference Guide.
+//
+// For general information about IAM Identity Center, see [What is IAM Identity Center?] in the IAM Identity
+// Center User Guide.
+//
+// [Configure Amazon Web Services access portal session duration]: https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html
+// [GetRoleCredentials]: https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html
+// [https://tools.ietf.org/html/rfc8628]: https://tools.ietf.org/html/rfc8628
+// [What is IAM Identity Center?]: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
package ssooidc
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
index 35cd21f18..6feea0c9f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/endpoints.go
@@ -8,10 +8,19 @@ import (
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
internalendpoints "github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
"net/url"
+ "os"
"strings"
)
@@ -39,13 +48,6 @@ func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointRe
return fn(region, options)
}
-func resolveDefaultEndpointConfiguration(o *Options) {
- if o.EndpointResolver != nil {
- return
- }
- o.EndpointResolver = NewDefaultEndpointResolver()
-}
-
// EndpointResolverFromURL returns an EndpointResolver configured using the
// provided endpoint url. By default, the resolved endpoint resolver uses the
// client region as signing region, and the endpoint source is set to
@@ -79,6 +81,10 @@ func (*ResolveEndpoint) ID() string {
func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
@@ -94,6 +100,11 @@ func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.Ser
var endpoint aws.Endpoint
endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
if err != nil {
+ nf := (&aws.EndpointNotFoundError{})
+ if errors.As(err, &nf) {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+ return next.HandleSerialize(ctx, in)
+ }
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
@@ -105,7 +116,7 @@ func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.Ser
if len(awsmiddleware.GetSigningName(ctx)) == 0 {
signingName := endpoint.SigningName
if len(signingName) == 0 {
- signingName = "awsssooidc"
+ signingName = "sso-oauth"
}
ctx = awsmiddleware.SetSigningName(ctx, signingName)
}
@@ -129,27 +140,10 @@ func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
type wrappedEndpointResolver struct {
awsResolver aws.EndpointResolverWithOptions
- resolver EndpointResolver
}
func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
- if w.awsResolver == nil {
- goto fallback
- }
- endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options)
- if err == nil {
- return endpoint, nil
- }
-
- if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) {
- return endpoint, err
- }
-
-fallback:
- if w.resolver == nil {
- return endpoint, fmt.Errorf("default endpoint resolver provided was nil")
- }
- return w.resolver.ResolveEndpoint(region, options)
+ return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
}
type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
@@ -160,12 +154,13 @@ func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, opti
var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
-// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
-// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the the provided
-// fallbackResolver for resolution.
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
//
-// fallbackResolver must not be nil
-func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver {
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
var resolver aws.EndpointResolverWithOptions
if awsResolverWithOptions != nil {
@@ -176,7 +171,6 @@ func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptio
return &wrappedEndpointResolver{
awsResolver: resolver,
- resolver: fallbackResolver,
}
}
@@ -198,3 +192,365 @@ func finalizeClientEndpointResolverOptions(options *Options) {
}
}
+
+func resolveEndpointResolverV2(options *Options) {
+ if options.EndpointResolverV2 == nil {
+ options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+ }
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+ if cfg.BaseEndpoint != nil {
+ o.BaseEndpoint = cfg.BaseEndpoint
+ }
+
+ _, g := os.LookupEnv("AWS_ENDPOINT_URL")
+ _, s := os.LookupEnv("AWS_ENDPOINT_URL_SSO_OIDC")
+
+ if g && !s {
+ return
+ }
+
+ value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "SSO OIDC", cfg.ConfigSources)
+ if found && err == nil {
+ o.BaseEndpoint = &value
+ }
+}
+
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+ // The AWS region used to dispatch the request.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Region
+ Region *string
+
+ // When true, use the dual-stack endpoint. If the configured endpoint does not
+ // support dual-stack, dispatching the request MAY return an error.
+ //
+ // Defaults to
+ // false if no value is provided.
+ //
+ // AWS::UseDualStack
+ UseDualStack *bool
+
+ // When true, send this request to the FIPS-compliant regional endpoint. If the
+ // configured endpoint does not have a FIPS compliant endpoint, dispatching the
+ // request will return an error.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::UseFIPS
+ UseFIPS *bool
+
+ // Override the endpoint used to send this request
+ //
+ // Parameter is
+ // required.
+ //
+ // SDK::Endpoint
+ Endpoint *string
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+ if p.UseDualStack == nil {
+ return fmt.Errorf("parameter UseDualStack is required")
+ }
+
+ if p.UseFIPS == nil {
+ return fmt.Errorf("parameter UseFIPS is required")
+ }
+
+ return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameterswith default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+ if p.UseDualStack == nil {
+ p.UseDualStack = ptr.Bool(false)
+ }
+
+ if p.UseFIPS == nil {
+ p.UseFIPS = ptr.Bool(false)
+ }
+ return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+ // ResolveEndpoint attempts to resolve the endpoint with the provided options,
+ // returning the endpoint if found. Otherwise an error is returned.
+ ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+ smithyendpoints.Endpoint, error,
+ )
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+ return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+ ctx context.Context, params EndpointParameters,
+) (
+ endpoint smithyendpoints.Endpoint, err error,
+) {
+ params = params.WithDefaults()
+ if err = params.ValidateRequired(); err != nil {
+ return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+ }
+ _UseDualStack := *params.UseDualStack
+ _UseFIPS := *params.UseFIPS
+
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+ }
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+ }
+ uriString := _Endpoint
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsFIPS {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+ }
+ }
+ if _UseFIPS == true {
+ if _PartitionResult.SupportsFIPS == true {
+ if _PartitionResult.Name == "aws-us-gov" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+ }
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://oidc.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+ bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+ params := &EndpointParameters{}
+
+ params.Region = bindRegion(options.Region)
+ params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+ params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+ params.Endpoint = options.BaseEndpoint
+
+ if b, ok := input.(endpointParamsBinder); ok {
+ b.bindEndpointParams(params)
+ }
+
+ return params
+}
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveEndpoint")
+ defer span.End()
+
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.options.EndpointResolverV2 == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+ endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration",
+ func() (smithyendpoints.Endpoint, error) {
+ return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ span.SetProperty("client.call.resolved_endpoint", endpt.URI.String())
+
+ if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+ endpt.URI.RawPath = endpt.URI.Path
+ }
+ req.URL.Scheme = endpt.URI.Scheme
+ req.URL.Host = endpt.URI.Host
+ req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+ req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+ for k := range endpt.Headers {
+ req.Header.Set(k, endpt.Headers.Get(k))
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+ for _, o := range opts {
+ rscheme.SignerProperties.SetAll(&o.SignerProperties)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
index 4afe3223e..35f180975 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/generated.json
@@ -9,21 +9,28 @@
"api_client.go",
"api_client_test.go",
"api_op_CreateToken.go",
+ "api_op_CreateTokenWithIAM.go",
"api_op_RegisterClient.go",
"api_op_StartDeviceAuthorization.go",
+ "auth.go",
"deserializers.go",
"doc.go",
"endpoints.go",
+ "endpoints_config_test.go",
+ "endpoints_test.go",
"generated.json",
"internal/endpoints/endpoints.go",
"internal/endpoints/endpoints_test.go",
+ "options.go",
"protocol_test.go",
"serializers.go",
+ "snapshot_test.go",
+ "sra_operation_order_test.go",
"types/errors.go",
"types/types.go",
"validators.go"
],
- "go": "1.15",
+ "go": "1.22",
"module": "github.com/aws/aws-sdk-go-v2/service/ssooidc",
"unstable": false
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
index 67b861e2f..04623412d 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/go_module_metadata.go
@@ -3,4 +3,4 @@
package ssooidc
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.5"
+const goModuleVersion = "1.30.1"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
index 090c04b3d..ba7b4f9eb 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/internal/endpoints/endpoints.go
@@ -87,15 +87,21 @@ func New() *Resolver {
var partitionRegexp = struct {
Aws *regexp.Regexp
AwsCn *regexp.Regexp
+ AwsEusc *regexp.Regexp
AwsIso *regexp.Regexp
AwsIsoB *regexp.Regexp
+ AwsIsoE *regexp.Regexp
+ AwsIsoF *regexp.Regexp
AwsUsGov *regexp.Regexp
}{
- Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
+ Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"),
AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"),
AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+ AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
}
@@ -135,6 +141,14 @@ var defaultPartitions = endpoints.Partitions{
RegionRegex: partitionRegexp.Aws,
IsRegionalized: true,
Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "af-south-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.af-south-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "af-south-1",
+ },
+ },
endpoints.EndpointKey{
Region: "ap-east-1",
}: endpoints.Endpoint{
@@ -175,6 +189,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "ap-south-1",
},
},
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-south-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-south-2",
+ },
+ },
endpoints.EndpointKey{
Region: "ap-southeast-1",
}: endpoints.Endpoint{
@@ -191,6 +213,30 @@ var defaultPartitions = endpoints.Partitions{
Region: "ap-southeast-2",
},
},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-3",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-3.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-3",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-4.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-4",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ap-southeast-5.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ap-southeast-5",
+ },
+ },
endpoints.EndpointKey{
Region: "ca-central-1",
}: endpoints.Endpoint{
@@ -199,6 +245,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "ca-central-1",
},
},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.ca-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "ca-west-1",
+ },
+ },
endpoints.EndpointKey{
Region: "eu-central-1",
}: endpoints.Endpoint{
@@ -207,6 +261,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "eu-central-1",
},
},
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-central-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-central-2",
+ },
+ },
endpoints.EndpointKey{
Region: "eu-north-1",
}: endpoints.Endpoint{
@@ -223,6 +285,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "eu-south-1",
},
},
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.eu-south-2.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "eu-south-2",
+ },
+ },
endpoints.EndpointKey{
Region: "eu-west-1",
}: endpoints.Endpoint{
@@ -247,6 +317,22 @@ var defaultPartitions = endpoints.Partitions{
Region: "eu-west-3",
},
},
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.il-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "il-central-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "me-central-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.me-central-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "me-central-1",
+ },
+ },
endpoints.EndpointKey{
Region: "me-south-1",
}: endpoints.Endpoint{
@@ -279,6 +365,14 @@ var defaultPartitions = endpoints.Partitions{
Region: "us-east-2",
},
},
+ endpoints.EndpointKey{
+ Region: "us-west-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.us-west-1.amazonaws.com",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "us-west-1",
+ },
+ },
endpoints.EndpointKey{
Region: "us-west-2",
}: endpoints.Endpoint{
@@ -323,6 +417,45 @@ var defaultPartitions = endpoints.Partitions{
},
RegionRegex: partitionRegexp.AwsCn,
IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "cn-north-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.cn-north-1.amazonaws.com.cn",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "cn-north-1",
+ },
+ },
+ endpoints.EndpointKey{
+ Region: "cn-northwest-1",
+ }: endpoints.Endpoint{
+ Hostname: "oidc.cn-northwest-1.amazonaws.com.cn",
+ CredentialScope: endpoints.CredentialScope{
+ Region: "cn-northwest-1",
+ },
+ },
+ },
+ },
+ {
+ ID: "aws-eusc",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsEusc,
+ IsRegionalized: true,
},
{
ID: "aws-iso",
@@ -366,6 +499,48 @@ var defaultPartitions = endpoints.Partitions{
RegionRegex: partitionRegexp.AwsIsoB,
IsRegionalized: true,
},
+ {
+ ID: "aws-iso-e",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoE,
+ IsRegionalized: true,
+ },
+ {
+ ID: "aws-iso-f",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "oidc-fips.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "oidc.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoF,
+ IsRegionalized: true,
+ },
{
ID: "aws-us-gov",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
new file mode 100644
index 000000000..55dd80d0e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/options.go
@@ -0,0 +1,232 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package ssooidc
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+)
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The optional application specific identifier appended to the User-Agent header.
+ AppID string
+
+ // This endpoint will be given as input to an EndpointResolverV2. It is used for
+ // providing a custom base endpoint that is subject to modifications by the
+ // processing EndpointResolverV2.
+ BaseEndpoint *string
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+ // clients initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ //
+ // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+ // value for this field will likely prevent you from using any endpoint-related
+ // service features released after the introduction of EndpointResolverV2 and
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
+ EndpointResolver EndpointResolver
+
+ // Resolves the endpoint used for a particular service operation. This should be
+ // used over the deprecated EndpointResolver.
+ EndpointResolverV2 EndpointResolverV2
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The client meter provider.
+ MeterProvider metrics.MeterProvider
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // RetryMaxAttempts specifies the maximum number attempts an API client will call
+ // an operation that fails with a retryable error. A value of 0 is ignored, and
+ // will not be used to configure the API client created default retryer, or modify
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
+ RetryMaxAttempts int
+
+ // RetryMode specifies the retry mode the API client will be created with, if
+ // Retryer option is not also specified.
+ //
+ // When creating a new API Clients this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ RetryMode aws.RetryMode
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer. The kind of
+ // default retry created by the API client can be changed with the RetryMode
+ // option.
+ Retryer aws.Retryer
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+ // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+ // should not populate this structure programmatically, or rely on the values here
+ // within your applications.
+ RuntimeEnvironment aws.RuntimeEnvironment
+
+ // The client tracer provider.
+ TracerProvider tracing.TracerProvider
+
+ // The initial DefaultsMode used when the client options were constructed. If the
+ // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ resolvedDefaultsMode aws.DefaultsMode
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // The auth scheme resolver which determines how to authenticate for each
+ // operation.
+ AuthSchemeResolver AuthSchemeResolver
+
+ // The list of auth schemes supported by the client.
+ AuthSchemes []smithyhttp.AuthScheme
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+
+ return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+ if schemeID == "aws.auth#sigv4" {
+ return getSigV4IdentityResolver(o)
+ }
+ if schemeID == "smithy.api#noAuth" {
+ return &smithyauth.AnonymousIdentityResolver{}
+ }
+ return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolverV2 = v
+ }
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+ }
+ return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+func ignoreAnonymousAuth(options *Options) {
+ if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+ options.Credentials = nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
index a8cfd7b46..1ad103d1e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/serializers.go
@@ -10,6 +10,7 @@ import (
"github.com/aws/smithy-go/encoding/httpbinding"
smithyjson "github.com/aws/smithy-go/encoding/json"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
@@ -23,6 +24,10 @@ func (*awsRestjson1_serializeOpCreateToken) ID() string {
func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -38,7 +43,14 @@ func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Contex
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
- restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
@@ -59,6 +71,8 @@ func (m *awsRestjson1_serializeOpCreateToken) HandleSerialize(ctx context.Contex
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsCreateTokenInput(v *CreateTokenInput, encoder *httpbinding.Encoder) error {
@@ -88,6 +102,11 @@ func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value
ok.String(*v.Code)
}
+ if v.CodeVerifier != nil {
+ ok := object.Key("codeVerifier")
+ ok.String(*v.CodeVerifier)
+ }
+
if v.DeviceCode != nil {
ok := object.Key("deviceCode")
ok.String(*v.DeviceCode)
@@ -118,6 +137,139 @@ func awsRestjson1_serializeOpDocumentCreateTokenInput(v *CreateTokenInput, value
return nil
}
+type awsRestjson1_serializeOpCreateTokenWithIAM struct {
+}
+
+func (*awsRestjson1_serializeOpCreateTokenWithIAM) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsRestjson1_serializeOpCreateTokenWithIAM) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*CreateTokenWithIAMInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ opPath, opQuery := httpbinding.SplitURI("/token?aws_iam=t")
+ request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
+ request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
+ request.Method = "POST"
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ restEncoder.SetHeader("Content-Type").String("application/json")
+
+ jsonEncoder := smithyjson.NewEncoder()
+ if err := awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(input, jsonEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(jsonEncoder.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = restEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+func awsRestjson1_serializeOpHttpBindingsCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, encoder *httpbinding.Encoder) error {
+ if v == nil {
+ return fmt.Errorf("unsupported serialization of nil %T", v)
+ }
+
+ return nil
+}
+
+func awsRestjson1_serializeOpDocumentCreateTokenWithIAMInput(v *CreateTokenWithIAMInput, value smithyjson.Value) error {
+ object := value.Object()
+ defer object.Close()
+
+ if v.Assertion != nil {
+ ok := object.Key("assertion")
+ ok.String(*v.Assertion)
+ }
+
+ if v.ClientId != nil {
+ ok := object.Key("clientId")
+ ok.String(*v.ClientId)
+ }
+
+ if v.Code != nil {
+ ok := object.Key("code")
+ ok.String(*v.Code)
+ }
+
+ if v.CodeVerifier != nil {
+ ok := object.Key("codeVerifier")
+ ok.String(*v.CodeVerifier)
+ }
+
+ if v.GrantType != nil {
+ ok := object.Key("grantType")
+ ok.String(*v.GrantType)
+ }
+
+ if v.RedirectUri != nil {
+ ok := object.Key("redirectUri")
+ ok.String(*v.RedirectUri)
+ }
+
+ if v.RefreshToken != nil {
+ ok := object.Key("refreshToken")
+ ok.String(*v.RefreshToken)
+ }
+
+ if v.RequestedTokenType != nil {
+ ok := object.Key("requestedTokenType")
+ ok.String(*v.RequestedTokenType)
+ }
+
+ if v.Scope != nil {
+ ok := object.Key("scope")
+ if err := awsRestjson1_serializeDocumentScopes(v.Scope, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.SubjectToken != nil {
+ ok := object.Key("subjectToken")
+ ok.String(*v.SubjectToken)
+ }
+
+ if v.SubjectTokenType != nil {
+ ok := object.Key("subjectTokenType")
+ ok.String(*v.SubjectTokenType)
+ }
+
+ return nil
+}
+
type awsRestjson1_serializeOpRegisterClient struct {
}
@@ -128,6 +280,10 @@ func (*awsRestjson1_serializeOpRegisterClient) ID() string {
func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -143,7 +299,14 @@ func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Con
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
- restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
@@ -164,6 +327,8 @@ func (m *awsRestjson1_serializeOpRegisterClient) HandleSerialize(ctx context.Con
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsRegisterClientInput(v *RegisterClientInput, encoder *httpbinding.Encoder) error {
@@ -188,6 +353,30 @@ func awsRestjson1_serializeOpDocumentRegisterClientInput(v *RegisterClientInput,
ok.String(*v.ClientType)
}
+ if v.EntitledApplicationArn != nil {
+ ok := object.Key("entitledApplicationArn")
+ ok.String(*v.EntitledApplicationArn)
+ }
+
+ if v.GrantTypes != nil {
+ ok := object.Key("grantTypes")
+ if err := awsRestjson1_serializeDocumentGrantTypes(v.GrantTypes, ok); err != nil {
+ return err
+ }
+ }
+
+ if v.IssuerUrl != nil {
+ ok := object.Key("issuerUrl")
+ ok.String(*v.IssuerUrl)
+ }
+
+ if v.RedirectUris != nil {
+ ok := object.Key("redirectUris")
+ if err := awsRestjson1_serializeDocumentRedirectUris(v.RedirectUris, ok); err != nil {
+ return err
+ }
+ }
+
if v.Scopes != nil {
ok := object.Key("scopes")
if err := awsRestjson1_serializeDocumentScopes(v.Scopes, ok); err != nil {
@@ -208,6 +397,10 @@ func (*awsRestjson1_serializeOpStartDeviceAuthorization) ID() string {
func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -223,7 +416,14 @@ func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx c
request.URL.Path = smithyhttp.JoinPath(request.URL.Path, opPath)
request.URL.RawQuery = smithyhttp.JoinRawQuery(request.URL.RawQuery, opQuery)
request.Method = "POST"
- restEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ var restEncoder *httpbinding.Encoder
+ if request.URL.RawPath == "" {
+ restEncoder, err = httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ } else {
+ request.URL.RawPath = smithyhttp.JoinPath(request.URL.RawPath, opPath)
+ restEncoder, err = httpbinding.NewEncoderWithRawPath(request.URL.Path, request.URL.RawPath, request.URL.RawQuery, request.Header)
+ }
+
if err != nil {
return out, metadata, &smithy.SerializationError{Err: err}
}
@@ -244,6 +444,8 @@ func (m *awsRestjson1_serializeOpStartDeviceAuthorization) HandleSerialize(ctx c
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
func awsRestjson1_serializeOpHttpBindingsStartDeviceAuthorizationInput(v *StartDeviceAuthorizationInput, encoder *httpbinding.Encoder) error {
@@ -276,6 +478,28 @@ func awsRestjson1_serializeOpDocumentStartDeviceAuthorizationInput(v *StartDevic
return nil
}
+func awsRestjson1_serializeDocumentGrantTypes(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
+func awsRestjson1_serializeDocumentRedirectUris(v []string, value smithyjson.Value) error {
+ array := value.Array()
+ defer array.Close()
+
+ for i := range v {
+ av := array.Value()
+ av.String(v[i])
+ }
+ return nil
+}
+
func awsRestjson1_serializeDocumentScopes(v []string, value smithyjson.Value) error {
array := value.Array()
defer array.Close()
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
index beef5aaa3..2cfe7b48f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/errors.go
@@ -11,6 +11,8 @@ import (
type AccessDeniedException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -26,14 +28,21 @@ func (e *AccessDeniedException) ErrorMessage() string {
}
return *e.Message
}
-func (e *AccessDeniedException) ErrorCode() string { return "AccessDeniedException" }
+func (e *AccessDeniedException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "AccessDeniedException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *AccessDeniedException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
-// Indicates that a request to authorize a client with an access user session token
-// is pending.
+// Indicates that a request to authorize a client with an access user session
+// token is pending.
type AuthorizationPendingException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -49,7 +58,12 @@ func (e *AuthorizationPendingException) ErrorMessage() string {
}
return *e.Message
}
-func (e *AuthorizationPendingException) ErrorCode() string { return "AuthorizationPendingException" }
+func (e *AuthorizationPendingException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "AuthorizationPendingException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that the token issued by the service is expired and is no longer
@@ -57,6 +71,8 @@ func (e *AuthorizationPendingException) ErrorFault() smithy.ErrorFault { return
type ExpiredTokenException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -72,7 +88,12 @@ func (e *ExpiredTokenException) ErrorMessage() string {
}
return *e.Message
}
-func (e *ExpiredTokenException) ErrorCode() string { return "ExpiredTokenException" }
+func (e *ExpiredTokenException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ExpiredTokenException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that an error from the service occurred while trying to process a
@@ -80,6 +101,8 @@ func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.F
type InternalServerException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -95,15 +118,22 @@ func (e *InternalServerException) ErrorMessage() string {
}
return *e.Message
}
-func (e *InternalServerException) ErrorCode() string { return "InternalServerException" }
+func (e *InternalServerException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InternalServerException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *InternalServerException) ErrorFault() smithy.ErrorFault { return smithy.FaultServer }
// Indicates that the clientId or clientSecret in the request is invalid. For
// example, this can occur when a client sends an incorrect clientId or an expired
-// clientSecret.
+// clientSecret .
type InvalidClientException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -119,14 +149,21 @@ func (e *InvalidClientException) ErrorMessage() string {
}
return *e.Message
}
-func (e *InvalidClientException) ErrorCode() string { return "InvalidClientException" }
+func (e *InvalidClientException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidClientException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *InvalidClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
-// Indicates that the client information sent in the request during registration is
-// invalid.
+// Indicates that the client information sent in the request during registration
+// is invalid.
type InvalidClientMetadataException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -142,14 +179,21 @@ func (e *InvalidClientMetadataException) ErrorMessage() string {
}
return *e.Message
}
-func (e *InvalidClientMetadataException) ErrorCode() string { return "InvalidClientMetadataException" }
+func (e *InvalidClientMetadataException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidClientMetadataException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *InvalidClientMetadataException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that a request contains an invalid grant. This can occur if a client
-// makes a CreateToken request with an invalid grant type.
+// makes a CreateTokenrequest with an invalid grant type.
type InvalidGrantException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -165,14 +209,51 @@ func (e *InvalidGrantException) ErrorMessage() string {
}
return *e.Message
}
-func (e *InvalidGrantException) ErrorCode() string { return "InvalidGrantException" }
+func (e *InvalidGrantException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidGrantException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *InvalidGrantException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+// Indicates that one or more redirect URI in the request is not supported for
+// this operation.
+type InvalidRedirectUriException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRedirectUriException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRedirectUriException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRedirectUriException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRedirectUriException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRedirectUriException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
// Indicates that something is wrong with the input to the request. For example, a
// required parameter might be missing or out of range.
type InvalidRequestException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -188,13 +269,52 @@ func (e *InvalidRequestException) ErrorMessage() string {
}
return *e.Message
}
-func (e *InvalidRequestException) ErrorCode() string { return "InvalidRequestException" }
+func (e *InvalidRequestException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRequestException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *InvalidRequestException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+// Indicates that a token provided as input to the request was issued by and is
+// only usable by calling IAM Identity Center endpoints in another region.
+type InvalidRequestRegionException struct {
+ Message *string
+
+ ErrorCodeOverride *string
+
+ Error_ *string
+ Error_description *string
+ Endpoint *string
+ Region *string
+
+ noSmithyDocumentSerde
+}
+
+func (e *InvalidRequestRegionException) Error() string {
+ return fmt.Sprintf("%s: %s", e.ErrorCode(), e.ErrorMessage())
+}
+func (e *InvalidRequestRegionException) ErrorMessage() string {
+ if e.Message == nil {
+ return ""
+ }
+ return *e.Message
+}
+func (e *InvalidRequestRegionException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidRequestRegionException"
+ }
+ return *e.ErrorCodeOverride
+}
+func (e *InvalidRequestRegionException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
+
// Indicates that the scope provided in the request is invalid.
type InvalidScopeException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -210,7 +330,12 @@ func (e *InvalidScopeException) ErrorMessage() string {
}
return *e.Message
}
-func (e *InvalidScopeException) ErrorCode() string { return "InvalidScopeException" }
+func (e *InvalidScopeException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidScopeException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that the client is making the request too frequently and is more than
@@ -218,6 +343,8 @@ func (e *InvalidScopeException) ErrorFault() smithy.ErrorFault { return smithy.F
type SlowDownException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -233,7 +360,12 @@ func (e *SlowDownException) ErrorMessage() string {
}
return *e.Message
}
-func (e *SlowDownException) ErrorCode() string { return "SlowDownException" }
+func (e *SlowDownException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "SlowDownException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that the client is not currently authorized to make the request. This
@@ -241,6 +373,8 @@ func (e *SlowDownException) ErrorFault() smithy.ErrorFault { return smithy.Fault
type UnauthorizedClientException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -256,13 +390,20 @@ func (e *UnauthorizedClientException) ErrorMessage() string {
}
return *e.Message
}
-func (e *UnauthorizedClientException) ErrorCode() string { return "UnauthorizedClientException" }
+func (e *UnauthorizedClientException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "UnauthorizedClientException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *UnauthorizedClientException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// Indicates that the grant type in the request is not supported by the service.
type UnsupportedGrantTypeException struct {
Message *string
+ ErrorCodeOverride *string
+
Error_ *string
Error_description *string
@@ -278,5 +419,10 @@ func (e *UnsupportedGrantTypeException) ErrorMessage() string {
}
return *e.Message
}
-func (e *UnsupportedGrantTypeException) ErrorCode() string { return "UnsupportedGrantTypeException" }
+func (e *UnsupportedGrantTypeException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "UnsupportedGrantTypeException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *UnsupportedGrantTypeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go
index 0ec0789f8..2e8f3ea03 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/types/types.go
@@ -6,4 +6,17 @@ import (
smithydocument "github.com/aws/smithy-go/document"
)
+// This structure contains Amazon Web Services-specific parameter extensions for
+// the token endpoint responses and includes the identity context.
+type AwsAdditionalDetails struct {
+
+ // STS context assertion that carries a user identifier to the Amazon Web Services
+ // service that it calls and can be used to obtain an identity-enhanced IAM role
+ // session. This value corresponds to the sts:identity_context claim in the ID
+ // token.
+ IdentityContext *string
+
+ noSmithyDocumentSerde
+}
+
type noSmithyDocumentSerde = smithydocument.NoSerde
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go
index 5a309484e..9c17e4c8e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/ssooidc/validators.go
@@ -29,6 +29,26 @@ func (m *validateOpCreateToken) HandleInitialize(ctx context.Context, in middlew
return next.HandleInitialize(ctx, in)
}
+type validateOpCreateTokenWithIAM struct {
+}
+
+func (*validateOpCreateTokenWithIAM) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpCreateTokenWithIAM) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*CreateTokenWithIAMInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpCreateTokenWithIAMInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
type validateOpRegisterClient struct {
}
@@ -73,6 +93,10 @@ func addOpCreateTokenValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpCreateToken{}, middleware.After)
}
+func addOpCreateTokenWithIAMValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpCreateTokenWithIAM{}, middleware.After)
+}
+
func addOpRegisterClientValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpRegisterClient{}, middleware.After)
}
@@ -102,6 +126,24 @@ func validateOpCreateTokenInput(v *CreateTokenInput) error {
}
}
+func validateOpCreateTokenWithIAMInput(v *CreateTokenWithIAMInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "CreateTokenWithIAMInput"}
+ if v.ClientId == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("ClientId"))
+ }
+ if v.GrantType == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("GrantType"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
func validateOpRegisterClientInput(v *RegisterClientInput) error {
if v == nil {
return nil
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
index 6c66c2137..dc3d12761 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/CHANGELOG.md
@@ -1,3 +1,470 @@
+# v1.33.20 (2025-06-06)
+
+* No change notes available for this release.
+
+# v1.33.19 (2025-04-10)
+
+* No change notes available for this release.
+
+# v1.33.18 (2025-04-03)
+
+* No change notes available for this release.
+
+# v1.33.17 (2025-03-04.2)
+
+* **Bug Fix**: Add assurance test for operation order.
+
+# v1.33.16 (2025-02-27)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.15 (2025-02-18)
+
+* **Bug Fix**: Bump go version to 1.22
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.14 (2025-02-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.13 (2025-02-04)
+
+* No change notes available for this release.
+
+# v1.33.12 (2025-01-31)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.11 (2025-01-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.10 (2025-01-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade to smithy-go v1.22.2.
+
+# v1.33.9 (2025-01-17)
+
+* **Bug Fix**: Fix bug where credentials weren't refreshed during retry loop.
+
+# v1.33.8 (2025-01-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.7 (2025-01-14)
+
+* No change notes available for this release.
+
+# v1.33.6 (2025-01-10)
+
+* **Documentation**: Fixed typos in the descriptions.
+
+# v1.33.5 (2025-01-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.4 (2025-01-08)
+
+* No change notes available for this release.
+
+# v1.33.3 (2024-12-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.2 (2024-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.1 (2024-11-18)
+
+* **Dependency Update**: Update to smithy-go v1.22.1.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.33.0 (2024-11-14)
+
+* **Feature**: This release introduces the new API 'AssumeRoot', which returns short-term credentials that you can use to perform privileged tasks.
+
+# v1.32.4 (2024-11-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.3 (2024-10-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.2 (2024-10-08)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.1 (2024-10-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.32.0 (2024-10-04)
+
+* **Feature**: Add support for HTTP client metrics.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.31.4 (2024-10-03)
+
+* No change notes available for this release.
+
+# v1.31.3 (2024-09-27)
+
+* No change notes available for this release.
+
+# v1.31.2 (2024-09-25)
+
+* No change notes available for this release.
+
+# v1.31.1 (2024-09-23)
+
+* No change notes available for this release.
+
+# v1.31.0 (2024-09-20)
+
+* **Feature**: Add tracing and metrics support to service clients.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.8 (2024-09-17)
+
+* **Bug Fix**: **BREAKFIX**: Only generate AccountIDEndpointMode config for services that use it. This is a compiler break, but removes no actual functionality, as no services currently use the account ID in endpoint resolution.
+
+# v1.30.7 (2024-09-04)
+
+* No change notes available for this release.
+
+# v1.30.6 (2024-09-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.5 (2024-08-22)
+
+* No change notes available for this release.
+
+# v1.30.4 (2024-08-15)
+
+* **Dependency Update**: Bump minimum Go version to 1.21.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.3 (2024-07-10.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.2 (2024-07-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.1 (2024-06-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.30.0 (2024-06-26)
+
+* **Feature**: Support list-of-string endpoint parameter.
+
+# v1.29.1 (2024-06-19)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.29.0 (2024-06-18)
+
+* **Feature**: Track usage of various AWS SDK features in user-agent string.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.13 (2024-06-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.12 (2024-06-07)
+
+* **Bug Fix**: Add clock skew correction on all service clients
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.11 (2024-06-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.10 (2024-05-23)
+
+* No change notes available for this release.
+
+# v1.28.9 (2024-05-16)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.8 (2024-05-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.7 (2024-05-08)
+
+* **Bug Fix**: GoDoc improvement
+
+# v1.28.6 (2024-03-29)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.5 (2024-03-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.4 (2024-03-07)
+
+* **Bug Fix**: Remove dependency on go-cmp.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.3 (2024-03-05)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.2 (2024-03-04)
+
+* **Bug Fix**: Update internal/presigned-url dependency for corrected API name.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.1 (2024-02-23)
+
+* **Bug Fix**: Move all common, SDK-side middleware stack ops into the service client module to prevent cross-module compatibility issues in the future.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.28.0 (2024-02-22)
+
+* **Feature**: Add middleware stack snapshot tests.
+
+# v1.27.2 (2024-02-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.27.1 (2024-02-20)
+
+* **Bug Fix**: When sourcing values for a service's `EndpointParameters`, the lack of a configured region (i.e. `options.Region == ""`) will now translate to a `nil` value for `EndpointParameters.Region` instead of a pointer to the empty string `""`. This will result in a much more explicit error when calling an operation instead of an obscure hostname lookup failure.
+
+# v1.27.0 (2024-02-13)
+
+* **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.7 (2024-01-04)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.6 (2023-12-20)
+
+* No change notes available for this release.
+
+# v1.26.5 (2023-12-08)
+
+* **Bug Fix**: Reinstate presence of default Retryer in functional options, but still respect max attempts set therein.
+
+# v1.26.4 (2023-12-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.3 (2023-12-06)
+
+* **Bug Fix**: Restore pre-refactor auth behavior where all operations could technically be performed anonymously.
+* **Bug Fix**: STS `AssumeRoleWithSAML` and `AssumeRoleWithWebIdentity` would incorrectly attempt to use SigV4 authentication.
+
+# v1.26.2 (2023-12-01)
+
+* **Bug Fix**: Correct wrapping of errors in authentication workflow.
+* **Bug Fix**: Correctly recognize cache-wrapped instances of AnonymousCredentials at client construction.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.1 (2023-11-30)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.26.0 (2023-11-29)
+
+* **Feature**: Expose Options() accessor on service clients.
+* **Documentation**: Documentation updates for AWS Security Token Service.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.6 (2023-11-28.2)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.5 (2023-11-28)
+
+* **Bug Fix**: Respect setting RetryMaxAttempts in functional options at client construction.
+
+# v1.25.4 (2023-11-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.3 (2023-11-17)
+
+* **Documentation**: API updates for the AWS Security Token Service
+
+# v1.25.2 (2023-11-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.1 (2023-11-09)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.25.0 (2023-11-01)
+
+* **Feature**: Adds support for configured endpoints via environment variables and the AWS shared configuration file.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.24.0 (2023-10-31)
+
+* **Feature**: **BREAKING CHANGE**: Bump minimum go version to 1.19 per the revised [go version support policy](https://aws.amazon.com/blogs/developer/aws-sdk-for-go-aligns-with-go-release-policy-on-supported-runtimes/).
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.2 (2023-10-12)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.1 (2023-10-06)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.23.0 (2023-10-02)
+
+* **Feature**: STS API updates for assumeRole
+
+# v1.22.0 (2023-09-18)
+
+* **Announcement**: [BREAKFIX] Change in MaxResults datatype from value to pointer type in cognito-sync service.
+* **Feature**: Adds several endpoint ruleset changes across all models: smaller rulesets, removed non-unique regional endpoints, fixes FIPS and DualStack endpoints, and make region not required in SDK::Endpoint. Additional breakfix to cognito-sync field.
+
+# v1.21.5 (2023-08-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.4 (2023-08-18)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.3 (2023-08-17)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.2 (2023-08-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.21.1 (2023-08-01)
+
+* No change notes available for this release.
+
+# v1.21.0 (2023-07-31)
+
+* **Feature**: Adds support for smithy-modeled endpoint resolution. A new rules-based endpoint resolution will be added to the SDK which will supercede and deprecate existing endpoint resolution. Specifically, EndpointResolver will be deprecated while BaseEndpoint and EndpointResolverV2 will take its place. For more information, please see the Endpoints section in our Developer Guide.
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.1 (2023-07-28)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.20.0 (2023-07-25)
+
+* **Feature**: API updates for the AWS Security Token Service
+
+# v1.19.3 (2023-07-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.2 (2023-06-15)
+
+* No change notes available for this release.
+
+# v1.19.1 (2023-06-13)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.19.0 (2023-05-08)
+
+* **Feature**: Documentation updates for AWS Security Token Service.
+
+# v1.18.11 (2023-05-04)
+
+* No change notes available for this release.
+
+# v1.18.10 (2023-04-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.9 (2023-04-10)
+
+* No change notes available for this release.
+
+# v1.18.8 (2023-04-07)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.7 (2023-03-21)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.6 (2023-03-10)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.5 (2023-02-22)
+
+* **Bug Fix**: Prevent nil pointer dereference when retrieving error codes.
+
+# v1.18.4 (2023-02-20)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.18.3 (2023-02-03)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+* **Dependency Update**: Upgrade smithy to 1.27.2 and correct empty query list serialization.
+
+# v1.18.2 (2023-01-25)
+
+* **Documentation**: Doc only change to update wording in a key topic
+
+# v1.18.1 (2023-01-23)
+
+* No change notes available for this release.
+
+# v1.18.0 (2023-01-05)
+
+* **Feature**: Add `ErrorCodeOverride` field to all error structs (aws/smithy-go#401).
+
+# v1.17.7 (2022-12-15)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.6 (2022-12-02)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.5 (2022-11-22)
+
+* No change notes available for this release.
+
+# v1.17.4 (2022-11-17)
+
+* **Documentation**: Documentation updates for AWS Security Token Service.
+
+# v1.17.3 (2022-11-16)
+
+* No change notes available for this release.
+
+# v1.17.2 (2022-11-10)
+
+* No change notes available for this release.
+
+# v1.17.1 (2022-10-24)
+
+* **Dependency Update**: Updated to the latest SDK module versions
+
+# v1.17.0 (2022-10-21)
+
+* **Feature**: Add presign functionality for sts:AssumeRole operation
+* **Dependency Update**: Updated to the latest SDK module versions
+
# v1.16.19 (2022-09-20)
* **Dependency Update**: Updated to the latest SDK module versions
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
index 3041fc467..fca363d2f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_client.go
@@ -4,6 +4,8 @@ package sts
import (
"context"
+ "errors"
+ "fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/defaults"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
@@ -11,165 +13,236 @@ import (
"github.com/aws/aws-sdk-go-v2/aws/retry"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
awshttp "github.com/aws/aws-sdk-go-v2/aws/transport/http"
+ internalauth "github.com/aws/aws-sdk-go-v2/internal/auth"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ internalmiddleware "github.com/aws/aws-sdk-go-v2/internal/middleware"
+ acceptencodingcust "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"
presignedurlcust "github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"
smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
smithydocument "github.com/aws/smithy-go/document"
"github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
"net"
"net/http"
+ "sync/atomic"
"time"
)
const ServiceID = "STS"
const ServiceAPIVersion = "2011-06-15"
-// Client provides the API client to make operations call for AWS Security Token
-// Service.
-type Client struct {
- options Options
+type operationMetrics struct {
+ Duration metrics.Float64Histogram
+ SerializeDuration metrics.Float64Histogram
+ ResolveIdentityDuration metrics.Float64Histogram
+ ResolveEndpointDuration metrics.Float64Histogram
+ SignRequestDuration metrics.Float64Histogram
+ DeserializeDuration metrics.Float64Histogram
}
-// New returns an initialized Client based on the functional options. Provide
-// additional functional options to further configure the behavior of the client,
-// such as changing the client's endpoint or adding custom middleware behavior.
-func New(options Options, optFns ...func(*Options)) *Client {
- options = options.Copy()
+func (m *operationMetrics) histogramFor(name string) metrics.Float64Histogram {
+ switch name {
+ case "client.call.duration":
+ return m.Duration
+ case "client.call.serialization_duration":
+ return m.SerializeDuration
+ case "client.call.resolve_identity_duration":
+ return m.ResolveIdentityDuration
+ case "client.call.resolve_endpoint_duration":
+ return m.ResolveEndpointDuration
+ case "client.call.signing_duration":
+ return m.SignRequestDuration
+ case "client.call.deserialization_duration":
+ return m.DeserializeDuration
+ default:
+ panic("unrecognized operation metric")
+ }
+}
- resolveDefaultLogger(&options)
+func timeOperationMetric[T any](
+ ctx context.Context, metric string, fn func() (T, error),
+ opts ...metrics.RecordMetricOption,
+) (T, error) {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
- setResolvedDefaultsMode(&options)
+ start := time.Now()
+ v, err := fn()
+ end := time.Now()
- resolveRetryer(&options)
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
+ return v, err
+}
- resolveHTTPClient(&options)
+func startMetricTimer(ctx context.Context, metric string, opts ...metrics.RecordMetricOption) func() {
+ instr := getOperationMetrics(ctx).histogramFor(metric)
+ opts = append([]metrics.RecordMetricOption{withOperationMetadata(ctx)}, opts...)
- resolveHTTPSignerV4(&options)
+ var ended bool
+ start := time.Now()
+ return func() {
+ if ended {
+ return
+ }
+ ended = true
- resolveDefaultEndpointConfiguration(&options)
+ end := time.Now()
- for _, fn := range optFns {
- fn(&options)
+ elapsed := end.Sub(start)
+ instr.Record(ctx, float64(elapsed)/1e9, opts...)
}
+}
- client := &Client{
- options: options,
+func withOperationMetadata(ctx context.Context) metrics.RecordMetricOption {
+ return func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("rpc.service", middleware.GetServiceID(ctx))
+ o.Properties.Set("rpc.method", middleware.GetOperationName(ctx))
}
+}
- return client
+type operationMetricsKey struct{}
+
+func withOperationMetrics(parent context.Context, mp metrics.MeterProvider) (context.Context, error) {
+ meter := mp.Meter("github.com/aws/aws-sdk-go-v2/service/sts")
+ om := &operationMetrics{}
+
+ var err error
+
+ om.Duration, err = operationMetricTimer(meter, "client.call.duration",
+ "Overall call duration (including retries and time to send or receive request and response body)")
+ if err != nil {
+ return nil, err
+ }
+ om.SerializeDuration, err = operationMetricTimer(meter, "client.call.serialization_duration",
+ "The time it takes to serialize a message body")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveIdentityDuration, err = operationMetricTimer(meter, "client.call.auth.resolve_identity_duration",
+ "The time taken to acquire an identity (AWS credentials, bearer token, etc) from an Identity Provider")
+ if err != nil {
+ return nil, err
+ }
+ om.ResolveEndpointDuration, err = operationMetricTimer(meter, "client.call.resolve_endpoint_duration",
+ "The time it takes to resolve an endpoint (endpoint resolver, not DNS) for the request")
+ if err != nil {
+ return nil, err
+ }
+ om.SignRequestDuration, err = operationMetricTimer(meter, "client.call.auth.signing_duration",
+ "The time it takes to sign a request")
+ if err != nil {
+ return nil, err
+ }
+ om.DeserializeDuration, err = operationMetricTimer(meter, "client.call.deserialization_duration",
+ "The time it takes to deserialize a message body")
+ if err != nil {
+ return nil, err
+ }
+
+ return context.WithValue(parent, operationMetricsKey{}, om), nil
}
-type Options struct {
- // Set of options to modify how an operation is invoked. These apply to all
- // operations invoked for this client. Use functional options on operation call to
- // modify this list for per operation behavior.
- APIOptions []func(*middleware.Stack) error
+func operationMetricTimer(m metrics.Meter, name, desc string) (metrics.Float64Histogram, error) {
+ return m.Float64Histogram(name, func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = desc
+ })
+}
- // Configures the events that will be sent to the configured logger.
- ClientLogMode aws.ClientLogMode
+func getOperationMetrics(ctx context.Context) *operationMetrics {
+ return ctx.Value(operationMetricsKey{}).(*operationMetrics)
+}
- // The credentials object to use when signing requests.
- Credentials aws.CredentialsProvider
+func operationTracer(p tracing.TracerProvider) tracing.Tracer {
+ return p.Tracer("github.com/aws/aws-sdk-go-v2/service/sts")
+}
- // The configuration DefaultsMode that the SDK should use when constructing the
- // clients initial default settings.
- DefaultsMode aws.DefaultsMode
+// Client provides the API client to make operations call for AWS Security Token
+// Service.
+type Client struct {
+ options Options
- // The endpoint options to be used when attempting to resolve an endpoint.
- EndpointOptions EndpointResolverOptions
+ // Difference between the time reported by the server and the client
+ timeOffset *atomic.Int64
+}
- // The service endpoint resolver.
- EndpointResolver EndpointResolver
+// New returns an initialized Client based on the functional options. Provide
+// additional functional options to further configure the behavior of the client,
+// such as changing the client's endpoint or adding custom middleware behavior.
+func New(options Options, optFns ...func(*Options)) *Client {
+ options = options.Copy()
- // Signature Version 4 (SigV4) Signer
- HTTPSignerV4 HTTPSignerV4
+ resolveDefaultLogger(&options)
- // The logger writer interface to write logging messages to.
- Logger logging.Logger
+ setResolvedDefaultsMode(&options)
- // The region to send requests to. (Required)
- Region string
+ resolveRetryer(&options)
- // RetryMaxAttempts specifies the maximum number attempts an API client will call
- // an operation that fails with a retryable error. A value of 0 is ignored, and
- // will not be used to configure the API client created default retryer, or modify
- // per operation call's retry max attempts. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. If specified in an operation call's functional
- // options with a value that is different than the constructed client's Options,
- // the Client's Retryer will be wrapped to use the operation's specific
- // RetryMaxAttempts value.
- RetryMaxAttempts int
+ resolveHTTPClient(&options)
- // RetryMode specifies the retry mode the API client will be created with, if
- // Retryer option is not also specified. When creating a new API Clients this
- // member will only be used if the Retryer Options member is nil. This value will
- // be ignored if Retryer is not nil. Currently does not support per operation call
- // overrides, may in the future.
- RetryMode aws.RetryMode
+ resolveHTTPSignerV4(&options)
- // Retryer guides how HTTP requests should be retried in case of recoverable
- // failures. When nil the API client will use a default retryer. The kind of
- // default retry created by the API client can be changed with the RetryMode
- // option.
- Retryer aws.Retryer
+ resolveEndpointResolverV2(&options)
- // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
- // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig. You
- // should not populate this structure programmatically, or rely on the values here
- // within your applications.
- RuntimeEnvironment aws.RuntimeEnvironment
+ resolveTracerProvider(&options)
- // The initial DefaultsMode used when the client options were constructed. If the
- // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
- // value was at that point in time. Currently does not support per operation call
- // overrides, may in the future.
- resolvedDefaultsMode aws.DefaultsMode
+ resolveMeterProvider(&options)
- // The HTTP client to invoke API calls with. Defaults to client's default HTTP
- // implementation if nil.
- HTTPClient HTTPClient
-}
+ resolveAuthSchemeResolver(&options)
-// WithAPIOptions returns a functional option for setting the Client's APIOptions
-// option.
-func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
- return func(o *Options) {
- o.APIOptions = append(o.APIOptions, optFns...)
+ for _, fn := range optFns {
+ fn(&options)
}
-}
-// WithEndpointResolver returns a functional option for setting the Client's
-// EndpointResolver option.
-func WithEndpointResolver(v EndpointResolver) func(*Options) {
- return func(o *Options) {
- o.EndpointResolver = v
+ finalizeRetryMaxAttempts(&options)
+
+ ignoreAnonymousAuth(&options)
+
+ wrapWithAnonymousAuth(&options)
+
+ resolveAuthSchemes(&options)
+
+ client := &Client{
+ options: options,
}
-}
-type HTTPClient interface {
- Do(*http.Request) (*http.Response, error)
-}
+ initializeTimeOffsetResolver(client)
-// Copy creates a clone where the APIOptions list is deep copied.
-func (o Options) Copy() Options {
- to := o
- to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
- copy(to.APIOptions, o.APIOptions)
+ return client
+}
- return to
+// Options returns a copy of the client configuration.
+//
+// Callers SHOULD NOT perform mutations on any inner structures within client
+// config. Config overrides should instead be made on a per-operation basis through
+// functional options.
+func (c *Client) Options() Options {
+ return c.options.Copy()
}
-func (c *Client) invokeOperation(ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error) (result interface{}, metadata middleware.Metadata, err error) {
+
+func (c *Client) invokeOperation(
+ ctx context.Context, opID string, params interface{}, optFns []func(*Options), stackFns ...func(*middleware.Stack, Options) error,
+) (
+ result interface{}, metadata middleware.Metadata, err error,
+) {
ctx = middleware.ClearStackValues(ctx)
+ ctx = middleware.WithServiceID(ctx, ServiceID)
+ ctx = middleware.WithOperationName(ctx, opID)
+
stack := middleware.NewStack(opID, smithyhttp.NewStackRequest)
options := c.options.Copy()
+
for _, fn := range optFns {
fn(&options)
}
- finalizeRetryMaxAttemptOptions(&options, *c)
+ finalizeOperationRetryMaxAttempts(&options, *c)
finalizeClientEndpointResolverOptions(&options)
@@ -185,20 +258,142 @@ func (c *Client) invokeOperation(ctx context.Context, opID string, params interf
}
}
- handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
- result, metadata, err = handler.Handle(ctx, params)
+ ctx, err = withOperationMetrics(ctx, options.MeterProvider)
if err != nil {
+ return nil, metadata, err
+ }
+
+ tracer := operationTracer(options.TracerProvider)
+ spanName := fmt.Sprintf("%s.%s", ServiceID, opID)
+
+ ctx = tracing.WithOperationTracer(ctx, tracer)
+
+ ctx, span := tracer.StartSpan(ctx, spanName, func(o *tracing.SpanOptions) {
+ o.Kind = tracing.SpanKindClient
+ o.Properties.Set("rpc.system", "aws-api")
+ o.Properties.Set("rpc.method", opID)
+ o.Properties.Set("rpc.service", ServiceID)
+ })
+ endTimer := startMetricTimer(ctx, "client.call.duration")
+ defer endTimer()
+ defer span.End()
+
+ handler := smithyhttp.NewClientHandlerWithOptions(options.HTTPClient, func(o *smithyhttp.ClientHandler) {
+ o.Meter = options.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts")
+ })
+ decorated := middleware.DecorateHandler(handler, stack)
+ result, metadata, err = decorated.Handle(ctx, params)
+ if err != nil {
+ span.SetProperty("exception.type", fmt.Sprintf("%T", err))
+ span.SetProperty("exception.message", err.Error())
+
+ var aerr smithy.APIError
+ if errors.As(err, &aerr) {
+ span.SetProperty("api.error_code", aerr.ErrorCode())
+ span.SetProperty("api.error_message", aerr.ErrorMessage())
+ span.SetProperty("api.error_fault", aerr.ErrorFault().String())
+ }
+
err = &smithy.OperationError{
ServiceID: ServiceID,
OperationName: opID,
Err: err,
}
}
+
+ span.SetProperty("error", err != nil)
+ if err == nil {
+ span.SetStatus(tracing.SpanStatusOK)
+ } else {
+ span.SetStatus(tracing.SpanStatusError)
+ }
+
return result, metadata, err
}
+type operationInputKey struct{}
+
+func setOperationInput(ctx context.Context, input interface{}) context.Context {
+ return middleware.WithStackValue(ctx, operationInputKey{}, input)
+}
+
+func getOperationInput(ctx context.Context) interface{} {
+ return middleware.GetStackValue(ctx, operationInputKey{})
+}
+
+type setOperationInputMiddleware struct {
+}
+
+func (*setOperationInputMiddleware) ID() string {
+ return "setOperationInput"
+}
+
+func (m *setOperationInputMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ ctx = setOperationInput(ctx, in.Parameters)
+ return next.HandleSerialize(ctx, in)
+}
+
+func addProtocolFinalizerMiddlewares(stack *middleware.Stack, options Options, operation string) error {
+ if err := stack.Finalize.Add(&resolveAuthSchemeMiddleware{operation: operation, options: options}, middleware.Before); err != nil {
+ return fmt.Errorf("add ResolveAuthScheme: %w", err)
+ }
+ if err := stack.Finalize.Insert(&getIdentityMiddleware{options: options}, "ResolveAuthScheme", middleware.After); err != nil {
+ return fmt.Errorf("add GetIdentity: %v", err)
+ }
+ if err := stack.Finalize.Insert(&resolveEndpointV2Middleware{options: options}, "GetIdentity", middleware.After); err != nil {
+ return fmt.Errorf("add ResolveEndpointV2: %v", err)
+ }
+ if err := stack.Finalize.Insert(&signRequestMiddleware{options: options}, "ResolveEndpointV2", middleware.After); err != nil {
+ return fmt.Errorf("add Signing: %w", err)
+ }
+ return nil
+}
+func resolveAuthSchemeResolver(options *Options) {
+ if options.AuthSchemeResolver == nil {
+ options.AuthSchemeResolver = &defaultAuthSchemeResolver{}
+ }
+}
+
+func resolveAuthSchemes(options *Options) {
+ if options.AuthSchemes == nil {
+ options.AuthSchemes = []smithyhttp.AuthScheme{
+ internalauth.NewHTTPAuthScheme("aws.auth#sigv4", &internalauthsmithy.V4SignerAdapter{
+ Signer: options.HTTPSignerV4,
+ Logger: options.Logger,
+ LogSigning: options.ClientLogMode.IsSigning(),
+ }),
+ }
+ }
+}
+
type noSmithyDocumentSerde = smithydocument.NoSerde
+type legacyEndpointContextSetter struct {
+ LegacyResolver EndpointResolver
+}
+
+func (*legacyEndpointContextSetter) ID() string {
+ return "legacyEndpointContextSetter"
+}
+
+func (m *legacyEndpointContextSetter) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.LegacyResolver != nil {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, true)
+ }
+
+ return next.HandleInitialize(ctx, in)
+
+}
+func addlegacyEndpointContextSetter(stack *middleware.Stack, o Options) error {
+ return stack.Initialize.Add(&legacyEndpointContextSetter{
+ LegacyResolver: o.EndpointResolver,
+ }, middleware.Before)
+}
+
func resolveDefaultLogger(o *Options) {
if o.Logger != nil {
return
@@ -236,6 +431,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
APIOptions: cfg.APIOptions,
Logger: cfg.Logger,
ClientLogMode: cfg.ClientLogMode,
+ AppID: cfg.AppID,
}
resolveAWSRetryerProvider(cfg, &opts)
resolveAWSRetryMaxAttempts(cfg, &opts)
@@ -243,6 +439,7 @@ func NewFromConfig(cfg aws.Config, optFns ...func(*Options)) *Client {
resolveAWSEndpointResolver(cfg, &opts)
resolveUseDualStackEndpoint(cfg, &opts)
resolveUseFIPSEndpoint(cfg, &opts)
+ resolveBaseEndpoint(cfg, &opts)
return New(opts, optFns...)
}
@@ -334,7 +531,15 @@ func resolveAWSRetryMaxAttempts(cfg aws.Config, o *Options) {
o.RetryMaxAttempts = cfg.RetryMaxAttempts
}
-func finalizeRetryMaxAttemptOptions(o *Options, client Client) {
+func finalizeRetryMaxAttempts(o *Options) {
+ if o.RetryMaxAttempts == 0 {
+ return
+ }
+
+ o.Retryer = retry.AddWithMaxAttempts(o.Retryer, o.RetryMaxAttempts)
+}
+
+func finalizeOperationRetryMaxAttempts(o *Options, client Client) {
if v := o.RetryMaxAttempts; v == 0 || v == client.options.RetryMaxAttempts {
return
}
@@ -346,20 +551,39 @@ func resolveAWSEndpointResolver(cfg aws.Config, o *Options) {
if cfg.EndpointResolver == nil && cfg.EndpointResolverWithOptions == nil {
return
}
- o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions, NewDefaultEndpointResolver())
+ o.EndpointResolver = withEndpointResolver(cfg.EndpointResolver, cfg.EndpointResolverWithOptions)
}
-func addClientUserAgent(stack *middleware.Stack) error {
- return awsmiddleware.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)(stack)
+func addClientUserAgent(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddSDKAgentKeyValue(awsmiddleware.APIMetadata, "sts", goModuleVersion)
+ if len(options.AppID) > 0 {
+ ua.AddSDKAgentKey(awsmiddleware.ApplicationIdentifier, options.AppID)
+ }
+
+ return nil
}
-func addHTTPSignerV4Middleware(stack *middleware.Stack, o Options) error {
- mw := v4.NewSignHTTPRequestMiddleware(v4.SignHTTPRequestMiddlewareOptions{
- CredentialsProvider: o.Credentials,
- Signer: o.HTTPSignerV4,
- LogSigning: o.ClientLogMode.IsSigning(),
- })
- return stack.Finalize.Add(mw, middleware.After)
+func getOrAddRequestUserAgent(stack *middleware.Stack) (*awsmiddleware.RequestUserAgent, error) {
+ id := (*awsmiddleware.RequestUserAgent)(nil).ID()
+ mw, ok := stack.Build.Get(id)
+ if !ok {
+ mw = awsmiddleware.NewRequestUserAgent()
+ if err := stack.Build.Add(mw, middleware.After); err != nil {
+ return nil, err
+ }
+ }
+
+ ua, ok := mw.(*awsmiddleware.RequestUserAgent)
+ if !ok {
+ return nil, fmt.Errorf("%T for %s middleware did not match expected type", mw, id)
+ }
+
+ return ua, nil
}
type HTTPSignerV4 interface {
@@ -380,12 +604,97 @@ func newDefaultV4Signer(o Options) *v4.Signer {
})
}
-func addRetryMiddlewares(stack *middleware.Stack, o Options) error {
- mo := retry.AddRetryMiddlewaresOptions{
- Retryer: o.Retryer,
- LogRetryAttempts: o.ClientLogMode.IsRetries(),
+func addClientRequestID(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.ClientRequestID{}, middleware.After)
+}
+
+func addComputeContentLength(stack *middleware.Stack) error {
+ return stack.Build.Add(&smithyhttp.ComputeContentLength{}, middleware.After)
+}
+
+func addRawResponseToMetadata(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.AddRawResponse{}, middleware.Before)
+}
+
+func addRecordResponseTiming(stack *middleware.Stack) error {
+ return stack.Deserialize.Add(&awsmiddleware.RecordResponseTiming{}, middleware.After)
+}
+
+func addSpanRetryLoop(stack *middleware.Stack, options Options) error {
+ return stack.Finalize.Insert(&spanRetryLoop{options: options}, "Retry", middleware.Before)
+}
+
+type spanRetryLoop struct {
+ options Options
+}
+
+func (*spanRetryLoop) ID() string {
+ return "spanRetryLoop"
+}
+
+func (m *spanRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ middleware.FinalizeOutput, middleware.Metadata, error,
+) {
+ tracer := operationTracer(m.options.TracerProvider)
+ ctx, span := tracer.StartSpan(ctx, "RetryLoop")
+ defer span.End()
+
+ return next.HandleFinalize(ctx, in)
+}
+func addStreamingEventsPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Add(&v4.StreamingEventsPayload{}, middleware.Before)
+}
+
+func addUnsignedPayload(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.UnsignedPayload{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addComputePayloadSHA256(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ComputePayloadSHA256{}, "ResolveEndpointV2", middleware.After)
+}
+
+func addContentSHA256Header(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&v4.ContentSHA256Header{}, (*v4.ComputePayloadSHA256)(nil).ID(), middleware.After)
+}
+
+func addIsWaiterUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureWaiter)
+ return nil
+ })
+}
+
+func addIsPaginatorUserAgent(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(stack *middleware.Stack) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeaturePaginator)
+ return nil
+ })
+}
+
+func addRetry(stack *middleware.Stack, o Options) error {
+ attempt := retry.NewAttemptMiddleware(o.Retryer, smithyhttp.RequestCloner, func(m *retry.Attempt) {
+ m.LogAttempts = o.ClientLogMode.IsRetries()
+ m.OperationMeter = o.MeterProvider.Meter("github.com/aws/aws-sdk-go-v2/service/sts")
+ })
+ if err := stack.Finalize.Insert(attempt, "ResolveAuthScheme", middleware.Before); err != nil {
+ return err
}
- return retry.AddRetryMiddlewares(stack, mo)
+ if err := stack.Finalize.Insert(&retry.MetricsHeader{}, attempt.ID(), middleware.After); err != nil {
+ return err
+ }
+ return nil
}
// resolves dual-stack endpoint configuration
@@ -418,12 +727,99 @@ func resolveUseFIPSEndpoint(cfg aws.Config, o *Options) error {
return nil
}
+func resolveAccountID(identity smithyauth.Identity, mode aws.AccountIDEndpointMode) *string {
+ if mode == aws.AccountIDEndpointModeDisabled {
+ return nil
+ }
+
+ if ca, ok := identity.(*internalauthsmithy.CredentialsAdapter); ok && ca.Credentials.AccountID != "" {
+ return aws.String(ca.Credentials.AccountID)
+ }
+
+ return nil
+}
+
+func addTimeOffsetBuild(stack *middleware.Stack, c *Client) error {
+ mw := internalmiddleware.AddTimeOffsetMiddleware{Offset: c.timeOffset}
+ if err := stack.Build.Add(&mw, middleware.After); err != nil {
+ return err
+ }
+ return stack.Deserialize.Insert(&mw, "RecordResponseTiming", middleware.Before)
+}
+func initializeTimeOffsetResolver(c *Client) {
+ c.timeOffset = new(atomic.Int64)
+}
+
+func addUserAgentRetryMode(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ switch options.Retryer.(type) {
+ case *retry.Standard:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeStandard)
+ case *retry.AdaptiveMode:
+ ua.AddUserAgentFeature(awsmiddleware.UserAgentFeatureRetryModeAdaptive)
+ }
+ return nil
+}
+
+type setCredentialSourceMiddleware struct {
+ ua *awsmiddleware.RequestUserAgent
+ options Options
+}
+
+func (m setCredentialSourceMiddleware) ID() string { return "SetCredentialSourceMiddleware" }
+
+func (m setCredentialSourceMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ asProviderSource, ok := m.options.Credentials.(aws.CredentialProviderSource)
+ if !ok {
+ return next.HandleBuild(ctx, in)
+ }
+ providerSources := asProviderSource.ProviderSources()
+ for _, source := range providerSources {
+ m.ua.AddCredentialsSource(source)
+ }
+ return next.HandleBuild(ctx, in)
+}
+
+func addCredentialSource(stack *middleware.Stack, options Options) error {
+ ua, err := getOrAddRequestUserAgent(stack)
+ if err != nil {
+ return err
+ }
+
+ mw := setCredentialSourceMiddleware{ua: ua, options: options}
+ return stack.Build.Insert(&mw, "UserAgent", middleware.Before)
+}
+
+func resolveTracerProvider(options *Options) {
+ if options.TracerProvider == nil {
+ options.TracerProvider = &tracing.NopTracerProvider{}
+ }
+}
+
+func resolveMeterProvider(options *Options) {
+ if options.MeterProvider == nil {
+ options.MeterProvider = metrics.NopMeterProvider{}
+ }
+}
+
+func addRecursionDetection(stack *middleware.Stack) error {
+ return stack.Build.Add(&awsmiddleware.RecursionDetection{}, middleware.After)
+}
+
func addRequestIDRetrieverMiddleware(stack *middleware.Stack) error {
- return awsmiddleware.AddRequestIDRetrieverMiddleware(stack)
+ return stack.Deserialize.Insert(&awsmiddleware.RequestIDRetriever{}, "OperationDeserializer", middleware.Before)
+
}
func addResponseErrorMiddleware(stack *middleware.Stack) error {
- return awshttp.AddResponseErrorMiddleware(stack)
+ return stack.Deserialize.Insert(&awshttp.ResponseErrorWrapper{}, "RequestIDRetriever", middleware.Before)
+
}
// HTTPPresignerV4 represents presigner interface used by presign url client
@@ -496,20 +892,67 @@ func withNopHTTPClientAPIOption(o *Options) {
o.HTTPClient = smithyhttp.NopClient{}
}
+type presignContextPolyfillMiddleware struct {
+}
+
+func (*presignContextPolyfillMiddleware) ID() string {
+ return "presignContextPolyfill"
+}
+
+func (m *presignContextPolyfillMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if schemeID == "aws.auth#sigv4" || schemeID == "com.amazonaws.s3#sigv4express" {
+ if sn, ok := smithyhttp.GetSigV4SigningName(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningName(ctx, sn)
+ }
+ if sr, ok := smithyhttp.GetSigV4SigningRegion(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningRegion(ctx, sr)
+ }
+ } else if schemeID == "aws.auth#sigv4a" {
+ if sn, ok := smithyhttp.GetSigV4ASigningName(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningName(ctx, sn)
+ }
+ if sr, ok := smithyhttp.GetSigV4ASigningRegions(&rscheme.SignerProperties); ok {
+ ctx = awsmiddleware.SetSigningRegion(ctx, sr[0])
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
type presignConverter PresignOptions
func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, options Options) (err error) {
- stack.Finalize.Clear()
+ if _, ok := stack.Finalize.Get((*acceptencodingcust.DisableGzip)(nil).ID()); ok {
+ stack.Finalize.Remove((*acceptencodingcust.DisableGzip)(nil).ID())
+ }
+ if _, ok := stack.Finalize.Get((*retry.Attempt)(nil).ID()); ok {
+ stack.Finalize.Remove((*retry.Attempt)(nil).ID())
+ }
+ if _, ok := stack.Finalize.Get((*retry.MetricsHeader)(nil).ID()); ok {
+ stack.Finalize.Remove((*retry.MetricsHeader)(nil).ID())
+ }
stack.Deserialize.Clear()
stack.Build.Remove((*awsmiddleware.ClientRequestID)(nil).ID())
stack.Build.Remove("UserAgent")
+ if err := stack.Finalize.Insert(&presignContextPolyfillMiddleware{}, "Signing", middleware.Before); err != nil {
+ return err
+ }
+
pmw := v4.NewPresignHTTPRequestMiddleware(v4.PresignHTTPRequestMiddlewareOptions{
CredentialsProvider: options.Credentials,
Presigner: c.Presigner,
LogSigning: options.ClientLogMode.IsSigning(),
})
- err = stack.Finalize.Add(pmw, middleware.After)
- if err != nil {
+ if _, err := stack.Finalize.Swap("Signing", pmw); err != nil {
return err
}
if err = smithyhttp.AddNoPayloadDefaultContentTypeRemover(stack); err != nil {
@@ -520,7 +963,7 @@ func (c presignConverter) convertToPresignMiddleware(stack *middleware.Stack, op
if err != nil {
return err
}
- err = presignedurlcust.AddAsIsPresigingMiddleware(stack)
+ err = presignedurlcust.AddAsIsPresigningMiddleware(stack)
if err != nil {
return err
}
@@ -535,3 +978,118 @@ func addRequestResponseLogging(stack *middleware.Stack, o Options) error {
LogResponseWithBody: o.ClientLogMode.IsResponseWithBody(),
}, middleware.After)
}
+
+type disableHTTPSMiddleware struct {
+ DisableHTTPS bool
+}
+
+func (*disableHTTPSMiddleware) ID() string {
+ return "disableHTTPS"
+}
+
+func (m *disableHTTPSMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.DisableHTTPS && !smithyhttp.GetHostnameImmutable(ctx) {
+ req.URL.Scheme = "http"
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addDisableHTTPSMiddleware(stack *middleware.Stack, o Options) error {
+ return stack.Finalize.Insert(&disableHTTPSMiddleware{
+ DisableHTTPS: o.EndpointOptions.DisableHTTPS,
+ }, "ResolveEndpointV2", middleware.After)
+}
+
+type spanInitializeStart struct {
+}
+
+func (*spanInitializeStart) ID() string {
+ return "spanInitializeStart"
+}
+
+func (m *spanInitializeStart) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "Initialize")
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanInitializeEnd struct {
+}
+
+func (*spanInitializeEnd) ID() string {
+ return "spanInitializeEnd"
+}
+
+func (m *spanInitializeEnd) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ middleware.InitializeOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleInitialize(ctx, in)
+}
+
+type spanBuildRequestStart struct {
+}
+
+func (*spanBuildRequestStart) ID() string {
+ return "spanBuildRequestStart"
+}
+
+func (m *spanBuildRequestStart) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ middleware.SerializeOutput, middleware.Metadata, error,
+) {
+ ctx, _ = tracing.StartSpan(ctx, "BuildRequest")
+
+ return next.HandleSerialize(ctx, in)
+}
+
+type spanBuildRequestEnd struct {
+}
+
+func (*spanBuildRequestEnd) ID() string {
+ return "spanBuildRequestEnd"
+}
+
+func (m *spanBuildRequestEnd) HandleBuild(
+ ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler,
+) (
+ middleware.BuildOutput, middleware.Metadata, error,
+) {
+ ctx, span := tracing.PopSpan(ctx)
+ span.End()
+
+ return next.HandleBuild(ctx, in)
+}
+
+func addSpanInitializeStart(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeStart{}, middleware.Before)
+}
+
+func addSpanInitializeEnd(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&spanInitializeEnd{}, middleware.After)
+}
+
+func addSpanBuildRequestStart(stack *middleware.Stack) error {
+ return stack.Serialize.Add(&spanBuildRequestStart{}, middleware.Before)
+}
+
+func addSpanBuildRequestEnd(stack *middleware.Stack) error {
+ return stack.Build.Add(&spanBuildRequestEnd{}, middleware.After)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
index 7d00b6bd7..524e36eb6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRole.go
@@ -4,6 +4,7 @@ package sts
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/sts/types"
@@ -12,87 +13,101 @@ import (
)
// Returns a set of temporary security credentials that you can use to access
-// Amazon Web Services resources that you might not normally have access to. These
-// temporary credentials consist of an access key ID, a secret access key, and a
-// security token. Typically, you use AssumeRole within your account or for
-// cross-account access. For a comparison of AssumeRole with other API operations
-// that produce temporary credentials, see Requesting Temporary Security
-// Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. Permissions The temporary security credentials created by
-// AssumeRole can be used to make API calls to any Amazon Web Services service with
-// the following exception: You cannot call the Amazon Web Services STS
-// GetFederationToken or GetSessionToken API operations. (Optional) You can pass
-// inline or managed session policies
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to use as
-// managed session policies. The plaintext that you use for both inline and managed
-// session policies can't exceed 2,048 characters. Passing policies to this
+// Amazon Web Services resources. These temporary credentials consist of an access
+// key ID, a secret access key, and a security token. Typically, you use AssumeRole
+// within your account or for cross-account access. For a comparison of AssumeRole
+// with other API operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the
+// IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRole can be used to make
+// API calls to any Amazon Web Services service with the following exception: You
+// cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken
+// API operations.
+//
+// (Optional) You can pass inline or managed session policies to this operation.
+// You can pass a single JSON policy document to use as an inline session policy.
+// You can also specify up to 10 managed policy Amazon Resource Names (ARNs) to use
+// as managed session policies. The plaintext that you use for both inline and
+// managed session policies can't exceed 2,048 characters. Passing policies to this
// operation returns new temporary credentials. The resulting session's permissions
// are the intersection of the role's identity-based policy and the session
// policies. You can use the role's temporary credentials in subsequent Amazon Web
// Services API calls to access resources in the account that owns the role. You
// cannot use session policies to grant more permissions than those allowed by the
// identity-based policy of the role that is being assumed. For more information,
-// see Session Policies
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. When you create a role, you create two policies: A role
-// trust policy that specifies who can assume the role and a permissions policy
-// that specifies what can be done with the role. You specify the trusted principal
-// who is allowed to assume the role in the role trust policy. To assume a role
-// from a different account, your Amazon Web Services account must be trusted by
-// the role. The trust relationship is defined in the role's trust policy when the
-// role is created. That trust policy states which accounts are allowed to delegate
-// that access to users in the account. A user who wants to access a role in a
-// different account must also have permissions that are delegated from the user
-// account administrator. The administrator must attach a policy that allows the
-// user to call AssumeRole for the ARN of the role in the other account. To allow a
-// user to assume a role in the same account, you can do either of the
+// see [Session Policies]in the IAM User Guide.
+//
+// When you create a role, you create two policies: a role trust policy that
+// specifies who can assume the role, and a permissions policy that specifies what
+// can be done with the role. You specify the trusted principal that is allowed to
+// assume the role in the role trust policy.
+//
+// To assume a role from a different account, your Amazon Web Services account
+// must be trusted by the role. The trust relationship is defined in the role's
+// trust policy when the role is created. That trust policy states which accounts
+// are allowed to delegate that access to users in the account.
+//
+// A user who wants to access a role in a different account must also have
+// permissions that are delegated from the account administrator. The administrator
+// must attach a policy that allows the user to call AssumeRole for the ARN of the
+// role in the other account.
+//
+// To allow a user to assume a role in the same account, you can do either of the
// following:
//
-// * Attach a policy to the user that allows the user to call
-// AssumeRole (as long as the role's trust policy trusts the account).
+// - Attach a policy to the user that allows the user to call AssumeRole (as long
+// as the role's trust policy trusts the account).
+//
+// - Add the user as a principal directly in the role's trust policy.
+//
+// You can do either because the role’s trust policy acts as an IAM resource-based
+// policy. When a resource-based policy grants access to a principal in the same
+// account, no additional identity-based policy is required. For more information
+// about trust policies and resource-based policies, see [IAM Policies]in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These tags are
+// called session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM
+// User Guide.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide.
+//
+// # Using MFA with AssumeRole
//
-// * Add the
-// user as a principal directly in the role's trust policy.
+// (Optional) You can include multi-factor authentication (MFA) information when
+// you call AssumeRole . This is useful for cross-account scenarios to ensure that
+// the user that assumes the role has been authenticated with an Amazon Web
+// Services MFA device. In that scenario, the trust policy of the role being
+// assumed includes a condition that tests for MFA authentication. If the caller
+// does not include valid MFA information, the request to assume the role is
+// denied. The condition in a trust policy that tests for MFA authentication might
+// look like the following example.
//
-// You can do either
-// because the role’s trust policy acts as an IAM resource-based policy. When a
-// resource-based policy grants access to a principal in the same account, no
-// additional identity-based policy is required. For more information about trust
-// policies and resource-based policies, see IAM Policies
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) in the
-// IAM User Guide. Tags (Optional) You can pass tag key-value pairs to your
-// session. These tags are called session tags. For more information about session
-// tags, see Passing Session Tags in STS
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
-// IAM User Guide. An administrator must grant you the permissions necessary to
-// pass session tags. The administrator can also create granular permissions to
-// allow you to pass only specific session tags. For more information, see
-// Tutorial: Using Tags for Attribute-Based Access Control
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide. You can set the session tags as transitive. Transitive
-// tags persist during role chaining. For more information, see Chaining Roles with
-// Session Tags
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide. Using MFA with AssumeRole (Optional) You can include
-// multi-factor authentication (MFA) information when you call AssumeRole. This is
-// useful for cross-account scenarios to ensure that the user that assumes the role
-// has been authenticated with an Amazon Web Services MFA device. In that scenario,
-// the trust policy of the role being assumed includes a condition that tests for
-// MFA authentication. If the caller does not include valid MFA information, the
-// request to assume the role is denied. The condition in a trust policy that tests
-// for MFA authentication might look like the following example. "Condition":
-// {"Bool": {"aws:MultiFactorAuthPresent": true}} For more information, see
-// Configuring MFA-Protected API Access
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) in the
-// IAM User Guide guide. To use MFA with AssumeRole, you pass values for the
-// SerialNumber and TokenCode parameters. The SerialNumber value identifies the
-// user's hardware or virtual MFA device. The TokenCode is the time-based one-time
-// password (TOTP) that the MFA device produces.
+// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
+//
+// For more information, see [Configuring MFA-Protected API Access] in the IAM User Guide guide.
+//
+// To use MFA with AssumeRole , you pass values for the SerialNumber and TokenCode
+// parameters. The SerialNumber value identifies the user's hardware or virtual
+// MFA device. The TokenCode is the time-based one-time password (TOTP) that the
+// MFA device produces.
+//
+// [Configuring MFA-Protected API Access]: https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [IAM Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
func (c *Client) AssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*Options)) (*AssumeRoleOutput, error) {
if params == nil {
params = &AssumeRoleInput{}
@@ -115,17 +130,27 @@ type AssumeRoleInput struct {
// This member is required.
RoleArn *string
- // An identifier for the assumed role session. Use the role session name to
- // uniquely identify a session when the same role is assumed by different
- // principals or for different reasons. In cross-account scenarios, the role
- // session name is visible to, and can be logged by the account that owns the role.
- // The role session name is also used in the ARN of the assumed role principal.
- // This means that subsequent cross-account API requests that use the temporary
- // security credentials will expose the role session name to the external account
- // in their CloudTrail logs. The regex used to validate this parameter is a string
- // of characters consisting of upper- and lower-case alphanumeric characters with
- // no spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // An identifier for the assumed role session.
+ //
+ // Use the role session name to uniquely identify a session when the same role is
+ // assumed by different principals or for different reasons. In cross-account
+ // scenarios, the role session name is visible to, and can be logged by the account
+ // that owns the role. The role session name is also used in the ARN of the assumed
+ // role principal. This means that subsequent cross-account API requests that use
+ // the temporary security credentials will expose the role session name to the
+ // external account in their CloudTrail logs.
+ //
+ // For security purposes, administrators can view this field in [CloudTrail logs] to help identify
+ // who performed an action in Amazon Web Services. Your administrator might require
+ // that you specify your user name as the session name when you assume the role.
+ // For more information, see [sts:RoleSessionName]sts:RoleSessionName .
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds
+ // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname
//
// This member is required.
RoleSessionName *string
@@ -136,25 +161,27 @@ type AssumeRoleInput struct {
// hours. If you specify a value higher than this setting or the administrator
// setting (whichever is lower), the operation fails. For example, if you specify a
// session duration of 12 hours, but your administrator set the maximum session
- // duration to 6 hours, your operation fails. Role chaining limits your Amazon Web
- // Services CLI or Amazon Web Services API role session to a maximum of one hour.
- // When you use the AssumeRole API operation to assume a role, you can specify the
- // duration of your role session with the DurationSeconds parameter. You can
- // specify a parameter value of up to 43200 seconds (12 hours), depending on the
- // maximum session duration setting for your role. However, if you assume a role
- // using role chaining and provide a DurationSeconds parameter value greater than
- // one hour, the operation fails. To learn how to view the maximum value for your
- // role, see View the Maximum Session Duration Setting for a Role
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide. By default, the value is set to 3600 seconds. The
- // DurationSeconds parameter is separate from the duration of a console session
- // that you might request using the returned credentials. The request to the
- // federation endpoint for a console sign-in token takes a SessionDuration
+ // duration to 6 hours, your operation fails.
+ //
+ // Role chaining limits your Amazon Web Services CLI or Amazon Web Services API
+ // role session to a maximum of one hour. When you use the AssumeRole API
+ // operation to assume a role, you can specify the duration of your role session
+ // with the DurationSeconds parameter. You can specify a parameter value of up to
+ // 43200 seconds (12 hours), depending on the maximum session duration setting for
+ // your role. However, if you assume a role using role chaining and provide a
+ // DurationSeconds parameter value greater than one hour, the operation fails. To
+ // learn how to view the maximum value for your role, see [Update the maximum session duration for a role].
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
+ // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide.
+ //
+ // [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
DurationSeconds *int32
// A unique identifier that might be required when you assume a role in another
@@ -165,156 +192,196 @@ type AssumeRoleInput struct {
// the administrator of the trusting account might send an external ID to the
// administrator of the trusted account. That way, only someone with the ID can
// assume the role, rather than everyone in the account. For more information about
- // the external ID, see How to Use an External ID When Granting Access to Your
- // Amazon Web Services Resources to a Third Party
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@:/-
+ // the external ID, see [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@:/-
+ //
+ // [How to Use an External ID When Granting Access to Your Amazon Web Services Resources to a Third Party]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html
ExternalId *string
// An IAM policy in JSON format that you want to use as an inline session policy.
+ //
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use the
// role's temporary credentials in subsequent Amazon Web Services API calls to
// access resources in the account that owns the role. You cannot use session
// policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The plaintext that you use for both inline and managed
- // session policies can't exceed 2,048 characters. The JSON policy characters can
- // be any ASCII character from the space character to the end of the valid
- // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
- // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web
- // Services conversion compresses the passed session policies and session tags into
- // a packed binary format that has a separate limit. Your request can fail for this
- // limit even if your plaintext meets the other requirements. The PackedPolicySize
- // response element indicates by percentage how close the policies and tags for
- // your request are to the upper size limit.
+ // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // For more information about role session permissions, see [Session policies].
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
// use as managed session policies. The policies must exist in the same account as
- // the role. This parameter is optional. You can provide up to 10 managed policy
- // ARNs. However, the plaintext that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs, see
- // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces
- // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
- // the Amazon Web Services General Reference. An Amazon Web Services conversion
- // compresses the passed session policies and session tags into a packed binary
- // format that has a separate limit. Your request can fail for this limit even if
- // your plaintext meets the other requirements. The PackedPolicySize response
- // element indicates by percentage how close the policies and tags for your request
- // are to the upper size limit. Passing policies to this operation returns new
- // temporary credentials. The resulting session's permissions are the intersection
- // of the role's identity-based policy and the session policies. You can use the
- // role's temporary credentials in subsequent Amazon Web Services API calls to
- // access resources in the account that owns the role. You cannot use session
- // policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see [Session Policies]in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
PolicyArns []types.PolicyDescriptorType
- // The identification number of the MFA device that is associated with the user who
- // is making the AssumeRole call. Specify this value if the trust policy of the
- // role being assumed includes a condition that requires MFA authentication. The
- // value is either the serial number for a hardware device (such as GAHT12345678)
- // or an Amazon Resource Name (ARN) for a virtual device (such as
- // arn:aws:iam::123456789012:mfa/user). The regex used to validate this parameter
- // is a string of characters consisting of upper- and lower-case alphanumeric
- // characters with no spaces. You can also include underscores or any of the
- // following characters: =,.@-
+ // A list of previously acquired trusted context assertions in the format of a
+ // JSON array. The trusted context assertion is signed and encrypted by Amazon Web
+ // Services STS.
+ //
+ // The following is an example of a ProvidedContext value that includes a single
+ // trusted context assertion and the ARN of the context provider from which the
+ // trusted context assertion was generated.
+ //
+ // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
+ ProvidedContexts []types.ProvidedContext
+
+ // The identification number of the MFA device that is associated with the user
+ // who is making the AssumeRole call. Specify this value if the trust policy of
+ // the role being assumed includes a condition that requires MFA authentication.
+ // The value is either the serial number for a hardware device (such as
+ // GAHT12345678 ) or an Amazon Resource Name (ARN) for a virtual device (such as
+ // arn:aws:iam::123456789012:mfa/user ).
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
SerialNumber *string
// The source identity specified by the principal that is calling the AssumeRole
- // operation. You can require users to specify a source identity when they assume a
- // role. You do this by using the sts:SourceIdentity condition key in a role trust
- // policy. You can use source identity information in CloudTrail logs to determine
- // who took actions with a role. You can use the aws:SourceIdentity condition key
- // to further control access to Amazon Web Services resources based on the value of
- // source identity. For more information about using source identity, see Monitor
- // and control actions taken with assumed roles
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-. You cannot use a value that begins with the text aws:. This prefix is
- // reserved for Amazon Web Services internal use.
+ // operation. The source identity value persists across [chained role]sessions.
+ //
+ // You can require users to specify a source identity when they assume a role. You
+ // do this by using the [sts:SourceIdentity]sts:SourceIdentity condition key in a role trust policy.
+ // You can use source identity information in CloudTrail logs to determine who took
+ // actions with a role. You can use the aws:SourceIdentity condition key to
+ // further control access to Amazon Web Services resources based on the value of
+ // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the
+ // IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: +=,.@-. You cannot use a
+ // value that begins with the text aws: . This prefix is reserved for Amazon Web
+ // Services internal use.
+ //
+ // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#iam-term-role-chaining
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ // [sts:SourceIdentity]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_condition-keys.html#condition-keys-sourceidentity
SourceIdentity *string
- // A list of session tags that you want to pass. Each session tag consists of a key
- // name and an associated value. For more information about session tags, see
- // Tagging Amazon Web Services STS Sessions
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
- // IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
- // The plaintext session tag keys can’t exceed 128 characters, and the values can’t
- // exceed 256 characters. For these and additional limits, see IAM and STS
- // Character Limits
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide. An Amazon Web Services conversion compresses the passed
- // session policies and session tags into a packed binary format that has a
- // separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates by
- // percentage how close the policies and tags for your request are to the upper
- // size limit. You can pass a session tag with the same key as a tag that is
- // already attached to the role. When you do, session tags override a role tag with
- // the same key. Tag key–value pairs are not case sensitive, but case is preserved.
- // This means that you cannot have separate Department and department tag keys.
- // Assume that the role has the Department=Marketing tag and you pass the
- // department=engineering session tag. Department and department are not saved as
- // separate tags, and the session tag passed in the request takes precedence over
- // the role tag. Additionally, if you used temporary credentials to perform this
- // operation, the new session inherits any transitive session tags from the calling
- // session. If you pass a session tag with the same key as an inherited tag, the
- // operation fails. To view the inherited tags for a session, see the CloudTrail
- // logs. For more information, see Viewing Session Tags in CloudTrail
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs)
+ // A list of session tags that you want to pass. Each session tag consists of a
+ // key name and an associated value. For more information about session tags, see [Tagging Amazon Web Services STS Sessions]
// in the IAM User Guide.
+ //
+ // This parameter is optional. You can pass up to 50 session tags. The plaintext
+ // session tag keys can’t exceed 128 characters, and the values can’t exceed 256
+ // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // You can pass a session tag with the same key as a tag that is already attached
+ // to the role. When you do, session tags override a role tag with the same key.
+ //
+ // Tag key–value pairs are not case sensitive, but case is preserved. This means
+ // that you cannot have separate Department and department tag keys. Assume that
+ // the role has the Department = Marketing tag and you pass the department =
+ // engineering session tag. Department and department are not saved as separate
+ // tags, and the session tag passed in the request takes precedence over the role
+ // tag.
+ //
+ // Additionally, if you used temporary credentials to perform this operation, the
+ // new session inherits any transitive session tags from the calling session. If
+ // you pass a session tag with the same key as an inherited tag, the operation
+ // fails. To view the inherited tags for a session, see the CloudTrail logs. For
+ // more information, see [Viewing Session Tags in CloudTrail]in the IAM User Guide.
+ //
+ // [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+ // [Viewing Session Tags in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs
Tags []types.Tag
// The value provided by the MFA device, if the trust policy of the role being
// assumed requires MFA. (In other words, if the policy includes a condition that
// tests for MFA). If the role being assumed requires MFA and if the TokenCode
// value is missing or expired, the AssumeRole call returns an "access denied"
- // error. The format for this parameter, as described by its regex pattern, is a
- // sequence of six numeric digits.
+ // error.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
TokenCode *string
- // A list of keys for session tags that you want to set as transitive. If you set a
- // tag key as transitive, the corresponding key and value passes to subsequent
- // sessions in a role chain. For more information, see Chaining Roles with Session
- // Tags
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
- // in the IAM User Guide. This parameter is optional. When you set session tags as
- // transitive, the session policy and session tags packed binary limit is not
- // affected. If you choose not to specify a transitive tag key, then no tags are
- // passed from this session to any subsequent sessions.
+ // A list of keys for session tags that you want to set as transitive. If you set
+ // a tag key as transitive, the corresponding key and value passes to subsequent
+ // sessions in a role chain. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide.
+ //
+ // This parameter is optional. The transitive status of a session tag does not
+ // impact its packed binary size.
+ //
+ // If you choose not to specify a transitive tag key, then no tags are passed from
+ // this session to any subsequent sessions.
+ //
+ // [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
TransitiveTagKeys []string
noSmithyDocumentSerde
}
-// Contains the response to a successful AssumeRole request, including temporary
-// Amazon Web Services credentials that can be used to make Amazon Web Services
-// requests.
+// Contains the response to a successful AssumeRole request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type AssumeRoleOutput struct {
// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
// that you can use to refer to the resulting temporary security credentials. For
// example, you can reference these credentials as a principal in a resource-based
// policy by using the ARN or assumed role ID. The ARN and ID include the
- // RoleSessionName that you specified when you called AssumeRole.
+ // RoleSessionName that you specified when you called AssumeRole .
AssumedRoleUser *types.AssumedRoleUser
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token. The size of the security token
- // that STS API operations return is not fixed. We strongly recommend that you make
- // no assumptions about the maximum size.
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// A percentage value that indicates the packed size of the session policies and
@@ -324,18 +391,21 @@ type AssumeRoleOutput struct {
PackedPolicySize *int32
// The source identity specified by the principal that is calling the AssumeRole
- // operation. You can require users to specify a source identity when they assume a
- // role. You do this by using the sts:SourceIdentity condition key in a role trust
- // policy. You can use source identity information in CloudTrail logs to determine
- // who took actions with a role. You can use the aws:SourceIdentity condition key
- // to further control access to Amazon Web Services resources based on the value of
- // source identity. For more information about using source identity, see Monitor
- // and control actions taken with assumed roles
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // operation.
+ //
+ // You can require users to specify a source identity when they assume a role. You
+ // do this by using the sts:SourceIdentity condition key in a role trust policy.
+ // You can use source identity information in CloudTrail logs to determine who took
+ // actions with a role. You can use the aws:SourceIdentity condition key to
+ // further control access to Amazon Web Services resources based on the value of
+ // source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in the
+ // IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
SourceIdentity *string
// Metadata pertaining to the operation's result.
@@ -345,6 +415,9 @@ type AssumeRoleOutput struct {
}
func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRole{}, middleware.After)
if err != nil {
return err
@@ -353,34 +426,41 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRole"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -389,12 +469,27 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpAssumeRoleValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRole(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -404,6 +499,21 @@ func (c *Client) addOperationAssumeRoleMiddlewares(stack *middleware.Stack, opti
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
@@ -411,7 +521,30 @@ func newServiceMetadataMiddleware_opAssumeRole(region string) *awsmiddleware.Reg
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
- SigningName: "sts",
OperationName: "AssumeRole",
}
}
+
+// PresignAssumeRole is used to generate a presigned HTTP Request which contains
+// presigned URL, signed headers and HTTP method used.
+func (c *PresignClient) PresignAssumeRole(ctx context.Context, params *AssumeRoleInput, optFns ...func(*PresignOptions)) (*v4.PresignedHTTPRequest, error) {
+ if params == nil {
+ params = &AssumeRoleInput{}
+ }
+ options := c.options.copy()
+ for _, fn := range optFns {
+ fn(&options)
+ }
+ clientOptFns := append(options.ClientOptions, withNopHTTPClientAPIOption)
+
+ result, _, err := c.client.invokeOperation(ctx, "AssumeRole", params, clientOptFns,
+ c.client.addOperationAssumeRoleMiddlewares,
+ presignConverter(options).convertToPresignMiddleware,
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*v4.PresignedHTTPRequest)
+ return out, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
index e12315e4c..400f809e3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithSAML.go
@@ -4,6 +4,7 @@ package sts
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/service/sts/types"
"github.com/aws/smithy-go/middleware"
@@ -15,112 +16,132 @@ import (
// mechanism for tying an enterprise identity store or directory to role-based
// Amazon Web Services access without user-specific credentials or configuration.
// For a comparison of AssumeRoleWithSAML with the other API operations that
-// produce temporary credentials, see Requesting Temporary Security Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. The temporary security credentials returned by this
-// operation consist of an access key ID, a secret access key, and a security
-// token. Applications can use these temporary security credentials to sign calls
-// to Amazon Web Services services. Session Duration By default, the temporary
-// security credentials created by AssumeRoleWithSAML last for one hour. However,
-// you can use the optional DurationSeconds parameter to specify the duration of
-// your session. Your role session lasts for the duration that you specify, or
-// until the time specified in the SAML authentication response's
-// SessionNotOnOrAfter value, whichever is shorter. You can provide a
-// DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
-// duration setting for the role. This setting can have a value from 1 hour to 12
-// hours. To learn how to view the maximum value for your role, see View the
-// Maximum Session Duration Setting for a Role
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-// in the IAM User Guide. The maximum session duration limit applies when you use
-// the AssumeRole* API operations or the assume-role* CLI commands. However the
-// limit does not apply when you use those operations to create a console URL. For
-// more information, see Using IAM Roles
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM
-// User Guide. Role chaining
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining)
-// limits your CLI or Amazon Web Services API role session to a maximum of one
+// produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide.
+//
+// The temporary security credentials returned by this operation consist of an
+// access key ID, a secret access key, and a security token. Applications can use
+// these temporary security credentials to sign calls to Amazon Web Services
+// services.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by AssumeRoleWithSAML
+// last for one hour. However, you can use the optional DurationSeconds parameter
+// to specify the duration of your session. Your role session lasts for the
+// duration that you specify, or until the time specified in the SAML
+// authentication response's SessionNotOnOrAfter value, whichever is shorter. You
+// can provide a DurationSeconds value from 900 seconds (15 minutes) up to the
+// maximum session duration setting for the role. This setting can have a value
+// from 1 hour to 12 hours. To learn how to view the maximum value for your role,
+// see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide. The maximum session duration limit applies when you
+// use the AssumeRole* API operations or the assume-role* CLI commands. However
+// the limit does not apply when you use those operations to create a console URL.
+// For more information, see [Using IAM Roles]in the IAM User Guide.
+//
+// [Role chaining]limits your CLI or Amazon Web Services API role session to a maximum of one
// hour. When you use the AssumeRole API operation to assume a role, you can
// specify the duration of your role session with the DurationSeconds parameter.
// You can specify a parameter value of up to 43200 seconds (12 hours), depending
// on the maximum session duration setting for your role. However, if you assume a
// role using role chaining and provide a DurationSeconds parameter value greater
-// than one hour, the operation fails. Permissions The temporary security
-// credentials created by AssumeRoleWithSAML can be used to make API calls to any
-// Amazon Web Services service with the following exception: you cannot call the
-// STS GetFederationToken or GetSessionToken API operations. (Optional) You can
-// pass inline or managed session policies
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to use as
-// managed session policies. The plaintext that you use for both inline and managed
-// session policies can't exceed 2,048 characters. Passing policies to this
-// operation returns new temporary credentials. The resulting session's permissions
-// are the intersection of the role's identity-based policy and the session
-// policies. You can use the role's temporary credentials in subsequent Amazon Web
-// Services API calls to access resources in the account that owns the role. You
-// cannot use session policies to grant more permissions than those allowed by the
+// than one hour, the operation fails.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithSAML can be used to
+// make API calls to any Amazon Web Services service with the following exception:
+// you cannot call the STS GetFederationToken or GetSessionToken API operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
// identity-based policy of the role that is being assumed. For more information,
-// see Session Policies
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. Calling AssumeRoleWithSAML does not require the use of
-// Amazon Web Services security credentials. The identity of the caller is
-// validated by using keys in the metadata document that is uploaded for the SAML
-// provider entity for your identity provider. Calling AssumeRoleWithSAML can
-// result in an entry in your CloudTrail logs. The entry includes the value in the
-// NameID element of the SAML assertion. We recommend that you use a NameIDType
-// that is not associated with any personally identifiable information (PII). For
-// example, you could instead use the persistent identifier
-// (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). Tags (Optional) You can
-// configure your IdP to pass attributes into your SAML assertion as session tags.
-// Each session tag consists of a key name and an associated value. For more
-// information about session tags, see Passing Session Tags in STS
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
-// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag
-// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For
-// these and additional limits, see IAM and STS Character Limits
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
-// in the IAM User Guide. An Amazon Web Services conversion compresses the passed
-// session policies and session tags into a packed binary format that has a
+// see [Session Policies]in the IAM User Guide.
+//
+// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
+// security credentials. The identity of the caller is validated by using keys in
+// the metadata document that is uploaded for the SAML provider entity for your
+// identity provider.
+//
+// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs. The
+// entry includes the value in the NameID element of the SAML assertion. We
+// recommend that you use a NameIDType that is not associated with any personally
+// identifiable information (PII). For example, you could instead use the
+// persistent identifier ( urn:oasis:names:tc:SAML:2.0:nameid-format:persistent ).
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your SAML
+// assertion as session tags. Each session tag consists of a key name and an
+// associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User
+// Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
// separate limit. Your request can fail for this limit even if your plaintext
-// meets the other requirements. The PackedPolicySize response element indicates by
-// percentage how close the policies and tags for your request are to the upper
-// size limit. You can pass a session tag with the same key as a tag that is
-// attached to the role. When you do, session tags override the role's tags with
-// the same key. An administrator must grant you the permissions necessary to pass
-// session tags. The administrator can also create granular permissions to allow
-// you to pass only specific session tags. For more information, see Tutorial:
-// Using Tags for Attribute-Based Access Control
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide. You can set the session tags as transitive. Transitive
-// tags persist during role chaining. For more information, see Chaining Roles with
-// Session Tags
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide. SAML Configuration Before your application can call
-// AssumeRoleWithSAML, you must configure your SAML identity provider (IdP) to
-// issue the claims required by Amazon Web Services. Additionally, you must use
-// Identity and Access Management (IAM) to create a SAML provider entity in your
-// Amazon Web Services account that represents your identity provider. You must
-// also create an IAM role that specifies this SAML provider in its trust policy.
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, session tags override the role's tags with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide.
+//
+// # SAML Configuration
+//
+// Before your application can call AssumeRoleWithSAML , you must configure your
+// SAML identity provider (IdP) to issue the claims required by Amazon Web
+// Services. Additionally, you must use Identity and Access Management (IAM) to
+// create a SAML provider entity in your Amazon Web Services account that
+// represents your identity provider. You must also create an IAM role that
+// specifies this SAML provider in its trust policy.
+//
// For more information, see the following resources:
//
-// * About SAML 2.0-based
-// Federation
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
-// in the IAM User Guide.
+// [About SAML 2.0-based Federation]
+// - in the IAM User Guide.
//
-// * Creating SAML Identity Providers
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
-// in the IAM User Guide.
+// [Creating SAML Identity Providers]
+// - in the IAM User Guide.
//
-// * Configuring a Relying Party and Claims
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
-// in the IAM User Guide.
+// [Configuring a Relying Party and Claims]
+// - in the IAM User Guide.
//
-// * Creating a Role for SAML 2.0 Federation
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
-// in the IAM User Guide.
+// [Creating a Role for SAML 2.0 Federation]
+// - in the IAM User Guide.
+//
+// [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+// [Creating a Role for SAML 2.0 Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [Creating SAML Identity Providers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Configuring a Relying Party and Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html
+// [Role chaining]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [About SAML 2.0-based Federation]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
func (c *Client) AssumeRoleWithSAML(ctx context.Context, params *AssumeRoleWithSAMLInput, optFns ...func(*Options)) (*AssumeRoleWithSAMLOutput, error) {
if params == nil {
params = &AssumeRoleWithSAMLInput{}
@@ -149,10 +170,11 @@ type AssumeRoleWithSAMLInput struct {
// This member is required.
RoleArn *string
- // The base64 encoded SAML authentication response provided by the IdP. For more
- // information, see Configuring a Relying Party and Adding Claims
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
- // in the IAM User Guide.
+ // The base64 encoded SAML authentication response provided by the IdP.
+ //
+ // For more information, see [Configuring a Relying Party and Adding Claims] in the IAM User Guide.
+ //
+ // [Configuring a Relying Party and Adding Claims]: https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html
//
// This member is required.
SAMLAssertion *string
@@ -166,85 +188,98 @@ type AssumeRoleWithSAMLInput struct {
// than this setting, the operation fails. For example, if you specify a session
// duration of 12 hours, but your administrator set the maximum session duration to
// 6 hours, your operation fails. To learn how to view the maximum value for your
- // role, see View the Maximum Session Duration Setting for a Role
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide. By default, the value is set to 3600 seconds. The
- // DurationSeconds parameter is separate from the duration of a console session
- // that you might request using the returned credentials. The request to the
- // federation endpoint for a console sign-in token takes a SessionDuration
+ // role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
+ // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide.
+ //
+ // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
DurationSeconds *int32
// An IAM policy in JSON format that you want to use as an inline session policy.
+ //
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use the
// role's temporary credentials in subsequent Amazon Web Services API calls to
// access resources in the account that owns the role. You cannot use session
// policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The plaintext that you use for both inline and managed
- // session policies can't exceed 2,048 characters. The JSON policy characters can
- // be any ASCII character from the space character to the end of the valid
- // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
- // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web
- // Services conversion compresses the passed session policies and session tags into
- // a packed binary format that has a separate limit. Your request can fail for this
- // limit even if your plaintext meets the other requirements. The PackedPolicySize
- // response element indicates by percentage how close the policies and tags for
- // your request are to the upper size limit.
+ // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // For more information about role session permissions, see [Session policies].
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
// use as managed session policies. The policies must exist in the same account as
- // the role. This parameter is optional. You can provide up to 10 managed policy
- // ARNs. However, the plaintext that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs, see
- // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces
- // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
- // the Amazon Web Services General Reference. An Amazon Web Services conversion
- // compresses the passed session policies and session tags into a packed binary
- // format that has a separate limit. Your request can fail for this limit even if
- // your plaintext meets the other requirements. The PackedPolicySize response
- // element indicates by percentage how close the policies and tags for your request
- // are to the upper size limit. Passing policies to this operation returns new
- // temporary credentials. The resulting session's permissions are the intersection
- // of the role's identity-based policy and the session policies. You can use the
- // role's temporary credentials in subsequent Amazon Web Services API calls to
- // access resources in the account that owns the role. You cannot use session
- // policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see [Session Policies]in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
PolicyArns []types.PolicyDescriptorType
noSmithyDocumentSerde
}
-// Contains the response to a successful AssumeRoleWithSAML request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon Web
-// Services requests.
+// Contains the response to a successful AssumeRoleWithSAML request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type AssumeRoleWithSAMLOutput struct {
// The identifiers for the temporary security credentials that the operation
// returns.
AssumedRoleUser *types.AssumedRoleUser
- // The value of the Recipient attribute of the SubjectConfirmationData element of
+ // The value of the Recipient attribute of the SubjectConfirmationData element of
// the SAML assertion.
Audience *string
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token. The size of the security token
- // that STS API operations return is not fixed. We strongly recommend that you make
- // no assumptions about the maximum size.
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// The value of the Issuer element of the SAML assertion.
@@ -252,18 +287,18 @@ type AssumeRoleWithSAMLOutput struct {
// A hash value based on the concatenation of the following:
//
- // * The Issuer response
- // value.
+ // - The Issuer response value.
+ //
+ // - The Amazon Web Services account ID.
+ //
+ // - The friendly name (the last part of the ARN) of the SAML provider in IAM.
//
- // * The Amazon Web Services account ID.
+ // The combination of NameQualifier and Subject can be used to uniquely identify a
+ // user.
//
- // * The friendly name (the last
- // part of the ARN) of the SAML provider in IAM.
+ // The following pseudocode shows how the hash value is calculated:
//
- // The combination of NameQualifier
- // and Subject can be used to uniquely identify a federated user. The following
- // pseudocode shows how the hash value is calculated: BASE64 ( SHA1 (
- // "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )
+ // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" ) )
NameQualifier *string
// A percentage value that indicates the packed size of the session policies and
@@ -272,35 +307,39 @@ type AssumeRoleWithSAMLOutput struct {
// allowed space.
PackedPolicySize *int32
- // The value in the SourceIdentity attribute in the SAML assertion. You can require
- // users to set a source identity value when they assume a role. You do this by
- // using the sts:SourceIdentity condition key in a role trust policy. That way,
- // actions that are taken with the role are associated with that user. After the
- // source identity is set, the value cannot be changed. It is present in the
- // request for all actions that are taken by the role and persists across chained
- // role
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
- // sessions. You can configure your SAML identity provider to use an attribute
- // associated with your users, like user name or email, as the source identity when
- // calling AssumeRoleWithSAML. You do this by adding an attribute to the SAML
- // assertion. For more information about using source identity, see Monitor and
- // control actions taken with assumed roles
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // The value in the SourceIdentity attribute in the SAML assertion. The source
+ // identity value persists across [chained role]sessions.
+ //
+ // You can require users to set a source identity value when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. That way, actions that are taken with the role are associated with that
+ // user. After the source identity is set, the value cannot be changed. It is
+ // present in the request for all actions that are taken by the role and persists
+ // across [chained role]sessions. You can configure your SAML identity provider to use an
+ // attribute associated with your users, like user name or email, as the source
+ // identity when calling AssumeRoleWithSAML . You do this by adding an attribute to
+ // the SAML assertion. For more information about using source identity, see [Monitor and control actions taken with assumed roles]in
+ // the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
SourceIdentity *string
// The value of the NameID element in the Subject element of the SAML assertion.
Subject *string
- // The format of the name ID, as defined by the Format attribute in the NameID
+ // The format of the name ID, as defined by the Format attribute in the NameID
// element of the SAML assertion. Typical examples of the format are transient or
- // persistent. If the format includes the prefix
- // urn:oasis:names:tc:SAML:2.0:nameid-format, that prefix is removed. For example,
- // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient. If
- // the format includes any other prefix, the format is returned with no
+ // persistent .
+ //
+ // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format ,
+ // that prefix is removed. For example,
+ // urn:oasis:names:tc:SAML:2.0:nameid-format:transient is returned as transient .
+ // If the format includes any other prefix, the format is returned with no
// modifications.
SubjectType *string
@@ -311,6 +350,9 @@ type AssumeRoleWithSAMLOutput struct {
}
func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithSAML{}, middleware.After)
if err != nil {
return err
@@ -319,28 +361,38 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithSAML"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -349,12 +401,27 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpAssumeRoleWithSAMLValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithSAML(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -364,6 +431,21 @@ func (c *Client) addOperationAssumeRoleWithSAMLMiddlewares(stack *middleware.Sta
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
@@ -371,7 +453,6 @@ func newServiceMetadataMiddleware_opAssumeRoleWithSAML(region string) *awsmiddle
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
- SigningName: "sts",
OperationName: "AssumeRoleWithSAML",
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
index 2e8b51c98..e5708cbd1 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoleWithWebIdentity.go
@@ -4,6 +4,7 @@ package sts
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/service/sts/types"
"github.com/aws/smithy-go/middleware"
@@ -13,132 +14,132 @@ import (
// Returns a set of temporary security credentials for users who have been
// authenticated in a mobile or web application with a web identity provider.
// Example providers include the OAuth 2.0 providers Login with Amazon and
-// Facebook, or any OpenID Connect-compatible identity provider such as Google or
-// Amazon Cognito federated identities
-// (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html).
+// Facebook, or any OpenID Connect-compatible identity provider such as Google or [Amazon Cognito federated identities].
+//
// For mobile applications, we recommend that you use Amazon Cognito. You can use
-// Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide
-// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android
-// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify a
-// user. You can also supply the user with a consistent identity throughout the
-// lifetime of an application. To learn more about Amazon Cognito, see Amazon
-// Cognito Overview
-// (https://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840)
-// in Amazon Web Services SDK for Android Developer Guide and Amazon Cognito
-// Overview
-// (https://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664)
-// in the Amazon Web Services SDK for iOS Developer Guide. Calling
-// AssumeRoleWithWebIdentity does not require the use of Amazon Web Services
-// security credentials. Therefore, you can distribute an application (for example,
-// on mobile devices) that requests temporary security credentials without
+// Amazon Cognito with the [Amazon Web Services SDK for iOS Developer Guide]and the [Amazon Web Services SDK for Android Developer Guide] to uniquely identify a user. You can also
+// supply the user with a consistent identity throughout the lifetime of an
+// application.
+//
+// To learn more about Amazon Cognito, see [Amazon Cognito identity pools] in Amazon Cognito Developer Guide.
+//
+// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
+// Services security credentials. Therefore, you can distribute an application (for
+// example, on mobile devices) that requests temporary security credentials without
// including long-term Amazon Web Services credentials in the application. You also
// don't need to deploy server-based proxy services that use long-term Amazon Web
// Services credentials. Instead, the identity of the caller is validated by using
// a token from the web identity provider. For a comparison of
// AssumeRoleWithWebIdentity with the other API operations that produce temporary
-// credentials, see Requesting Temporary Security Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. The temporary security credentials returned by this API
-// consist of an access key ID, a secret access key, and a security token.
-// Applications can use these temporary security credentials to sign calls to
-// Amazon Web Services service API operations. Session Duration By default, the
-// temporary security credentials created by AssumeRoleWithWebIdentity last for one
-// hour. However, you can use the optional DurationSeconds parameter to specify the
-// duration of your session. You can provide a value from 900 seconds (15 minutes)
-// up to the maximum session duration setting for the role. This setting can have a
-// value from 1 hour to 12 hours. To learn how to view the maximum value for your
-// role, see View the Maximum Session Duration Setting for a Role
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-// in the IAM User Guide. The maximum session duration limit applies when you use
-// the AssumeRole* API operations or the assume-role* CLI commands. However the
-// limit does not apply when you use those operations to create a console URL. For
-// more information, see Using IAM Roles
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the IAM
-// User Guide. Permissions The temporary security credentials created by
-// AssumeRoleWithWebIdentity can be used to make API calls to any Amazon Web
-// Services service with the following exception: you cannot call the STS
-// GetFederationToken or GetSessionToken API operations. (Optional) You can pass
-// inline or managed session policies
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to use as
-// managed session policies. The plaintext that you use for both inline and managed
-// session policies can't exceed 2,048 characters. Passing policies to this
-// operation returns new temporary credentials. The resulting session's permissions
-// are the intersection of the role's identity-based policy and the session
-// policies. You can use the role's temporary credentials in subsequent Amazon Web
-// Services API calls to access resources in the account that owns the role. You
-// cannot use session policies to grant more permissions than those allowed by the
+// credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide.
+//
+// The temporary security credentials returned by this API consist of an access
+// key ID, a secret access key, and a security token. Applications can use these
+// temporary security credentials to sign calls to Amazon Web Services service API
+// operations.
+//
+// # Session Duration
+//
+// By default, the temporary security credentials created by
+// AssumeRoleWithWebIdentity last for one hour. However, you can use the optional
+// DurationSeconds parameter to specify the duration of your session. You can
+// provide a value from 900 seconds (15 minutes) up to the maximum session duration
+// setting for the role. This setting can have a value from 1 hour to 12 hours. To
+// learn how to view the maximum value for your role, see [Update the maximum session duration for a role]in the IAM User Guide.
+// The maximum session duration limit applies when you use the AssumeRole* API
+// operations or the assume-role* CLI commands. However the limit does not apply
+// when you use those operations to create a console URL. For more information, see
+// [Using IAM Roles]in the IAM User Guide.
+//
+// # Permissions
+//
+// The temporary security credentials created by AssumeRoleWithWebIdentity can be
+// used to make API calls to any Amazon Web Services service with the following
+// exception: you cannot call the STS GetFederationToken or GetSessionToken API
+// operations.
+//
+// (Optional) You can pass inline or managed [session policies] to this operation. You can pass a
+// single JSON policy document to use as an inline session policy. You can also
+// specify up to 10 managed policy Amazon Resource Names (ARNs) to use as managed
+// session policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters. Passing policies to this operation
+// returns new temporary credentials. The resulting session's permissions are the
+// intersection of the role's identity-based policy and the session policies. You
+// can use the role's temporary credentials in subsequent Amazon Web Services API
+// calls to access resources in the account that owns the role. You cannot use
+// session policies to grant more permissions than those allowed by the
// identity-based policy of the role that is being assumed. For more information,
-// see Session Policies
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. Tags (Optional) You can configure your IdP to pass
-// attributes into your web identity token as session tags. Each session tag
-// consists of a key name and an associated value. For more information about
-// session tags, see Passing Session Tags in STS
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
-// IAM User Guide. You can pass up to 50 session tags. The plaintext session tag
-// keys can’t exceed 128 characters and the values can’t exceed 256 characters. For
-// these and additional limits, see IAM and STS Character Limits
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
-// in the IAM User Guide. An Amazon Web Services conversion compresses the passed
-// session policies and session tags into a packed binary format that has a
+// see [Session Policies]in the IAM User Guide.
+//
+// # Tags
+//
+// (Optional) You can configure your IdP to pass attributes into your web identity
+// token as session tags. Each session tag consists of a key name and an associated
+// value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User Guide.
+//
+// You can pass up to 50 session tags. The plaintext session tag keys can’t exceed
+// 128 characters and the values can’t exceed 256 characters. For these and
+// additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+//
+// An Amazon Web Services conversion compresses the passed inline session policy,
+// managed policy ARNs, and session tags into a packed binary format that has a
// separate limit. Your request can fail for this limit even if your plaintext
-// meets the other requirements. The PackedPolicySize response element indicates by
-// percentage how close the policies and tags for your request are to the upper
-// size limit. You can pass a session tag with the same key as a tag that is
-// attached to the role. When you do, the session tag overrides the role tag with
-// the same key. An administrator must grant you the permissions necessary to pass
-// session tags. The administrator can also create granular permissions to allow
-// you to pass only specific session tags. For more information, see Tutorial:
-// Using Tags for Attribute-Based Access Control
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide. You can set the session tags as transitive. Transitive
-// tags persist during role chaining. For more information, see Chaining Roles with
-// Session Tags
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide. Identities Before your application can call
-// AssumeRoleWithWebIdentity, you must have an identity token from a supported
-// identity provider and create a role that the application can assume. The role
-// that your application assumes must trust the identity provider that is
-// associated with the identity token. In other words, the identity provider must
-// be specified in the role's trust policy. Calling AssumeRoleWithWebIdentity can
-// result in an entry in your CloudTrail logs. The entry includes the Subject
-// (http://openid.net/specs/openid-connect-core-1_0.html#Claims) of the provided
-// web identity token. We recommend that you avoid using any personally
-// identifiable information (PII) in this field. For example, you could instead use
-// a GUID or a pairwise identifier, as suggested in the OIDC specification
-// (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). For more
-// information about how to use web identity federation and the
-// AssumeRoleWithWebIdentity API, see the following resources:
+// meets the other requirements. The PackedPolicySize response element indicates
+// by percentage how close the policies and tags for your request are to the upper
+// size limit.
+//
+// You can pass a session tag with the same key as a tag that is attached to the
+// role. When you do, the session tag overrides the role tag with the same key.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// You can set the session tags as transitive. Transitive tags persist during role
+// chaining. For more information, see [Chaining Roles with Session Tags]in the IAM User Guide.
+//
+// # Identities
+//
+// Before your application can call AssumeRoleWithWebIdentity , you must have an
+// identity token from a supported identity provider and create a role that the
+// application can assume. The role that your application assumes must trust the
+// identity provider that is associated with the identity token. In other words,
+// the identity provider must be specified in the role's trust policy.
+//
+// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail
+// logs. The entry includes the [Subject]of the provided web identity token. We recommend
+// that you avoid using any personally identifiable information (PII) in this
+// field. For example, you could instead use a GUID or a pairwise identifier, as [suggested in the OIDC specification].
//
-// * Using Web
-// Identity Federation API Operations for Mobile Apps
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
-// and Federation Through a Web-based Identity Provider
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+// For more information about how to use OIDC federation and the
+// AssumeRoleWithWebIdentity API, see the following resources:
//
-// *
-// Web Identity Federation Playground
-// (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
-// Walk through the process of authenticating through Login with Amazon, Facebook,
-// or Google, getting temporary security credentials, and then using those
-// credentials to make a request to Amazon Web Services.
+// [Using Web Identity Federation API Operations for Mobile Apps]
+// - and [Federation Through a Web-based Identity Provider].
//
-// * Amazon Web Services SDK
-// for iOS Developer Guide (http://aws.amazon.com/sdkforios/) and Amazon Web
-// Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
-// These toolkits contain sample apps that show how to invoke the identity
-// providers. The toolkits then show how to use the information from these
-// providers to get and use temporary security credentials.
+// [Amazon Web Services SDK for iOS Developer Guide]
+// - and [Amazon Web Services SDK for Android Developer Guide]. These toolkits contain sample apps that show how to invoke the
+// identity providers. The toolkits then show how to use the information from these
+// providers to get and use temporary security credentials.
//
-// * Web Identity
-// Federation with Mobile Applications
-// (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
-// This article discusses web identity federation and shows an example of how to
-// use web identity federation to get access to content in Amazon S3.
+// [Amazon Web Services SDK for iOS Developer Guide]: http://aws.amazon.com/sdkforios/
+// [Amazon Web Services SDK for Android Developer Guide]: http://aws.amazon.com/sdkforandroid/
+// [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
+// [session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+// [Subject]: http://openid.net/specs/openid-connect-core-1_0.html#Claims
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
+// [Amazon Cognito identity pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [Using IAM Roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito federated identities]: https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [Chaining Roles with Session Tags]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining
+// [Update the maximum session duration for a role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_update-role-settings.html#id_roles_update-session-duration
+// [Using Web Identity Federation API Operations for Mobile Apps]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html
+// [suggested in the OIDC specification]: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
func (c *Client) AssumeRoleWithWebIdentity(ctx context.Context, params *AssumeRoleWithWebIdentityInput, optFns ...func(*Options)) (*AssumeRoleWithWebIdentityOutput, error) {
if params == nil {
params = &AssumeRoleWithWebIdentityInput{}
@@ -158,6 +159,17 @@ type AssumeRoleWithWebIdentityInput struct {
// The Amazon Resource Name (ARN) of the role that the caller is assuming.
//
+ // Additional considerations apply to Amazon Cognito identity pools that assume [cross-account IAM roles].
+ // The trust policies of these roles must accept the cognito-identity.amazonaws.com
+ // service principal and must contain the cognito-identity.amazonaws.com:aud
+ // condition key to restrict role assumption to users from your intended identity
+ // pools. A policy that trusts Amazon Cognito identity pools without this condition
+ // creates a risk that a user from an unintended identity pool can assume the role.
+ // For more information, see [Trust policies for IAM roles in Basic (Classic) authentication]in the Amazon Cognito Developer Guide.
+ //
+ // [cross-account IAM roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies-cross-account-resource-access.html
+ // [Trust policies for IAM roles in Basic (Classic) authentication]: https://docs.aws.amazon.com/cognito/latest/developerguide/iam-roles.html#trust-policies
+ //
// This member is required.
RoleArn *string
@@ -165,10 +177,19 @@ type AssumeRoleWithWebIdentityInput struct {
// identifier that is associated with the user who is using your application. That
// way, the temporary security credentials that your application will use are
// associated with that user. This session name is included as part of the ARN and
- // assumed role ID in the AssumedRoleUser response element. The regex used to
- // validate this parameter is a string of characters consisting of upper- and
- // lower-case alphanumeric characters with no spaces. You can also include
- // underscores or any of the following characters: =,.@-
+ // assumed role ID in the AssumedRoleUser response element.
+ //
+ // For security purposes, administrators can view this field in [CloudTrail logs] to help identify
+ // who performed an action in Amazon Web Services. Your administrator might require
+ // that you specify your user name as the session name when you assume the role.
+ // For more information, see [sts:RoleSessionName]sts:RoleSessionName .
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html#cloudtrail-integration_signin-tempcreds
+ // [sts:RoleSessionName]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_iam-condition-keys.html#ck_rolesessionname
//
// This member is required.
RoleSessionName *string
@@ -176,7 +197,10 @@ type AssumeRoleWithWebIdentityInput struct {
// The OAuth 2.0 access token or OpenID Connect ID token that is provided by the
// identity provider. Your application must get this token by authenticating the
// user who is using your application with a web identity provider before the
- // application makes an AssumeRoleWithWebIdentity call.
+ // application makes an AssumeRoleWithWebIdentity call. Timestamps in the token
+ // must be formatted as either an integer or a long integer. Tokens must be signed
+ // using either RSA keys (RS256, RS384, or RS512) or ECDSA keys (ES256, ES384, or
+ // ES512).
//
// This member is required.
WebIdentityToken *string
@@ -187,85 +211,100 @@ type AssumeRoleWithWebIdentityInput struct {
// higher than this setting, the operation fails. For example, if you specify a
// session duration of 12 hours, but your administrator set the maximum session
// duration to 6 hours, your operation fails. To learn how to view the maximum
- // value for your role, see View the Maximum Session Duration Setting for a Role
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide. By default, the value is set to 3600 seconds. The
- // DurationSeconds parameter is separate from the duration of a console session
- // that you might request using the returned credentials. The request to the
- // federation endpoint for a console sign-in token takes a SessionDuration
+ // value for your role, see [View the Maximum Session Duration Setting for a Role]in the IAM User Guide.
+ //
+ // By default, the value is set to 3600 seconds.
+ //
+ // The DurationSeconds parameter is separate from the duration of a console
+ // session that you might request using the returned credentials. The request to
+ // the federation endpoint for a console sign-in token takes a SessionDuration
// parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
+ // information, see [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]in the IAM User Guide.
+ //
+ // [View the Maximum Session Duration Setting for a Role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session
+ // [Creating a URL that Enables Federated Users to Access the Amazon Web Services Management Console]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html
DurationSeconds *int32
// An IAM policy in JSON format that you want to use as an inline session policy.
+ //
// This parameter is optional. Passing policies to this operation returns new
// temporary credentials. The resulting session's permissions are the intersection
// of the role's identity-based policy and the session policies. You can use the
// role's temporary credentials in subsequent Amazon Web Services API calls to
// access resources in the account that owns the role. You cannot use session
// policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The plaintext that you use for both inline and managed
- // session policies can't exceed 2,048 characters. The JSON policy characters can
- // be any ASCII character from the space character to the end of the valid
- // character list (\u0020 through \u00FF). It can also include the tab (\u0009),
- // linefeed (\u000A), and carriage return (\u000D) characters. An Amazon Web
- // Services conversion compresses the passed session policies and session tags into
- // a packed binary format that has a separate limit. Your request can fail for this
- // limit even if your plaintext meets the other requirements. The PackedPolicySize
- // response element indicates by percentage how close the policies and tags for
- // your request are to the upper size limit.
+ // policy of the role that is being assumed. For more information, see [Session Policies]in the IAM
+ // User Guide.
+ //
+ // The plaintext that you use for both inline and managed session policies can't
+ // exceed 2,048 characters. The JSON policy characters can be any ASCII character
+ // from the space character to the end of the valid character list (\u0020 through
+ // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
+ // return (\u000D) characters.
+ //
+ // For more information about role session permissions, see [Session policies].
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
// use as managed session policies. The policies must exist in the same account as
- // the role. This parameter is optional. You can provide up to 10 managed policy
- // ARNs. However, the plaintext that you use for both inline and managed session
- // policies can't exceed 2,048 characters. For more information about ARNs, see
- // Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces
- // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
- // the Amazon Web Services General Reference. An Amazon Web Services conversion
- // compresses the passed session policies and session tags into a packed binary
- // format that has a separate limit. Your request can fail for this limit even if
- // your plaintext meets the other requirements. The PackedPolicySize response
- // element indicates by percentage how close the policies and tags for your request
- // are to the upper size limit. Passing policies to this operation returns new
- // temporary credentials. The resulting session's permissions are the intersection
- // of the role's identity-based policy and the session policies. You can use the
- // role's temporary credentials in subsequent Amazon Web Services API calls to
- // access resources in the account that owns the role. You cannot use session
- // policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
+ // the role.
+ //
+ // This parameter is optional. You can provide up to 10 managed policy ARNs.
+ // However, the plaintext that you use for both inline and managed session policies
+ // can't exceed 2,048 characters. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the
+ // Amazon Web Services General Reference.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // Passing policies to this operation returns new temporary credentials. The
+ // resulting session's permissions are the intersection of the role's
+ // identity-based policy and the session policies. You can use the role's temporary
+ // credentials in subsequent Amazon Web Services API calls to access resources in
+ // the account that owns the role. You cannot use session policies to grant more
+ // permissions than those allowed by the identity-based policy of the role that is
+ // being assumed. For more information, see [Session Policies]in the IAM User Guide.
+ //
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
PolicyArns []types.PolicyDescriptorType
// The fully qualified host component of the domain name of the OAuth 2.0 identity
// provider. Do not specify this value for an OpenID Connect identity provider.
+ //
// Currently www.amazon.com and graph.facebook.com are the only supported identity
// providers for OAuth 2.0 access tokens. Do not include URL schemes and port
- // numbers. Do not specify this value for OpenID Connect ID tokens.
+ // numbers.
+ //
+ // Do not specify this value for OpenID Connect ID tokens.
ProviderId *string
noSmithyDocumentSerde
}
-// Contains the response to a successful AssumeRoleWithWebIdentity request,
-// including temporary Amazon Web Services credentials that can be used to make
-// Amazon Web Services requests.
+// Contains the response to a successful AssumeRoleWithWebIdentity request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type AssumeRoleWithWebIdentityOutput struct {
// The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
// that you can use to refer to the resulting temporary security credentials. For
// example, you can reference these credentials as a principal in a resource-based
// policy by using the ARN or assumed role ID. The ARN and ID include the
- // RoleSessionName that you specified when you called AssumeRole.
+ // RoleSessionName that you specified when you called AssumeRole .
AssumedRoleUser *types.AssumedRoleUser
// The intended audience (also known as client ID) of the web identity token. This
@@ -274,9 +313,10 @@ type AssumeRoleWithWebIdentityOutput struct {
Audience *string
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security token. The size of the security token that STS API
- // operations return is not fixed. We strongly recommend that you make no
- // assumptions about the maximum size.
+ // access key, and a security token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// A percentage value that indicates the packed size of the session policies and
@@ -285,33 +325,34 @@ type AssumeRoleWithWebIdentityOutput struct {
// allowed space.
PackedPolicySize *int32
- // The issuing authority of the web identity token presented. For OpenID Connect ID
- // tokens, this contains the value of the iss field. For OAuth 2.0 access tokens,
- // this contains the value of the ProviderId parameter that was passed in the
- // AssumeRoleWithWebIdentity request.
+ // The issuing authority of the web identity token presented. For OpenID Connect
+ // ID tokens, this contains the value of the iss field. For OAuth 2.0 access
+ // tokens, this contains the value of the ProviderId parameter that was passed in
+ // the AssumeRoleWithWebIdentity request.
Provider *string
// The value of the source identity that is returned in the JSON web token (JWT)
- // from the identity provider. You can require users to set a source identity value
- // when they assume a role. You do this by using the sts:SourceIdentity condition
- // key in a role trust policy. That way, actions that are taken with the role are
- // associated with that user. After the source identity is set, the value cannot be
- // changed. It is present in the request for all actions that are taken by the role
- // and persists across chained role
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
- // sessions. You can configure your identity provider to use an attribute
+ // from the identity provider.
+ //
+ // You can require users to set a source identity value when they assume a role.
+ // You do this by using the sts:SourceIdentity condition key in a role trust
+ // policy. That way, actions that are taken with the role are associated with that
+ // user. After the source identity is set, the value cannot be changed. It is
+ // present in the request for all actions that are taken by the role and persists
+ // across [chained role]sessions. You can configure your identity provider to use an attribute
// associated with your users, like user name or email, as the source identity when
- // calling AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web
- // token. To learn more about OIDC tokens and claims, see Using Tokens with User
- // Pools
- // (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html)
- // in the Amazon Cognito Developer Guide. For more information about using source
- // identity, see Monitor and control actions taken with assumed roles
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide. The regex used to validate this parameter is a string of
- // characters consisting of upper- and lower-case alphanumeric characters with no
- // spaces. You can also include underscores or any of the following characters:
- // =,.@-
+ // calling AssumeRoleWithWebIdentity . You do this by adding a claim to the JSON
+ // web token. To learn more about OIDC tokens and claims, see [Using Tokens with User Pools]in the Amazon
+ // Cognito Developer Guide. For more information about using source identity, see [Monitor and control actions taken with assumed roles]
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [chained role]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html#id_roles_terms-and-concepts
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ // [Using Tokens with User Pools]: https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html
SourceIdentity *string
// The unique user identifier that is returned by the identity provider. This
@@ -329,6 +370,9 @@ type AssumeRoleWithWebIdentityOutput struct {
}
func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoleWithWebIdentity{}, middleware.After)
if err != nil {
return err
@@ -337,28 +381,38 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoleWithWebIdentity"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -367,12 +421,27 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpAssumeRoleWithWebIdentityValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -382,6 +451,21 @@ func (c *Client) addOperationAssumeRoleWithWebIdentityMiddlewares(stack *middlew
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
@@ -389,7 +473,6 @@ func newServiceMetadataMiddleware_opAssumeRoleWithWebIdentity(region string) *aw
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
- SigningName: "sts",
OperationName: "AssumeRoleWithWebIdentity",
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go
new file mode 100644
index 000000000..a0f7a4671
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_AssumeRoot.go
@@ -0,0 +1,223 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ "github.com/aws/aws-sdk-go-v2/service/sts/types"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// Returns a set of short term credentials you can use to perform privileged tasks
+// on a member account in your organization.
+//
+// Before you can launch a privileged session, you must have centralized root
+// access in your organization. For steps to enable this feature, see [Centralize root access for member accounts]in the IAM
+// User Guide.
+//
+// The STS global endpoint is not supported for AssumeRoot. You must send this
+// request to a Regional STS endpoint. For more information, see [Endpoints].
+//
+// You can track AssumeRoot in CloudTrail logs to determine what actions were
+// performed in a session. For more information, see [Track privileged tasks in CloudTrail]in the IAM User Guide.
+//
+// [Endpoints]: https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html#sts-endpoints
+// [Track privileged tasks in CloudTrail]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-track-privileged-tasks.html
+// [Centralize root access for member accounts]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-enable-root-access.html
+func (c *Client) AssumeRoot(ctx context.Context, params *AssumeRootInput, optFns ...func(*Options)) (*AssumeRootOutput, error) {
+ if params == nil {
+ params = &AssumeRootInput{}
+ }
+
+ result, metadata, err := c.invokeOperation(ctx, "AssumeRoot", params, optFns, c.addOperationAssumeRootMiddlewares)
+ if err != nil {
+ return nil, err
+ }
+
+ out := result.(*AssumeRootOutput)
+ out.ResultMetadata = metadata
+ return out, nil
+}
+
+type AssumeRootInput struct {
+
+ // The member account principal ARN or account ID.
+ //
+ // This member is required.
+ TargetPrincipal *string
+
+ // The identity based policy that scopes the session to the privileged tasks that
+ // can be performed. You can use one of following Amazon Web Services managed
+ // policies to scope root session actions.
+ //
+ // [IAMAuditRootUserCredentials]
+ //
+ // [IAMCreateRootUserPassword]
+ //
+ // [IAMDeleteRootUserCredentials]
+ //
+ // [S3UnlockBucketPolicy]
+ //
+ // [SQSUnlockQueuePolicy]
+ //
+ // [IAMDeleteRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMDeleteRootUserCredentials
+ // [IAMCreateRootUserPassword]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMCreateRootUserPassword
+ // [IAMAuditRootUserCredentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-IAMAuditRootUserCredentials
+ // [S3UnlockBucketPolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-S3UnlockBucketPolicy
+ // [SQSUnlockQueuePolicy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/security-iam-awsmanpol.html#security-iam-awsmanpol-SQSUnlockQueuePolicy
+ //
+ // This member is required.
+ TaskPolicyArn *types.PolicyDescriptorType
+
+ // The duration, in seconds, of the privileged session. The value can range from 0
+ // seconds up to the maximum session duration of 900 seconds (15 minutes). If you
+ // specify a value higher than this setting, the operation fails.
+ //
+ // By default, the value is set to 900 seconds.
+ DurationSeconds *int32
+
+ noSmithyDocumentSerde
+}
+
+type AssumeRootOutput struct {
+
+ // The temporary security credentials, which include an access key ID, a secret
+ // access key, and a security token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
+ Credentials *types.Credentials
+
+ // The source identity specified by the principal that is calling the AssumeRoot
+ // operation.
+ //
+ // You can use the aws:SourceIdentity condition key to control access based on the
+ // value of source identity. For more information about using source identity, see [Monitor and control actions taken with assumed roles]
+ // in the IAM User Guide.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
+ //
+ // [Monitor and control actions taken with assumed roles]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
+ SourceIdentity *string
+
+ // Metadata pertaining to the operation's result.
+ ResultMetadata middleware.Metadata
+
+ noSmithyDocumentSerde
+}
+
+func (c *Client) addOperationAssumeRootMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
+ err = stack.Serialize.Add(&awsAwsquery_serializeOpAssumeRoot{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ err = stack.Deserialize.Add(&awsAwsquery_deserializeOpAssumeRoot{}, middleware.After)
+ if err != nil {
+ return err
+ }
+ if err := addProtocolFinalizerMiddlewares(stack, options, "AssumeRoot"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
+ if err = addSetLoggerMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addClientRequestID(stack); err != nil {
+ return err
+ }
+ if err = addComputeContentLength(stack); err != nil {
+ return err
+ }
+ if err = addResolveEndpointMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addComputePayloadSHA256(stack); err != nil {
+ return err
+ }
+ if err = addRetry(stack, options); err != nil {
+ return err
+ }
+ if err = addRawResponseToMetadata(stack); err != nil {
+ return err
+ }
+ if err = addRecordResponseTiming(stack); err != nil {
+ return err
+ }
+ if err = addSpanRetryLoop(stack, options); err != nil {
+ return err
+ }
+ if err = addClientUserAgent(stack, options); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
+ if err = addOpAssumeRootValidationMiddleware(stack); err != nil {
+ return err
+ }
+ if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAssumeRoot(options.Region), middleware.Before); err != nil {
+ return err
+ }
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
+ if err = addRequestIDRetrieverMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addResponseErrorMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addRequestResponseLogging(stack, options); err != nil {
+ return err
+ }
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
+ return nil
+}
+
+func newServiceMetadataMiddleware_opAssumeRoot(region string) *awsmiddleware.RegisterServiceMetadata {
+ return &awsmiddleware.RegisterServiceMetadata{
+ Region: region,
+ ServiceID: ServiceID,
+ OperationName: "AssumeRoot",
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
index b7a637d42..9e7cb17d3 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_DecodeAuthorizationMessage.go
@@ -4,45 +4,46 @@ package sts
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Decodes additional information about the authorization status of a request from
-// an encoded message returned in response to an Amazon Web Services request. For
-// example, if a user is not authorized to perform an operation that he or she has
-// requested, the request returns a Client.UnauthorizedOperation response (an HTTP
-// 403 response). Some Amazon Web Services operations additionally return an
-// encoded message that can provide details about this authorization failure. Only
-// certain Amazon Web Services operations return an encoded authorization message.
-// The documentation for an individual operation indicates whether that operation
-// returns an encoded message in addition to returning an HTTP code. The message is
-// encoded because the details of the authorization status can contain privileged
-// information that the user who requested the operation should not see. To decode
-// an authorization status message, a user must be granted permissions through an
-// IAM policy
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html) to
-// request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage) action.
+// an encoded message returned in response to an Amazon Web Services request.
+//
+// For example, if a user is not authorized to perform an operation that he or she
+// has requested, the request returns a Client.UnauthorizedOperation response (an
+// HTTP 403 response). Some Amazon Web Services operations additionally return an
+// encoded message that can provide details about this authorization failure.
+//
+// Only certain Amazon Web Services operations return an encoded authorization
+// message. The documentation for an individual operation indicates whether that
+// operation returns an encoded message in addition to returning an HTTP code.
+//
+// The message is encoded because the details of the authorization status can
+// contain privileged information that the user who requested the operation should
+// not see. To decode an authorization status message, a user must be granted
+// permissions through an IAM [policy]to request the DecodeAuthorizationMessage (
+// sts:DecodeAuthorizationMessage ) action.
+//
// The decoded message includes the following type of information:
//
-// * Whether the
-// request was denied due to an explicit deny or due to the absence of an explicit
-// allow. For more information, see Determining Whether a Request is Allowed or
-// Denied
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
-// in the IAM User Guide.
+// - Whether the request was denied due to an explicit deny or due to the
+// absence of an explicit allow. For more information, see [Determining Whether a Request is Allowed or Denied]in the IAM User
+// Guide.
//
-// * The principal who made the request.
+// - The principal who made the request.
//
-// * The requested
-// action.
+// - The requested action.
//
-// * The requested resource.
+// - The requested resource.
//
-// * The values of condition keys in the
-// context of the user's request.
+// - The values of condition keys in the context of the user's request.
+//
+// [Determining Whether a Request is Allowed or Denied]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow
+// [policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html
func (c *Client) DecodeAuthorizationMessage(ctx context.Context, params *DecodeAuthorizationMessageInput, optFns ...func(*Options)) (*DecodeAuthorizationMessageOutput, error) {
if params == nil {
params = &DecodeAuthorizationMessageInput{}
@@ -83,6 +84,9 @@ type DecodeAuthorizationMessageOutput struct {
}
func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsAwsquery_serializeOpDecodeAuthorizationMessage{}, middleware.After)
if err != nil {
return err
@@ -91,34 +95,41 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "DecodeAuthorizationMessage"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -127,12 +138,27 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpDecodeAuthorizationMessageValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opDecodeAuthorizationMessage(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -142,6 +168,21 @@ func (c *Client) addOperationDecodeAuthorizationMessageMiddlewares(stack *middle
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
@@ -149,7 +190,6 @@ func newServiceMetadataMiddleware_opDecodeAuthorizationMessage(region string) *a
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
- SigningName: "sts",
OperationName: "DecodeAuthorizationMessage",
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
index b86a425d0..28c05f13b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetAccessKeyInfo.go
@@ -4,32 +4,37 @@ package sts
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Returns the account identifier for the specified access key ID. Access keys
-// consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE) and a
-// secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY). For
-// more information about access keys, see Managing Access Keys for IAM Users
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
-// in the IAM User Guide. When you pass an access key ID to this operation, it
-// returns the ID of the Amazon Web Services account to which the keys belong.
-// Access key IDs beginning with AKIA are long-term credentials for an IAM user or
-// the Amazon Web Services account root user. Access key IDs beginning with ASIA
-// are temporary credentials that are created using STS operations. If the account
-// in the response belongs to you, you can sign in as the root user and review your
-// root user access keys. Then, you can pull a credentials report
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
-// to learn which IAM user owns the keys. To learn who requested the temporary
-// credentials for an ASIA access key, view the STS events in your CloudTrail logs
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
-// in the IAM User Guide. This operation does not indicate the state of the access
-// key. The key might be active, inactive, or deleted. Active keys might not have
-// permissions to perform an operation. Providing a deleted access key might return
-// an error that the key doesn't exist.
+// Returns the account identifier for the specified access key ID.
+//
+// Access keys consist of two parts: an access key ID (for example,
+// AKIAIOSFODNN7EXAMPLE ) and a secret access key (for example,
+// wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ). For more information about access
+// keys, see [Managing Access Keys for IAM Users]in the IAM User Guide.
+//
+// When you pass an access key ID to this operation, it returns the ID of the
+// Amazon Web Services account to which the keys belong. Access key IDs beginning
+// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
+// account root user. Access key IDs beginning with ASIA are temporary credentials
+// that are created using STS operations. If the account in the response belongs to
+// you, you can sign in as the root user and review your root user access keys.
+// Then, you can pull a [credentials report]to learn which IAM user owns the keys. To learn who
+// requested the temporary credentials for an ASIA access key, view the STS events
+// in your [CloudTrail logs]in the IAM User Guide.
+//
+// This operation does not indicate the state of the access key. The key might be
+// active, inactive, or deleted. Active keys might not have permissions to perform
+// an operation. Providing a deleted access key might return an error that the key
+// doesn't exist.
+//
+// [credentials report]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html
+// [CloudTrail logs]: https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html
+// [Managing Access Keys for IAM Users]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html
func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoInput, optFns ...func(*Options)) (*GetAccessKeyInfoOutput, error) {
if params == nil {
params = &GetAccessKeyInfoInput{}
@@ -47,9 +52,10 @@ func (c *Client) GetAccessKeyInfo(ctx context.Context, params *GetAccessKeyInfoI
type GetAccessKeyInfoInput struct {
- // The identifier of an access key. This parameter allows (through its regex
- // pattern) a string of characters that can consist of any upper- or lowercase
- // letter or digit.
+ // The identifier of an access key.
+ //
+ // This parameter allows (through its regex pattern) a string of characters that
+ // can consist of any upper- or lowercase letter or digit.
//
// This member is required.
AccessKeyId *string
@@ -69,6 +75,9 @@ type GetAccessKeyInfoOutput struct {
}
func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsAwsquery_serializeOpGetAccessKeyInfo{}, middleware.After)
if err != nil {
return err
@@ -77,34 +86,41 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetAccessKeyInfo"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -113,12 +129,27 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpGetAccessKeyInfoValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetAccessKeyInfo(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -128,6 +159,21 @@ func (c *Client) addOperationGetAccessKeyInfoMiddlewares(stack *middleware.Stack
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
@@ -135,7 +181,6 @@ func newServiceMetadataMiddleware_opGetAccessKeyInfo(region string) *awsmiddlewa
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
- SigningName: "sts",
OperationName: "GetAccessKeyInfo",
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
index a7f96c220..de137b7dc 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetCallerIdentity.go
@@ -4,6 +4,7 @@ package sts
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/smithy-go/middleware"
@@ -11,14 +12,15 @@ import (
)
// Returns details about the IAM user or role whose credentials are used to call
-// the operation. No permissions are required to perform this operation. If an
-// administrator adds a policy to your IAM user or role that explicitly denies
-// access to the sts:GetCallerIdentity action, you can still perform this
-// operation. Permissions are not required because the same information is returned
-// when an IAM user or role is denied access. To view an example response, see I Am
-// Not Authorized to Perform: iam:DeleteVirtualMFADevice
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
-// in the IAM User Guide.
+// the operation.
+//
+// No permissions are required to perform this operation. If an administrator
+// attaches a policy to your identity that explicitly denies access to the
+// sts:GetCallerIdentity action, you can still perform this operation. Permissions
+// are not required because the same information is returned when access is denied.
+// To view an example response, see [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]in the IAM User Guide.
+//
+// [I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice]: https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa
func (c *Client) GetCallerIdentity(ctx context.Context, params *GetCallerIdentityInput, optFns ...func(*Options)) (*GetCallerIdentityOutput, error) {
if params == nil {
params = &GetCallerIdentityInput{}
@@ -38,8 +40,8 @@ type GetCallerIdentityInput struct {
noSmithyDocumentSerde
}
-// Contains the response to a successful GetCallerIdentity request, including
-// information about the entity making the request.
+// Contains the response to a successful GetCallerIdentity request, including information about the
+// entity making the request.
type GetCallerIdentityOutput struct {
// The Amazon Web Services account ID number of the account that owns or contains
@@ -49,11 +51,12 @@ type GetCallerIdentityOutput struct {
// The Amazon Web Services ARN associated with the calling entity.
Arn *string
- // The unique identifier of the calling entity. The exact value depends on the type
- // of entity that is making the call. The values returned are those listed in the
- // aws:userid column in the Principal table
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
- // found on the Policy Variables reference page in the IAM User Guide.
+ // The unique identifier of the calling entity. The exact value depends on the
+ // type of entity that is making the call. The values returned are those listed in
+ // the aws:userid column in the [Principal table]found on the Policy Variables reference page in
+ // the IAM User Guide.
+ //
+ // [Principal table]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable
UserId *string
// Metadata pertaining to the operation's result.
@@ -63,6 +66,9 @@ type GetCallerIdentityOutput struct {
}
func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsAwsquery_serializeOpGetCallerIdentity{}, middleware.After)
if err != nil {
return err
@@ -71,34 +77,41 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetCallerIdentity"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -107,9 +120,24 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetCallerIdentity(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -119,6 +147,21 @@ func (c *Client) addOperationGetCallerIdentityMiddlewares(stack *middleware.Stac
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
@@ -126,7 +169,6 @@ func newServiceMetadataMiddleware_opGetCallerIdentity(region string) *awsmiddlew
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
- SigningName: "sts",
OperationName: "GetCallerIdentity",
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
index 01a3d411b..67c041b30 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetFederationToken.go
@@ -4,98 +4,110 @@ package sts
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/sts/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Returns a set of temporary security credentials (consisting of an access key ID,
-// a secret access key, and a security token) for a federated user. A typical use
-// is in a proxy application that gets temporary security credentials on behalf of
-// distributed applications inside a corporate network. You must call the
-// GetFederationToken operation using the long-term security credentials of an IAM
-// user. As a result, this call is appropriate in contexts where those credentials
-// can be safely stored, usually in a server-based application. For a comparison of
-// GetFederationToken with the other API operations that produce temporary
-// credentials, see Requesting Temporary Security Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. You can create a mobile-based or browser-based app that
-// can authenticate users using a web identity provider like Login with Amazon,
-// Facebook, Google, or an OpenID Connect-compatible identity provider. In this
-// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/)
-// or AssumeRoleWithWebIdentity. For more information, see Federation Through a
-// Web-based Identity Provider
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
-// in the IAM User Guide. You can also call GetFederationToken using the security
-// credentials of an Amazon Web Services account root user, but we do not recommend
-// it. Instead, we recommend that you create an IAM user for the purpose of the
-// proxy application. Then attach a policy to the IAM user that limits federated
-// users to only the actions and resources that they need to access. For more
-// information, see IAM Best Practices
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html) in the
-// IAM User Guide. Session duration The temporary credentials are valid for the
-// specified duration, from 900 seconds (15 minutes) up to a maximum of 129,600
-// seconds (36 hours). The default session duration is 43,200 seconds (12 hours).
-// Temporary credentials obtained by using the Amazon Web Services account root
-// user credentials have a maximum duration of 3,600 seconds (1 hour). Permissions
+// Returns a set of temporary security credentials (consisting of an access key
+// ID, a secret access key, and a security token) for a user. A typical use is in a
+// proxy application that gets temporary security credentials on behalf of
+// distributed applications inside a corporate network.
+//
+// You must call the GetFederationToken operation using the long-term security
+// credentials of an IAM user. As a result, this call is appropriate in contexts
+// where those credentials can be safeguarded, usually in a server-based
+// application. For a comparison of GetFederationToken with the other API
+// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide.
+//
+// Although it is possible to call GetFederationToken using the security
+// credentials of an Amazon Web Services account root user rather than an IAM user
+// that you create for the purpose of a proxy application, we do not recommend it.
+// For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in the IAM User Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate users
+// using a web identity provider like Login with Amazon, Facebook, Google, or an
+// OpenID Connect-compatible identity provider. In this case, we recommend that you
+// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User
+// Guide.
+//
+// # Session duration
+//
+// The temporary credentials are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
+// session duration is 43,200 seconds (12 hours). Temporary credentials obtained by
+// using the root user credentials have a maximum duration of 3,600 seconds (1
+// hour).
+//
+// # Permissions
+//
// You can use the temporary credentials created by GetFederationToken in any
-// Amazon Web Services service except the following:
+// Amazon Web Services service with the following exceptions:
+//
+// - You cannot call any IAM operations using the CLI or the Amazon Web Services
+// API. This limitation does not apply to console sessions.
+//
+// - You cannot call any STS operations except GetCallerIdentity .
//
-// * You cannot call any IAM
-// operations using the CLI or the Amazon Web Services API.
+// You can use temporary credentials for single sign-on (SSO) to the console.
//
-// * You cannot call any
-// STS operations except GetCallerIdentity.
+// You must pass an inline or managed [session policy] to this operation. You can pass a single
+// JSON policy document to use as an inline session policy. You can also specify up
+// to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+// policies. The plaintext that you use for both inline and managed session
+// policies can't exceed 2,048 characters.
+//
+// Though the session policy parameters are optional, if you do not pass a policy,
+// then the resulting federated user session has no permissions. When you pass
+// session policies, the session permissions are the intersection of the IAM user
+// policies and the session policies that you pass. This gives you a way to further
+// restrict the permissions for a federated user. You cannot use session policies
+// to grant more permissions than those that are defined in the permissions policy
+// of the IAM user. For more information, see [Session Policies]in the IAM User Guide. For
+// information about using GetFederationToken to create temporary security
+// credentials, see [GetFederationToken—Federation Through a Custom Identity Broker].
//
-// You must pass an inline or managed
-// session policy
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policies to use as
-// managed session policies. The plaintext that you use for both inline and managed
-// session policies can't exceed 2,048 characters. Though the session policy
-// parameters are optional, if you do not pass a policy, then the resulting
-// federated user session has no permissions. When you pass session policies, the
-// session permissions are the intersection of the IAM user policies and the
-// session policies that you pass. This gives you a way to further restrict the
-// permissions for a federated user. You cannot use session policies to grant more
-// permissions than those that are defined in the permissions policy of the IAM
-// user. For more information, see Session Policies
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. For information about using GetFederationToken to create
-// temporary security credentials, see GetFederationToken—Federation Through a
-// Custom Identity Broker
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
// You can use the credentials to access a resource that has a resource-based
// policy. If that policy specifically references the federated user session in the
// Principal element of the policy, the session has the permissions allowed by the
// policy. These permissions are granted in addition to the permissions granted by
-// the session policies. Tags (Optional) You can pass tag key-value pairs to your
-// session. These are called session tags. For more information about session tags,
-// see Passing Session Tags in STS
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
-// IAM User Guide. You can create a mobile-based or browser-based app that can
-// authenticate users using a web identity provider like Login with Amazon,
-// Facebook, Google, or an OpenID Connect-compatible identity provider. In this
-// case, we recommend that you use Amazon Cognito (http://aws.amazon.com/cognito/)
-// or AssumeRoleWithWebIdentity. For more information, see Federation Through a
-// Web-based Identity Provider
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
-// in the IAM User Guide. An administrator must grant you the permissions necessary
-// to pass session tags. The administrator can also create granular permissions to
-// allow you to pass only specific session tags. For more information, see
-// Tutorial: Using Tags for Attribute-Based Access Control
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide. Tag key–value pairs are not case sensitive, but case is
-// preserved. This means that you cannot have separate Department and department
-// tag keys. Assume that the user that you are federating has the
-// Department=Marketing tag and you pass the department=engineering session tag.
-// Department and department are not saved as separate tags, and the session tag
-// passed in the request takes precedence over the user tag.
+// the session policies.
+//
+// # Tags
+//
+// (Optional) You can pass tag key-value pairs to your session. These are called
+// session tags. For more information about session tags, see [Passing Session Tags in STS]in the IAM User
+// Guide.
+//
+// You can create a mobile-based or browser-based app that can authenticate users
+// using a web identity provider like Login with Amazon, Facebook, Google, or an
+// OpenID Connect-compatible identity provider. In this case, we recommend that you
+// use [Amazon Cognito]or AssumeRoleWithWebIdentity . For more information, see [Federation Through a Web-based Identity Provider] in the IAM User
+// Guide.
+//
+// An administrator must grant you the permissions necessary to pass session tags.
+// The administrator can also create granular permissions to allow you to pass only
+// specific session tags. For more information, see [Tutorial: Using Tags for Attribute-Based Access Control]in the IAM User Guide.
+//
+// Tag key–value pairs are not case sensitive, but case is preserved. This means
+// that you cannot have separate Department and department tag keys. Assume that
+// the user that you are federating has the Department = Marketing tag and you
+// pass the department = engineering session tag. Department and department are
+// not saved as separate tags, and the session tag passed in the request takes
+// precedence over the user tag.
+//
+// [Federation Through a Web-based Identity Provider]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity
+// [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Amazon Cognito]: http://aws.amazon.com/cognito/
+// [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [GetFederationToken—Federation Through a Custom Identity Broker]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken
+// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
+// [Tutorial: Using Tags for Attribute-Based Access Control]: https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html
func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTokenInput, optFns ...func(*Options)) (*GetFederationTokenOutput, error) {
if params == nil {
params = &GetFederationTokenInput{}
@@ -114,127 +126,151 @@ func (c *Client) GetFederationToken(ctx context.Context, params *GetFederationTo
type GetFederationTokenInput struct {
// The name of the federated user. The name is used as an identifier for the
- // temporary security credentials (such as Bob). For example, you can reference the
- // federated user name in a resource-based policy, such as in an Amazon S3 bucket
- // policy. The regex used to validate this parameter is a string of characters
- // consisting of upper- and lower-case alphanumeric characters with no spaces. You
- // can also include underscores or any of the following characters: =,.@-
+ // temporary security credentials (such as Bob ). For example, you can reference
+ // the federated user name in a resource-based policy, such as in an Amazon S3
+ // bucket policy.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@-
//
// This member is required.
Name *string
- // The duration, in seconds, that the session should last. Acceptable durations for
- // federation sessions range from 900 seconds (15 minutes) to 129,600 seconds (36
- // hours), with 43,200 seconds (12 hours) as the default. Sessions obtained using
- // Amazon Web Services account root user credentials are restricted to a maximum of
- // 3,600 seconds (one hour). If the specified duration is longer than one hour, the
- // session obtained by using root user credentials defaults to one hour.
+ // The duration, in seconds, that the session should last. Acceptable durations
+ // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
+ // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
+ // using root user credentials are restricted to a maximum of 3,600 seconds (one
+ // hour). If the specified duration is longer than one hour, the session obtained
+ // by using root user credentials defaults to one hour.
DurationSeconds *int32
// An IAM policy in JSON format that you want to use as an inline session policy.
- // You must pass an inline or managed session policy
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // to this operation. You can pass a single JSON policy document to use as an
- // inline session policy. You can also specify up to 10 managed policies to use as
- // managed session policies. This parameter is optional. However, if you do not
- // pass any session policies, then the resulting federated user session has no
- // permissions. When you pass session policies, the session permissions are the
- // intersection of the IAM user policies and the session policies that you pass.
- // This gives you a way to further restrict the permissions for a federated user.
- // You cannot use session policies to grant more permissions than those that are
- // defined in the permissions policy of the IAM user. For more information, see
- // Session Policies
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The resulting credentials can be used to access a
- // resource that has a resource-based policy. If that policy specifically
- // references the federated user session in the Principal element of the policy,
- // the session has the permissions allowed by the policy. These permissions are
- // granted in addition to the permissions that are granted by the session policies.
+ //
+ // You must pass an inline or managed [session policy] to this operation. You can pass a single
+ // JSON policy document to use as an inline session policy. You can also specify up
+ // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+ // policies.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions.
+ //
+ // When you pass session policies, the session permissions are the intersection of
+ // the IAM user policies and the session policies that you pass. This gives you a
+ // way to further restrict the permissions for a federated user. You cannot use
+ // session policies to grant more permissions than those that are defined in the
+ // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User
+ // Guide.
+ //
+ // The resulting credentials can be used to access a resource that has a
+ // resource-based policy. If that policy specifically references the federated user
+ // session in the Principal element of the policy, the session has the permissions
+ // allowed by the policy. These permissions are granted in addition to the
+ // permissions that are granted by the session policies.
+ //
// The plaintext that you use for both inline and managed session policies can't
// exceed 2,048 characters. The JSON policy characters can be any ASCII character
// from the space character to the end of the valid character list (\u0020 through
// \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
- // return (\u000D) characters. An Amazon Web Services conversion compresses the
- // passed session policies and session tags into a packed binary format that has a
+ // return (\u000D) characters.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
// separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates by
- // percentage how close the policies and tags for your request are to the upper
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
// size limit.
+ //
+ // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
Policy *string
// The Amazon Resource Names (ARNs) of the IAM managed policies that you want to
// use as a managed session policy. The policies must exist in the same account as
- // the IAM user that is requesting federated access. You must pass an inline or
- // managed session policy
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // to this operation. You can pass a single JSON policy document to use as an
- // inline session policy. You can also specify up to 10 managed policies to use as
- // managed session policies. The plaintext that you use for both inline and managed
- // session policies can't exceed 2,048 characters. You can provide up to 10 managed
- // policy ARNs. For more information about ARNs, see Amazon Resource Names (ARNs)
- // and Amazon Web Services Service Namespaces
- // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
- // the Amazon Web Services General Reference. This parameter is optional. However,
- // if you do not pass any session policies, then the resulting federated user
- // session has no permissions. When you pass session policies, the session
- // permissions are the intersection of the IAM user policies and the session
- // policies that you pass. This gives you a way to further restrict the permissions
- // for a federated user. You cannot use session policies to grant more permissions
- // than those that are defined in the permissions policy of the IAM user. For more
- // information, see Session Policies
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide. The resulting credentials can be used to access a
- // resource that has a resource-based policy. If that policy specifically
- // references the federated user session in the Principal element of the policy,
- // the session has the permissions allowed by the policy. These permissions are
- // granted in addition to the permissions that are granted by the session policies.
- // An Amazon Web Services conversion compresses the passed session policies and
- // session tags into a packed binary format that has a separate limit. Your request
- // can fail for this limit even if your plaintext meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
+ // the IAM user that is requesting federated access.
+ //
+ // You must pass an inline or managed [session policy] to this operation. You can pass a single
+ // JSON policy document to use as an inline session policy. You can also specify up
+ // to 10 managed policy Amazon Resource Names (ARNs) to use as managed session
+ // policies. The plaintext that you use for both inline and managed session
+ // policies can't exceed 2,048 characters. You can provide up to 10 managed policy
+ // ARNs. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web Services General
+ // Reference.
+ //
+ // This parameter is optional. However, if you do not pass any session policies,
+ // then the resulting federated user session has no permissions.
+ //
+ // When you pass session policies, the session permissions are the intersection of
+ // the IAM user policies and the session policies that you pass. This gives you a
+ // way to further restrict the permissions for a federated user. You cannot use
+ // session policies to grant more permissions than those that are defined in the
+ // permissions policy of the IAM user. For more information, see [Session Policies]in the IAM User
+ // Guide.
+ //
+ // The resulting credentials can be used to access a resource that has a
+ // resource-based policy. If that policy specifically references the federated user
+ // session in the Principal element of the policy, the session has the permissions
+ // allowed by the policy. These permissions are granted in addition to the
+ // permissions that are granted by the session policies.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
+ // separate limit. Your request can fail for this limit even if your plaintext
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // [session policy]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Session Policies]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
PolicyArns []types.PolicyDescriptorType
// A list of session tags. Each session tag consists of a key name and an
- // associated value. For more information about session tags, see Passing Session
- // Tags in STS
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
- // IAM User Guide. This parameter is optional. You can pass up to 50 session tags.
- // The plaintext session tag keys can’t exceed 128 characters and the values can’t
- // exceed 256 characters. For these and additional limits, see IAM and STS
- // Character Limits
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide. An Amazon Web Services conversion compresses the passed
- // session policies and session tags into a packed binary format that has a
+ // associated value. For more information about session tags, see [Passing Session Tags in STS]in the IAM User
+ // Guide.
+ //
+ // This parameter is optional. You can pass up to 50 session tags. The plaintext
+ // session tag keys can’t exceed 128 characters and the values can’t exceed 256
+ // characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User Guide.
+ //
+ // An Amazon Web Services conversion compresses the passed inline session policy,
+ // managed policy ARNs, and session tags into a packed binary format that has a
// separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates by
- // percentage how close the policies and tags for your request are to the upper
- // size limit. You can pass a session tag with the same key as a tag that is
- // already attached to the user you are federating. When you do, session tags
- // override a user tag with the same key. Tag key–value pairs are not case
- // sensitive, but case is preserved. This means that you cannot have separate
- // Department and department tag keys. Assume that the role has the
- // Department=Marketing tag and you pass the department=engineering session tag.
- // Department and department are not saved as separate tags, and the session tag
- // passed in the request takes precedence over the role tag.
+ // meets the other requirements. The PackedPolicySize response element indicates
+ // by percentage how close the policies and tags for your request are to the upper
+ // size limit.
+ //
+ // You can pass a session tag with the same key as a tag that is already attached
+ // to the user you are federating. When you do, session tags override a user tag
+ // with the same key.
+ //
+ // Tag key–value pairs are not case sensitive, but case is preserved. This means
+ // that you cannot have separate Department and department tag keys. Assume that
+ // the role has the Department = Marketing tag and you pass the department =
+ // engineering session tag. Department and department are not saved as separate
+ // tags, and the session tag passed in the request takes precedence over the role
+ // tag.
+ //
+ // [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
Tags []types.Tag
noSmithyDocumentSerde
}
-// Contains the response to a successful GetFederationToken request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon Web
-// Services requests.
+// Contains the response to a successful GetFederationToken request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type GetFederationTokenOutput struct {
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token. The size of the security token
- // that STS API operations return is not fixed. We strongly recommend that you make
- // no assumptions about the maximum size.
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// Identifiers for the federated user associated with the credentials (such as
- // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You can use
+ // arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob ). You can use
// the federated user's ARN in your resource-based policies, such as an Amazon S3
// bucket policy.
FederatedUser *types.FederatedUser
@@ -252,6 +288,9 @@ type GetFederationTokenOutput struct {
}
func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsAwsquery_serializeOpGetFederationToken{}, middleware.After)
if err != nil {
return err
@@ -260,34 +299,41 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetFederationToken"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -296,12 +342,27 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = addOpGetFederationTokenValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetFederationToken(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -311,6 +372,21 @@ func (c *Client) addOperationGetFederationTokenMiddlewares(stack *middleware.Sta
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
@@ -318,7 +394,6 @@ func newServiceMetadataMiddleware_opGetFederationToken(region string) *awsmiddle
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
- SigningName: "sts",
OperationName: "GetFederationToken",
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
index bfde51689..903d151ce 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/api_op_GetSessionToken.go
@@ -4,67 +4,69 @@ package sts
import (
"context"
+ "fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
- "github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/sts/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
-// Returns a set of temporary credentials for an Amazon Web Services account or IAM
-// user. The credentials consist of an access key ID, a secret access key, and a
-// security token. Typically, you use GetSessionToken if you want to use MFA to
+// Returns a set of temporary credentials for an Amazon Web Services account or
+// IAM user. The credentials consist of an access key ID, a secret access key, and
+// a security token. Typically, you use GetSessionToken if you want to use MFA to
// protect programmatic calls to specific Amazon Web Services API operations like
-// Amazon EC2 StopInstances. MFA-enabled IAM users would need to call
-// GetSessionToken and submit an MFA code that is associated with their MFA device.
-// Using the temporary security credentials that are returned from the call, IAM
-// users can then make programmatic calls to API operations that require MFA
-// authentication. If you do not supply a correct MFA code, then the API returns an
-// access denied error. For a comparison of GetSessionToken with the other API
-// operations that produce temporary credentials, see Requesting Temporary Security
-// Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide. No permissions are required for users to perform this
-// operation. The purpose of the sts:GetSessionToken operation is to authenticate
-// the user using MFA. You cannot use policies to control authentication
-// operations. For more information, see Permissions for GetSessionToken
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
-// in the IAM User Guide. Session Duration The GetSessionToken operation must be
-// called by using the long-term Amazon Web Services security credentials of the
-// Amazon Web Services account root user or an IAM user. Credentials that are
-// created by IAM users are valid for the duration that you specify. This duration
-// can range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36
-// hours), with a default of 43,200 seconds (12 hours). Credentials based on
-// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds
-// (1 hour), with a default of 1 hour. Permissions The temporary security
-// credentials created by GetSessionToken can be used to make API calls to any
-// Amazon Web Services service with the following exceptions:
+// Amazon EC2 StopInstances .
//
-// * You cannot call
-// any IAM API operations unless MFA authentication information is included in the
-// request.
+// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that is
+// associated with their MFA device. Using the temporary security credentials that
+// the call returns, IAM users can then make programmatic calls to API operations
+// that require MFA authentication. An incorrect MFA code causes the API to return
+// an access denied error. For a comparison of GetSessionToken with the other API
+// operations that produce temporary credentials, see [Requesting Temporary Security Credentials]and [Compare STS credentials] in the IAM User Guide.
//
-// * You cannot call any STS API except AssumeRole or
-// GetCallerIdentity.
+// No permissions are required for users to perform this operation. The purpose of
+// the sts:GetSessionToken operation is to authenticate the user using MFA. You
+// cannot use policies to control authentication operations. For more information,
+// see [Permissions for GetSessionToken]in the IAM User Guide.
//
-// We recommend that you do not call GetSessionToken with
-// Amazon Web Services account root user credentials. Instead, follow our best
-// practices
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
-// by creating one or more IAM users, giving them the necessary permissions, and
-// using IAM users for everyday interaction with Amazon Web Services. The
-// credentials that are returned by GetSessionToken are based on permissions
-// associated with the user whose credentials were used to call the operation. If
-// GetSessionToken is called using Amazon Web Services account root user
-// credentials, the temporary credentials have root user permissions. Similarly, if
-// GetSessionToken is called using the credentials of an IAM user, the temporary
-// credentials have the same permissions as the IAM user. For more information
-// about using GetSessionToken to create temporary credentials, go to Temporary
-// Credentials for Users in Untrusted Environments
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
-// in the IAM User Guide.
+// # Session Duration
+//
+// The GetSessionToken operation must be called by using the long-term Amazon Web
+// Services security credentials of an IAM user. Credentials that are created by
+// IAM users are valid for the duration that you specify. This duration can range
+// from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours),
+// with a default of 43,200 seconds (12 hours). Credentials based on account
+// credentials can range from 900 seconds (15 minutes) up to 3,600 seconds (1
+// hour), with a default of 1 hour.
+//
+// # Permissions
+//
+// The temporary security credentials created by GetSessionToken can be used to
+// make API calls to any Amazon Web Services service with the following exceptions:
+//
+// - You cannot call any IAM API operations unless MFA authentication
+// information is included in the request.
+//
+// - You cannot call any STS API except AssumeRole or GetCallerIdentity .
+//
+// The credentials that GetSessionToken returns are based on permissions
+// associated with the IAM user whose credentials were used to call the operation.
+// The temporary credentials have the same permissions as the IAM user.
+//
+// Although it is possible to call GetSessionToken using the security credentials
+// of an Amazon Web Services account root user rather than an IAM user, we do not
+// recommend it. If GetSessionToken is called using root user credentials, the
+// temporary credentials have root user permissions. For more information, see [Safeguard your root user credentials and don't use them for everyday tasks]in
+// the IAM User Guide
+//
+// For more information about using GetSessionToken to create temporary
+// credentials, see [Temporary Credentials for Users in Untrusted Environments]in the IAM User Guide.
+//
+// [Permissions for GetSessionToken]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html
+// [Temporary Credentials for Users in Untrusted Environments]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken
+// [Safeguard your root user credentials and don't use them for everyday tasks]: https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials
+// [Requesting Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html
+// [Compare STS credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_sts-comparison.html
func (c *Client) GetSessionToken(ctx context.Context, params *GetSessionTokenInput, optFns ...func(*Options)) (*GetSessionTokenOutput, error) {
if params == nil {
params = &GetSessionTokenInput{}
@@ -90,39 +92,42 @@ type GetSessionTokenInput struct {
// Services account owners defaults to one hour.
DurationSeconds *int32
- // The identification number of the MFA device that is associated with the IAM user
- // who is making the GetSessionToken call. Specify this value if the IAM user has a
- // policy that requires MFA authentication. The value is either the serial number
- // for a hardware device (such as GAHT12345678) or an Amazon Resource Name (ARN)
- // for a virtual device (such as arn:aws:iam::123456789012:mfa/user). You can find
- // the device for an IAM user by going to the Amazon Web Services Management
- // Console and viewing the user's security credentials. The regex used to validate
- // this parameter is a string of characters consisting of upper- and lower-case
- // alphanumeric characters with no spaces. You can also include underscores or any
- // of the following characters: =,.@:/-
+ // The identification number of the MFA device that is associated with the IAM
+ // user who is making the GetSessionToken call. Specify this value if the IAM user
+ // has a policy that requires MFA authentication. The value is either the serial
+ // number for a hardware device (such as GAHT12345678 ) or an Amazon Resource Name
+ // (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user ). You
+ // can find the device for an IAM user by going to the Amazon Web Services
+ // Management Console and viewing the user's security credentials.
+ //
+ // The regex used to validate this parameter is a string of characters consisting
+ // of upper- and lower-case alphanumeric characters with no spaces. You can also
+ // include underscores or any of the following characters: =,.@:/-
SerialNumber *string
- // The value provided by the MFA device, if MFA is required. If any policy requires
- // the IAM user to submit an MFA code, specify this value. If MFA authentication is
- // required, the user must provide a code when requesting a set of temporary
- // security credentials. A user who fails to provide the code receives an "access
- // denied" response when requesting resources that require MFA authentication. The
- // format for this parameter, as described by its regex pattern, is a sequence of
- // six numeric digits.
+ // The value provided by the MFA device, if MFA is required. If any policy
+ // requires the IAM user to submit an MFA code, specify this value. If MFA
+ // authentication is required, the user must provide a code when requesting a set
+ // of temporary security credentials. A user who fails to provide the code receives
+ // an "access denied" response when requesting resources that require MFA
+ // authentication.
+ //
+ // The format for this parameter, as described by its regex pattern, is a sequence
+ // of six numeric digits.
TokenCode *string
noSmithyDocumentSerde
}
-// Contains the response to a successful GetSessionToken request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon Web
-// Services requests.
+// Contains the response to a successful GetSessionToken request, including temporary Amazon Web
+// Services credentials that can be used to make Amazon Web Services requests.
type GetSessionTokenOutput struct {
// The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token. The size of the security token
- // that STS API operations return is not fixed. We strongly recommend that you make
- // no assumptions about the maximum size.
+ // access key, and a security (or session) token.
+ //
+ // The size of the security token that STS API operations return is not fixed. We
+ // strongly recommend that you make no assumptions about the maximum size.
Credentials *types.Credentials
// Metadata pertaining to the operation's result.
@@ -132,6 +137,9 @@ type GetSessionTokenOutput struct {
}
func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack, options Options) (err error) {
+ if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
+ return err
+ }
err = stack.Serialize.Add(&awsAwsquery_serializeOpGetSessionToken{}, middleware.After)
if err != nil {
return err
@@ -140,34 +148,41 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack,
if err != nil {
return err
}
+ if err := addProtocolFinalizerMiddlewares(stack, options, "GetSessionToken"); err != nil {
+ return fmt.Errorf("add protocol finalizers: %v", err)
+ }
+
+ if err = addlegacyEndpointContextSetter(stack, options); err != nil {
+ return err
+ }
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
- if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
+ if err = addClientRequestID(stack); err != nil {
return err
}
- if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
+ if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
- if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
+ if err = addComputePayloadSHA256(stack); err != nil {
return err
}
- if err = addRetryMiddlewares(stack, options); err != nil {
+ if err = addRetry(stack, options); err != nil {
return err
}
- if err = addHTTPSignerV4Middleware(stack, options); err != nil {
+ if err = addRawResponseToMetadata(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
+ if err = addRecordResponseTiming(stack); err != nil {
return err
}
- if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
+ if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
- if err = addClientUserAgent(stack); err != nil {
+ if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
@@ -176,9 +191,24 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack,
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
+ if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
+ return err
+ }
+ if err = addTimeOffsetBuild(stack, c); err != nil {
+ return err
+ }
+ if err = addUserAgentRetryMode(stack, options); err != nil {
+ return err
+ }
+ if err = addCredentialSource(stack, options); err != nil {
+ return err
+ }
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opGetSessionToken(options.Region), middleware.Before); err != nil {
return err
}
+ if err = addRecursionDetection(stack); err != nil {
+ return err
+ }
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
@@ -188,6 +218,21 @@ func (c *Client) addOperationGetSessionTokenMiddlewares(stack *middleware.Stack,
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
+ if err = addDisableHTTPSMiddleware(stack, options); err != nil {
+ return err
+ }
+ if err = addSpanInitializeStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanInitializeEnd(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestStart(stack); err != nil {
+ return err
+ }
+ if err = addSpanBuildRequestEnd(stack); err != nil {
+ return err
+ }
return nil
}
@@ -195,7 +240,6 @@ func newServiceMetadataMiddleware_opGetSessionToken(region string) *awsmiddlewar
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
- SigningName: "sts",
OperationName: "GetSessionToken",
}
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go
new file mode 100644
index 000000000..a90b2b736
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/auth.go
@@ -0,0 +1,325 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "fmt"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+func bindAuthParamsRegion(_ interface{}, params *AuthResolverParameters, _ interface{}, options Options) {
+ params.Region = options.Region
+}
+
+type setLegacyContextSigningOptionsMiddleware struct {
+}
+
+func (*setLegacyContextSigningOptionsMiddleware) ID() string {
+ return "setLegacyContextSigningOptions"
+}
+
+func (m *setLegacyContextSigningOptionsMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ rscheme := getResolvedAuthScheme(ctx)
+ schemeID := rscheme.Scheme.SchemeID()
+
+ if sn := awsmiddleware.GetSigningName(ctx); sn != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningName(&rscheme.SignerProperties, sn)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningName(&rscheme.SignerProperties, sn)
+ }
+ }
+
+ if sr := awsmiddleware.GetSigningRegion(ctx); sr != "" {
+ if schemeID == "aws.auth#sigv4" {
+ smithyhttp.SetSigV4SigningRegion(&rscheme.SignerProperties, sr)
+ } else if schemeID == "aws.auth#sigv4a" {
+ smithyhttp.SetSigV4ASigningRegions(&rscheme.SignerProperties, []string{sr})
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+func addSetLegacyContextSigningOptionsMiddleware(stack *middleware.Stack) error {
+ return stack.Finalize.Insert(&setLegacyContextSigningOptionsMiddleware{}, "Signing", middleware.Before)
+}
+
+type withAnonymous struct {
+ resolver AuthSchemeResolver
+}
+
+var _ AuthSchemeResolver = (*withAnonymous)(nil)
+
+func (v *withAnonymous) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ opts, err := v.resolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return nil, err
+ }
+
+ opts = append(opts, &smithyauth.Option{
+ SchemeID: smithyauth.SchemeIDAnonymous,
+ })
+ return opts, nil
+}
+
+func wrapWithAnonymousAuth(options *Options) {
+ if _, ok := options.AuthSchemeResolver.(*defaultAuthSchemeResolver); !ok {
+ return
+ }
+
+ options.AuthSchemeResolver = &withAnonymous{
+ resolver: options.AuthSchemeResolver,
+ }
+}
+
+// AuthResolverParameters contains the set of inputs necessary for auth scheme
+// resolution.
+type AuthResolverParameters struct {
+ // The name of the operation being invoked.
+ Operation string
+
+ // The region in which the operation is being invoked.
+ Region string
+}
+
+func bindAuthResolverParams(ctx context.Context, operation string, input interface{}, options Options) *AuthResolverParameters {
+ params := &AuthResolverParameters{
+ Operation: operation,
+ }
+
+ bindAuthParamsRegion(ctx, params, input, options)
+
+ return params
+}
+
+// AuthSchemeResolver returns a set of possible authentication options for an
+// operation.
+type AuthSchemeResolver interface {
+ ResolveAuthSchemes(context.Context, *AuthResolverParameters) ([]*smithyauth.Option, error)
+}
+
+type defaultAuthSchemeResolver struct{}
+
+var _ AuthSchemeResolver = (*defaultAuthSchemeResolver)(nil)
+
+func (*defaultAuthSchemeResolver) ResolveAuthSchemes(ctx context.Context, params *AuthResolverParameters) ([]*smithyauth.Option, error) {
+ if overrides, ok := operationAuthOptions[params.Operation]; ok {
+ return overrides(params), nil
+ }
+ return serviceAuthOptions(params), nil
+}
+
+var operationAuthOptions = map[string]func(*AuthResolverParameters) []*smithyauth.Option{
+ "AssumeRoleWithSAML": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+
+ "AssumeRoleWithWebIdentity": func(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {SchemeID: smithyauth.SchemeIDAnonymous},
+ }
+ },
+}
+
+func serviceAuthOptions(params *AuthResolverParameters) []*smithyauth.Option {
+ return []*smithyauth.Option{
+ {
+ SchemeID: smithyauth.SchemeIDSigV4,
+ SignerProperties: func() smithy.Properties {
+ var props smithy.Properties
+ smithyhttp.SetSigV4SigningName(&props, "sts")
+ smithyhttp.SetSigV4SigningRegion(&props, params.Region)
+ return props
+ }(),
+ },
+ }
+}
+
+type resolveAuthSchemeMiddleware struct {
+ operation string
+ options Options
+}
+
+func (*resolveAuthSchemeMiddleware) ID() string {
+ return "ResolveAuthScheme"
+}
+
+func (m *resolveAuthSchemeMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveAuthScheme")
+ defer span.End()
+
+ params := bindAuthResolverParams(ctx, m.operation, getOperationInput(ctx), m.options)
+ options, err := m.options.AuthSchemeResolver.ResolveAuthSchemes(ctx, params)
+ if err != nil {
+ return out, metadata, fmt.Errorf("resolve auth scheme: %w", err)
+ }
+
+ scheme, ok := m.selectScheme(options)
+ if !ok {
+ return out, metadata, fmt.Errorf("could not select an auth scheme")
+ }
+
+ ctx = setResolvedAuthScheme(ctx, scheme)
+
+ span.SetProperty("auth.scheme_id", scheme.Scheme.SchemeID())
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+func (m *resolveAuthSchemeMiddleware) selectScheme(options []*smithyauth.Option) (*resolvedAuthScheme, bool) {
+ for _, option := range options {
+ if option.SchemeID == smithyauth.SchemeIDAnonymous {
+ return newResolvedAuthScheme(smithyhttp.NewAnonymousScheme(), option), true
+ }
+
+ for _, scheme := range m.options.AuthSchemes {
+ if scheme.SchemeID() != option.SchemeID {
+ continue
+ }
+
+ if scheme.IdentityResolver(m.options) != nil {
+ return newResolvedAuthScheme(scheme, option), true
+ }
+ }
+ }
+
+ return nil, false
+}
+
+type resolvedAuthSchemeKey struct{}
+
+type resolvedAuthScheme struct {
+ Scheme smithyhttp.AuthScheme
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+func newResolvedAuthScheme(scheme smithyhttp.AuthScheme, option *smithyauth.Option) *resolvedAuthScheme {
+ return &resolvedAuthScheme{
+ Scheme: scheme,
+ IdentityProperties: option.IdentityProperties,
+ SignerProperties: option.SignerProperties,
+ }
+}
+
+func setResolvedAuthScheme(ctx context.Context, scheme *resolvedAuthScheme) context.Context {
+ return middleware.WithStackValue(ctx, resolvedAuthSchemeKey{}, scheme)
+}
+
+func getResolvedAuthScheme(ctx context.Context) *resolvedAuthScheme {
+ v, _ := middleware.GetStackValue(ctx, resolvedAuthSchemeKey{}).(*resolvedAuthScheme)
+ return v
+}
+
+type getIdentityMiddleware struct {
+ options Options
+}
+
+func (*getIdentityMiddleware) ID() string {
+ return "GetIdentity"
+}
+
+func (m *getIdentityMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ innerCtx, span := tracing.StartSpan(ctx, "GetIdentity")
+ defer span.End()
+
+ rscheme := getResolvedAuthScheme(innerCtx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ resolver := rscheme.Scheme.IdentityResolver(m.options)
+ if resolver == nil {
+ return out, metadata, fmt.Errorf("no identity resolver")
+ }
+
+ identity, err := timeOperationMetric(ctx, "client.call.resolve_identity_duration",
+ func() (smithyauth.Identity, error) {
+ return resolver.GetIdentity(innerCtx, rscheme.IdentityProperties)
+ },
+ func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("get identity: %w", err)
+ }
+
+ ctx = setIdentity(ctx, identity)
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
+
+type identityKey struct{}
+
+func setIdentity(ctx context.Context, identity smithyauth.Identity) context.Context {
+ return middleware.WithStackValue(ctx, identityKey{}, identity)
+}
+
+func getIdentity(ctx context.Context) smithyauth.Identity {
+ v, _ := middleware.GetStackValue(ctx, identityKey{}).(smithyauth.Identity)
+ return v
+}
+
+type signRequestMiddleware struct {
+ options Options
+}
+
+func (*signRequestMiddleware) ID() string {
+ return "Signing"
+}
+
+func (m *signRequestMiddleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "SignRequest")
+ defer span.End()
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unexpected transport type %T", in.Request)
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ identity := getIdentity(ctx)
+ if identity == nil {
+ return out, metadata, fmt.Errorf("no identity")
+ }
+
+ signer := rscheme.Scheme.Signer()
+ if signer == nil {
+ return out, metadata, fmt.Errorf("no signer")
+ }
+
+ _, err = timeOperationMetric(ctx, "client.call.signing_duration", func() (any, error) {
+ return nil, signer.SignRequest(ctx, req, identity, rscheme.SignerProperties)
+ }, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("auth.scheme_id", rscheme.Scheme.SchemeID())
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("sign request: %w", err)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
index 5d634ce35..59349890f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/deserializers.go
@@ -16,12 +16,22 @@ import (
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithytime "github.com/aws/smithy-go/time"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
"io"
"strconv"
"strings"
+ "time"
)
+func deserializeS3Expires(v string) (*time.Time, error) {
+ t, err := smithytime.ParseHTTPDate(v)
+ if err != nil {
+ return nil, nil
+ }
+ return &t, nil
+}
+
type awsAwsquery_deserializeOpAssumeRole struct {
}
@@ -37,6 +47,10 @@ func (m *awsAwsquery_deserializeOpAssumeRole) HandleDeserialize(ctx context.Cont
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -154,6 +168,10 @@ func (m *awsAwsquery_deserializeOpAssumeRoleWithSAML) HandleDeserialize(ctx cont
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -277,6 +295,10 @@ func (m *awsAwsquery_deserializeOpAssumeRoleWithWebIdentity) HandleDeserialize(c
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -388,6 +410,121 @@ func awsAwsquery_deserializeOpErrorAssumeRoleWithWebIdentity(response *smithyhtt
}
}
+type awsAwsquery_deserializeOpAssumeRoot struct {
+}
+
+func (*awsAwsquery_deserializeOpAssumeRoot) ID() string {
+ return "OperationDeserializer"
+}
+
+func (m *awsAwsquery_deserializeOpAssumeRoot) HandleDeserialize(ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler) (
+ out middleware.DeserializeOutput, metadata middleware.Metadata, err error,
+) {
+ out, metadata, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, metadata, err
+ }
+
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
+ response, ok := out.RawResponse.(*smithyhttp.Response)
+ if !ok {
+ return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
+ }
+
+ if response.StatusCode < 200 || response.StatusCode >= 300 {
+ return out, metadata, awsAwsquery_deserializeOpErrorAssumeRoot(response, &metadata)
+ }
+ output := &AssumeRootOutput{}
+ out.Result = output
+
+ var buff [1024]byte
+ ringBuffer := smithyio.NewRingBuffer(buff[:])
+ body := io.TeeReader(response.Body, ringBuffer)
+ rootDecoder := xml.NewDecoder(body)
+ t, err := smithyxml.FetchRootElement(rootDecoder)
+ if err == io.EOF {
+ return out, metadata, nil
+ }
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ return out, metadata, &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ }
+
+ decoder := smithyxml.WrapNodeDecoder(rootDecoder, t)
+ t, err = decoder.GetElement("AssumeRootResult")
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ decoder = smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ err = awsAwsquery_deserializeOpDocumentAssumeRootOutput(&output, decoder)
+ if err != nil {
+ var snapshot bytes.Buffer
+ io.Copy(&snapshot, ringBuffer)
+ err = &smithy.DeserializationError{
+ Err: fmt.Errorf("failed to decode response body, %w", err),
+ Snapshot: snapshot.Bytes(),
+ }
+ return out, metadata, err
+ }
+
+ return out, metadata, err
+}
+
+func awsAwsquery_deserializeOpErrorAssumeRoot(response *smithyhttp.Response, metadata *middleware.Metadata) error {
+ var errorBuffer bytes.Buffer
+ if _, err := io.Copy(&errorBuffer, response.Body); err != nil {
+ return &smithy.DeserializationError{Err: fmt.Errorf("failed to copy error response body, %w", err)}
+ }
+ errorBody := bytes.NewReader(errorBuffer.Bytes())
+
+ errorCode := "UnknownError"
+ errorMessage := errorCode
+
+ errorComponents, err := awsxml.GetErrorResponseComponents(errorBody, false)
+ if err != nil {
+ return err
+ }
+ if reqID := errorComponents.RequestID; len(reqID) != 0 {
+ awsmiddleware.SetRequestIDMetadata(metadata, reqID)
+ }
+ if len(errorComponents.Code) != 0 {
+ errorCode = errorComponents.Code
+ }
+ if len(errorComponents.Message) != 0 {
+ errorMessage = errorComponents.Message
+ }
+ errorBody.Seek(0, io.SeekStart)
+ switch {
+ case strings.EqualFold("ExpiredTokenException", errorCode):
+ return awsAwsquery_deserializeErrorExpiredTokenException(response, errorBody)
+
+ case strings.EqualFold("RegionDisabledException", errorCode):
+ return awsAwsquery_deserializeErrorRegionDisabledException(response, errorBody)
+
+ default:
+ genericError := &smithy.GenericAPIError{
+ Code: errorCode,
+ Message: errorMessage,
+ }
+ return genericError
+
+ }
+}
+
type awsAwsquery_deserializeOpDecodeAuthorizationMessage struct {
}
@@ -403,6 +540,10 @@ func (m *awsAwsquery_deserializeOpDecodeAuthorizationMessage) HandleDeserialize(
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -511,6 +652,10 @@ func (m *awsAwsquery_deserializeOpGetAccessKeyInfo) HandleDeserialize(ctx contex
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -616,6 +761,10 @@ func (m *awsAwsquery_deserializeOpGetCallerIdentity) HandleDeserialize(ctx conte
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -721,6 +870,10 @@ func (m *awsAwsquery_deserializeOpGetFederationToken) HandleDeserialize(ctx cont
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -835,6 +988,10 @@ func (m *awsAwsquery_deserializeOpGetSessionToken) HandleDeserialize(ctx context
return out, metadata, err
}
+ _, span := tracing.StartSpan(ctx, "OperationDeserializer")
+ endTimer := startMetricTimer(ctx, "client.call.deserialization_duration")
+ defer endTimer()
+ defer span.End()
response, ok := out.RawResponse.(*smithyhttp.Response)
if !ok {
return out, metadata, &smithy.DeserializationError{Err: fmt.Errorf("unknown transport type %T", out.RawResponse)}
@@ -2226,6 +2383,61 @@ func awsAwsquery_deserializeOpDocumentAssumeRoleWithWebIdentityOutput(v **Assume
return nil
}
+func awsAwsquery_deserializeOpDocumentAssumeRootOutput(v **AssumeRootOutput, decoder smithyxml.NodeDecoder) error {
+ if v == nil {
+ return fmt.Errorf("unexpected nil of type %T", v)
+ }
+ var sv *AssumeRootOutput
+ if *v == nil {
+ sv = &AssumeRootOutput{}
+ } else {
+ sv = *v
+ }
+
+ for {
+ t, done, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if done {
+ break
+ }
+ originalDecoder := decoder
+ decoder = smithyxml.WrapNodeDecoder(originalDecoder.Decoder, t)
+ switch {
+ case strings.EqualFold("Credentials", t.Name.Local):
+ nodeDecoder := smithyxml.WrapNodeDecoder(decoder.Decoder, t)
+ if err := awsAwsquery_deserializeDocumentCredentials(&sv.Credentials, nodeDecoder); err != nil {
+ return err
+ }
+
+ case strings.EqualFold("SourceIdentity", t.Name.Local):
+ val, err := decoder.Value()
+ if err != nil {
+ return err
+ }
+ if val == nil {
+ break
+ }
+ {
+ xtv := string(val)
+ sv.SourceIdentity = ptr.String(xtv)
+ }
+
+ default:
+ // Do nothing and ignore the unexpected tag element
+ err = decoder.Decoder.Skip()
+ if err != nil {
+ return err
+ }
+
+ }
+ decoder = originalDecoder
+ }
+ *v = sv
+ return nil
+}
+
func awsAwsquery_deserializeOpDocumentDecodeAuthorizationMessageOutput(v **DecodeAuthorizationMessageOutput, decoder smithyxml.NodeDecoder) error {
if v == nil {
return fmt.Errorf("unexpected nil of type %T", v)
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
index 7cabbb97e..cbb19c7f6 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/doc.go
@@ -3,10 +3,11 @@
// Package sts provides the API client, operations, and parameter types for AWS
// Security Token Service.
//
-// Security Token Service Security Token Service (STS) enables you to request
-// temporary, limited-privilege credentials for Identity and Access Management
-// (IAM) users or for users that you authenticate (federated users). This guide
-// provides descriptions of the STS API. For more information about using this
-// service, see Temporary Security Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+// # Security Token Service
+//
+// Security Token Service (STS) enables you to request temporary,
+// limited-privilege credentials for users. This guide provides descriptions of the
+// STS API. For more information about using this service, see [Temporary Security Credentials].
+//
+// [Temporary Security Credentials]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html
package sts
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
index cababea22..dca2ce359 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/endpoints.go
@@ -8,10 +8,20 @@ import (
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalConfig "github.com/aws/aws-sdk-go-v2/internal/configsources"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints"
+ "github.com/aws/aws-sdk-go-v2/internal/endpoints/awsrulesfn"
internalendpoints "github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints"
+ smithy "github.com/aws/smithy-go"
+ smithyauth "github.com/aws/smithy-go/auth"
+ smithyendpoints "github.com/aws/smithy-go/endpoints"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/ptr"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
"net/url"
+ "os"
"strings"
)
@@ -39,13 +49,6 @@ func (fn EndpointResolverFunc) ResolveEndpoint(region string, options EndpointRe
return fn(region, options)
}
-func resolveDefaultEndpointConfiguration(o *Options) {
- if o.EndpointResolver != nil {
- return
- }
- o.EndpointResolver = NewDefaultEndpointResolver()
-}
-
// EndpointResolverFromURL returns an EndpointResolver configured using the
// provided endpoint url. By default, the resolved endpoint resolver uses the
// client region as signing region, and the endpoint source is set to
@@ -79,6 +82,10 @@ func (*ResolveEndpoint) ID() string {
func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ if !awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleSerialize(ctx, in)
+ }
+
req, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
@@ -94,6 +101,11 @@ func (m *ResolveEndpoint) HandleSerialize(ctx context.Context, in middleware.Ser
var endpoint aws.Endpoint
endpoint, err = m.Resolver.ResolveEndpoint(awsmiddleware.GetRegion(ctx), eo)
if err != nil {
+ nf := (&aws.EndpointNotFoundError{})
+ if errors.As(err, &nf) {
+ ctx = awsmiddleware.SetRequiresLegacyEndpoints(ctx, false)
+ return next.HandleSerialize(ctx, in)
+ }
return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
}
@@ -129,27 +141,10 @@ func removeResolveEndpointMiddleware(stack *middleware.Stack) error {
type wrappedEndpointResolver struct {
awsResolver aws.EndpointResolverWithOptions
- resolver EndpointResolver
}
func (w *wrappedEndpointResolver) ResolveEndpoint(region string, options EndpointResolverOptions) (endpoint aws.Endpoint, err error) {
- if w.awsResolver == nil {
- goto fallback
- }
- endpoint, err = w.awsResolver.ResolveEndpoint(ServiceID, region, options)
- if err == nil {
- return endpoint, nil
- }
-
- if nf := (&aws.EndpointNotFoundError{}); !errors.As(err, &nf) {
- return endpoint, err
- }
-
-fallback:
- if w.resolver == nil {
- return endpoint, fmt.Errorf("default endpoint resolver provided was nil")
- }
- return w.resolver.ResolveEndpoint(region, options)
+ return w.awsResolver.ResolveEndpoint(ServiceID, region, options)
}
type awsEndpointResolverAdaptor func(service, region string) (aws.Endpoint, error)
@@ -160,12 +155,13 @@ func (a awsEndpointResolverAdaptor) ResolveEndpoint(service, region string, opti
var _ aws.EndpointResolverWithOptions = awsEndpointResolverAdaptor(nil)
-// withEndpointResolver returns an EndpointResolver that first delegates endpoint resolution to the awsResolver.
-// If awsResolver returns aws.EndpointNotFoundError error, the resolver will use the the provided
-// fallbackResolver for resolution.
+// withEndpointResolver returns an aws.EndpointResolverWithOptions that first delegates endpoint resolution to the awsResolver.
+// If awsResolver returns aws.EndpointNotFoundError error, the v1 resolver middleware will swallow the error,
+// and set an appropriate context flag such that fallback will occur when EndpointResolverV2 is invoked
+// via its middleware.
//
-// fallbackResolver must not be nil
-func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions, fallbackResolver EndpointResolver) EndpointResolver {
+// If another error (besides aws.EndpointNotFoundError) is returned, then that error will be propagated.
+func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptions aws.EndpointResolverWithOptions) EndpointResolver {
var resolver aws.EndpointResolverWithOptions
if awsResolverWithOptions != nil {
@@ -176,7 +172,6 @@ func withEndpointResolver(awsResolver aws.EndpointResolver, awsResolverWithOptio
return &wrappedEndpointResolver{
awsResolver: resolver,
- resolver: fallbackResolver,
}
}
@@ -198,3 +193,944 @@ func finalizeClientEndpointResolverOptions(options *Options) {
}
}
+
+func resolveEndpointResolverV2(options *Options) {
+ if options.EndpointResolverV2 == nil {
+ options.EndpointResolverV2 = NewDefaultEndpointResolverV2()
+ }
+}
+
+func resolveBaseEndpoint(cfg aws.Config, o *Options) {
+ if cfg.BaseEndpoint != nil {
+ o.BaseEndpoint = cfg.BaseEndpoint
+ }
+
+ _, g := os.LookupEnv("AWS_ENDPOINT_URL")
+ _, s := os.LookupEnv("AWS_ENDPOINT_URL_STS")
+
+ if g && !s {
+ return
+ }
+
+ value, found, err := internalConfig.ResolveServiceBaseEndpoint(context.Background(), "STS", cfg.ConfigSources)
+ if found && err == nil {
+ o.BaseEndpoint = &value
+ }
+}
+
+func bindRegion(region string) *string {
+ if region == "" {
+ return nil
+ }
+ return aws.String(endpoints.MapFIPSRegion(region))
+}
+
+// EndpointParameters provides the parameters that influence how endpoints are
+// resolved.
+type EndpointParameters struct {
+ // The AWS region used to dispatch the request.
+ //
+ // Parameter is
+ // required.
+ //
+ // AWS::Region
+ Region *string
+
+ // When true, use the dual-stack endpoint. If the configured endpoint does not
+ // support dual-stack, dispatching the request MAY return an error.
+ //
+ // Defaults to
+ // false if no value is provided.
+ //
+ // AWS::UseDualStack
+ UseDualStack *bool
+
+ // When true, send this request to the FIPS-compliant regional endpoint. If the
+ // configured endpoint does not have a FIPS compliant endpoint, dispatching the
+ // request will return an error.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::UseFIPS
+ UseFIPS *bool
+
+ // Override the endpoint used to send this request
+ //
+ // Parameter is
+ // required.
+ //
+ // SDK::Endpoint
+ Endpoint *string
+
+ // Whether the global endpoint should be used, rather then the regional endpoint
+ // for us-east-1.
+ //
+ // Defaults to false if no value is
+ // provided.
+ //
+ // AWS::STS::UseGlobalEndpoint
+ UseGlobalEndpoint *bool
+}
+
+// ValidateRequired validates required parameters are set.
+func (p EndpointParameters) ValidateRequired() error {
+ if p.UseDualStack == nil {
+ return fmt.Errorf("parameter UseDualStack is required")
+ }
+
+ if p.UseFIPS == nil {
+ return fmt.Errorf("parameter UseFIPS is required")
+ }
+
+ if p.UseGlobalEndpoint == nil {
+ return fmt.Errorf("parameter UseGlobalEndpoint is required")
+ }
+
+ return nil
+}
+
+// WithDefaults returns a shallow copy of EndpointParameterswith default values
+// applied to members where applicable.
+func (p EndpointParameters) WithDefaults() EndpointParameters {
+ if p.UseDualStack == nil {
+ p.UseDualStack = ptr.Bool(false)
+ }
+
+ if p.UseFIPS == nil {
+ p.UseFIPS = ptr.Bool(false)
+ }
+
+ if p.UseGlobalEndpoint == nil {
+ p.UseGlobalEndpoint = ptr.Bool(false)
+ }
+ return p
+}
+
+type stringSlice []string
+
+func (s stringSlice) Get(i int) *string {
+ if i < 0 || i >= len(s) {
+ return nil
+ }
+
+ v := s[i]
+ return &v
+}
+
+// EndpointResolverV2 provides the interface for resolving service endpoints.
+type EndpointResolverV2 interface {
+ // ResolveEndpoint attempts to resolve the endpoint with the provided options,
+ // returning the endpoint if found. Otherwise an error is returned.
+ ResolveEndpoint(ctx context.Context, params EndpointParameters) (
+ smithyendpoints.Endpoint, error,
+ )
+}
+
+// resolver provides the implementation for resolving endpoints.
+type resolver struct{}
+
+func NewDefaultEndpointResolverV2() EndpointResolverV2 {
+ return &resolver{}
+}
+
+// ResolveEndpoint attempts to resolve the endpoint with the provided options,
+// returning the endpoint if found. Otherwise an error is returned.
+func (r *resolver) ResolveEndpoint(
+ ctx context.Context, params EndpointParameters,
+) (
+ endpoint smithyendpoints.Endpoint, err error,
+) {
+ params = params.WithDefaults()
+ if err = params.ValidateRequired(); err != nil {
+ return endpoint, fmt.Errorf("endpoint parameters are not valid, %w", err)
+ }
+ _UseDualStack := *params.UseDualStack
+ _UseFIPS := *params.UseFIPS
+ _UseGlobalEndpoint := *params.UseGlobalEndpoint
+
+ if _UseGlobalEndpoint == true {
+ if !(params.Endpoint != nil) {
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _UseFIPS == false {
+ if _UseDualStack == false {
+ if _Region == "ap-northeast-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "ap-south-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "ap-southeast-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "ap-southeast-2" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "aws-global" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "ca-central-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-central-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-north-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-west-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-west-2" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "eu-west-3" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "sa-east-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "us-east-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "us-east-2" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "us-west-1" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ if _Region == "us-west-2" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, _Region)
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ }
+ }
+ }
+ }
+ }
+ if exprVal := params.Endpoint; exprVal != nil {
+ _Endpoint := *exprVal
+ _ = _Endpoint
+ if _UseFIPS == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: FIPS and custom endpoint are not supported")
+ }
+ if _UseDualStack == true {
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Dualstack and custom endpoint are not supported")
+ }
+ uriString := _Endpoint
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ if exprVal := params.Region; exprVal != nil {
+ _Region := *exprVal
+ _ = _Region
+ if exprVal := awsrulesfn.GetPartition(_Region); exprVal != nil {
+ _PartitionResult := *exprVal
+ _ = _PartitionResult
+ if _UseFIPS == true {
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsFIPS {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS and DualStack are enabled, but this partition does not support one or both")
+ }
+ }
+ if _UseFIPS == true {
+ if _PartitionResult.SupportsFIPS == true {
+ if _PartitionResult.Name == "aws-us-gov" {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts.")
+ out.WriteString(_Region)
+ out.WriteString(".amazonaws.com")
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts-fips.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "FIPS is enabled but this partition does not support FIPS")
+ }
+ if _UseDualStack == true {
+ if true == _PartitionResult.SupportsDualStack {
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DualStackDnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "DualStack is enabled but this partition does not support DualStack")
+ }
+ if _Region == "aws-global" {
+ uriString := "https://sts.amazonaws.com"
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ Properties: func() smithy.Properties {
+ var out smithy.Properties
+ smithyauth.SetAuthOptions(&out, []*smithyauth.Option{
+ {
+ SchemeID: "aws.auth#sigv4",
+ SignerProperties: func() smithy.Properties {
+ var sp smithy.Properties
+ smithyhttp.SetSigV4SigningName(&sp, "sts")
+ smithyhttp.SetSigV4ASigningName(&sp, "sts")
+
+ smithyhttp.SetSigV4SigningRegion(&sp, "us-east-1")
+ return sp
+ }(),
+ },
+ })
+ return out
+ }(),
+ }, nil
+ }
+ uriString := func() string {
+ var out strings.Builder
+ out.WriteString("https://sts.")
+ out.WriteString(_Region)
+ out.WriteString(".")
+ out.WriteString(_PartitionResult.DnsSuffix)
+ return out.String()
+ }()
+
+ uri, err := url.Parse(uriString)
+ if err != nil {
+ return endpoint, fmt.Errorf("Failed to parse uri: %s", uriString)
+ }
+
+ return smithyendpoints.Endpoint{
+ URI: *uri,
+ Headers: http.Header{},
+ }, nil
+ }
+ return endpoint, fmt.Errorf("Endpoint resolution failed. Invalid operation or environment input.")
+ }
+ return endpoint, fmt.Errorf("endpoint rule error, %s", "Invalid Configuration: Missing Region")
+}
+
+type endpointParamsBinder interface {
+ bindEndpointParams(*EndpointParameters)
+}
+
+func bindEndpointParams(ctx context.Context, input interface{}, options Options) *EndpointParameters {
+ params := &EndpointParameters{}
+
+ params.Region = bindRegion(options.Region)
+ params.UseDualStack = aws.Bool(options.EndpointOptions.UseDualStackEndpoint == aws.DualStackEndpointStateEnabled)
+ params.UseFIPS = aws.Bool(options.EndpointOptions.UseFIPSEndpoint == aws.FIPSEndpointStateEnabled)
+ params.Endpoint = options.BaseEndpoint
+
+ if b, ok := input.(endpointParamsBinder); ok {
+ b.bindEndpointParams(params)
+ }
+
+ return params
+}
+
+type resolveEndpointV2Middleware struct {
+ options Options
+}
+
+func (*resolveEndpointV2Middleware) ID() string {
+ return "ResolveEndpointV2"
+}
+
+func (m *resolveEndpointV2Middleware) HandleFinalize(ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler) (
+ out middleware.FinalizeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "ResolveEndpoint")
+ defer span.End()
+
+ if awsmiddleware.GetRequiresLegacyEndpoints(ctx) {
+ return next.HandleFinalize(ctx, in)
+ }
+
+ req, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ if m.options.EndpointResolverV2 == nil {
+ return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil")
+ }
+
+ params := bindEndpointParams(ctx, getOperationInput(ctx), m.options)
+ endpt, err := timeOperationMetric(ctx, "client.call.resolve_endpoint_duration",
+ func() (smithyendpoints.Endpoint, error) {
+ return m.options.EndpointResolverV2.ResolveEndpoint(ctx, *params)
+ })
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err)
+ }
+
+ span.SetProperty("client.call.resolved_endpoint", endpt.URI.String())
+
+ if endpt.URI.RawPath == "" && req.URL.RawPath != "" {
+ endpt.URI.RawPath = endpt.URI.Path
+ }
+ req.URL.Scheme = endpt.URI.Scheme
+ req.URL.Host = endpt.URI.Host
+ req.URL.Path = smithyhttp.JoinPath(endpt.URI.Path, req.URL.Path)
+ req.URL.RawPath = smithyhttp.JoinPath(endpt.URI.RawPath, req.URL.RawPath)
+ for k := range endpt.Headers {
+ req.Header.Set(k, endpt.Headers.Get(k))
+ }
+
+ rscheme := getResolvedAuthScheme(ctx)
+ if rscheme == nil {
+ return out, metadata, fmt.Errorf("no resolved auth scheme")
+ }
+
+ opts, _ := smithyauth.GetAuthOptions(&endpt.Properties)
+ for _, o := range opts {
+ rscheme.SignerProperties.SetAll(&o.SignerProperties)
+ }
+
+ span.End()
+ return next.HandleFinalize(ctx, in)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
index 86341bb7d..86bb3b79b 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/generated.json
@@ -3,6 +3,7 @@
"github.com/aws/aws-sdk-go-v2": "v1.4.0",
"github.com/aws/aws-sdk-go-v2/internal/configsources": "v0.0.0-00010101000000-000000000000",
"github.com/aws/aws-sdk-go-v2/internal/endpoints/v2": "v2.0.0-00010101000000-000000000000",
+ "github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding": "v1.0.5",
"github.com/aws/aws-sdk-go-v2/service/internal/presigned-url": "v1.0.7",
"github.com/aws/smithy-go": "v1.4.0"
},
@@ -12,24 +13,31 @@
"api_op_AssumeRole.go",
"api_op_AssumeRoleWithSAML.go",
"api_op_AssumeRoleWithWebIdentity.go",
+ "api_op_AssumeRoot.go",
"api_op_DecodeAuthorizationMessage.go",
"api_op_GetAccessKeyInfo.go",
"api_op_GetCallerIdentity.go",
"api_op_GetFederationToken.go",
"api_op_GetSessionToken.go",
+ "auth.go",
"deserializers.go",
"doc.go",
"endpoints.go",
+ "endpoints_config_test.go",
+ "endpoints_test.go",
"generated.json",
"internal/endpoints/endpoints.go",
"internal/endpoints/endpoints_test.go",
+ "options.go",
"protocol_test.go",
"serializers.go",
+ "snapshot_test.go",
+ "sra_operation_order_test.go",
"types/errors.go",
"types/types.go",
"validators.go"
],
- "go": "1.15",
+ "go": "1.22",
"module": "github.com/aws/aws-sdk-go-v2/service/sts",
"unstable": false
}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
index 9b9e052e3..44e2944a5 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/go_module_metadata.go
@@ -3,4 +3,4 @@
package sts
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.16.19"
+const goModuleVersion = "1.33.20"
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
index d061a4e99..3dfa51e5f 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/internal/endpoints/endpoints.go
@@ -87,15 +87,21 @@ func New() *Resolver {
var partitionRegexp = struct {
Aws *regexp.Regexp
AwsCn *regexp.Regexp
+ AwsEusc *regexp.Regexp
AwsIso *regexp.Regexp
AwsIsoB *regexp.Regexp
+ AwsIsoE *regexp.Regexp
+ AwsIsoF *regexp.Regexp
AwsUsGov *regexp.Regexp
}{
- Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$"),
+ Aws: regexp.MustCompile("^(us|eu|ap|sa|ca|me|af|il|mx)\\-\\w+\\-\\d+$"),
AwsCn: regexp.MustCompile("^cn\\-\\w+\\-\\d+$"),
+ AwsEusc: regexp.MustCompile("^eusc\\-(de)\\-\\w+\\-\\d+$"),
AwsIso: regexp.MustCompile("^us\\-iso\\-\\w+\\-\\d+$"),
AwsIsoB: regexp.MustCompile("^us\\-isob\\-\\w+\\-\\d+$"),
+ AwsIsoE: regexp.MustCompile("^eu\\-isoe\\-\\w+\\-\\d+$"),
+ AwsIsoF: regexp.MustCompile("^us\\-isof\\-\\w+\\-\\d+$"),
AwsUsGov: regexp.MustCompile("^us\\-gov\\-\\w+\\-\\d+$"),
}
@@ -141,6 +147,9 @@ var defaultPartitions = endpoints.Partitions{
endpoints.EndpointKey{
Region: "ap-east-1",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-east-2",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-northeast-1",
}: endpoints.Endpoint{},
@@ -153,6 +162,9 @@ var defaultPartitions = endpoints.Partitions{
endpoints.EndpointKey{
Region: "ap-south-1",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-south-2",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "ap-southeast-1",
}: endpoints.Endpoint{},
@@ -162,6 +174,15 @@ var defaultPartitions = endpoints.Partitions{
endpoints.EndpointKey{
Region: "ap-southeast-3",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-4",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-5",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ap-southeast-7",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "aws-global",
}: endpoints.Endpoint{
@@ -173,15 +194,24 @@ var defaultPartitions = endpoints.Partitions{
endpoints.EndpointKey{
Region: "ca-central-1",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "ca-west-1",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-central-1",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-central-2",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-north-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-south-1",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "eu-south-2",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "eu-west-1",
}: endpoints.Endpoint{},
@@ -191,12 +221,18 @@ var defaultPartitions = endpoints.Partitions{
endpoints.EndpointKey{
Region: "eu-west-3",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "il-central-1",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "me-central-1",
}: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "me-south-1",
}: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "mx-central-1",
+ }: endpoints.Endpoint{},
endpoints.EndpointKey{
Region: "sa-east-1",
}: endpoints.Endpoint{},
@@ -317,6 +353,27 @@ var defaultPartitions = endpoints.Partitions{
}: endpoints.Endpoint{},
},
},
+ {
+ ID: "aws-eusc",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.amazonaws.eu",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsEusc,
+ IsRegionalized: true,
+ },
{
ID: "aws-iso",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
@@ -372,6 +429,61 @@ var defaultPartitions = endpoints.Partitions{
}: endpoints.Endpoint{},
},
},
+ {
+ ID: "aws-iso-e",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.cloud.adc-e.uk",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoE,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "eu-isoe-west-1",
+ }: endpoints.Endpoint{},
+ },
+ },
+ {
+ ID: "aws-iso-f",
+ Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
+ {
+ Variant: endpoints.FIPSVariant,
+ }: {
+ Hostname: "sts-fips.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ {
+ Variant: 0,
+ }: {
+ Hostname: "sts.{region}.csp.hci.ic.gov",
+ Protocols: []string{"https"},
+ SignatureVersions: []string{"v4"},
+ },
+ },
+ RegionRegex: partitionRegexp.AwsIsoF,
+ IsRegionalized: true,
+ Endpoints: endpoints.Endpoints{
+ endpoints.EndpointKey{
+ Region: "us-isof-east-1",
+ }: endpoints.Endpoint{},
+ endpoints.EndpointKey{
+ Region: "us-isof-south-1",
+ }: endpoints.Endpoint{},
+ },
+ },
{
ID: "aws-us-gov",
Defaults: map[endpoints.DefaultKey]endpoints.Endpoint{
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
new file mode 100644
index 000000000..e1398f3bb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/options.go
@@ -0,0 +1,232 @@
+// Code generated by smithy-go-codegen DO NOT EDIT.
+
+package sts
+
+import (
+ "context"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
+ internalauthsmithy "github.com/aws/aws-sdk-go-v2/internal/auth/smithy"
+ smithyauth "github.com/aws/smithy-go/auth"
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/metrics"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "net/http"
+)
+
+type HTTPClient interface {
+ Do(*http.Request) (*http.Response, error)
+}
+
+type Options struct {
+ // Set of options to modify how an operation is invoked. These apply to all
+ // operations invoked for this client. Use functional options on operation call to
+ // modify this list for per operation behavior.
+ APIOptions []func(*middleware.Stack) error
+
+ // The optional application specific identifier appended to the User-Agent header.
+ AppID string
+
+ // This endpoint will be given as input to an EndpointResolverV2. It is used for
+ // providing a custom base endpoint that is subject to modifications by the
+ // processing EndpointResolverV2.
+ BaseEndpoint *string
+
+ // Configures the events that will be sent to the configured logger.
+ ClientLogMode aws.ClientLogMode
+
+ // The credentials object to use when signing requests.
+ Credentials aws.CredentialsProvider
+
+ // The configuration DefaultsMode that the SDK should use when constructing the
+ // clients initial default settings.
+ DefaultsMode aws.DefaultsMode
+
+ // The endpoint options to be used when attempting to resolve an endpoint.
+ EndpointOptions EndpointResolverOptions
+
+ // The service endpoint resolver.
+ //
+ // Deprecated: Deprecated: EndpointResolver and WithEndpointResolver. Providing a
+ // value for this field will likely prevent you from using any endpoint-related
+ // service features released after the introduction of EndpointResolverV2 and
+ // BaseEndpoint.
+ //
+ // To migrate an EndpointResolver implementation that uses a custom endpoint, set
+ // the client option BaseEndpoint instead.
+ EndpointResolver EndpointResolver
+
+ // Resolves the endpoint used for a particular service operation. This should be
+ // used over the deprecated EndpointResolver.
+ EndpointResolverV2 EndpointResolverV2
+
+ // Signature Version 4 (SigV4) Signer
+ HTTPSignerV4 HTTPSignerV4
+
+ // The logger writer interface to write logging messages to.
+ Logger logging.Logger
+
+ // The client meter provider.
+ MeterProvider metrics.MeterProvider
+
+ // The region to send requests to. (Required)
+ Region string
+
+ // RetryMaxAttempts specifies the maximum number attempts an API client will call
+ // an operation that fails with a retryable error. A value of 0 is ignored, and
+ // will not be used to configure the API client created default retryer, or modify
+ // per operation call's retry max attempts.
+ //
+ // If specified in an operation call's functional options with a value that is
+ // different than the constructed client's Options, the Client's Retryer will be
+ // wrapped to use the operation's specific RetryMaxAttempts value.
+ RetryMaxAttempts int
+
+ // RetryMode specifies the retry mode the API client will be created with, if
+ // Retryer option is not also specified.
+ //
+ // When creating a new API Clients this member will only be used if the Retryer
+ // Options member is nil. This value will be ignored if Retryer is not nil.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ RetryMode aws.RetryMode
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable
+ // failures. When nil the API client will use a default retryer. The kind of
+ // default retry created by the API client can be changed with the RetryMode
+ // option.
+ Retryer aws.Retryer
+
+ // The RuntimeEnvironment configuration, only populated if the DefaultsMode is set
+ // to DefaultsModeAuto and is initialized using config.LoadDefaultConfig . You
+ // should not populate this structure programmatically, or rely on the values here
+ // within your applications.
+ RuntimeEnvironment aws.RuntimeEnvironment
+
+ // The client tracer provider.
+ TracerProvider tracing.TracerProvider
+
+ // The initial DefaultsMode used when the client options were constructed. If the
+ // DefaultsMode was set to aws.DefaultsModeAuto this will store what the resolved
+ // value was at that point in time.
+ //
+ // Currently does not support per operation call overrides, may in the future.
+ resolvedDefaultsMode aws.DefaultsMode
+
+ // The HTTP client to invoke API calls with. Defaults to client's default HTTP
+ // implementation if nil.
+ HTTPClient HTTPClient
+
+ // The auth scheme resolver which determines how to authenticate for each
+ // operation.
+ AuthSchemeResolver AuthSchemeResolver
+
+ // The list of auth schemes supported by the client.
+ AuthSchemes []smithyhttp.AuthScheme
+}
+
+// Copy creates a clone where the APIOptions list is deep copied.
+func (o Options) Copy() Options {
+ to := o
+ to.APIOptions = make([]func(*middleware.Stack) error, len(o.APIOptions))
+ copy(to.APIOptions, o.APIOptions)
+
+ return to
+}
+
+func (o Options) GetIdentityResolver(schemeID string) smithyauth.IdentityResolver {
+ if schemeID == "aws.auth#sigv4" {
+ return getSigV4IdentityResolver(o)
+ }
+ if schemeID == "smithy.api#noAuth" {
+ return &smithyauth.AnonymousIdentityResolver{}
+ }
+ return nil
+}
+
+// WithAPIOptions returns a functional option for setting the Client's APIOptions
+// option.
+func WithAPIOptions(optFns ...func(*middleware.Stack) error) func(*Options) {
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, optFns...)
+ }
+}
+
+// Deprecated: EndpointResolver and WithEndpointResolver. Providing a value for
+// this field will likely prevent you from using any endpoint-related service
+// features released after the introduction of EndpointResolverV2 and BaseEndpoint.
+//
+// To migrate an EndpointResolver implementation that uses a custom endpoint, set
+// the client option BaseEndpoint instead.
+func WithEndpointResolver(v EndpointResolver) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolver = v
+ }
+}
+
+// WithEndpointResolverV2 returns a functional option for setting the Client's
+// EndpointResolverV2 option.
+func WithEndpointResolverV2(v EndpointResolverV2) func(*Options) {
+ return func(o *Options) {
+ o.EndpointResolverV2 = v
+ }
+}
+
+func getSigV4IdentityResolver(o Options) smithyauth.IdentityResolver {
+ if o.Credentials != nil {
+ return &internalauthsmithy.CredentialsProviderAdapter{Provider: o.Credentials}
+ }
+ return nil
+}
+
+// WithSigV4SigningName applies an override to the authentication workflow to
+// use the given signing name for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing name from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningName(name string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningName(ctx, name), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningName", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+// WithSigV4SigningRegion applies an override to the authentication workflow to
+// use the given signing region for SigV4-authenticated operations.
+//
+// This is an advanced setting. The value here is FINAL, taking precedence over
+// the resolved signing region from both auth scheme resolution and endpoint
+// resolution.
+func WithSigV4SigningRegion(region string) func(*Options) {
+ fn := func(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+ ) {
+ return next.HandleInitialize(awsmiddleware.SetSigningRegion(ctx, region), in)
+ }
+ return func(o *Options) {
+ o.APIOptions = append(o.APIOptions, func(s *middleware.Stack) error {
+ return s.Initialize.Add(
+ middleware.InitializeMiddlewareFunc("withSigV4SigningRegion", fn),
+ middleware.Before,
+ )
+ })
+ }
+}
+
+func ignoreAnonymousAuth(options *Options) {
+ if aws.IsCredentialsProvider(options.Credentials, (*aws.AnonymousCredentials)(nil)) {
+ options.Credentials = nil
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go
index 05531d369..96b222136 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/serializers.go
@@ -11,6 +11,7 @@ import (
smithy "github.com/aws/smithy-go"
"github.com/aws/smithy-go/encoding/httpbinding"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
smithyhttp "github.com/aws/smithy-go/transport/http"
"path"
)
@@ -25,6 +26,10 @@ func (*awsAwsquery_serializeOpAssumeRole) ID() string {
func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -76,6 +81,8 @@ func (m *awsAwsquery_serializeOpAssumeRole) HandleSerialize(ctx context.Context,
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
@@ -89,6 +96,10 @@ func (*awsAwsquery_serializeOpAssumeRoleWithSAML) ID() string {
func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -140,6 +151,8 @@ func (m *awsAwsquery_serializeOpAssumeRoleWithSAML) HandleSerialize(ctx context.
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
@@ -153,6 +166,10 @@ func (*awsAwsquery_serializeOpAssumeRoleWithWebIdentity) ID() string {
func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -204,6 +221,78 @@ func (m *awsAwsquery_serializeOpAssumeRoleWithWebIdentity) HandleSerialize(ctx c
}
in.Request = request
+ endTimer()
+ span.End()
+ return next.HandleSerialize(ctx, in)
+}
+
+type awsAwsquery_serializeOpAssumeRoot struct {
+}
+
+func (*awsAwsquery_serializeOpAssumeRoot) ID() string {
+ return "OperationSerializer"
+}
+
+func (m *awsAwsquery_serializeOpAssumeRoot) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
+ request, ok := in.Request.(*smithyhttp.Request)
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
+ }
+
+ input, ok := in.Parameters.(*AssumeRootInput)
+ _ = input
+ if !ok {
+ return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown input parameters type %T", in.Parameters)}
+ }
+
+ operationPath := "/"
+ if len(request.Request.URL.Path) == 0 {
+ request.Request.URL.Path = operationPath
+ } else {
+ request.Request.URL.Path = path.Join(request.Request.URL.Path, operationPath)
+ if request.Request.URL.Path != "/" && operationPath[len(operationPath)-1] == '/' {
+ request.Request.URL.Path += "/"
+ }
+ }
+ request.Request.Method = "POST"
+ httpBindingEncoder, err := httpbinding.NewEncoder(request.URL.Path, request.URL.RawQuery, request.Header)
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ httpBindingEncoder.SetHeader("Content-Type").String("application/x-www-form-urlencoded")
+
+ bodyWriter := bytes.NewBuffer(nil)
+ bodyEncoder := query.NewEncoder(bodyWriter)
+ body := bodyEncoder.Object()
+ body.Key("Action").String("AssumeRoot")
+ body.Key("Version").String("2011-06-15")
+
+ if err := awsAwsquery_serializeOpDocumentAssumeRootInput(input, bodyEncoder.Value); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ err = bodyEncoder.Encode()
+ if err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request, err = request.SetStream(bytes.NewReader(bodyWriter.Bytes())); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+
+ if request.Request, err = httpBindingEncoder.Encode(request.Request); err != nil {
+ return out, metadata, &smithy.SerializationError{Err: err}
+ }
+ in.Request = request
+
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
@@ -217,6 +306,10 @@ func (*awsAwsquery_serializeOpDecodeAuthorizationMessage) ID() string {
func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -268,6 +361,8 @@ func (m *awsAwsquery_serializeOpDecodeAuthorizationMessage) HandleSerialize(ctx
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
@@ -281,6 +376,10 @@ func (*awsAwsquery_serializeOpGetAccessKeyInfo) ID() string {
func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -332,6 +431,8 @@ func (m *awsAwsquery_serializeOpGetAccessKeyInfo) HandleSerialize(ctx context.Co
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
@@ -345,6 +446,10 @@ func (*awsAwsquery_serializeOpGetCallerIdentity) ID() string {
func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -392,6 +497,8 @@ func (m *awsAwsquery_serializeOpGetCallerIdentity) HandleSerialize(ctx context.C
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
@@ -405,6 +512,10 @@ func (*awsAwsquery_serializeOpGetFederationToken) ID() string {
func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -456,6 +567,8 @@ func (m *awsAwsquery_serializeOpGetFederationToken) HandleSerialize(ctx context.
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
@@ -469,6 +582,10 @@ func (*awsAwsquery_serializeOpGetSessionToken) ID() string {
func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) (
out middleware.SerializeOutput, metadata middleware.Metadata, err error,
) {
+ _, span := tracing.StartSpan(ctx, "OperationSerializer")
+ endTimer := startMetricTimer(ctx, "client.call.serialization_duration")
+ defer endTimer()
+ defer span.End()
request, ok := in.Request.(*smithyhttp.Request)
if !ok {
return out, metadata, &smithy.SerializationError{Err: fmt.Errorf("unknown transport type %T", in.Request)}
@@ -520,12 +637,11 @@ func (m *awsAwsquery_serializeOpGetSessionToken) HandleSerialize(ctx context.Con
}
in.Request = request
+ endTimer()
+ span.End()
return next.HandleSerialize(ctx, in)
}
func awsAwsquery_serializeDocumentPolicyDescriptorListType(v []types.PolicyDescriptorType, value query.Value) error {
- if len(v) == 0 {
- return nil
- }
array := value.Array("member")
for i := range v {
@@ -549,6 +665,35 @@ func awsAwsquery_serializeDocumentPolicyDescriptorType(v *types.PolicyDescriptor
return nil
}
+func awsAwsquery_serializeDocumentProvidedContext(v *types.ProvidedContext, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.ContextAssertion != nil {
+ objectKey := object.Key("ContextAssertion")
+ objectKey.String(*v.ContextAssertion)
+ }
+
+ if v.ProviderArn != nil {
+ objectKey := object.Key("ProviderArn")
+ objectKey.String(*v.ProviderArn)
+ }
+
+ return nil
+}
+
+func awsAwsquery_serializeDocumentProvidedContextsListType(v []types.ProvidedContext, value query.Value) error {
+ array := value.Array("member")
+
+ for i := range v {
+ av := array.Value()
+ if err := awsAwsquery_serializeDocumentProvidedContext(&v[i], av); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error {
object := value.Object()
_ = object
@@ -567,9 +712,6 @@ func awsAwsquery_serializeDocumentTag(v *types.Tag, value query.Value) error {
}
func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value) error {
- if len(v) == 0 {
- return nil
- }
array := value.Array("member")
for i := range v {
@@ -580,9 +722,6 @@ func awsAwsquery_serializeDocumentTagKeyListType(v []string, value query.Value)
}
func awsAwsquery_serializeDocumentTagListType(v []types.Tag, value query.Value) error {
- if len(v) == 0 {
- return nil
- }
array := value.Array("member")
for i := range v {
@@ -620,6 +759,13 @@ func awsAwsquery_serializeOpDocumentAssumeRoleInput(v *AssumeRoleInput, value qu
}
}
+ if v.ProvidedContexts != nil {
+ objectKey := object.Key("ProvidedContexts")
+ if err := awsAwsquery_serializeDocumentProvidedContextsListType(v.ProvidedContexts, objectKey); err != nil {
+ return err
+ }
+ }
+
if v.RoleArn != nil {
objectKey := object.Key("RoleArn")
objectKey.String(*v.RoleArn)
@@ -745,6 +891,30 @@ func awsAwsquery_serializeOpDocumentAssumeRoleWithWebIdentityInput(v *AssumeRole
return nil
}
+func awsAwsquery_serializeOpDocumentAssumeRootInput(v *AssumeRootInput, value query.Value) error {
+ object := value.Object()
+ _ = object
+
+ if v.DurationSeconds != nil {
+ objectKey := object.Key("DurationSeconds")
+ objectKey.Integer(*v.DurationSeconds)
+ }
+
+ if v.TargetPrincipal != nil {
+ objectKey := object.Key("TargetPrincipal")
+ objectKey.String(*v.TargetPrincipal)
+ }
+
+ if v.TaskPolicyArn != nil {
+ objectKey := object.Key("TaskPolicyArn")
+ if err := awsAwsquery_serializeDocumentPolicyDescriptorType(v.TaskPolicyArn, objectKey); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func awsAwsquery_serializeOpDocumentDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput, value query.Value) error {
object := value.Object()
_ = object
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
index b109fe5fc..041629bba 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/errors.go
@@ -12,6 +12,8 @@ import (
type ExpiredTokenException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -24,7 +26,12 @@ func (e *ExpiredTokenException) ErrorMessage() string {
}
return *e.Message
}
-func (e *ExpiredTokenException) ErrorCode() string { return "ExpiredTokenException" }
+func (e *ExpiredTokenException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "ExpiredTokenException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The request could not be fulfilled because the identity provider (IDP) that was
@@ -35,6 +42,8 @@ func (e *ExpiredTokenException) ErrorFault() smithy.ErrorFault { return smithy.F
type IDPCommunicationErrorException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -47,16 +56,24 @@ func (e *IDPCommunicationErrorException) ErrorMessage() string {
}
return *e.Message
}
-func (e *IDPCommunicationErrorException) ErrorCode() string { return "IDPCommunicationError" }
+func (e *IDPCommunicationErrorException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "IDPCommunicationError"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *IDPCommunicationErrorException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The identity provider (IdP) reported that authentication failed. This might be
-// because the claim is invalid. If this error is returned for the
-// AssumeRoleWithWebIdentity operation, it can also mean that the claim has expired
-// or has been explicitly revoked.
+// because the claim is invalid.
+//
+// If this error is returned for the AssumeRoleWithWebIdentity operation, it can
+// also mean that the claim has expired or has been explicitly revoked.
type IDPRejectedClaimException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -69,15 +86,22 @@ func (e *IDPRejectedClaimException) ErrorMessage() string {
}
return *e.Message
}
-func (e *IDPRejectedClaimException) ErrorCode() string { return "IDPRejectedClaim" }
+func (e *IDPRejectedClaimException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "IDPRejectedClaim"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *IDPRejectedClaimException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The error returned if the message passed to DecodeAuthorizationMessage was
-// invalid. This can happen if the token contains invalid characters, such as
-// linebreaks.
+// invalid. This can happen if the token contains invalid characters, such as line
+// breaks, or if the message has expired.
type InvalidAuthorizationMessageException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -91,7 +115,10 @@ func (e *InvalidAuthorizationMessageException) ErrorMessage() string {
return *e.Message
}
func (e *InvalidAuthorizationMessageException) ErrorCode() string {
- return "InvalidAuthorizationMessageException"
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidAuthorizationMessageException"
+ }
+ return *e.ErrorCodeOverride
}
func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault {
return smithy.FaultClient
@@ -103,6 +130,8 @@ func (e *InvalidAuthorizationMessageException) ErrorFault() smithy.ErrorFault {
type InvalidIdentityTokenException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -115,7 +144,12 @@ func (e *InvalidIdentityTokenException) ErrorMessage() string {
}
return *e.Message
}
-func (e *InvalidIdentityTokenException) ErrorCode() string { return "InvalidIdentityToken" }
+func (e *InvalidIdentityTokenException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "InvalidIdentityToken"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The request was rejected because the policy document was malformed. The error
@@ -123,6 +157,8 @@ func (e *InvalidIdentityTokenException) ErrorFault() smithy.ErrorFault { return
type MalformedPolicyDocumentException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -135,7 +171,12 @@ func (e *MalformedPolicyDocumentException) ErrorMessage() string {
}
return *e.Message
}
-func (e *MalformedPolicyDocumentException) ErrorCode() string { return "MalformedPolicyDocument" }
+func (e *MalformedPolicyDocumentException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "MalformedPolicyDocument"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
// The request was rejected because the total packed size of the session policies
@@ -143,16 +184,18 @@ func (e *MalformedPolicyDocumentException) ErrorFault() smithy.ErrorFault { retu
// compresses the session policy document, session policy ARNs, and session tags
// into a packed binary format that has a separate limit. The error message
// indicates by percentage how close the policies and tags are to the upper size
-// limit. For more information, see Passing Session Tags in STS
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
-// IAM User Guide. You could receive this error even though you meet other defined
-// session policy and session tag limits. For more information, see IAM and STS
-// Entity Character Limits
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
+// limit. For more information, see [Passing Session Tags in STS]in the IAM User Guide.
+//
+// You could receive this error even though you meet other defined session policy
+// and session tag limits. For more information, see [IAM and STS Entity Character Limits]in the IAM User Guide.
+//
+// [Passing Session Tags in STS]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
+// [IAM and STS Entity Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length
type PackedPolicyTooLargeException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -165,18 +208,25 @@ func (e *PackedPolicyTooLargeException) ErrorMessage() string {
}
return *e.Message
}
-func (e *PackedPolicyTooLargeException) ErrorCode() string { return "PackedPolicyTooLarge" }
+func (e *PackedPolicyTooLargeException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "PackedPolicyTooLarge"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *PackedPolicyTooLargeException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
-// STS is not activated in the requested region for the account that is being asked
-// to generate credentials. The account administrator must use the IAM console to
-// activate STS in that region. For more information, see Activating and
-// Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
+// STS is not activated in the requested region for the account that is being
+// asked to generate credentials. The account administrator must use the IAM
+// console to activate STS in that region. For more information, see [Activating and Deactivating STS in an Amazon Web Services Region]in the IAM
+// User Guide.
+//
+// [Activating and Deactivating STS in an Amazon Web Services Region]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html
type RegionDisabledException struct {
Message *string
+ ErrorCodeOverride *string
+
noSmithyDocumentSerde
}
@@ -189,5 +239,10 @@ func (e *RegionDisabledException) ErrorMessage() string {
}
return *e.Message
}
-func (e *RegionDisabledException) ErrorCode() string { return "RegionDisabledException" }
+func (e *RegionDisabledException) ErrorCode() string {
+ if e == nil || e.ErrorCodeOverride == nil {
+ return "RegionDisabledException"
+ }
+ return *e.ErrorCodeOverride
+}
func (e *RegionDisabledException) ErrorFault() smithy.ErrorFault { return smithy.FaultClient }
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
index 86e509905..dff7a3c2e 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/types/types.go
@@ -11,12 +11,12 @@ import (
// returns.
type AssumedRoleUser struct {
- // The ARN of the temporary security credentials that are returned from the
- // AssumeRole action. For more information about ARNs and how to use them in
- // policies, see IAM Identifiers
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in
+ // The ARN of the temporary security credentials that are returned from the AssumeRole
+ // action. For more information about ARNs and how to use them in policies, see [IAM Identifiers]in
// the IAM User Guide.
//
+ // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html
+ //
// This member is required.
Arn *string
@@ -62,9 +62,9 @@ type FederatedUser struct {
// The ARN that specifies the federated user that is associated with the
// credentials. For more information about ARNs and how to use them in policies,
- // see IAM Identifiers
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) in
- // the IAM User Guide.
+ // see [IAM Identifiers]in the IAM User Guide.
+ //
+ // [IAM Identifiers]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html
//
// This member is required.
Arn *string
@@ -83,37 +83,57 @@ type FederatedUser struct {
type PolicyDescriptorType struct {
// The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
- // policy for the role. For more information about ARNs, see Amazon Resource Names
- // (ARNs) and Amazon Web Services Service Namespaces
- // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) in
- // the Amazon Web Services General Reference.
+ // policy for the role. For more information about ARNs, see [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]in the Amazon Web
+ // Services General Reference.
+ //
+ // [Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
Arn *string
noSmithyDocumentSerde
}
-// You can pass custom key-value pair attributes when you assume a role or federate
-// a user. These are called session tags. You can then use the session tags to
-// control access to resources. For more information, see Tagging Amazon Web
-// Services STS Sessions
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html) in the
-// IAM User Guide.
+// Contains information about the provided context. This includes the signed and
+// encrypted trusted context assertion and the context provider ARN from which the
+// trusted context assertion was generated.
+type ProvidedContext struct {
+
+ // The signed and encrypted trusted context assertion generated by the context
+ // provider. The trusted context assertion is signed and encrypted by Amazon Web
+ // Services STS.
+ ContextAssertion *string
+
+ // The context provider ARN from which the trusted context assertion was generated.
+ ProviderArn *string
+
+ noSmithyDocumentSerde
+}
+
+// You can pass custom key-value pair attributes when you assume a role or
+// federate a user. These are called session tags. You can then use the session
+// tags to control access to resources. For more information, see [Tagging Amazon Web Services STS Sessions]in the IAM User
+// Guide.
+//
+// [Tagging Amazon Web Services STS Sessions]: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html
type Tag struct {
- // The key for a session tag. You can pass up to 50 session tags. The plain text
- // session tag keys can’t exceed 128 characters. For these and additional limits,
- // see IAM and STS Character Limits
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
+ // The key for a session tag.
+ //
+ // You can pass up to 50 session tags. The plain text session tag keys can’t
+ // exceed 128 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User
+ // Guide.
+ //
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
//
// This member is required.
Key *string
- // The value for a session tag. You can pass up to 50 session tags. The plain text
- // session tag values can’t exceed 256 characters. For these and additional limits,
- // see IAM and STS Character Limits
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
+ // The value for a session tag.
+ //
+ // You can pass up to 50 session tags. The plain text session tag values can’t
+ // exceed 256 characters. For these and additional limits, see [IAM and STS Character Limits]in the IAM User
+ // Guide.
+ //
+ // [IAM and STS Character Limits]: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length
//
// This member is required.
Value *string
diff --git a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go
index 3e4bad2a9..1026e2211 100644
--- a/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go
+++ b/vendor/github.com/aws/aws-sdk-go-v2/service/sts/validators.go
@@ -70,6 +70,26 @@ func (m *validateOpAssumeRoleWithWebIdentity) HandleInitialize(ctx context.Conte
return next.HandleInitialize(ctx, in)
}
+type validateOpAssumeRoot struct {
+}
+
+func (*validateOpAssumeRoot) ID() string {
+ return "OperationInputValidation"
+}
+
+func (m *validateOpAssumeRoot) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ input, ok := in.Parameters.(*AssumeRootInput)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown input parameters type %T", in.Parameters)
+ }
+ if err := validateOpAssumeRootInput(input); err != nil {
+ return out, metadata, err
+ }
+ return next.HandleInitialize(ctx, in)
+}
+
type validateOpDecodeAuthorizationMessage struct {
}
@@ -142,6 +162,10 @@ func addOpAssumeRoleWithWebIdentityValidationMiddleware(stack *middleware.Stack)
return stack.Initialize.Add(&validateOpAssumeRoleWithWebIdentity{}, middleware.After)
}
+func addOpAssumeRootValidationMiddleware(stack *middleware.Stack) error {
+ return stack.Initialize.Add(&validateOpAssumeRoot{}, middleware.After)
+}
+
func addOpDecodeAuthorizationMessageValidationMiddleware(stack *middleware.Stack) error {
return stack.Initialize.Add(&validateOpDecodeAuthorizationMessage{}, middleware.After)
}
@@ -254,6 +278,24 @@ func validateOpAssumeRoleWithWebIdentityInput(v *AssumeRoleWithWebIdentityInput)
}
}
+func validateOpAssumeRootInput(v *AssumeRootInput) error {
+ if v == nil {
+ return nil
+ }
+ invalidParams := smithy.InvalidParamsError{Context: "AssumeRootInput"}
+ if v.TargetPrincipal == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TargetPrincipal"))
+ }
+ if v.TaskPolicyArn == nil {
+ invalidParams.Add(smithy.NewErrParamRequired("TaskPolicyArn"))
+ }
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ } else {
+ return nil
+ }
+}
+
func validateOpDecodeAuthorizationMessageInput(v *DecodeAuthorizationMessageInput) error {
if v == nil {
return nil
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
deleted file mode 100644
index 899129ecc..000000000
--- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-AWS SDK for Go
-Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go b/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go
deleted file mode 100644
index dd950a286..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/auth/bearer/token.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package bearer
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "time"
-)
-
-// Token provides a type wrapping a bearer token and expiration metadata.
-type Token struct {
- Value string
-
- CanExpire bool
- Expires time.Time
-}
-
-// Expired returns if the token's Expires time is before or equal to the time
-// provided. If CanExpire is false, Expired will always return false.
-func (t Token) Expired(now time.Time) bool {
- if !t.CanExpire {
- return false
- }
- now = now.Round(0)
- return now.Equal(t.Expires) || now.After(t.Expires)
-}
-
-// TokenProvider provides interface for retrieving bearer tokens.
-type TokenProvider interface {
- RetrieveBearerToken(aws.Context) (Token, error)
-}
-
-// TokenProviderFunc provides a helper utility to wrap a function as a type
-// that implements the TokenProvider interface.
-type TokenProviderFunc func(aws.Context) (Token, error)
-
-// RetrieveBearerToken calls the wrapped function, returning the Token or
-// error.
-func (fn TokenProviderFunc) RetrieveBearerToken(ctx aws.Context) (Token, error) {
- return fn(ctx)
-}
-
-// StaticTokenProvider provides a utility for wrapping a static bearer token
-// value within an implementation of a token provider.
-type StaticTokenProvider struct {
- Token Token
-}
-
-// RetrieveBearerToken returns the static token specified.
-func (s StaticTokenProvider) RetrieveBearerToken(aws.Context) (Token, error) {
- return s.Token, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
deleted file mode 100644
index 99849c0e1..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Package awserr represents API error interface accessors for the SDK.
-package awserr
-
-// An Error wraps lower level errors with code, message and an original error.
-// The underlying concrete error type may also satisfy other interfaces which
-// can be to used to obtain more specific information about the error.
-//
-// Calling Error() or String() will always include the full information about
-// an error based on its underlying type.
-//
-// Example:
-//
-// output, err := s3manage.Upload(svc, input, opts)
-// if err != nil {
-// if awsErr, ok := err.(awserr.Error); ok {
-// // Get error details
-// log.Println("Error:", awsErr.Code(), awsErr.Message())
-//
-// // Prints out full error message, including original error if there was one.
-// log.Println("Error:", awsErr.Error())
-//
-// // Get original error
-// if origErr := awsErr.OrigErr(); origErr != nil {
-// // operate on original error.
-// }
-// } else {
-// fmt.Println(err.Error())
-// }
-// }
-//
-type Error interface {
- // Satisfy the generic error interface.
- error
-
- // Returns the short phrase depicting the classification of the error.
- Code() string
-
- // Returns the error details message.
- Message() string
-
- // Returns the original error if one was set. Nil is returned if not set.
- OrigErr() error
-}
-
-// BatchError is a batch of errors which also wraps lower level errors with
-// code, message, and original errors. Calling Error() will include all errors
-// that occurred in the batch.
-//
-// Deprecated: Replaced with BatchedErrors. Only defined for backwards
-// compatibility.
-type BatchError interface {
- // Satisfy the generic error interface.
- error
-
- // Returns the short phrase depicting the classification of the error.
- Code() string
-
- // Returns the error details message.
- Message() string
-
- // Returns the original error if one was set. Nil is returned if not set.
- OrigErrs() []error
-}
-
-// BatchedErrors is a batch of errors which also wraps lower level errors with
-// code, message, and original errors. Calling Error() will include all errors
-// that occurred in the batch.
-//
-// Replaces BatchError
-type BatchedErrors interface {
- // Satisfy the base Error interface.
- Error
-
- // Returns the original error if one was set. Nil is returned if not set.
- OrigErrs() []error
-}
-
-// New returns an Error object described by the code, message, and origErr.
-//
-// If origErr satisfies the Error interface it will not be wrapped within a new
-// Error object and will instead be returned.
-func New(code, message string, origErr error) Error {
- var errs []error
- if origErr != nil {
- errs = append(errs, origErr)
- }
- return newBaseError(code, message, errs)
-}
-
-// NewBatchError returns an BatchedErrors with a collection of errors as an
-// array of errors.
-func NewBatchError(code, message string, errs []error) BatchedErrors {
- return newBaseError(code, message, errs)
-}
-
-// A RequestFailure is an interface to extract request failure information from
-// an Error such as the request ID of the failed request returned by a service.
-// RequestFailures may not always have a requestID value if the request failed
-// prior to reaching the service such as a connection error.
-//
-// Example:
-//
-// output, err := s3manage.Upload(svc, input, opts)
-// if err != nil {
-// if reqerr, ok := err.(RequestFailure); ok {
-// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
-// } else {
-// log.Println("Error:", err.Error())
-// }
-// }
-//
-// Combined with awserr.Error:
-//
-// output, err := s3manage.Upload(svc, input, opts)
-// if err != nil {
-// if awsErr, ok := err.(awserr.Error); ok {
-// // Generic AWS Error with Code, Message, and original error (if any)
-// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
-//
-// if reqErr, ok := err.(awserr.RequestFailure); ok {
-// // A service error occurred
-// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
-// }
-// } else {
-// fmt.Println(err.Error())
-// }
-// }
-//
-type RequestFailure interface {
- Error
-
- // The status code of the HTTP response.
- StatusCode() int
-
- // The request ID returned by the service for a request failure. This will
- // be empty if no request ID is available such as the request failed due
- // to a connection error.
- RequestID() string
-}
-
-// NewRequestFailure returns a wrapped error with additional information for
-// request status code, and service requestID.
-//
-// Should be used to wrap all request which involve service requests. Even if
-// the request failed without a service response, but had an HTTP status code
-// that may be meaningful.
-func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
- return newRequestError(err, statusCode, reqID)
-}
-
-// UnmarshalError provides the interface for the SDK failing to unmarshal data.
-type UnmarshalError interface {
- awsError
- Bytes() []byte
-}
-
-// NewUnmarshalError returns an initialized UnmarshalError error wrapper adding
-// the bytes that fail to unmarshal to the error.
-func NewUnmarshalError(err error, msg string, bytes []byte) UnmarshalError {
- return &unmarshalError{
- awsError: New("UnmarshalError", msg, err),
- bytes: bytes,
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
deleted file mode 100644
index 9cf7eaf40..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package awserr
-
-import (
- "encoding/hex"
- "fmt"
-)
-
-// SprintError returns a string of the formatted error code.
-//
-// Both extra and origErr are optional. If they are included their lines
-// will be added, but if they are not included their lines will be ignored.
-func SprintError(code, message, extra string, origErr error) string {
- msg := fmt.Sprintf("%s: %s", code, message)
- if extra != "" {
- msg = fmt.Sprintf("%s\n\t%s", msg, extra)
- }
- if origErr != nil {
- msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
- }
- return msg
-}
-
-// A baseError wraps the code and message which defines an error. It also
-// can be used to wrap an original error object.
-//
-// Should be used as the root for errors satisfying the awserr.Error. Also
-// for any error which does not fit into a specific error wrapper type.
-type baseError struct {
- // Classification of error
- code string
-
- // Detailed information about error
- message string
-
- // Optional original error this error is based off of. Allows building
- // chained errors.
- errs []error
-}
-
-// newBaseError returns an error object for the code, message, and errors.
-//
-// code is a short no whitespace phrase depicting the classification of
-// the error that is being created.
-//
-// message is the free flow string containing detailed information about the
-// error.
-//
-// origErrs is the error objects which will be nested under the new errors to
-// be returned.
-func newBaseError(code, message string, origErrs []error) *baseError {
- b := &baseError{
- code: code,
- message: message,
- errs: origErrs,
- }
-
- return b
-}
-
-// Error returns the string representation of the error.
-//
-// See ErrorWithExtra for formatting.
-//
-// Satisfies the error interface.
-func (b baseError) Error() string {
- size := len(b.errs)
- if size > 0 {
- return SprintError(b.code, b.message, "", errorList(b.errs))
- }
-
- return SprintError(b.code, b.message, "", nil)
-}
-
-// String returns the string representation of the error.
-// Alias for Error to satisfy the stringer interface.
-func (b baseError) String() string {
- return b.Error()
-}
-
-// Code returns the short phrase depicting the classification of the error.
-func (b baseError) Code() string {
- return b.code
-}
-
-// Message returns the error details message.
-func (b baseError) Message() string {
- return b.message
-}
-
-// OrigErr returns the original error if one was set. Nil is returned if no
-// error was set. This only returns the first element in the list. If the full
-// list is needed, use BatchedErrors.
-func (b baseError) OrigErr() error {
- switch len(b.errs) {
- case 0:
- return nil
- case 1:
- return b.errs[0]
- default:
- if err, ok := b.errs[0].(Error); ok {
- return NewBatchError(err.Code(), err.Message(), b.errs[1:])
- }
- return NewBatchError("BatchedErrors",
- "multiple errors occurred", b.errs)
- }
-}
-
-// OrigErrs returns the original errors if one was set. An empty slice is
-// returned if no error was set.
-func (b baseError) OrigErrs() []error {
- return b.errs
-}
-
-// So that the Error interface type can be included as an anonymous field
-// in the requestError struct and not conflict with the error.Error() method.
-type awsError Error
-
-// A requestError wraps a request or service error.
-//
-// Composed of baseError for code, message, and original error.
-type requestError struct {
- awsError
- statusCode int
- requestID string
- bytes []byte
-}
-
-// newRequestError returns a wrapped error with additional information for
-// request status code, and service requestID.
-//
-// Should be used to wrap all request which involve service requests. Even if
-// the request failed without a service response, but had an HTTP status code
-// that may be meaningful.
-//
-// Also wraps original errors via the baseError.
-func newRequestError(err Error, statusCode int, requestID string) *requestError {
- return &requestError{
- awsError: err,
- statusCode: statusCode,
- requestID: requestID,
- }
-}
-
-// Error returns the string representation of the error.
-// Satisfies the error interface.
-func (r requestError) Error() string {
- extra := fmt.Sprintf("status code: %d, request id: %s",
- r.statusCode, r.requestID)
- return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
-}
-
-// String returns the string representation of the error.
-// Alias for Error to satisfy the stringer interface.
-func (r requestError) String() string {
- return r.Error()
-}
-
-// StatusCode returns the wrapped status code for the error
-func (r requestError) StatusCode() int {
- return r.statusCode
-}
-
-// RequestID returns the wrapped requestID
-func (r requestError) RequestID() string {
- return r.requestID
-}
-
-// OrigErrs returns the original errors if one was set. An empty slice is
-// returned if no error was set.
-func (r requestError) OrigErrs() []error {
- if b, ok := r.awsError.(BatchedErrors); ok {
- return b.OrigErrs()
- }
- return []error{r.OrigErr()}
-}
-
-type unmarshalError struct {
- awsError
- bytes []byte
-}
-
-// Error returns the string representation of the error.
-// Satisfies the error interface.
-func (e unmarshalError) Error() string {
- extra := hex.Dump(e.bytes)
- return SprintError(e.Code(), e.Message(), extra, e.OrigErr())
-}
-
-// String returns the string representation of the error.
-// Alias for Error to satisfy the stringer interface.
-func (e unmarshalError) String() string {
- return e.Error()
-}
-
-// Bytes returns the bytes that failed to unmarshal.
-func (e unmarshalError) Bytes() []byte {
- return e.bytes
-}
-
-// An error list that satisfies the golang interface
-type errorList []error
-
-// Error returns the string representation of the error.
-//
-// Satisfies the error interface.
-func (e errorList) Error() string {
- msg := ""
- // How do we want to handle the array size being zero
- if size := len(e); size > 0 {
- for i := 0; i < size; i++ {
- msg += e[i].Error()
- // We check the next index to see if it is within the slice.
- // If it is, then we append a newline. We do this, because unit tests
- // could be broken with the additional '\n'
- if i+1 < size {
- msg += "\n"
- }
- }
- }
- return msg
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
deleted file mode 100644
index 142a7a01c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package awsutil
-
-import (
- "reflect"
-)
-
-// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual.
-// In addition to this, this method will also dereference the input values if
-// possible so the DeepEqual performed will not fail if one parameter is a
-// pointer and the other is not.
-//
-// DeepEqual will not perform indirection of nested values of the input parameters.
-func DeepEqual(a, b interface{}) bool {
- ra := reflect.Indirect(reflect.ValueOf(a))
- rb := reflect.Indirect(reflect.ValueOf(b))
-
- if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
- // If the elements are both nil, and of the same type they are equal
- // If they are of different types they are not equal
- return reflect.TypeOf(a) == reflect.TypeOf(b)
- } else if raValid != rbValid {
- // Both values must be valid to be equal
- return false
- }
-
- return reflect.DeepEqual(ra.Interface(), rb.Interface())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
deleted file mode 100644
index a4eb6a7f4..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package awsutil
-
-import (
- "reflect"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/jmespath/go-jmespath"
-)
-
-var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
-
-// rValuesAtPath returns a slice of values found in value v. The values
-// in v are explored recursively so all nested values are collected.
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
- pathparts := strings.Split(path, "||")
- if len(pathparts) > 1 {
- for _, pathpart := range pathparts {
- vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
- if len(vals) > 0 {
- return vals
- }
- }
- return nil
- }
-
- values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
- components := strings.Split(path, ".")
- for len(values) > 0 && len(components) > 0 {
- var index *int64
- var indexStar bool
- c := strings.TrimSpace(components[0])
- if c == "" { // no actual component, illegal syntax
- return nil
- } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
- // TODO normalize case for user
- return nil // don't support unexported fields
- }
-
- // parse this component
- if m := indexRe.FindStringSubmatch(c); m != nil {
- c = m[1]
- if m[2] == "" {
- index = nil
- indexStar = true
- } else {
- i, _ := strconv.ParseInt(m[2], 10, 32)
- index = &i
- indexStar = false
- }
- }
-
- nextvals := []reflect.Value{}
- for _, value := range values {
- // pull component name out of struct member
- if value.Kind() != reflect.Struct {
- continue
- }
-
- if c == "*" { // pull all members
- for i := 0; i < value.NumField(); i++ {
- if f := reflect.Indirect(value.Field(i)); f.IsValid() {
- nextvals = append(nextvals, f)
- }
- }
- continue
- }
-
- value = value.FieldByNameFunc(func(name string) bool {
- if c == name {
- return true
- } else if !caseSensitive && strings.EqualFold(name, c) {
- return true
- }
- return false
- })
-
- if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
- if !value.IsNil() {
- value.Set(reflect.Zero(value.Type()))
- }
- return []reflect.Value{value}
- }
-
- if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
- // TODO if the value is the terminus it should not be created
- // if the value to be set to its position is nil.
- value.Set(reflect.New(value.Type().Elem()))
- value = value.Elem()
- } else {
- value = reflect.Indirect(value)
- }
-
- if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
- if !createPath && value.IsNil() {
- value = reflect.ValueOf(nil)
- }
- }
-
- if value.IsValid() {
- nextvals = append(nextvals, value)
- }
- }
- values = nextvals
-
- if indexStar || index != nil {
- nextvals = []reflect.Value{}
- for _, valItem := range values {
- value := reflect.Indirect(valItem)
- if value.Kind() != reflect.Slice {
- continue
- }
-
- if indexStar { // grab all indices
- for i := 0; i < value.Len(); i++ {
- idx := reflect.Indirect(value.Index(i))
- if idx.IsValid() {
- nextvals = append(nextvals, idx)
- }
- }
- continue
- }
-
- // pull out index
- i := int(*index)
- if i >= value.Len() { // check out of bounds
- if createPath {
- // TODO resize slice
- } else {
- continue
- }
- } else if i < 0 { // support negative indexing
- i = value.Len() + i
- }
- value = reflect.Indirect(value.Index(i))
-
- if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
- if !createPath && value.IsNil() {
- value = reflect.ValueOf(nil)
- }
- }
-
- if value.IsValid() {
- nextvals = append(nextvals, value)
- }
- }
- values = nextvals
- }
-
- components = components[1:]
- }
- return values
-}
-
-// ValuesAtPath returns a list of values at the case insensitive lexical
-// path inside of a structure.
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
- result, err := jmespath.Search(path, i)
- if err != nil {
- return nil, err
- }
-
- v := reflect.ValueOf(result)
- if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
- return nil, nil
- }
- if s, ok := result.([]interface{}); ok {
- return s, err
- }
- if v.Kind() == reflect.Map && v.Len() == 0 {
- return nil, nil
- }
- if v.Kind() == reflect.Slice {
- out := make([]interface{}, v.Len())
- for i := 0; i < v.Len(); i++ {
- out[i] = v.Index(i).Interface()
- }
- return out, nil
- }
-
- return []interface{}{result}, nil
-}
-
-// SetValueAtPath sets a value at the case insensitive lexical path inside
-// of a structure.
-func SetValueAtPath(i interface{}, path string, v interface{}) {
- rvals := rValuesAtPath(i, path, true, false, v == nil)
- for _, rval := range rvals {
- if rval.Kind() == reflect.Ptr && rval.IsNil() {
- continue
- }
- setValue(rval, v)
- }
-}
-
-func setValue(dstVal reflect.Value, src interface{}) {
- if dstVal.Kind() == reflect.Ptr {
- dstVal = reflect.Indirect(dstVal)
- }
- srcVal := reflect.ValueOf(src)
-
- if !srcVal.IsValid() { // src is literal nil
- if dstVal.CanAddr() {
- // Convert to pointer so that pointer's value can be nil'ed
- // dstVal = dstVal.Addr()
- }
- dstVal.Set(reflect.Zero(dstVal.Type()))
-
- } else if srcVal.Kind() == reflect.Ptr {
- if srcVal.IsNil() {
- srcVal = reflect.Zero(dstVal.Type())
- } else {
- srcVal = reflect.ValueOf(src).Elem()
- }
- dstVal.Set(srcVal)
- } else {
- dstVal.Set(srcVal)
- }
-
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
deleted file mode 100644
index b147f103c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package client
-
-import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// A Config provides configuration to a service client instance.
-type Config struct {
- Config *aws.Config
- Handlers request.Handlers
- PartitionID string
- Endpoint string
- SigningRegion string
- SigningName string
- ResolvedRegion string
-
- // States that the signing name did not come from a modeled source but
- // was derived based on other data. Used by service client constructors
- // to determine if the signin name can be overridden based on metadata the
- // service has.
- SigningNameDerived bool
-}
-
-// ConfigProvider provides a generic way for a service client to receive
-// the ClientConfig without circular dependencies.
-type ConfigProvider interface {
- ClientConfig(serviceName string, cfgs ...*aws.Config) Config
-}
-
-// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not
-// resolve the endpoint automatically. The service client's endpoint must be
-// provided via the aws.Config.Endpoint field.
-type ConfigNoResolveEndpointProvider interface {
- ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config
-}
-
-// A Client implements the base client request and response handling
-// used by all service clients.
-type Client struct {
- request.Retryer
- metadata.ClientInfo
-
- Config aws.Config
- Handlers request.Handlers
-}
-
-// New will return a pointer to a new initialized service client.
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
- svc := &Client{
- Config: cfg,
- ClientInfo: info,
- Handlers: handlers.Copy(),
- }
-
- switch retryer, ok := cfg.Retryer.(request.Retryer); {
- case ok:
- svc.Retryer = retryer
- case cfg.Retryer != nil && cfg.Logger != nil:
- s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
- cfg.Logger.Log(s)
- fallthrough
- default:
- maxRetries := aws.IntValue(cfg.MaxRetries)
- if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
- maxRetries = DefaultRetryerMaxNumRetries
- }
- svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
- }
-
- svc.AddDebugHandlers()
-
- for _, option := range options {
- option(svc)
- }
-
- return svc
-}
-
-// NewRequest returns a new Request pointer for the service API
-// operation and parameters.
-func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
- return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
-}
-
-// AddDebugHandlers injects debug logging handlers into the service to log request
-// debug information.
-func (c *Client) AddDebugHandlers() {
- c.Handlers.Send.PushFrontNamed(LogHTTPRequestHandler)
- c.Handlers.Send.PushBackNamed(LogHTTPResponseHandler)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
deleted file mode 100644
index 9f6af19dd..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package client
-
-import (
- "math"
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/sdkrand"
-)
-
-// DefaultRetryer implements basic retry logic using exponential backoff for
-// most services. If you want to implement custom retry logic, you can implement the
-// request.Retryer interface.
-//
-type DefaultRetryer struct {
- // Num max Retries is the number of max retries that will be performed.
- // By default, this is zero.
- NumMaxRetries int
-
- // MinRetryDelay is the minimum retry delay after which retry will be performed.
- // If not set, the value is 0ns.
- MinRetryDelay time.Duration
-
- // MinThrottleRetryDelay is the minimum retry delay when throttled.
- // If not set, the value is 0ns.
- MinThrottleDelay time.Duration
-
- // MaxRetryDelay is the maximum retry delay before which retry must be performed.
- // If not set, the value is 0ns.
- MaxRetryDelay time.Duration
-
- // MaxThrottleDelay is the maximum retry delay when throttled.
- // If not set, the value is 0ns.
- MaxThrottleDelay time.Duration
-}
-
-const (
- // DefaultRetryerMaxNumRetries sets maximum number of retries
- DefaultRetryerMaxNumRetries = 3
-
- // DefaultRetryerMinRetryDelay sets minimum retry delay
- DefaultRetryerMinRetryDelay = 30 * time.Millisecond
-
- // DefaultRetryerMinThrottleDelay sets minimum delay when throttled
- DefaultRetryerMinThrottleDelay = 500 * time.Millisecond
-
- // DefaultRetryerMaxRetryDelay sets maximum retry delay
- DefaultRetryerMaxRetryDelay = 300 * time.Second
-
- // DefaultRetryerMaxThrottleDelay sets maximum delay when throttled
- DefaultRetryerMaxThrottleDelay = 300 * time.Second
-)
-
-// MaxRetries returns the number of maximum returns the service will use to make
-// an individual API request.
-func (d DefaultRetryer) MaxRetries() int {
- return d.NumMaxRetries
-}
-
-// setRetryerDefaults sets the default values of the retryer if not set
-func (d *DefaultRetryer) setRetryerDefaults() {
- if d.MinRetryDelay == 0 {
- d.MinRetryDelay = DefaultRetryerMinRetryDelay
- }
- if d.MaxRetryDelay == 0 {
- d.MaxRetryDelay = DefaultRetryerMaxRetryDelay
- }
- if d.MinThrottleDelay == 0 {
- d.MinThrottleDelay = DefaultRetryerMinThrottleDelay
- }
- if d.MaxThrottleDelay == 0 {
- d.MaxThrottleDelay = DefaultRetryerMaxThrottleDelay
- }
-}
-
-// RetryRules returns the delay duration before retrying this request again
-func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
-
- // if number of max retries is zero, no retries will be performed.
- if d.NumMaxRetries == 0 {
- return 0
- }
-
- // Sets default value for retryer members
- d.setRetryerDefaults()
-
- // minDelay is the minimum retryer delay
- minDelay := d.MinRetryDelay
-
- var initialDelay time.Duration
-
- isThrottle := r.IsErrorThrottle()
- if isThrottle {
- if delay, ok := getRetryAfterDelay(r); ok {
- initialDelay = delay
- }
- minDelay = d.MinThrottleDelay
- }
-
- retryCount := r.RetryCount
-
- // maxDelay the maximum retryer delay
- maxDelay := d.MaxRetryDelay
-
- if isThrottle {
- maxDelay = d.MaxThrottleDelay
- }
-
- var delay time.Duration
-
- // Logic to cap the retry count based on the minDelay provided
- actualRetryCount := int(math.Log2(float64(minDelay))) + 1
- if actualRetryCount < 63-retryCount {
- delay = time.Duration(1< maxDelay {
- delay = getJitterDelay(maxDelay / 2)
- }
- } else {
- delay = getJitterDelay(maxDelay / 2)
- }
- return delay + initialDelay
-}
-
-// getJitterDelay returns a jittered delay for retry
-func getJitterDelay(duration time.Duration) time.Duration {
- return time.Duration(sdkrand.SeededRand.Int63n(int64(duration)) + int64(duration))
-}
-
-// ShouldRetry returns true if the request should be retried.
-func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
-
- // ShouldRetry returns false if number of max retries is 0.
- if d.NumMaxRetries == 0 {
- return false
- }
-
- // If one of the other handlers already set the retry state
- // we don't want to override it based on the service's state
- if r.Retryable != nil {
- return *r.Retryable
- }
- return r.IsErrorRetryable() || r.IsErrorThrottle()
-}
-
-// This will look in the Retry-After header, RFC 7231, for how long
-// it will wait before attempting another request
-func getRetryAfterDelay(r *request.Request) (time.Duration, bool) {
- if !canUseRetryAfterHeader(r) {
- return 0, false
- }
-
- delayStr := r.HTTPResponse.Header.Get("Retry-After")
- if len(delayStr) == 0 {
- return 0, false
- }
-
- delay, err := strconv.Atoi(delayStr)
- if err != nil {
- return 0, false
- }
-
- return time.Duration(delay) * time.Second, true
-}
-
-// Will look at the status code to see if the retry header pertains to
-// the status code.
-func canUseRetryAfterHeader(r *request.Request) bool {
- switch r.HTTPResponse.StatusCode {
- case 429:
- case 503:
- default:
- return false
- }
-
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
deleted file mode 100644
index 5ac5c24a1..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package client
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net/http/httputil"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-const logReqMsg = `DEBUG: Request %s/%s Details:
----[ REQUEST POST-SIGN ]-----------------------------
-%s
------------------------------------------------------`
-
-const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
----[ REQUEST DUMP ERROR ]-----------------------------
-%s
-------------------------------------------------------`
-
-type logWriter struct {
- // Logger is what we will use to log the payload of a response.
- Logger aws.Logger
- // buf stores the contents of what has been read
- buf *bytes.Buffer
-}
-
-func (logger *logWriter) Write(b []byte) (int, error) {
- return logger.buf.Write(b)
-}
-
-type teeReaderCloser struct {
- // io.Reader will be a tee reader that is used during logging.
- // This structure will read from a body and write the contents to a logger.
- io.Reader
- // Source is used just to close when we are done reading.
- Source io.ReadCloser
-}
-
-func (reader *teeReaderCloser) Close() error {
- return reader.Source.Close()
-}
-
-// LogHTTPRequestHandler is a SDK request handler to log the HTTP request sent
-// to a service. Will include the HTTP request body if the LogLevel of the
-// request matches LogDebugWithHTTPBody.
-var LogHTTPRequestHandler = request.NamedHandler{
- Name: "awssdk.client.LogRequest",
- Fn: logRequest,
-}
-
-func logRequest(r *request.Request) {
- if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
- return
- }
-
- logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- bodySeekable := aws.IsReaderSeekable(r.Body)
-
- b, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
- if err != nil {
- r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
- r.ClientInfo.ServiceName, r.Operation.Name, err))
- return
- }
-
- if logBody {
- if !bodySeekable {
- r.SetReaderBody(aws.ReadSeekCloser(r.HTTPRequest.Body))
- }
- // Reset the request body because dumpRequest will re-wrap the
- // r.HTTPRequest's Body as a NoOpCloser and will not be reset after
- // read by the HTTP client reader.
- if err := r.Error; err != nil {
- r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
- r.ClientInfo.ServiceName, r.Operation.Name, err))
- return
- }
- }
-
- r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
- r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
-}
-
-// LogHTTPRequestHeaderHandler is a SDK request handler to log the HTTP request sent
-// to a service. Will only log the HTTP request's headers. The request payload
-// will not be read.
-var LogHTTPRequestHeaderHandler = request.NamedHandler{
- Name: "awssdk.client.LogRequestHeader",
- Fn: logRequestHeader,
-}
-
-func logRequestHeader(r *request.Request) {
- if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
- return
- }
-
- b, err := httputil.DumpRequestOut(r.HTTPRequest, false)
- if err != nil {
- r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg,
- r.ClientInfo.ServiceName, r.Operation.Name, err))
- return
- }
-
- r.Config.Logger.Log(fmt.Sprintf(logReqMsg,
- r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
-}
-
-const logRespMsg = `DEBUG: Response %s/%s Details:
----[ RESPONSE ]--------------------------------------
-%s
------------------------------------------------------`
-
-const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
----[ RESPONSE DUMP ERROR ]-----------------------------
-%s
------------------------------------------------------`
-
-// LogHTTPResponseHandler is a SDK request handler to log the HTTP response
-// received from a service. Will include the HTTP response body if the LogLevel
-// of the request matches LogDebugWithHTTPBody.
-var LogHTTPResponseHandler = request.NamedHandler{
- Name: "awssdk.client.LogResponse",
- Fn: logResponse,
-}
-
-func logResponse(r *request.Request) {
- if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
- return
- }
-
- lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
-
- if r.HTTPResponse == nil {
- lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
- r.ClientInfo.ServiceName, r.Operation.Name, "request's HTTPResponse is nil"))
- return
- }
-
- logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- if logBody {
- r.HTTPResponse.Body = &teeReaderCloser{
- Reader: io.TeeReader(r.HTTPResponse.Body, lw),
- Source: r.HTTPResponse.Body,
- }
- }
-
- handlerFn := func(req *request.Request) {
- b, err := httputil.DumpResponse(req.HTTPResponse, false)
- if err != nil {
- lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
- req.ClientInfo.ServiceName, req.Operation.Name, err))
- return
- }
-
- lw.Logger.Log(fmt.Sprintf(logRespMsg,
- req.ClientInfo.ServiceName, req.Operation.Name, string(b)))
-
- if logBody {
- b, err := ioutil.ReadAll(lw.buf)
- if err != nil {
- lw.Logger.Log(fmt.Sprintf(logRespErrMsg,
- req.ClientInfo.ServiceName, req.Operation.Name, err))
- return
- }
-
- lw.Logger.Log(string(b))
- }
- }
-
- const handlerName = "awsdk.client.LogResponse.ResponseBody"
-
- r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
- Name: handlerName, Fn: handlerFn,
- })
- r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
- Name: handlerName, Fn: handlerFn,
- })
-}
-
-// LogHTTPResponseHeaderHandler is a SDK request handler to log the HTTP
-// response received from a service. Will only log the HTTP response's headers.
-// The response payload will not be read.
-var LogHTTPResponseHeaderHandler = request.NamedHandler{
- Name: "awssdk.client.LogResponseHeader",
- Fn: logResponseHeader,
-}
-
-func logResponseHeader(r *request.Request) {
- if !r.Config.LogLevel.AtLeast(aws.LogDebug) || r.Config.Logger == nil {
- return
- }
-
- b, err := httputil.DumpResponse(r.HTTPResponse, false)
- if err != nil {
- r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg,
- r.ClientInfo.ServiceName, r.Operation.Name, err))
- return
- }
-
- r.Config.Logger.Log(fmt.Sprintf(logRespMsg,
- r.ClientInfo.ServiceName, r.Operation.Name, string(b)))
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
deleted file mode 100644
index a7530ebb3..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
+++ /dev/null
@@ -1,15 +0,0 @@
-package metadata
-
-// ClientInfo wraps immutable data from the client.Client structure.
-type ClientInfo struct {
- ServiceName string
- ServiceID string
- APIVersion string
- PartitionID string
- Endpoint string
- SigningName string
- SigningRegion string
- JSONVersion string
- TargetPrefix string
- ResolvedRegion string
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
deleted file mode 100644
index 881d575f0..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/no_op_retryer.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package client
-
-import (
- "time"
-
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// NoOpRetryer provides a retryer that performs no retries.
-// It should be used when we do not want retries to be performed.
-type NoOpRetryer struct{}
-
-// MaxRetries returns the number of maximum returns the service will use to make
-// an individual API; For NoOpRetryer the MaxRetries will always be zero.
-func (d NoOpRetryer) MaxRetries() int {
- return 0
-}
-
-// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
-func (d NoOpRetryer) ShouldRetry(_ *request.Request) bool {
- return false
-}
-
-// RetryRules returns the delay duration before retrying this request again;
-// since NoOpRetryer does not retry, RetryRules always returns 0.
-func (d NoOpRetryer) RetryRules(_ *request.Request) time.Duration {
- return 0
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
deleted file mode 100644
index c483e0cb8..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ /dev/null
@@ -1,670 +0,0 @@
-package aws
-
-import (
- "net/http"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/endpoints"
-)
-
-// UseServiceDefaultRetries instructs the config to use the service's own
-// default number of retries. This will be the default action if
-// Config.MaxRetries is nil also.
-const UseServiceDefaultRetries = -1
-
-// RequestRetryer is an alias for a type that implements the request.Retryer
-// interface.
-type RequestRetryer interface{}
-
-// A Config provides service configuration for service clients. By default,
-// all clients will use the defaults.DefaultConfig structure.
-//
-// // Create Session with MaxRetries configuration to be shared by multiple
-// // service clients.
-// sess := session.Must(session.NewSession(&aws.Config{
-// MaxRetries: aws.Int(3),
-// }))
-//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, &aws.Config{
-// Region: aws.String("us-west-2"),
-// })
-type Config struct {
- // Enables verbose error printing of all credential chain errors.
- // Should be used when wanting to see all errors while attempting to
- // retrieve credentials.
- CredentialsChainVerboseErrors *bool
-
- // The credentials object to use when signing requests. Defaults to a
- // chain of credential providers to search for credentials in environment
- // variables, shared credential file, and EC2 Instance Roles.
- Credentials *credentials.Credentials
-
- // An optional endpoint URL (hostname only or fully qualified URI)
- // that overrides the default generated endpoint for a client. Set this
- // to `nil` or the value to `""` to use the default generated endpoint.
- //
- // Note: You must still provide a `Region` value when specifying an
- // endpoint for a client.
- Endpoint *string
-
- // The resolver to use for looking up endpoints for AWS service clients
- // to use based on region.
- EndpointResolver endpoints.Resolver
-
- // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
- // ShouldRetry regardless of whether or not if request.Retryable is set.
- // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
- // is not set, then ShouldRetry will only be called if request.Retryable is nil.
- // Proper handling of the request.Retryable field is important when setting this field.
- EnforceShouldRetryCheck *bool
-
- // The region to send requests to. This parameter is required and must
- // be configured globally or on a per-client basis unless otherwise
- // noted. A full list of regions is found in the "Regions and Endpoints"
- // document.
- //
- // See http://docs.aws.amazon.com/general/latest/gr/rande.html for AWS
- // Regions and Endpoints.
- Region *string
-
- // Set this to `true` to disable SSL when sending requests. Defaults
- // to `false`.
- DisableSSL *bool
-
- // The HTTP client to use when sending requests. Defaults to
- // `http.DefaultClient`.
- HTTPClient *http.Client
-
- // An integer value representing the logging level. The default log level
- // is zero (LogOff), which represents no logging. To enable logging set
- // to a LogLevel Value.
- LogLevel *LogLevelType
-
- // The logger writer interface to write logging messages to. Defaults to
- // standard out.
- Logger Logger
-
- // The maximum number of times that a request will be retried for failures.
- // Defaults to -1, which defers the max retry setting to the service
- // specific configuration.
- MaxRetries *int
-
- // Retryer guides how HTTP requests should be retried in case of
- // recoverable failures.
- //
- // When nil or the value does not implement the request.Retryer interface,
- // the client.DefaultRetryer will be used.
- //
- // When both Retryer and MaxRetries are non-nil, the former is used and
- // the latter ignored.
- //
- // To set the Retryer field in a type-safe manner and with chaining, use
- // the request.WithRetryer helper function:
- //
- // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
- //
- Retryer RequestRetryer
-
- // Disables semantic parameter validation, which validates input for
- // missing required fields and/or other semantic request input errors.
- DisableParamValidation *bool
-
- // Disables the computation of request and response checksums, e.g.,
- // CRC32 checksums in Amazon DynamoDB.
- DisableComputeChecksums *bool
-
- // Set this to `true` to force the request to use path-style addressing,
- // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client
- // will use virtual hosted bucket addressing when possible
- // (`http://BUCKET.s3.amazonaws.com/KEY`).
- //
- // Note: This configuration option is specific to the Amazon S3 service.
- //
- // See http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
- // for Amazon S3: Virtual Hosting of Buckets
- S3ForcePathStyle *bool
-
- // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
- // header to PUT requests over 2MB of content. 100-Continue instructs the
- // HTTP client not to send the body until the service responds with a
- // `continue` status. This is useful to prevent sending the request body
- // until after the request is authenticated, and validated.
- //
- // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
- //
- // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
- // `ExpectContinueTimeout` for information on adjusting the continue wait
- // timeout. https://golang.org/pkg/net/http/#Transport
- //
- // You should use this flag to disable 100-Continue if you experience issues
- // with proxies or third party S3 compatible services.
- S3Disable100Continue *bool
-
- // Set this to `true` to enable S3 Accelerate feature. For all operations
- // compatible with S3 Accelerate will use the accelerate endpoint for
- // requests. Requests not compatible will fall back to normal S3 requests.
- //
- // The bucket must be enable for accelerate to be used with S3 client with
- // accelerate enabled. If the bucket is not enabled for accelerate an error
- // will be returned. The bucket name must be DNS compatible to also work
- // with accelerate.
- S3UseAccelerate *bool
-
- // S3DisableContentMD5Validation config option is temporarily disabled,
- // For S3 GetObject API calls, #1837.
- //
- // Set this to `true` to disable the S3 service client from automatically
- // adding the ContentMD5 to S3 Object Put and Upload API calls. This option
- // will also disable the SDK from performing object ContentMD5 validation
- // on GetObject API calls.
- S3DisableContentMD5Validation *bool
-
- // Set this to `true` to have the S3 service client to use the region specified
- // in the ARN, when an ARN is provided as an argument to a bucket parameter.
- S3UseARNRegion *bool
-
- // Set this to `true` to enable the SDK to unmarshal API response header maps to
- // normalized lower case map keys.
- //
- // For example S3's X-Amz-Meta prefixed header will be unmarshaled to lower case
- // Metadata member's map keys. The value of the header in the map is unaffected.
- //
- // The AWS SDK for Go v2, uses lower case header maps by default. The v1
- // SDK provides this opt-in for this option, for backwards compatibility.
- LowerCaseHeaderMaps *bool
-
- // Set this to `true` to disable the EC2Metadata client from overriding the
- // default http.Client's Timeout. This is helpful if you do not want the
- // EC2Metadata client to create a new http.Client. This options is only
- // meaningful if you're not already using a custom HTTP client with the
- // SDK. Enabled by default.
- //
- // Must be set and provided to the session.NewSession() in order to disable
- // the EC2Metadata overriding the timeout for default credentials chain.
- //
- // Example:
- // sess := session.Must(session.NewSession(aws.NewConfig()
- // .WithEC2MetadataDisableTimeoutOverride(true)))
- //
- // svc := s3.New(sess)
- //
- EC2MetadataDisableTimeoutOverride *bool
-
- // Set this to `false` to disable EC2Metadata client from falling back to IMDSv1.
- // By default, EC2 role credentials will fall back to IMDSv1 as needed for backwards compatibility.
- // You can disable this behavior by explicitly setting this flag to `false`. When false, the EC2Metadata
- // client will return any errors encountered from attempting to fetch a token instead of silently
- // using the insecure data flow of IMDSv1.
- //
- // Example:
- // sess := session.Must(session.NewSession(aws.NewConfig()
- // .WithEC2MetadataEnableFallback(false)))
- //
- // svc := s3.New(sess)
- //
- // See [configuring IMDS] for more information.
- //
- // [configuring IMDS]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
- EC2MetadataEnableFallback *bool
-
- // Instructs the endpoint to be generated for a service client to
- // be the dual stack endpoint. The dual stack endpoint will support
- // both IPv4 and IPv6 addressing.
- //
- // Setting this for a service which does not support dual stack will fail
- // to make requests. It is not recommended to set this value on the session
- // as it will apply to all service clients created with the session. Even
- // services which don't support dual stack endpoints.
- //
- // If the Endpoint config value is also provided the UseDualStack flag
- // will be ignored.
- //
- // Only supported with.
- //
- // sess := session.Must(session.NewSession())
- //
- // svc := s3.New(sess, &aws.Config{
- // UseDualStack: aws.Bool(true),
- // })
- //
- // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
- // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
- // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
- // precedence then this option.
- UseDualStack *bool
-
- // Sets the resolver to resolve a dual-stack endpoint for the service.
- UseDualStackEndpoint endpoints.DualStackEndpointState
-
- // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
- UseFIPSEndpoint endpoints.FIPSEndpointState
-
- // SleepDelay is an override for the func the SDK will call when sleeping
- // during the lifecycle of a request. Specifically this will be used for
- // request delays. This value should only be used for testing. To adjust
- // the delay of a request see the aws/client.DefaultRetryer and
- // aws/request.Retryer.
- //
- // SleepDelay will prevent any Context from being used for canceling retry
- // delay of an API operation. It is recommended to not use SleepDelay at all
- // and specify a Retryer instead.
- SleepDelay func(time.Duration)
-
- // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
- // Will default to false. This would only be used for empty directory names in s3 requests.
- //
- // Example:
- // sess := session.Must(session.NewSession(&aws.Config{
- // DisableRestProtocolURICleaning: aws.Bool(true),
- // }))
- //
- // svc := s3.New(sess)
- // out, err := svc.GetObject(&s3.GetObjectInput {
- // Bucket: aws.String("bucketname"),
- // Key: aws.String("//foo//bar//moo"),
- // })
- DisableRestProtocolURICleaning *bool
-
- // EnableEndpointDiscovery will allow for endpoint discovery on operations that
- // have the definition in its model. By default, endpoint discovery is off.
- // To use EndpointDiscovery, Endpoint should be unset or set to an empty string.
- //
- // Example:
- // sess := session.Must(session.NewSession(&aws.Config{
- // EnableEndpointDiscovery: aws.Bool(true),
- // }))
- //
- // svc := s3.New(sess)
- // out, err := svc.GetObject(&s3.GetObjectInput {
- // Bucket: aws.String("bucketname"),
- // Key: aws.String("/foo/bar/moo"),
- // })
- EnableEndpointDiscovery *bool
-
- // DisableEndpointHostPrefix will disable the SDK's behavior of prefixing
- // request endpoint hosts with modeled information.
- //
- // Disabling this feature is useful when you want to use local endpoints
- // for testing that do not support the modeled host prefix pattern.
- DisableEndpointHostPrefix *bool
-
- // STSRegionalEndpoint will enable regional or legacy endpoint resolving
- STSRegionalEndpoint endpoints.STSRegionalEndpoint
-
- // S3UsEast1RegionalEndpoint will enable regional or legacy endpoint resolving
- S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
-}
-
-// NewConfig returns a new Config pointer that can be chained with builder
-// methods to set multiple configuration values inline without using pointers.
-//
-// // Create Session with MaxRetries configuration to be shared by multiple
-// // service clients.
-// sess := session.Must(session.NewSession(aws.NewConfig().
-// WithMaxRetries(3),
-// ))
-//
-// // Create S3 service client with a specific Region.
-// svc := s3.New(sess, aws.NewConfig().
-// WithRegion("us-west-2"),
-// )
-func NewConfig() *Config {
- return &Config{}
-}
-
-// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning
-// a Config pointer.
-func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
- c.CredentialsChainVerboseErrors = &verboseErrs
- return c
-}
-
-// WithCredentials sets a config Credentials value returning a Config pointer
-// for chaining.
-func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
- c.Credentials = creds
- return c
-}
-
-// WithEndpoint sets a config Endpoint value returning a Config pointer for
-// chaining.
-func (c *Config) WithEndpoint(endpoint string) *Config {
- c.Endpoint = &endpoint
- return c
-}
-
-// WithEndpointResolver sets a config EndpointResolver value returning a
-// Config pointer for chaining.
-func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config {
- c.EndpointResolver = resolver
- return c
-}
-
-// WithRegion sets a config Region value returning a Config pointer for
-// chaining.
-func (c *Config) WithRegion(region string) *Config {
- c.Region = ®ion
- return c
-}
-
-// WithDisableSSL sets a config DisableSSL value returning a Config pointer
-// for chaining.
-func (c *Config) WithDisableSSL(disable bool) *Config {
- c.DisableSSL = &disable
- return c
-}
-
-// WithHTTPClient sets a config HTTPClient value returning a Config pointer
-// for chaining.
-func (c *Config) WithHTTPClient(client *http.Client) *Config {
- c.HTTPClient = client
- return c
-}
-
-// WithMaxRetries sets a config MaxRetries value returning a Config pointer
-// for chaining.
-func (c *Config) WithMaxRetries(max int) *Config {
- c.MaxRetries = &max
- return c
-}
-
-// WithDisableParamValidation sets a config DisableParamValidation value
-// returning a Config pointer for chaining.
-func (c *Config) WithDisableParamValidation(disable bool) *Config {
- c.DisableParamValidation = &disable
- return c
-}
-
-// WithDisableComputeChecksums sets a config DisableComputeChecksums value
-// returning a Config pointer for chaining.
-func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
- c.DisableComputeChecksums = &disable
- return c
-}
-
-// WithLogLevel sets a config LogLevel value returning a Config pointer for
-// chaining.
-func (c *Config) WithLogLevel(level LogLevelType) *Config {
- c.LogLevel = &level
- return c
-}
-
-// WithLogger sets a config Logger value returning a Config pointer for
-// chaining.
-func (c *Config) WithLogger(logger Logger) *Config {
- c.Logger = logger
- return c
-}
-
-// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
-// pointer for chaining.
-func (c *Config) WithS3ForcePathStyle(force bool) *Config {
- c.S3ForcePathStyle = &force
- return c
-}
-
-// WithS3Disable100Continue sets a config S3Disable100Continue value returning
-// a Config pointer for chaining.
-func (c *Config) WithS3Disable100Continue(disable bool) *Config {
- c.S3Disable100Continue = &disable
- return c
-}
-
-// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
-// pointer for chaining.
-func (c *Config) WithS3UseAccelerate(enable bool) *Config {
- c.S3UseAccelerate = &enable
- return c
-
-}
-
-// WithS3DisableContentMD5Validation sets a config
-// S3DisableContentMD5Validation value returning a Config pointer for chaining.
-func (c *Config) WithS3DisableContentMD5Validation(enable bool) *Config {
- c.S3DisableContentMD5Validation = &enable
- return c
-
-}
-
-// WithS3UseARNRegion sets a config S3UseARNRegion value and
-// returning a Config pointer for chaining
-func (c *Config) WithS3UseARNRegion(enable bool) *Config {
- c.S3UseARNRegion = &enable
- return c
-}
-
-// WithUseDualStack sets a config UseDualStack value returning a Config
-// pointer for chaining.
-func (c *Config) WithUseDualStack(enable bool) *Config {
- c.UseDualStack = &enable
- return c
-}
-
-// WithUseFIPSEndpoint sets a config UseFIPSEndpoint value returning a Config
-// pointer for chaining.
-func (c *Config) WithUseFIPSEndpoint(enable bool) *Config {
- if enable {
- c.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
- } else {
- c.UseFIPSEndpoint = endpoints.FIPSEndpointStateDisabled
- }
- return c
-}
-
-// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
-// returning a Config pointer for chaining.
-func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
- c.EC2MetadataDisableTimeoutOverride = &enable
- return c
-}
-
-// WithEC2MetadataEnableFallback sets a config EC2MetadataEnableFallback value
-// returning a Config pointer for chaining.
-func (c *Config) WithEC2MetadataEnableFallback(v bool) *Config {
- c.EC2MetadataEnableFallback = &v
- return c
-}
-
-// WithSleepDelay overrides the function used to sleep while waiting for the
-// next retry. Defaults to time.Sleep.
-func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
- c.SleepDelay = fn
- return c
-}
-
-// WithEndpointDiscovery will set whether or not to use endpoint discovery.
-func (c *Config) WithEndpointDiscovery(t bool) *Config {
- c.EnableEndpointDiscovery = &t
- return c
-}
-
-// WithDisableEndpointHostPrefix will set whether or not to use modeled host prefix
-// when making requests.
-func (c *Config) WithDisableEndpointHostPrefix(t bool) *Config {
- c.DisableEndpointHostPrefix = &t
- return c
-}
-
-// WithSTSRegionalEndpoint will set whether or not to use regional endpoint flag
-// when resolving the endpoint for a service
-func (c *Config) WithSTSRegionalEndpoint(sre endpoints.STSRegionalEndpoint) *Config {
- c.STSRegionalEndpoint = sre
- return c
-}
-
-// WithS3UsEast1RegionalEndpoint will set whether or not to use regional endpoint flag
-// when resolving the endpoint for a service
-func (c *Config) WithS3UsEast1RegionalEndpoint(sre endpoints.S3UsEast1RegionalEndpoint) *Config {
- c.S3UsEast1RegionalEndpoint = sre
- return c
-}
-
-// WithLowerCaseHeaderMaps sets a config LowerCaseHeaderMaps value
-// returning a Config pointer for chaining.
-func (c *Config) WithLowerCaseHeaderMaps(t bool) *Config {
- c.LowerCaseHeaderMaps = &t
- return c
-}
-
-// WithDisableRestProtocolURICleaning sets a config DisableRestProtocolURICleaning value
-// returning a Config pointer for chaining.
-func (c *Config) WithDisableRestProtocolURICleaning(t bool) *Config {
- c.DisableRestProtocolURICleaning = &t
- return c
-}
-
-// MergeIn merges the passed in configs into the existing config object.
-func (c *Config) MergeIn(cfgs ...*Config) {
- for _, other := range cfgs {
- mergeInConfig(c, other)
- }
-}
-
-func mergeInConfig(dst *Config, other *Config) {
- if other == nil {
- return
- }
-
- if other.CredentialsChainVerboseErrors != nil {
- dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
- }
-
- if other.Credentials != nil {
- dst.Credentials = other.Credentials
- }
-
- if other.Endpoint != nil {
- dst.Endpoint = other.Endpoint
- }
-
- if other.EndpointResolver != nil {
- dst.EndpointResolver = other.EndpointResolver
- }
-
- if other.Region != nil {
- dst.Region = other.Region
- }
-
- if other.DisableSSL != nil {
- dst.DisableSSL = other.DisableSSL
- }
-
- if other.HTTPClient != nil {
- dst.HTTPClient = other.HTTPClient
- }
-
- if other.LogLevel != nil {
- dst.LogLevel = other.LogLevel
- }
-
- if other.Logger != nil {
- dst.Logger = other.Logger
- }
-
- if other.MaxRetries != nil {
- dst.MaxRetries = other.MaxRetries
- }
-
- if other.Retryer != nil {
- dst.Retryer = other.Retryer
- }
-
- if other.DisableParamValidation != nil {
- dst.DisableParamValidation = other.DisableParamValidation
- }
-
- if other.DisableComputeChecksums != nil {
- dst.DisableComputeChecksums = other.DisableComputeChecksums
- }
-
- if other.S3ForcePathStyle != nil {
- dst.S3ForcePathStyle = other.S3ForcePathStyle
- }
-
- if other.S3Disable100Continue != nil {
- dst.S3Disable100Continue = other.S3Disable100Continue
- }
-
- if other.S3UseAccelerate != nil {
- dst.S3UseAccelerate = other.S3UseAccelerate
- }
-
- if other.S3DisableContentMD5Validation != nil {
- dst.S3DisableContentMD5Validation = other.S3DisableContentMD5Validation
- }
-
- if other.S3UseARNRegion != nil {
- dst.S3UseARNRegion = other.S3UseARNRegion
- }
-
- if other.UseDualStack != nil {
- dst.UseDualStack = other.UseDualStack
- }
-
- if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset {
- dst.UseDualStackEndpoint = other.UseDualStackEndpoint
- }
-
- if other.EC2MetadataDisableTimeoutOverride != nil {
- dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
- }
-
- if other.EC2MetadataEnableFallback != nil {
- dst.EC2MetadataEnableFallback = other.EC2MetadataEnableFallback
- }
-
- if other.SleepDelay != nil {
- dst.SleepDelay = other.SleepDelay
- }
-
- if other.DisableRestProtocolURICleaning != nil {
- dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
- }
-
- if other.EnforceShouldRetryCheck != nil {
- dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
- }
-
- if other.EnableEndpointDiscovery != nil {
- dst.EnableEndpointDiscovery = other.EnableEndpointDiscovery
- }
-
- if other.DisableEndpointHostPrefix != nil {
- dst.DisableEndpointHostPrefix = other.DisableEndpointHostPrefix
- }
-
- if other.STSRegionalEndpoint != endpoints.UnsetSTSEndpoint {
- dst.STSRegionalEndpoint = other.STSRegionalEndpoint
- }
-
- if other.S3UsEast1RegionalEndpoint != endpoints.UnsetS3UsEast1Endpoint {
- dst.S3UsEast1RegionalEndpoint = other.S3UsEast1RegionalEndpoint
- }
-
- if other.LowerCaseHeaderMaps != nil {
- dst.LowerCaseHeaderMaps = other.LowerCaseHeaderMaps
- }
-
- if other.UseDualStackEndpoint != endpoints.DualStackEndpointStateUnset {
- dst.UseDualStackEndpoint = other.UseDualStackEndpoint
- }
-
- if other.UseFIPSEndpoint != endpoints.FIPSEndpointStateUnset {
- dst.UseFIPSEndpoint = other.UseFIPSEndpoint
- }
-}
-
-// Copy will return a shallow copy of the Config object. If any additional
-// configurations are provided they will be merged into the new config returned.
-func (c *Config) Copy(cfgs ...*Config) *Config {
- dst := &Config{}
- dst.MergeIn(c)
-
- for _, cfg := range cfgs {
- dst.MergeIn(cfg)
- }
-
- return dst
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
deleted file mode 100644
index 89aad2c67..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_5.go
+++ /dev/null
@@ -1,38 +0,0 @@
-//go:build !go1.9
-// +build !go1.9
-
-package aws
-
-import "time"
-
-// Context is an copy of the Go v1.7 stdlib's context.Context interface.
-// It is represented as a SDK interface to enable you to use the "WithContext"
-// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
-//
-// See https://golang.org/pkg/context on how to use contexts.
-type Context interface {
- // Deadline returns the time when work done on behalf of this context
- // should be canceled. Deadline returns ok==false when no deadline is
- // set. Successive calls to Deadline return the same results.
- Deadline() (deadline time.Time, ok bool)
-
- // Done returns a channel that's closed when work done on behalf of this
- // context should be canceled. Done may return nil if this context can
- // never be canceled. Successive calls to Done return the same value.
- Done() <-chan struct{}
-
- // Err returns a non-nil error value after Done is closed. Err returns
- // Canceled if the context was canceled or DeadlineExceeded if the
- // context's deadline passed. No other values for Err are defined.
- // After Done is closed, successive calls to Err return the same value.
- Err() error
-
- // Value returns the value associated with this context for key, or nil
- // if no value is associated with key. Successive calls to Value with
- // the same key returns the same result.
- //
- // Use context values only for request-scoped data that transits
- // processes and API boundaries, not for passing optional parameters to
- // functions.
- Value(key interface{}) interface{}
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
deleted file mode 100644
index 6ee9ddd18..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_9.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build go1.9
-// +build go1.9
-
-package aws
-
-import "context"
-
-// Context is an alias of the Go stdlib's context.Context interface.
-// It can be used within the SDK's API operation "WithContext" methods.
-//
-// See https://golang.org/pkg/context on how to use contexts.
-type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
deleted file mode 100644
index 313218190..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_5.go
+++ /dev/null
@@ -1,23 +0,0 @@
-//go:build !go1.7
-// +build !go1.7
-
-package aws
-
-import (
- "github.com/aws/aws-sdk-go/internal/context"
-)
-
-// BackgroundContext returns a context that will never be canceled, has no
-// values, and no deadline. This context is used by the SDK to provide
-// backwards compatibility with non-context API operations and functionality.
-//
-// Go 1.6 and before:
-// This context function is equivalent to context.Background in the Go stdlib.
-//
-// Go 1.7 and later:
-// The context returned will be the value returned by context.Background()
-//
-// See https://golang.org/pkg/context for more information on Contexts.
-func BackgroundContext() Context {
- return context.BackgroundCtx
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
deleted file mode 100644
index 9975d561b..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_background_1_7.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build go1.7
-// +build go1.7
-
-package aws
-
-import "context"
-
-// BackgroundContext returns a context that will never be canceled, has no
-// values, and no deadline. This context is used by the SDK to provide
-// backwards compatibility with non-context API operations and functionality.
-//
-// Go 1.6 and before:
-// This context function is equivalent to context.Background in the Go stdlib.
-//
-// Go 1.7 and later:
-// The context returned will be the value returned by context.Background()
-//
-// See https://golang.org/pkg/context for more information on Contexts.
-func BackgroundContext() Context {
- return context.Background()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go b/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
deleted file mode 100644
index 304fd1561..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_sleep.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package aws
-
-import (
- "time"
-)
-
-// SleepWithContext will wait for the timer duration to expire, or the context
-// is canceled. Which ever happens first. If the context is canceled the Context's
-// error will be returned.
-//
-// Expects Context to always return a non-nil error if the Done channel is closed.
-func SleepWithContext(ctx Context, dur time.Duration) error {
- t := time.NewTimer(dur)
- defer t.Stop()
-
- select {
- case <-t.C:
- break
- case <-ctx.Done():
- return ctx.Err()
- }
-
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
deleted file mode 100644
index 4e076c183..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
+++ /dev/null
@@ -1,918 +0,0 @@
-package aws
-
-import "time"
-
-// String returns a pointer to the string value passed in.
-func String(v string) *string {
- return &v
-}
-
-// StringValue returns the value of the string pointer passed in or
-// "" if the pointer is nil.
-func StringValue(v *string) string {
- if v != nil {
- return *v
- }
- return ""
-}
-
-// StringSlice converts a slice of string values into a slice of
-// string pointers
-func StringSlice(src []string) []*string {
- dst := make([]*string, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// StringValueSlice converts a slice of string pointers into a slice of
-// string values
-func StringValueSlice(src []*string) []string {
- dst := make([]string, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// StringMap converts a string map of string values into a string
-// map of string pointers
-func StringMap(src map[string]string) map[string]*string {
- dst := make(map[string]*string)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// StringValueMap converts a string map of string pointers into a string
-// map of string values
-func StringValueMap(src map[string]*string) map[string]string {
- dst := make(map[string]string)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Bool returns a pointer to the bool value passed in.
-func Bool(v bool) *bool {
- return &v
-}
-
-// BoolValue returns the value of the bool pointer passed in or
-// false if the pointer is nil.
-func BoolValue(v *bool) bool {
- if v != nil {
- return *v
- }
- return false
-}
-
-// BoolSlice converts a slice of bool values into a slice of
-// bool pointers
-func BoolSlice(src []bool) []*bool {
- dst := make([]*bool, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// BoolValueSlice converts a slice of bool pointers into a slice of
-// bool values
-func BoolValueSlice(src []*bool) []bool {
- dst := make([]bool, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// BoolMap converts a string map of bool values into a string
-// map of bool pointers
-func BoolMap(src map[string]bool) map[string]*bool {
- dst := make(map[string]*bool)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// BoolValueMap converts a string map of bool pointers into a string
-// map of bool values
-func BoolValueMap(src map[string]*bool) map[string]bool {
- dst := make(map[string]bool)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Int returns a pointer to the int value passed in.
-func Int(v int) *int {
- return &v
-}
-
-// IntValue returns the value of the int pointer passed in or
-// 0 if the pointer is nil.
-func IntValue(v *int) int {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// IntSlice converts a slice of int values into a slice of
-// int pointers
-func IntSlice(src []int) []*int {
- dst := make([]*int, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// IntValueSlice converts a slice of int pointers into a slice of
-// int values
-func IntValueSlice(src []*int) []int {
- dst := make([]int, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// IntMap converts a string map of int values into a string
-// map of int pointers
-func IntMap(src map[string]int) map[string]*int {
- dst := make(map[string]*int)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// IntValueMap converts a string map of int pointers into a string
-// map of int values
-func IntValueMap(src map[string]*int) map[string]int {
- dst := make(map[string]int)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Uint returns a pointer to the uint value passed in.
-func Uint(v uint) *uint {
- return &v
-}
-
-// UintValue returns the value of the uint pointer passed in or
-// 0 if the pointer is nil.
-func UintValue(v *uint) uint {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// UintSlice converts a slice of uint values uinto a slice of
-// uint pointers
-func UintSlice(src []uint) []*uint {
- dst := make([]*uint, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// UintValueSlice converts a slice of uint pointers uinto a slice of
-// uint values
-func UintValueSlice(src []*uint) []uint {
- dst := make([]uint, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// UintMap converts a string map of uint values uinto a string
-// map of uint pointers
-func UintMap(src map[string]uint) map[string]*uint {
- dst := make(map[string]*uint)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// UintValueMap converts a string map of uint pointers uinto a string
-// map of uint values
-func UintValueMap(src map[string]*uint) map[string]uint {
- dst := make(map[string]uint)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Int8 returns a pointer to the int8 value passed in.
-func Int8(v int8) *int8 {
- return &v
-}
-
-// Int8Value returns the value of the int8 pointer passed in or
-// 0 if the pointer is nil.
-func Int8Value(v *int8) int8 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Int8Slice converts a slice of int8 values into a slice of
-// int8 pointers
-func Int8Slice(src []int8) []*int8 {
- dst := make([]*int8, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Int8ValueSlice converts a slice of int8 pointers into a slice of
-// int8 values
-func Int8ValueSlice(src []*int8) []int8 {
- dst := make([]int8, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Int8Map converts a string map of int8 values into a string
-// map of int8 pointers
-func Int8Map(src map[string]int8) map[string]*int8 {
- dst := make(map[string]*int8)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Int8ValueMap converts a string map of int8 pointers into a string
-// map of int8 values
-func Int8ValueMap(src map[string]*int8) map[string]int8 {
- dst := make(map[string]int8)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Int16 returns a pointer to the int16 value passed in.
-func Int16(v int16) *int16 {
- return &v
-}
-
-// Int16Value returns the value of the int16 pointer passed in or
-// 0 if the pointer is nil.
-func Int16Value(v *int16) int16 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Int16Slice converts a slice of int16 values into a slice of
-// int16 pointers
-func Int16Slice(src []int16) []*int16 {
- dst := make([]*int16, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Int16ValueSlice converts a slice of int16 pointers into a slice of
-// int16 values
-func Int16ValueSlice(src []*int16) []int16 {
- dst := make([]int16, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Int16Map converts a string map of int16 values into a string
-// map of int16 pointers
-func Int16Map(src map[string]int16) map[string]*int16 {
- dst := make(map[string]*int16)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Int16ValueMap converts a string map of int16 pointers into a string
-// map of int16 values
-func Int16ValueMap(src map[string]*int16) map[string]int16 {
- dst := make(map[string]int16)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Int32 returns a pointer to the int32 value passed in.
-func Int32(v int32) *int32 {
- return &v
-}
-
-// Int32Value returns the value of the int32 pointer passed in or
-// 0 if the pointer is nil.
-func Int32Value(v *int32) int32 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Int32Slice converts a slice of int32 values into a slice of
-// int32 pointers
-func Int32Slice(src []int32) []*int32 {
- dst := make([]*int32, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Int32ValueSlice converts a slice of int32 pointers into a slice of
-// int32 values
-func Int32ValueSlice(src []*int32) []int32 {
- dst := make([]int32, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Int32Map converts a string map of int32 values into a string
-// map of int32 pointers
-func Int32Map(src map[string]int32) map[string]*int32 {
- dst := make(map[string]*int32)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Int32ValueMap converts a string map of int32 pointers into a string
-// map of int32 values
-func Int32ValueMap(src map[string]*int32) map[string]int32 {
- dst := make(map[string]int32)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Int64 returns a pointer to the int64 value passed in.
-func Int64(v int64) *int64 {
- return &v
-}
-
-// Int64Value returns the value of the int64 pointer passed in or
-// 0 if the pointer is nil.
-func Int64Value(v *int64) int64 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Int64Slice converts a slice of int64 values into a slice of
-// int64 pointers
-func Int64Slice(src []int64) []*int64 {
- dst := make([]*int64, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Int64ValueSlice converts a slice of int64 pointers into a slice of
-// int64 values
-func Int64ValueSlice(src []*int64) []int64 {
- dst := make([]int64, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Int64Map converts a string map of int64 values into a string
-// map of int64 pointers
-func Int64Map(src map[string]int64) map[string]*int64 {
- dst := make(map[string]*int64)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Int64ValueMap converts a string map of int64 pointers into a string
-// map of int64 values
-func Int64ValueMap(src map[string]*int64) map[string]int64 {
- dst := make(map[string]int64)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Uint8 returns a pointer to the uint8 value passed in.
-func Uint8(v uint8) *uint8 {
- return &v
-}
-
-// Uint8Value returns the value of the uint8 pointer passed in or
-// 0 if the pointer is nil.
-func Uint8Value(v *uint8) uint8 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Uint8Slice converts a slice of uint8 values into a slice of
-// uint8 pointers
-func Uint8Slice(src []uint8) []*uint8 {
- dst := make([]*uint8, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Uint8ValueSlice converts a slice of uint8 pointers into a slice of
-// uint8 values
-func Uint8ValueSlice(src []*uint8) []uint8 {
- dst := make([]uint8, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Uint8Map converts a string map of uint8 values into a string
-// map of uint8 pointers
-func Uint8Map(src map[string]uint8) map[string]*uint8 {
- dst := make(map[string]*uint8)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Uint8ValueMap converts a string map of uint8 pointers into a string
-// map of uint8 values
-func Uint8ValueMap(src map[string]*uint8) map[string]uint8 {
- dst := make(map[string]uint8)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Uint16 returns a pointer to the uint16 value passed in.
-func Uint16(v uint16) *uint16 {
- return &v
-}
-
-// Uint16Value returns the value of the uint16 pointer passed in or
-// 0 if the pointer is nil.
-func Uint16Value(v *uint16) uint16 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Uint16Slice converts a slice of uint16 values into a slice of
-// uint16 pointers
-func Uint16Slice(src []uint16) []*uint16 {
- dst := make([]*uint16, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Uint16ValueSlice converts a slice of uint16 pointers into a slice of
-// uint16 values
-func Uint16ValueSlice(src []*uint16) []uint16 {
- dst := make([]uint16, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Uint16Map converts a string map of uint16 values into a string
-// map of uint16 pointers
-func Uint16Map(src map[string]uint16) map[string]*uint16 {
- dst := make(map[string]*uint16)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Uint16ValueMap converts a string map of uint16 pointers into a string
-// map of uint16 values
-func Uint16ValueMap(src map[string]*uint16) map[string]uint16 {
- dst := make(map[string]uint16)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Uint32 returns a pointer to the uint32 value passed in.
-func Uint32(v uint32) *uint32 {
- return &v
-}
-
-// Uint32Value returns the value of the uint32 pointer passed in or
-// 0 if the pointer is nil.
-func Uint32Value(v *uint32) uint32 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Uint32Slice converts a slice of uint32 values into a slice of
-// uint32 pointers
-func Uint32Slice(src []uint32) []*uint32 {
- dst := make([]*uint32, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Uint32ValueSlice converts a slice of uint32 pointers into a slice of
-// uint32 values
-func Uint32ValueSlice(src []*uint32) []uint32 {
- dst := make([]uint32, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Uint32Map converts a string map of uint32 values into a string
-// map of uint32 pointers
-func Uint32Map(src map[string]uint32) map[string]*uint32 {
- dst := make(map[string]*uint32)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Uint32ValueMap converts a string map of uint32 pointers into a string
-// map of uint32 values
-func Uint32ValueMap(src map[string]*uint32) map[string]uint32 {
- dst := make(map[string]uint32)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Uint64 returns a pointer to the uint64 value passed in.
-func Uint64(v uint64) *uint64 {
- return &v
-}
-
-// Uint64Value returns the value of the uint64 pointer passed in or
-// 0 if the pointer is nil.
-func Uint64Value(v *uint64) uint64 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Uint64Slice converts a slice of uint64 values into a slice of
-// uint64 pointers
-func Uint64Slice(src []uint64) []*uint64 {
- dst := make([]*uint64, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Uint64ValueSlice converts a slice of uint64 pointers into a slice of
-// uint64 values
-func Uint64ValueSlice(src []*uint64) []uint64 {
- dst := make([]uint64, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Uint64Map converts a string map of uint64 values into a string
-// map of uint64 pointers
-func Uint64Map(src map[string]uint64) map[string]*uint64 {
- dst := make(map[string]*uint64)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Uint64ValueMap converts a string map of uint64 pointers into a string
-// map of uint64 values
-func Uint64ValueMap(src map[string]*uint64) map[string]uint64 {
- dst := make(map[string]uint64)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Float32 returns a pointer to the float32 value passed in.
-func Float32(v float32) *float32 {
- return &v
-}
-
-// Float32Value returns the value of the float32 pointer passed in or
-// 0 if the pointer is nil.
-func Float32Value(v *float32) float32 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Float32Slice converts a slice of float32 values into a slice of
-// float32 pointers
-func Float32Slice(src []float32) []*float32 {
- dst := make([]*float32, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Float32ValueSlice converts a slice of float32 pointers into a slice of
-// float32 values
-func Float32ValueSlice(src []*float32) []float32 {
- dst := make([]float32, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Float32Map converts a string map of float32 values into a string
-// map of float32 pointers
-func Float32Map(src map[string]float32) map[string]*float32 {
- dst := make(map[string]*float32)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Float32ValueMap converts a string map of float32 pointers into a string
-// map of float32 values
-func Float32ValueMap(src map[string]*float32) map[string]float32 {
- dst := make(map[string]float32)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Float64 returns a pointer to the float64 value passed in.
-func Float64(v float64) *float64 {
- return &v
-}
-
-// Float64Value returns the value of the float64 pointer passed in or
-// 0 if the pointer is nil.
-func Float64Value(v *float64) float64 {
- if v != nil {
- return *v
- }
- return 0
-}
-
-// Float64Slice converts a slice of float64 values into a slice of
-// float64 pointers
-func Float64Slice(src []float64) []*float64 {
- dst := make([]*float64, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// Float64ValueSlice converts a slice of float64 pointers into a slice of
-// float64 values
-func Float64ValueSlice(src []*float64) []float64 {
- dst := make([]float64, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// Float64Map converts a string map of float64 values into a string
-// map of float64 pointers
-func Float64Map(src map[string]float64) map[string]*float64 {
- dst := make(map[string]*float64)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// Float64ValueMap converts a string map of float64 pointers into a string
-// map of float64 values
-func Float64ValueMap(src map[string]*float64) map[string]float64 {
- dst := make(map[string]float64)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
-
-// Time returns a pointer to the time.Time value passed in.
-func Time(v time.Time) *time.Time {
- return &v
-}
-
-// TimeValue returns the value of the time.Time pointer passed in or
-// time.Time{} if the pointer is nil.
-func TimeValue(v *time.Time) time.Time {
- if v != nil {
- return *v
- }
- return time.Time{}
-}
-
-// SecondsTimeValue converts an int64 pointer to a time.Time value
-// representing seconds since Epoch or time.Time{} if the pointer is nil.
-func SecondsTimeValue(v *int64) time.Time {
- if v != nil {
- return time.Unix((*v / 1000), 0)
- }
- return time.Time{}
-}
-
-// MillisecondsTimeValue converts an int64 pointer to a time.Time value
-// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil.
-func MillisecondsTimeValue(v *int64) time.Time {
- if v != nil {
- return time.Unix(0, (*v * 1000000))
- }
- return time.Time{}
-}
-
-// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
-// The result is undefined if the Unix time cannot be represented by an int64.
-// Which includes calling TimeUnixMilli on a zero Time is undefined.
-//
-// This utility is useful for service API's such as CloudWatch Logs which require
-// their unix time values to be in milliseconds.
-//
-// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
-func TimeUnixMilli(t time.Time) int64 {
- return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
-}
-
-// TimeSlice converts a slice of time.Time values into a slice of
-// time.Time pointers
-func TimeSlice(src []time.Time) []*time.Time {
- dst := make([]*time.Time, len(src))
- for i := 0; i < len(src); i++ {
- dst[i] = &(src[i])
- }
- return dst
-}
-
-// TimeValueSlice converts a slice of time.Time pointers into a slice of
-// time.Time values
-func TimeValueSlice(src []*time.Time) []time.Time {
- dst := make([]time.Time, len(src))
- for i := 0; i < len(src); i++ {
- if src[i] != nil {
- dst[i] = *(src[i])
- }
- }
- return dst
-}
-
-// TimeMap converts a string map of time.Time values into a string
-// map of time.Time pointers
-func TimeMap(src map[string]time.Time) map[string]*time.Time {
- dst := make(map[string]*time.Time)
- for k, val := range src {
- v := val
- dst[k] = &v
- }
- return dst
-}
-
-// TimeValueMap converts a string map of time.Time pointers into a string
-// map of time.Time values
-func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
- dst := make(map[string]time.Time)
- for k, val := range src {
- if val != nil {
- dst[k] = *val
- }
- }
- return dst
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
deleted file mode 100644
index 140242dd1..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/awsinternal.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// DO NOT EDIT
-package corehandlers
-
-const isAwsInternal = ""
\ No newline at end of file
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
deleted file mode 100644
index 36a915efe..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ /dev/null
@@ -1,232 +0,0 @@
-package corehandlers
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "regexp"
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// Interface for matching types which also have a Len method.
-type lener interface {
- Len() int
-}
-
-// BuildContentLengthHandler builds the content length of a request based on the body,
-// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
-// to determine request body length and no "Content-Length" was specified it will panic.
-//
-// The Content-Length will only be added to the request if the length of the body
-// is greater than 0. If the body is empty or the current `Content-Length`
-// header is <= 0, the header will also be stripped.
-var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
- var length int64
-
- if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
- length, _ = strconv.ParseInt(slength, 10, 64)
- } else {
- if r.Body != nil {
- var err error
- length, err = aws.SeekerLen(r.Body)
- if err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization, "failed to get request body's length", err)
- return
- }
- }
- }
-
- if length > 0 {
- r.HTTPRequest.ContentLength = length
- r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
- } else {
- r.HTTPRequest.ContentLength = 0
- r.HTTPRequest.Header.Del("Content-Length")
- }
-}}
-
-var reStatusCode = regexp.MustCompile(`^(\d{3})`)
-
-// ValidateReqSigHandler is a request handler to ensure that the request's
-// signature doesn't expire before it is sent. This can happen when a request
-// is built and signed significantly before it is sent. Or significant delays
-// occur when retrying requests that would cause the signature to expire.
-var ValidateReqSigHandler = request.NamedHandler{
- Name: "core.ValidateReqSigHandler",
- Fn: func(r *request.Request) {
- // Unsigned requests are not signed
- if r.Config.Credentials == credentials.AnonymousCredentials {
- return
- }
-
- signedTime := r.Time
- if !r.LastSignedAt.IsZero() {
- signedTime = r.LastSignedAt
- }
-
- // 5 minutes to allow for some clock skew/delays in transmission.
- // Would be improved with aws/aws-sdk-go#423
- if signedTime.Add(5 * time.Minute).After(time.Now()) {
- return
- }
-
- fmt.Println("request expired, resigning")
- r.Sign()
- },
-}
-
-// SendHandler is a request handler to send service request using HTTP client.
-var SendHandler = request.NamedHandler{
- Name: "core.SendHandler",
- Fn: func(r *request.Request) {
- sender := sendFollowRedirects
- if r.DisableFollowRedirects {
- sender = sendWithoutFollowRedirects
- }
-
- if request.NoBody == r.HTTPRequest.Body {
- // Strip off the request body if the NoBody reader was used as a
- // place holder for a request body. This prevents the SDK from
- // making requests with a request body when it would be invalid
- // to do so.
- //
- // Use a shallow copy of the http.Request to ensure the race condition
- // of transport on Body will not trigger
- reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
- reqCopy.Body = nil
- r.HTTPRequest = &reqCopy
- defer func() {
- r.HTTPRequest = reqOrig
- }()
- }
-
- var err error
- r.HTTPResponse, err = sender(r)
- if err != nil {
- handleSendError(r, err)
- }
- },
-}
-
-func sendFollowRedirects(r *request.Request) (*http.Response, error) {
- return r.Config.HTTPClient.Do(r.HTTPRequest)
-}
-
-func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
- transport := r.Config.HTTPClient.Transport
- if transport == nil {
- transport = http.DefaultTransport
- }
-
- return transport.RoundTrip(r.HTTPRequest)
-}
-
-func handleSendError(r *request.Request, err error) {
- // Prevent leaking if an HTTPResponse was returned. Clean up
- // the body.
- if r.HTTPResponse != nil {
- r.HTTPResponse.Body.Close()
- }
- // Capture the case where url.Error is returned for error processing
- // response. e.g. 301 without location header comes back as string
- // error and r.HTTPResponse is nil. Other URL redirect errors will
- // comeback in a similar method.
- if e, ok := err.(*url.Error); ok && e.Err != nil {
- if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
- code, _ := strconv.ParseInt(s[1], 10, 64)
- r.HTTPResponse = &http.Response{
- StatusCode: int(code),
- Status: http.StatusText(int(code)),
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- return
- }
- }
- if r.HTTPResponse == nil {
- // Add a dummy request response object to ensure the HTTPResponse
- // value is consistent.
- r.HTTPResponse = &http.Response{
- StatusCode: int(0),
- Status: http.StatusText(int(0)),
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- }
- // Catch all request errors, and let the default retrier determine
- // if the error is retryable.
- r.Error = awserr.New(request.ErrCodeRequestError, "send request failed", err)
-
- // Override the error with a context canceled error, if that was canceled.
- ctx := r.Context()
- select {
- case <-ctx.Done():
- r.Error = awserr.New(request.CanceledErrorCode,
- "request context canceled", ctx.Err())
- r.Retryable = aws.Bool(false)
- default:
- }
-}
-
-// ValidateResponseHandler is a request handler to validate service response.
-var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
- if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
- // this may be replaced by an UnmarshalError handler
- r.Error = awserr.New("UnknownError", "unknown error", r.Error)
- }
-}}
-
-// AfterRetryHandler performs final checks to determine if the request should
-// be retried and how long to delay.
-var AfterRetryHandler = request.NamedHandler{
- Name: "core.AfterRetryHandler",
- Fn: func(r *request.Request) {
- // If one of the other handlers already set the retry state
- // we don't want to override it based on the service's state
- if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
- r.Retryable = aws.Bool(r.ShouldRetry(r))
- }
-
- if r.WillRetry() {
- r.RetryDelay = r.RetryRules(r)
-
- if sleepFn := r.Config.SleepDelay; sleepFn != nil {
- // Support SleepDelay for backwards compatibility and testing
- sleepFn(r.RetryDelay)
- } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
- r.Error = awserr.New(request.CanceledErrorCode,
- "request context canceled", err)
- r.Retryable = aws.Bool(false)
- return
- }
-
- // when the expired token exception occurs the credentials
- // need to be expired locally so that the next request to
- // get credentials will trigger a credentials refresh.
- if r.IsErrorExpired() {
- r.Config.Credentials.Expire()
- }
-
- r.RetryCount++
- r.Error = nil
- }
- }}
-
-// ValidateEndpointHandler is a request handler to validate a request had the
-// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
-// region is not valid.
-var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
- if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
- r.Error = aws.ErrMissingRegion
- } else if r.ClientInfo.Endpoint == "" {
- // Was any endpoint provided by the user, or one was derived by the
- // SDK's endpoint resolver?
- r.Error = aws.ErrMissingEndpoint
- }
-}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
deleted file mode 100644
index 7d50b1557..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package corehandlers
-
-import "github.com/aws/aws-sdk-go/aws/request"
-
-// ValidateParametersHandler is a request handler to validate the input parameters.
-// Validating parameters only has meaning if done prior to the request being sent.
-var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
- if !r.ParamsFilled() {
- return
- }
-
- if v, ok := r.Params.(request.Validator); ok {
- if err := v.Validate(); err != nil {
- r.Error = err
- }
- }
-}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
deleted file mode 100644
index ac842c55d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/user_agent.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package corehandlers
-
-import (
- "os"
- "runtime"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// SDKVersionUserAgentHandler is a request handler for adding the SDK Version
-// to the user agent.
-var SDKVersionUserAgentHandler = request.NamedHandler{
- Name: "core.SDKVersionUserAgentHandler",
- Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
- runtime.Version(), runtime.GOOS, runtime.GOARCH),
-}
-
-const execEnvVar = `AWS_EXECUTION_ENV`
-const execEnvUAKey = `exec-env`
-
-// AddHostExecEnvUserAgentHander is a request handler appending the SDK's
-// execution environment to the user agent.
-//
-// If the environment variable AWS_EXECUTION_ENV is set, its value will be
-// appended to the user agent string.
-var AddHostExecEnvUserAgentHander = request.NamedHandler{
- Name: "core.AddHostExecEnvUserAgentHander",
- Fn: func(r *request.Request) {
- v := os.Getenv(execEnvVar)
- if len(v) == 0 {
- return
- }
-
- request.AddToUserAgent(r, execEnvUAKey+"/"+v)
- },
-}
-
-var AddAwsInternal = request.NamedHandler{
- Name: "core.AddAwsInternal",
- Fn: func(r *request.Request) {
- if len(isAwsInternal) == 0 {
- return
- }
- request.AddToUserAgent(r, isAwsInternal)
- },
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
deleted file mode 100644
index 3ad1e798d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package credentials
-
-import (
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-var (
- // ErrNoValidProvidersFoundInChain Is returned when there are no valid
- // providers in the ChainProvider.
- //
- // This has been deprecated. For verbose error messaging set
- // aws.Config.CredentialsChainVerboseErrors to true.
- ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
- `no valid providers in chain. Deprecated.
- For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
- nil)
-)
-
-// A ChainProvider will search for a provider which returns credentials
-// and cache that provider until Retrieve is called again.
-//
-// The ChainProvider provides a way of chaining multiple providers together
-// which will pick the first available using priority order of the Providers
-// in the list.
-//
-// If none of the Providers retrieve valid credentials Value, ChainProvider's
-// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
-//
-// If a Provider is found which returns valid credentials Value ChainProvider
-// will cache that Provider for all calls to IsExpired(), until Retrieve is
-// called again.
-//
-// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
-// In this example EnvProvider will first check if any credentials are available
-// via the environment variables. If there are none ChainProvider will check
-// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
-// does not return any credentials ChainProvider will return the error
-// ErrNoValidProvidersFoundInChain
-//
-// creds := credentials.NewChainCredentials(
-// []credentials.Provider{
-// &credentials.EnvProvider{},
-// &ec2rolecreds.EC2RoleProvider{
-// Client: ec2metadata.New(sess),
-// },
-// })
-//
-// // Usage of ChainCredentials with aws.Config
-// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
-// Credentials: creds,
-// })))
-//
-type ChainProvider struct {
- Providers []Provider
- curr Provider
- VerboseErrors bool
-}
-
-// NewChainCredentials returns a pointer to a new Credentials object
-// wrapping a chain of providers.
-func NewChainCredentials(providers []Provider) *Credentials {
- return NewCredentials(&ChainProvider{
- Providers: append([]Provider{}, providers...),
- })
-}
-
-// Retrieve returns the credentials value or error if no provider returned
-// without error.
-//
-// If a provider is found it will be cached and any calls to IsExpired()
-// will return the expired state of the cached provider.
-func (c *ChainProvider) Retrieve() (Value, error) {
- var errs []error
- for _, p := range c.Providers {
- creds, err := p.Retrieve()
- if err == nil {
- c.curr = p
- return creds, nil
- }
- errs = append(errs, err)
- }
- c.curr = nil
-
- var err error
- err = ErrNoValidProvidersFoundInChain
- if c.VerboseErrors {
- err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
- }
- return Value{}, err
-}
-
-// IsExpired will returned the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
-func (c *ChainProvider) IsExpired() bool {
- if c.curr != nil {
- return c.curr.IsExpired()
- }
-
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
deleted file mode 100644
index 6e3406b1f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.5.go
+++ /dev/null
@@ -1,23 +0,0 @@
-//go:build !go1.7
-// +build !go1.7
-
-package credentials
-
-import (
- "github.com/aws/aws-sdk-go/internal/context"
-)
-
-// backgroundContext returns a context that will never be canceled, has no
-// values, and no deadline. This context is used by the SDK to provide
-// backwards compatibility with non-context API operations and functionality.
-//
-// Go 1.6 and before:
-// This context function is equivalent to context.Background in the Go stdlib.
-//
-// Go 1.7 and later:
-// The context returned will be the value returned by context.Background()
-//
-// See https://golang.org/pkg/context for more information on Contexts.
-func backgroundContext() Context {
- return context.BackgroundCtx
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
deleted file mode 100644
index a68df0ee7..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_background_go1.7.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build go1.7
-// +build go1.7
-
-package credentials
-
-import "context"
-
-// backgroundContext returns a context that will never be canceled, has no
-// values, and no deadline. This context is used by the SDK to provide
-// backwards compatibility with non-context API operations and functionality.
-//
-// Go 1.6 and before:
-// This context function is equivalent to context.Background in the Go stdlib.
-//
-// Go 1.7 and later:
-// The context returned will be the value returned by context.Background()
-//
-// See https://golang.org/pkg/context for more information on Contexts.
-func backgroundContext() Context {
- return context.Background()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
deleted file mode 100644
index 0345fab2d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.5.go
+++ /dev/null
@@ -1,40 +0,0 @@
-//go:build !go1.9
-// +build !go1.9
-
-package credentials
-
-import "time"
-
-// Context is an copy of the Go v1.7 stdlib's context.Context interface.
-// It is represented as a SDK interface to enable you to use the "WithContext"
-// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
-//
-// This type, aws.Context, and context.Context are equivalent.
-//
-// See https://golang.org/pkg/context on how to use contexts.
-type Context interface {
- // Deadline returns the time when work done on behalf of this context
- // should be canceled. Deadline returns ok==false when no deadline is
- // set. Successive calls to Deadline return the same results.
- Deadline() (deadline time.Time, ok bool)
-
- // Done returns a channel that's closed when work done on behalf of this
- // context should be canceled. Done may return nil if this context can
- // never be canceled. Successive calls to Done return the same value.
- Done() <-chan struct{}
-
- // Err returns a non-nil error value after Done is closed. Err returns
- // Canceled if the context was canceled or DeadlineExceeded if the
- // context's deadline passed. No other values for Err are defined.
- // After Done is closed, successive calls to Err return the same value.
- Err() error
-
- // Value returns the value associated with this context for key, or nil
- // if no value is associated with key. Successive calls to Value with
- // the same key returns the same result.
- //
- // Use context values only for request-scoped data that transits
- // processes and API boundaries, not for passing optional parameters to
- // functions.
- Value(key interface{}) interface{}
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
deleted file mode 100644
index 79018aba7..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/context_go1.9.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build go1.9
-// +build go1.9
-
-package credentials
-
-import "context"
-
-// Context is an alias of the Go stdlib's context.Context interface.
-// It can be used within the SDK's API operation "WithContext" methods.
-//
-// This type, aws.Context, and context.Context are equivalent.
-//
-// See https://golang.org/pkg/context on how to use contexts.
-type Context = context.Context
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
deleted file mode 100644
index a880a3de8..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ /dev/null
@@ -1,383 +0,0 @@
-// Package credentials provides credential retrieval and management
-//
-// The Credentials is the primary method of getting access to and managing
-// credentials Values. Using dependency injection retrieval of the credential
-// values is handled by a object which satisfies the Provider interface.
-//
-// By default the Credentials.Get() will cache the successful result of a
-// Provider's Retrieve() until Provider.IsExpired() returns true. At which
-// point Credentials will call Provider's Retrieve() to get new credential Value.
-//
-// The Provider is responsible for determining when credentials Value have expired.
-// It is also important to note that Credentials will always call Retrieve the
-// first time Credentials.Get() is called.
-//
-// Example of using the environment variable credentials.
-//
-// creds := credentials.NewEnvCredentials()
-//
-// // Retrieve the credentials value
-// credValue, err := creds.Get()
-// if err != nil {
-// // handle error
-// }
-//
-// Example of forcing credentials to expire and be refreshed on the next Get().
-// This may be helpful to proactively expire credentials and refresh them sooner
-// than they would naturally expire on their own.
-//
-// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
-// creds.Expire()
-// credsValue, err := creds.Get()
-// // New credentials will be retrieved instead of from cache.
-//
-//
-// Custom Provider
-//
-// Each Provider built into this package also provides a helper method to generate
-// a Credentials pointer setup with the provider. To use a custom Provider just
-// create a type which satisfies the Provider interface and pass it to the
-// NewCredentials method.
-//
-// type MyProvider struct{}
-// func (m *MyProvider) Retrieve() (Value, error) {...}
-// func (m *MyProvider) IsExpired() bool {...}
-//
-// creds := credentials.NewCredentials(&MyProvider{})
-// credValue, err := creds.Get()
-//
-package credentials
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/internal/sync/singleflight"
-)
-
-// AnonymousCredentials is an empty Credential object that can be used as
-// dummy placeholder credentials for requests that do not need signed.
-//
-// This Credentials can be used to configure a service to not sign requests
-// when making service API calls. For example, when accessing public
-// s3 buckets.
-//
-// svc := s3.New(session.Must(session.NewSession(&aws.Config{
-// Credentials: credentials.AnonymousCredentials,
-// })))
-// // Access public S3 buckets.
-var AnonymousCredentials = NewStaticCredentials("", "", "")
-
-// A Value is the AWS credentials value for individual credential fields.
-type Value struct {
- // AWS Access key ID
- AccessKeyID string
-
- // AWS Secret Access Key
- SecretAccessKey string
-
- // AWS Session Token
- SessionToken string
-
- // Provider used to get credentials
- ProviderName string
-}
-
-// HasKeys returns if the credentials Value has both AccessKeyID and
-// SecretAccessKey value set.
-func (v Value) HasKeys() bool {
- return len(v.AccessKeyID) != 0 && len(v.SecretAccessKey) != 0
-}
-
-// A Provider is the interface for any component which will provide credentials
-// Value. A provider is required to manage its own Expired state, and what to
-// be expired means.
-//
-// The Provider should not need to implement its own mutexes, because
-// that will be managed by Credentials.
-type Provider interface {
- // Retrieve returns nil if it successfully retrieved the value.
- // Error is returned if the value were not obtainable, or empty.
- Retrieve() (Value, error)
-
- // IsExpired returns if the credentials are no longer valid, and need
- // to be retrieved.
- IsExpired() bool
-}
-
-// ProviderWithContext is a Provider that can retrieve credentials with a Context
-type ProviderWithContext interface {
- Provider
-
- RetrieveWithContext(Context) (Value, error)
-}
-
-// An Expirer is an interface that Providers can implement to expose the expiration
-// time, if known. If the Provider cannot accurately provide this info,
-// it should not implement this interface.
-type Expirer interface {
- // The time at which the credentials are no longer valid
- ExpiresAt() time.Time
-}
-
-// An ErrorProvider is a stub credentials provider that always returns an error
-// this is used by the SDK when construction a known provider is not possible
-// due to an error.
-type ErrorProvider struct {
- // The error to be returned from Retrieve
- Err error
-
- // The provider name to set on the Retrieved returned Value
- ProviderName string
-}
-
-// Retrieve will always return the error that the ErrorProvider was created with.
-func (p ErrorProvider) Retrieve() (Value, error) {
- return Value{ProviderName: p.ProviderName}, p.Err
-}
-
-// IsExpired will always return not expired.
-func (p ErrorProvider) IsExpired() bool {
- return false
-}
-
-// A Expiry provides shared expiration logic to be used by credentials
-// providers to implement expiry functionality.
-//
-// The best method to use this struct is as an anonymous field within the
-// provider's struct.
-//
-// Example:
-// type EC2RoleProvider struct {
-// Expiry
-// ...
-// }
-type Expiry struct {
- // The date/time when to expire on
- expiration time.Time
-
- // If set will be used by IsExpired to determine the current time.
- // Defaults to time.Now if CurrentTime is not set. Available for testing
- // to be able to mock out the current time.
- CurrentTime func() time.Time
-}
-
-// SetExpiration sets the expiration IsExpired will check when called.
-//
-// If window is greater than 0 the expiration time will be reduced by the
-// window value.
-//
-// Using a window is helpful to trigger credentials to expire sooner than
-// the expiration time given to ensure no requests are made with expired
-// tokens.
-func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
- // Passed in expirations should have the monotonic clock values stripped.
- // This ensures time comparisons will be based on wall-time.
- e.expiration = expiration.Round(0)
- if window > 0 {
- e.expiration = e.expiration.Add(-window)
- }
-}
-
-// IsExpired returns if the credentials are expired.
-func (e *Expiry) IsExpired() bool {
- curTime := e.CurrentTime
- if curTime == nil {
- curTime = time.Now
- }
- return e.expiration.Before(curTime())
-}
-
-// ExpiresAt returns the expiration time of the credential
-func (e *Expiry) ExpiresAt() time.Time {
- return e.expiration
-}
-
-// A Credentials provides concurrency safe retrieval of AWS credentials Value.
-// Credentials will cache the credentials value until they expire. Once the value
-// expires the next Get will attempt to retrieve valid credentials.
-//
-// Credentials is safe to use across multiple goroutines and will manage the
-// synchronous state so the Providers do not need to implement their own
-// synchronization.
-//
-// The first Credentials.Get() will always call Provider.Retrieve() to get the
-// first instance of the credentials Value. All calls to Get() after that
-// will return the cached credentials Value until IsExpired() returns true.
-type Credentials struct {
- sf singleflight.Group
-
- m sync.RWMutex
- creds Value
- provider Provider
-}
-
-// NewCredentials returns a pointer to a new Credentials with the provider set.
-func NewCredentials(provider Provider) *Credentials {
- c := &Credentials{
- provider: provider,
- }
- return c
-}
-
-// GetWithContext returns the credentials value, or error if the credentials
-// Value failed to be retrieved. Will return early if the passed in context is
-// canceled.
-//
-// Will return the cached credentials Value if it has not expired. If the
-// credentials Value has expired the Provider's Retrieve() will be called
-// to refresh the credentials.
-//
-// If Credentials.Expire() was called the credentials Value will be force
-// expired, and the next call to Get() will cause them to be refreshed.
-//
-// Passed in Context is equivalent to aws.Context, and context.Context.
-func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
- // Check if credentials are cached, and not expired.
- select {
- case curCreds, ok := <-c.asyncIsExpired():
- // ok will only be true, of the credentials were not expired. ok will
- // be false and have no value if the credentials are expired.
- if ok {
- return curCreds, nil
- }
- case <-ctx.Done():
- return Value{}, awserr.New("RequestCanceled",
- "request context canceled", ctx.Err())
- }
-
- // Cannot pass context down to the actual retrieve, because the first
- // context would cancel the whole group when there is not direct
- // association of items in the group.
- resCh := c.sf.DoChan("", func() (interface{}, error) {
- return c.singleRetrieve(&suppressedContext{ctx})
- })
- select {
- case res := <-resCh:
- return res.Val.(Value), res.Err
- case <-ctx.Done():
- return Value{}, awserr.New("RequestCanceled",
- "request context canceled", ctx.Err())
- }
-}
-
-func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
- c.m.Lock()
- defer c.m.Unlock()
-
- if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
- return curCreds, nil
- }
-
- var creds Value
- var err error
- if p, ok := c.provider.(ProviderWithContext); ok {
- creds, err = p.RetrieveWithContext(ctx)
- } else {
- creds, err = c.provider.Retrieve()
- }
- if err == nil {
- c.creds = creds
- }
-
- return creds, err
-}
-
-// Get returns the credentials value, or error if the credentials Value failed
-// to be retrieved.
-//
-// Will return the cached credentials Value if it has not expired. If the
-// credentials Value has expired the Provider's Retrieve() will be called
-// to refresh the credentials.
-//
-// If Credentials.Expire() was called the credentials Value will be force
-// expired, and the next call to Get() will cause them to be refreshed.
-func (c *Credentials) Get() (Value, error) {
- return c.GetWithContext(backgroundContext())
-}
-
-// Expire expires the credentials and forces them to be retrieved on the
-// next call to Get().
-//
-// This will override the Provider's expired state, and force Credentials
-// to call the Provider's Retrieve().
-func (c *Credentials) Expire() {
- c.m.Lock()
- defer c.m.Unlock()
-
- c.creds = Value{}
-}
-
-// IsExpired returns if the credentials are no longer valid, and need
-// to be retrieved.
-//
-// If the Credentials were forced to be expired with Expire() this will
-// reflect that override.
-func (c *Credentials) IsExpired() bool {
- c.m.RLock()
- defer c.m.RUnlock()
-
- return c.isExpiredLocked(c.creds)
-}
-
-// asyncIsExpired returns a channel of credentials Value. If the channel is
-// closed the credentials are expired and credentials value are not empty.
-func (c *Credentials) asyncIsExpired() <-chan Value {
- ch := make(chan Value, 1)
- go func() {
- c.m.RLock()
- defer c.m.RUnlock()
-
- if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
- ch <- curCreds
- }
-
- close(ch)
- }()
-
- return ch
-}
-
-// isExpiredLocked helper method wrapping the definition of expired credentials.
-func (c *Credentials) isExpiredLocked(creds interface{}) bool {
- return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
-}
-
-// ExpiresAt provides access to the functionality of the Expirer interface of
-// the underlying Provider, if it supports that interface. Otherwise, it returns
-// an error.
-func (c *Credentials) ExpiresAt() (time.Time, error) {
- c.m.RLock()
- defer c.m.RUnlock()
-
- expirer, ok := c.provider.(Expirer)
- if !ok {
- return time.Time{}, awserr.New("ProviderNotExpirer",
- fmt.Sprintf("provider %s does not support ExpiresAt()",
- c.creds.ProviderName),
- nil)
- }
- if c.creds == (Value{}) {
- // set expiration time to the distant past
- return time.Time{}, nil
- }
- return expirer.ExpiresAt(), nil
-}
-
-type suppressedContext struct {
- Context
-}
-
-func (s *suppressedContext) Deadline() (deadline time.Time, ok bool) {
- return time.Time{}, false
-}
-
-func (s *suppressedContext) Done() <-chan struct{} {
- return nil
-}
-
-func (s *suppressedContext) Err() error {
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
deleted file mode 100644
index 92af5b725..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package ec2rolecreds
-
-import (
- "bufio"
- "encoding/json"
- "fmt"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/sdkuri"
-)
-
-// ProviderName provides a name of EC2Role provider
-const ProviderName = "EC2RoleProvider"
-
-// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if
-// those credentials are expired.
-//
-// Example how to configure the EC2RoleProvider with custom http Client, Endpoint
-// or ExpiryWindow
-//
-// p := &ec2rolecreds.EC2RoleProvider{
-// // Pass in a custom timeout to be used when requesting
-// // IAM EC2 Role credentials.
-// Client: ec2metadata.New(sess, aws.Config{
-// HTTPClient: &http.Client{Timeout: 10 * time.Second},
-// }),
-//
-// // Do not use early expiry of credentials. If a non zero value is
-// // specified the credentials will be expired early
-// ExpiryWindow: 0,
-// }
-type EC2RoleProvider struct {
- credentials.Expiry
-
- // Required EC2Metadata client to use when connecting to EC2 metadata service.
- Client *ec2metadata.EC2Metadata
-
- // ExpiryWindow will allow the credentials to trigger refreshing prior to
- // the credentials actually expiring. This is beneficial so race conditions
- // with expiring credentials do not cause request to fail unexpectedly
- // due to ExpiredTokenException exceptions.
- //
- // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
- // 10 seconds before the credentials are actually expired.
- //
- // If ExpiryWindow is 0 or less it will be ignored.
- ExpiryWindow time.Duration
-}
-
-// NewCredentials returns a pointer to a new Credentials object wrapping
-// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client.
-// The ConfigProvider is satisfied by the session.Session type.
-func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
- p := &EC2RoleProvider{
- Client: ec2metadata.New(c),
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
-// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2
-// metadata service.
-func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
- p := &EC2RoleProvider{
- Client: client,
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// Retrieve retrieves credentials from the EC2 service.
-// Error will be returned if the request fails, or unable to extract
-// the desired credentials.
-func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
- return m.RetrieveWithContext(aws.BackgroundContext())
-}
-
-// RetrieveWithContext retrieves credentials from the EC2 service.
-// Error will be returned if the request fails, or unable to extract
-// the desired credentials.
-func (m *EC2RoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
- credsList, err := requestCredList(ctx, m.Client)
- if err != nil {
- return credentials.Value{ProviderName: ProviderName}, err
- }
-
- if len(credsList) == 0 {
- return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
- }
- credsName := credsList[0]
-
- roleCreds, err := requestCred(ctx, m.Client, credsName)
- if err != nil {
- return credentials.Value{ProviderName: ProviderName}, err
- }
-
- m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
-
- return credentials.Value{
- AccessKeyID: roleCreds.AccessKeyID,
- SecretAccessKey: roleCreds.SecretAccessKey,
- SessionToken: roleCreds.Token,
- ProviderName: ProviderName,
- }, nil
-}
-
-// A ec2RoleCredRespBody provides the shape for unmarshaling credential
-// request responses.
-type ec2RoleCredRespBody struct {
- // Success State
- Expiration time.Time
- AccessKeyID string
- SecretAccessKey string
- Token string
-
- // Error state
- Code string
- Message string
-}
-
-const iamSecurityCredsPath = "iam/security-credentials/"
-
-// requestCredList requests a list of credentials from the EC2 service.
-// If there are no credentials, or there is an error making or receiving the request
-func requestCredList(ctx aws.Context, client *ec2metadata.EC2Metadata) ([]string, error) {
- resp, err := client.GetMetadataWithContext(ctx, iamSecurityCredsPath)
- if err != nil {
- return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
- }
-
- credsList := []string{}
- s := bufio.NewScanner(strings.NewReader(resp))
- for s.Scan() {
- credsList = append(credsList, s.Text())
- }
-
- if err := s.Err(); err != nil {
- return nil, awserr.New(request.ErrCodeSerialization,
- "failed to read EC2 instance role from metadata service", err)
- }
-
- return credsList, nil
-}
-
-// requestCred requests the credentials for a specific credentials from the EC2 service.
-//
-// If the credentials cannot be found, or there is an error reading the response
-// and error will be returned.
-func requestCred(ctx aws.Context, client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
- resp, err := client.GetMetadataWithContext(ctx, sdkuri.PathJoin(iamSecurityCredsPath, credsName))
- if err != nil {
- return ec2RoleCredRespBody{},
- awserr.New("EC2RoleRequestError",
- fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
- err)
- }
-
- respCreds := ec2RoleCredRespBody{}
- if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
- return ec2RoleCredRespBody{},
- awserr.New(request.ErrCodeSerialization,
- fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
- err)
- }
-
- if respCreds.Code != "Success" {
- // If an error code was returned something failed requesting the role.
- return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
- }
-
- return respCreds, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
deleted file mode 100644
index 329f788a3..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go
+++ /dev/null
@@ -1,255 +0,0 @@
-// Package endpointcreds provides support for retrieving credentials from an
-// arbitrary HTTP endpoint.
-//
-// The credentials endpoint Provider can receive both static and refreshable
-// credentials that will expire. Credentials are static when an "Expiration"
-// value is not provided in the endpoint's response.
-//
-// Static credentials will never expire once they have been retrieved. The format
-// of the static credentials response:
-// {
-// "AccessKeyId" : "MUA...",
-// "SecretAccessKey" : "/7PC5om....",
-// }
-//
-// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration
-// value in the response. The format of the refreshable credentials response:
-// {
-// "AccessKeyId" : "MUA...",
-// "SecretAccessKey" : "/7PC5om....",
-// "Token" : "AQoDY....=",
-// "Expiration" : "2016-02-25T06:03:31Z"
-// }
-//
-// Errors should be returned in the following format and only returned with 400
-// or 500 HTTP status codes.
-// {
-// "code": "ErrorCode",
-// "message": "Helpful error message."
-// }
-package endpointcreds
-
-import (
- "encoding/json"
- "fmt"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
-)
-
-// ProviderName is the name of the credentials provider.
-const ProviderName = `CredentialsEndpointProvider`
-
-// Provider satisfies the credentials.Provider interface, and is a client to
-// retrieve credentials from an arbitrary endpoint.
-type Provider struct {
- staticCreds bool
- credentials.Expiry
-
- // Requires a AWS Client to make HTTP requests to the endpoint with.
- // the Endpoint the request will be made to is provided by the aws.Config's
- // Endpoint value.
- Client *client.Client
-
- // ExpiryWindow will allow the credentials to trigger refreshing prior to
- // the credentials actually expiring. This is beneficial so race conditions
- // with expiring credentials do not cause request to fail unexpectedly
- // due to ExpiredTokenException exceptions.
- //
- // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
- // 10 seconds before the credentials are actually expired.
- //
- // If ExpiryWindow is 0 or less it will be ignored.
- ExpiryWindow time.Duration
-
- // Optional authorization token value if set will be used as the value of
- // the Authorization header of the endpoint credential request.
- //
- // When constructed from environment, the provider will use the value of
- // AWS_CONTAINER_AUTHORIZATION_TOKEN environment variable as the token
- //
- // Will be overridden if AuthorizationTokenProvider is configured
- AuthorizationToken string
-
- // Optional auth provider func to dynamically load the auth token from a file
- // everytime a credential is retrieved
- //
- // When constructed from environment, the provider will read and use the content
- // of the file pointed to by AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE environment variable
- // as the auth token everytime credentials are retrieved
- //
- // Will override AuthorizationToken if configured
- AuthorizationTokenProvider AuthTokenProvider
-}
-
-// AuthTokenProvider defines an interface to dynamically load a value to be passed
-// for the Authorization header of a credentials request.
-type AuthTokenProvider interface {
- GetToken() (string, error)
-}
-
-// TokenProviderFunc is a func type implementing AuthTokenProvider interface
-// and enables customizing token provider behavior
-type TokenProviderFunc func() (string, error)
-
-// GetToken func retrieves auth token according to TokenProviderFunc implementation
-func (p TokenProviderFunc) GetToken() (string, error) {
- return p()
-}
-
-// NewProviderClient returns a credentials Provider for retrieving AWS credentials
-// from arbitrary endpoint.
-func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider {
- p := &Provider{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: "CredentialsEndpoint",
- Endpoint: endpoint,
- },
- handlers,
- ),
- }
-
- p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler)
- p.Client.Handlers.UnmarshalError.PushBack(unmarshalError)
- p.Client.Handlers.Validate.Clear()
- p.Client.Handlers.Validate.PushBack(validateEndpointHandler)
-
- for _, option := range options {
- option(p)
- }
-
- return p
-}
-
-// NewCredentialsClient returns a pointer to a new Credentials object
-// wrapping the endpoint credentials Provider.
-func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials {
- return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...))
-}
-
-// IsExpired returns true if the credentials retrieved are expired, or not yet
-// retrieved.
-func (p *Provider) IsExpired() bool {
- if p.staticCreds {
- return false
- }
- return p.Expiry.IsExpired()
-}
-
-// Retrieve will attempt to request the credentials from the endpoint the Provider
-// was configured for. And error will be returned if the retrieval fails.
-func (p *Provider) Retrieve() (credentials.Value, error) {
- return p.RetrieveWithContext(aws.BackgroundContext())
-}
-
-// RetrieveWithContext will attempt to request the credentials from the endpoint the Provider
-// was configured for. And error will be returned if the retrieval fails.
-func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
- resp, err := p.getCredentials(ctx)
- if err != nil {
- return credentials.Value{ProviderName: ProviderName},
- awserr.New("CredentialsEndpointError", "failed to load credentials", err)
- }
-
- if resp.Expiration != nil {
- p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
- } else {
- p.staticCreds = true
- }
-
- return credentials.Value{
- AccessKeyID: resp.AccessKeyID,
- SecretAccessKey: resp.SecretAccessKey,
- SessionToken: resp.Token,
- ProviderName: ProviderName,
- }, nil
-}
-
-type getCredentialsOutput struct {
- Expiration *time.Time
- AccessKeyID string
- SecretAccessKey string
- Token string
-}
-
-type errorOutput struct {
- Code string `json:"code"`
- Message string `json:"message"`
-}
-
-func (p *Provider) getCredentials(ctx aws.Context) (*getCredentialsOutput, error) {
- op := &request.Operation{
- Name: "GetCredentials",
- HTTPMethod: "GET",
- }
-
- out := &getCredentialsOutput{}
- req := p.Client.NewRequest(op, nil, out)
- req.SetContext(ctx)
- req.HTTPRequest.Header.Set("Accept", "application/json")
-
- authToken := p.AuthorizationToken
- var err error
- if p.AuthorizationTokenProvider != nil {
- authToken, err = p.AuthorizationTokenProvider.GetToken()
- if err != nil {
- return nil, fmt.Errorf("get authorization token: %v", err)
- }
- }
-
- if strings.ContainsAny(authToken, "\r\n") {
- return nil, fmt.Errorf("authorization token contains invalid newline sequence")
- }
- if len(authToken) != 0 {
- req.HTTPRequest.Header.Set("Authorization", authToken)
- }
-
- return out, req.Send()
-}
-
-func validateEndpointHandler(r *request.Request) {
- if len(r.ClientInfo.Endpoint) == 0 {
- r.Error = aws.ErrMissingEndpoint
- }
-}
-
-func unmarshalHandler(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
-
- out := r.Data.(*getCredentialsOutput)
- if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization,
- "failed to decode endpoint credentials",
- err,
- )
- }
-}
-
-func unmarshalError(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
-
- var errOut errorOutput
- err := jsonutil.UnmarshalJSONError(&errOut, r.HTTPResponse.Body)
- if err != nil {
- r.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization,
- "failed to decode error message", err),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- )
- return
- }
-
- // Response body format is not consistent between metadata endpoints.
- // Grab the error message as a string and include that as the source error
- r.Error = awserr.New(errOut.Code, errOut.Message, nil)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
deleted file mode 100644
index 54c5cf733..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package credentials
-
-import (
- "os"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// EnvProviderName provides a name of Env provider
-const EnvProviderName = "EnvProvider"
-
-var (
- // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
- // found in the process's environment.
- ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
-
- // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
- // can't be found in the process's environment.
- ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
-)
-
-// A EnvProvider retrieves credentials from the environment variables of the
-// running process. Environment credentials never expire.
-//
-// Environment variables used:
-//
-// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
-//
-// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
-type EnvProvider struct {
- retrieved bool
-}
-
-// NewEnvCredentials returns a pointer to a new Credentials object
-// wrapping the environment variable provider.
-func NewEnvCredentials() *Credentials {
- return NewCredentials(&EnvProvider{})
-}
-
-// Retrieve retrieves the keys from the environment.
-func (e *EnvProvider) Retrieve() (Value, error) {
- e.retrieved = false
-
- id := os.Getenv("AWS_ACCESS_KEY_ID")
- if id == "" {
- id = os.Getenv("AWS_ACCESS_KEY")
- }
-
- secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
- if secret == "" {
- secret = os.Getenv("AWS_SECRET_KEY")
- }
-
- if id == "" {
- return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
- }
-
- if secret == "" {
- return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
- }
-
- e.retrieved = true
- return Value{
- AccessKeyID: id,
- SecretAccessKey: secret,
- SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
- ProviderName: EnvProviderName,
- }, nil
-}
-
-// IsExpired returns if the credentials have been retrieved.
-func (e *EnvProvider) IsExpired() bool {
- return !e.retrieved
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
deleted file mode 100644
index 7fc91d9d2..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
+++ /dev/null
@@ -1,12 +0,0 @@
-[default]
-aws_access_key_id = accessKey
-aws_secret_access_key = secret
-aws_session_token = token
-
-[no_token]
-aws_access_key_id = accessKey
-aws_secret_access_key = secret
-
-[with_colon]
-aws_access_key_id: accessKey
-aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
deleted file mode 100644
index 18694f07f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/processcreds/provider.go
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
-Package processcreds is a credential Provider to retrieve `credential_process`
-credentials.
-
-WARNING: The following describes a method of sourcing credentials from an external
-process. This can potentially be dangerous, so proceed with caution. Other
-credential providers should be preferred if at all possible. If using this
-option, you should make sure that the config file is as locked down as possible
-using security best practices for your operating system.
-
-You can use credentials from a `credential_process` in a variety of ways.
-
-One way is to setup your shared config file, located in the default
-location, with the `credential_process` key and the command you want to be
-called. You also need to set the AWS_SDK_LOAD_CONFIG environment variable
-(e.g., `export AWS_SDK_LOAD_CONFIG=1`) to use the shared config file.
-
- [default]
- credential_process = /command/to/call
-
-Creating a new session will use the credential process to retrieve credentials.
-NOTE: If there are credentials in the profile you are using, the credential
-process will not be used.
-
- // Initialize a session to load credentials.
- sess, _ := session.NewSession(&aws.Config{
- Region: aws.String("us-east-1")},
- )
-
- // Create S3 service client to use the credentials.
- svc := s3.New(sess)
-
-Another way to use the `credential_process` method is by using
-`credentials.NewCredentials()` and providing a command to be executed to
-retrieve credentials:
-
- // Create credentials using the ProcessProvider.
- creds := processcreds.NewCredentials("/path/to/command")
-
- // Create service client value configured for credentials.
- svc := s3.New(sess, &aws.Config{Credentials: creds})
-
-You can set a non-default timeout for the `credential_process` with another
-constructor, `credentials.NewCredentialsTimeout()`, providing the timeout. To
-set a one minute timeout:
-
- // Create credentials using the ProcessProvider.
- creds := processcreds.NewCredentialsTimeout(
- "/path/to/command",
- time.Duration(500) * time.Millisecond)
-
-If you need more control, you can set any configurable options in the
-credentials using one or more option functions. For example, you can set a two
-minute timeout, a credential duration of 60 minutes, and a maximum stdout
-buffer size of 2k.
-
- creds := processcreds.NewCredentials(
- "/path/to/command",
- func(opt *ProcessProvider) {
- opt.Timeout = time.Duration(2) * time.Minute
- opt.Duration = time.Duration(60) * time.Minute
- opt.MaxBufSize = 2048
- })
-
-You can also use your own `exec.Cmd`:
-
- // Create an exec.Cmd
- myCommand := exec.Command("/path/to/command")
-
- // Create credentials using your exec.Cmd and custom timeout
- creds := processcreds.NewCredentialsCommand(
- myCommand,
- func(opt *processcreds.ProcessProvider) {
- opt.Timeout = time.Duration(1) * time.Second
- })
-*/
-package processcreds
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "os/exec"
- "runtime"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/internal/sdkio"
-)
-
-const (
- // ProviderName is the name this credentials provider will label any
- // returned credentials Value with.
- ProviderName = `ProcessProvider`
-
- // ErrCodeProcessProviderParse error parsing process output
- ErrCodeProcessProviderParse = "ProcessProviderParseError"
-
- // ErrCodeProcessProviderVersion version error in output
- ErrCodeProcessProviderVersion = "ProcessProviderVersionError"
-
- // ErrCodeProcessProviderRequired required attribute missing in output
- ErrCodeProcessProviderRequired = "ProcessProviderRequiredError"
-
- // ErrCodeProcessProviderExecution execution of command failed
- ErrCodeProcessProviderExecution = "ProcessProviderExecutionError"
-
- // errMsgProcessProviderTimeout process took longer than allowed
- errMsgProcessProviderTimeout = "credential process timed out"
-
- // errMsgProcessProviderProcess process error
- errMsgProcessProviderProcess = "error in credential_process"
-
- // errMsgProcessProviderParse problem parsing output
- errMsgProcessProviderParse = "parse failed of credential_process output"
-
- // errMsgProcessProviderVersion version error in output
- errMsgProcessProviderVersion = "wrong version in process output (not 1)"
-
- // errMsgProcessProviderMissKey missing access key id in output
- errMsgProcessProviderMissKey = "missing AccessKeyId in process output"
-
- // errMsgProcessProviderMissSecret missing secret acess key in output
- errMsgProcessProviderMissSecret = "missing SecretAccessKey in process output"
-
- // errMsgProcessProviderPrepareCmd prepare of command failed
- errMsgProcessProviderPrepareCmd = "failed to prepare command"
-
- // errMsgProcessProviderEmptyCmd command must not be empty
- errMsgProcessProviderEmptyCmd = "command must not be empty"
-
- // errMsgProcessProviderPipe failed to initialize pipe
- errMsgProcessProviderPipe = "failed to initialize pipe"
-
- // DefaultDuration is the default amount of time in minutes that the
- // credentials will be valid for.
- DefaultDuration = time.Duration(15) * time.Minute
-
- // DefaultBufSize limits buffer size from growing to an enormous
- // amount due to a faulty process.
- DefaultBufSize = int(8 * sdkio.KibiByte)
-
- // DefaultTimeout default limit on time a process can run.
- DefaultTimeout = time.Duration(1) * time.Minute
-)
-
-// ProcessProvider satisfies the credentials.Provider interface, and is a
-// client to retrieve credentials from a process.
-type ProcessProvider struct {
- staticCreds bool
- credentials.Expiry
- originalCommand []string
-
- // Expiry duration of the credentials. Defaults to 15 minutes if not set.
- Duration time.Duration
-
- // ExpiryWindow will allow the credentials to trigger refreshing prior to
- // the credentials actually expiring. This is beneficial so race conditions
- // with expiring credentials do not cause request to fail unexpectedly
- // due to ExpiredTokenException exceptions.
- //
- // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
- // 10 seconds before the credentials are actually expired.
- //
- // If ExpiryWindow is 0 or less it will be ignored.
- ExpiryWindow time.Duration
-
- // A string representing an os command that should return a JSON with
- // credential information.
- command *exec.Cmd
-
- // MaxBufSize limits memory usage from growing to an enormous
- // amount due to a faulty process.
- MaxBufSize int
-
- // Timeout limits the time a process can run.
- Timeout time.Duration
-}
-
-// NewCredentials returns a pointer to a new Credentials object wrapping the
-// ProcessProvider. The credentials will expire every 15 minutes by default.
-func NewCredentials(command string, options ...func(*ProcessProvider)) *credentials.Credentials {
- p := &ProcessProvider{
- command: exec.Command(command),
- Duration: DefaultDuration,
- Timeout: DefaultTimeout,
- MaxBufSize: DefaultBufSize,
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// NewCredentialsTimeout returns a pointer to a new Credentials object with
-// the specified command and timeout, and default duration and max buffer size.
-func NewCredentialsTimeout(command string, timeout time.Duration) *credentials.Credentials {
- p := NewCredentials(command, func(opt *ProcessProvider) {
- opt.Timeout = timeout
- })
-
- return p
-}
-
-// NewCredentialsCommand returns a pointer to a new Credentials object with
-// the specified command, and default timeout, duration and max buffer size.
-func NewCredentialsCommand(command *exec.Cmd, options ...func(*ProcessProvider)) *credentials.Credentials {
- p := &ProcessProvider{
- command: command,
- Duration: DefaultDuration,
- Timeout: DefaultTimeout,
- MaxBufSize: DefaultBufSize,
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// A CredentialProcessResponse is the AWS credentials format that must be
-// returned when executing an external credential_process.
-type CredentialProcessResponse struct {
- // As of this writing, the Version key must be set to 1. This might
- // increment over time as the structure evolves.
- Version int
-
- // The access key ID that identifies the temporary security credentials.
- AccessKeyID string `json:"AccessKeyId"`
-
- // The secret access key that can be used to sign requests.
- SecretAccessKey string
-
- // The token that users must pass to the service API to use the temporary credentials.
- SessionToken string
-
- // The date on which the current credentials expire.
- Expiration *time.Time
-}
-
-// Retrieve executes the 'credential_process' and returns the credentials.
-func (p *ProcessProvider) Retrieve() (credentials.Value, error) {
- out, err := p.executeCredentialProcess()
- if err != nil {
- return credentials.Value{ProviderName: ProviderName}, err
- }
-
- // Serialize and validate response
- resp := &CredentialProcessResponse{}
- if err = json.Unmarshal(out, resp); err != nil {
- return credentials.Value{ProviderName: ProviderName}, awserr.New(
- ErrCodeProcessProviderParse,
- fmt.Sprintf("%s: %s", errMsgProcessProviderParse, string(out)),
- err)
- }
-
- if resp.Version != 1 {
- return credentials.Value{ProviderName: ProviderName}, awserr.New(
- ErrCodeProcessProviderVersion,
- errMsgProcessProviderVersion,
- nil)
- }
-
- if len(resp.AccessKeyID) == 0 {
- return credentials.Value{ProviderName: ProviderName}, awserr.New(
- ErrCodeProcessProviderRequired,
- errMsgProcessProviderMissKey,
- nil)
- }
-
- if len(resp.SecretAccessKey) == 0 {
- return credentials.Value{ProviderName: ProviderName}, awserr.New(
- ErrCodeProcessProviderRequired,
- errMsgProcessProviderMissSecret,
- nil)
- }
-
- // Handle expiration
- p.staticCreds = resp.Expiration == nil
- if resp.Expiration != nil {
- p.SetExpiration(*resp.Expiration, p.ExpiryWindow)
- }
-
- return credentials.Value{
- ProviderName: ProviderName,
- AccessKeyID: resp.AccessKeyID,
- SecretAccessKey: resp.SecretAccessKey,
- SessionToken: resp.SessionToken,
- }, nil
-}
-
-// IsExpired returns true if the credentials retrieved are expired, or not yet
-// retrieved.
-func (p *ProcessProvider) IsExpired() bool {
- if p.staticCreds {
- return false
- }
- return p.Expiry.IsExpired()
-}
-
-// prepareCommand prepares the command to be executed.
-func (p *ProcessProvider) prepareCommand() error {
-
- var cmdArgs []string
- if runtime.GOOS == "windows" {
- cmdArgs = []string{"cmd.exe", "/C"}
- } else {
- cmdArgs = []string{"sh", "-c"}
- }
-
- if len(p.originalCommand) == 0 {
- p.originalCommand = make([]string, len(p.command.Args))
- copy(p.originalCommand, p.command.Args)
-
- // check for empty command because it succeeds
- if len(strings.TrimSpace(p.originalCommand[0])) < 1 {
- return awserr.New(
- ErrCodeProcessProviderExecution,
- fmt.Sprintf(
- "%s: %s",
- errMsgProcessProviderPrepareCmd,
- errMsgProcessProviderEmptyCmd),
- nil)
- }
- }
-
- cmdArgs = append(cmdArgs, p.originalCommand...)
- p.command = exec.Command(cmdArgs[0], cmdArgs[1:]...)
- p.command.Env = os.Environ()
-
- return nil
-}
-
-// executeCredentialProcess starts the credential process on the OS and
-// returns the results or an error.
-func (p *ProcessProvider) executeCredentialProcess() ([]byte, error) {
-
- if err := p.prepareCommand(); err != nil {
- return nil, err
- }
-
- // Setup the pipes
- outReadPipe, outWritePipe, err := os.Pipe()
- if err != nil {
- return nil, awserr.New(
- ErrCodeProcessProviderExecution,
- errMsgProcessProviderPipe,
- err)
- }
-
- p.command.Stderr = os.Stderr // display stderr on console for MFA
- p.command.Stdout = outWritePipe // get creds json on process's stdout
- p.command.Stdin = os.Stdin // enable stdin for MFA
-
- output := bytes.NewBuffer(make([]byte, 0, p.MaxBufSize))
-
- stdoutCh := make(chan error, 1)
- go readInput(
- io.LimitReader(outReadPipe, int64(p.MaxBufSize)),
- output,
- stdoutCh)
-
- execCh := make(chan error, 1)
- go executeCommand(*p.command, execCh)
-
- finished := false
- var errors []error
- for !finished {
- select {
- case readError := <-stdoutCh:
- errors = appendError(errors, readError)
- finished = true
- case execError := <-execCh:
- err := outWritePipe.Close()
- errors = appendError(errors, err)
- errors = appendError(errors, execError)
- if errors != nil {
- return output.Bytes(), awserr.NewBatchError(
- ErrCodeProcessProviderExecution,
- errMsgProcessProviderProcess,
- errors)
- }
- case <-time.After(p.Timeout):
- finished = true
- return output.Bytes(), awserr.NewBatchError(
- ErrCodeProcessProviderExecution,
- errMsgProcessProviderTimeout,
- errors) // errors can be nil
- }
- }
-
- out := output.Bytes()
-
- if runtime.GOOS == "windows" {
- // windows adds slashes to quotes
- out = []byte(strings.Replace(string(out), `\"`, `"`, -1))
- }
-
- return out, nil
-}
-
-// appendError conveniently checks for nil before appending slice
-func appendError(errors []error, err error) []error {
- if err != nil {
- return append(errors, err)
- }
- return errors
-}
-
-func executeCommand(cmd exec.Cmd, exec chan error) {
- // Start the command
- err := cmd.Start()
- if err == nil {
- err = cmd.Wait()
- }
-
- exec <- err
-}
-
-func readInput(r io.Reader, w io.Writer, read chan error) {
- tee := io.TeeReader(r, w)
-
- _, err := ioutil.ReadAll(tee)
-
- if err == io.EOF {
- err = nil
- }
-
- read <- err // will only arrive here when write end of pipe is closed
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
deleted file mode 100644
index 22b5c5d9f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package credentials
-
-import (
- "fmt"
- "os"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/internal/ini"
- "github.com/aws/aws-sdk-go/internal/shareddefaults"
-)
-
-// SharedCredsProviderName provides a name of SharedCreds provider
-const SharedCredsProviderName = "SharedCredentialsProvider"
-
-var (
- // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
- ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
-)
-
-// A SharedCredentialsProvider retrieves access key pair (access key ID,
-// secret access key, and session token if present) credentials from the current
-// user's home directory, and keeps track if those credentials are expired.
-//
-// Profile ini file example: $HOME/.aws/credentials
-type SharedCredentialsProvider struct {
- // Path to the shared credentials file.
- //
- // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
- // env value is empty will default to current user's home directory.
- // Linux/OSX: "$HOME/.aws/credentials"
- // Windows: "%USERPROFILE%\.aws\credentials"
- Filename string
-
- // AWS Profile to extract credentials from the shared credentials file. If empty
- // will default to environment variable "AWS_PROFILE" or "default" if
- // environment variable is also not set.
- Profile string
-
- // retrieved states if the credentials have been successfully retrieved.
- retrieved bool
-}
-
-// NewSharedCredentials returns a pointer to a new Credentials object
-// wrapping the Profile file provider.
-func NewSharedCredentials(filename, profile string) *Credentials {
- return NewCredentials(&SharedCredentialsProvider{
- Filename: filename,
- Profile: profile,
- })
-}
-
-// Retrieve reads and extracts the shared credentials from the current
-// users home directory.
-func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
- p.retrieved = false
-
- filename, err := p.filename()
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, err
- }
-
- creds, err := loadProfile(filename, p.profile())
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, err
- }
-
- p.retrieved = true
- return creds, nil
-}
-
-// IsExpired returns if the shared credentials have expired.
-func (p *SharedCredentialsProvider) IsExpired() bool {
- return !p.retrieved
-}
-
-// loadProfiles loads from the file pointed to by shared credentials filename for profile.
-// The credentials retrieved from the profile will be returned or error. Error will be
-// returned if it fails to read from the file, or the data is invalid.
-func loadProfile(filename, profile string) (Value, error) {
- config, err := ini.OpenFile(filename)
- if err != nil {
- return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
- }
-
- iniProfile, ok := config.GetSection(profile)
- if !ok {
- return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", nil)
- }
-
- id := iniProfile.String("aws_access_key_id")
- if len(id) == 0 {
- return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
- fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
- nil)
- }
-
- secret := iniProfile.String("aws_secret_access_key")
- if len(secret) == 0 {
- return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
- fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
- nil)
- }
-
- // Default to empty string if not found
- token := iniProfile.String("aws_session_token")
-
- return Value{
- AccessKeyID: id,
- SecretAccessKey: secret,
- SessionToken: token,
- ProviderName: SharedCredsProviderName,
- }, nil
-}
-
-// filename returns the filename to use to read AWS shared credentials.
-//
-// Will return an error if the user's home directory path cannot be found.
-func (p *SharedCredentialsProvider) filename() (string, error) {
- if len(p.Filename) != 0 {
- return p.Filename, nil
- }
-
- if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
- return p.Filename, nil
- }
-
- if home := shareddefaults.UserHomeDir(); len(home) == 0 {
- // Backwards compatibility of home directly not found error being returned.
- // This error is too verbose, failure when opening the file would of been
- // a better error to return.
- return "", ErrSharedCredentialsHomeNotFound
- }
-
- p.Filename = shareddefaults.SharedCredentialsFilename()
-
- return p.Filename, nil
-}
-
-// profile returns the AWS shared credentials profile. If empty will read
-// environment variable "AWS_PROFILE". If that is not set profile will
-// return "default".
-func (p *SharedCredentialsProvider) profile() string {
- if p.Profile == "" {
- p.Profile = os.Getenv("AWS_PROFILE")
- }
- if p.Profile == "" {
- p.Profile = "default"
- }
-
- return p.Profile
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
deleted file mode 100644
index 18c940ab3..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/doc.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Package ssocreds provides a credential provider for retrieving temporary AWS credentials using an SSO access token.
-//
-// IMPORTANT: The provider in this package does not initiate or perform the AWS SSO login flow. The SDK provider
-// expects that you have already performed the SSO login flow using AWS CLI using the "aws sso login" command, or by
-// some other mechanism. The provider must find a valid non-expired access token for the AWS SSO user portal URL in
-// ~/.aws/sso/cache. If a cached token is not found, it is expired, or the file is malformed an error will be returned.
-//
-// Loading AWS SSO credentials with the AWS shared configuration file
-//
-// You can use configure AWS SSO credentials from the AWS shared configuration file by
-// providing the specifying the required keys in the profile:
-//
-// sso_account_id
-// sso_region
-// sso_role_name
-// sso_start_url
-//
-// For example, the following defines a profile "devsso" and specifies the AWS SSO parameters that defines the target
-// account, role, sign-on portal, and the region where the user portal is located. Note: all SSO arguments must be
-// provided, or an error will be returned.
-//
-// [profile devsso]
-// sso_start_url = https://my-sso-portal.awsapps.com/start
-// sso_role_name = SSOReadOnlyRole
-// sso_region = us-east-1
-// sso_account_id = 123456789012
-//
-// Using the config module, you can load the AWS SDK shared configuration, and specify that this profile be used to
-// retrieve credentials. For example:
-//
-// sess, err := session.NewSessionWithOptions(session.Options{
-// SharedConfigState: session.SharedConfigEnable,
-// Profile: "devsso",
-// })
-// if err != nil {
-// return err
-// }
-//
-// Programmatically loading AWS SSO credentials directly
-//
-// You can programmatically construct the AWS SSO Provider in your application, and provide the necessary information
-// to load and retrieve temporary credentials using an access token from ~/.aws/sso/cache.
-//
-// svc := sso.New(sess, &aws.Config{
-// Region: aws.String("us-west-2"), // Client Region must correspond to the AWS SSO user portal region
-// })
-//
-// provider := ssocreds.NewCredentialsWithClient(svc, "123456789012", "SSOReadOnlyRole", "https://my-sso-portal.awsapps.com/start")
-//
-// credentials, err := provider.Get()
-// if err != nil {
-// return err
-// }
-//
-// Additional Resources
-//
-// Configuring the AWS CLI to use AWS Single Sign-On: https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
-//
-// AWS Single Sign-On User Guide: https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html
-package ssocreds
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
deleted file mode 100644
index d4df39a7a..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build !windows
-// +build !windows
-
-package ssocreds
-
-import "os"
-
-func getHomeDirectory() string {
- return os.Getenv("HOME")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
deleted file mode 100644
index eb48f61e5..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/os_windows.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package ssocreds
-
-import "os"
-
-func getHomeDirectory() string {
- return os.Getenv("USERPROFILE")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
deleted file mode 100644
index 4138e725d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/provider.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package ssocreds
-
-import (
- "crypto/sha1"
- "encoding/hex"
- "encoding/json"
- "io/ioutil"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/auth/bearer"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/service/sso"
- "github.com/aws/aws-sdk-go/service/sso/ssoiface"
-)
-
-// ErrCodeSSOProviderInvalidToken is the code type that is returned if loaded token has expired or is otherwise invalid.
-// To refresh the SSO session run aws sso login with the corresponding profile.
-const ErrCodeSSOProviderInvalidToken = "SSOProviderInvalidToken"
-
-const invalidTokenMessage = "the SSO session has expired or is invalid"
-
-func init() {
- nowTime = time.Now
- defaultCacheLocation = defaultCacheLocationImpl
-}
-
-var nowTime func() time.Time
-
-// ProviderName is the name of the provider used to specify the source of credentials.
-const ProviderName = "SSOProvider"
-
-var defaultCacheLocation func() string
-
-func defaultCacheLocationImpl() string {
- return filepath.Join(getHomeDirectory(), ".aws", "sso", "cache")
-}
-
-// Provider is an AWS credential provider that retrieves temporary AWS credentials by exchanging an SSO login token.
-type Provider struct {
- credentials.Expiry
-
- // The Client which is configured for the AWS Region where the AWS SSO user portal is located.
- Client ssoiface.SSOAPI
-
- // The AWS account that is assigned to the user.
- AccountID string
-
- // The role name that is assigned to the user.
- RoleName string
-
- // The URL that points to the organization's AWS Single Sign-On (AWS SSO) user portal.
- StartURL string
-
- // The filepath the cached token will be retrieved from. If unset Provider will
- // use the startURL to determine the filepath at.
- //
- // ~/.aws/sso/cache/.json
- //
- // If custom cached token filepath is used, the Provider's startUrl
- // parameter will be ignored.
- CachedTokenFilepath string
-
- // Used by the SSOCredentialProvider if a token configuration
- // profile is used in the shared config
- TokenProvider bearer.TokenProvider
-}
-
-// NewCredentials returns a new AWS Single Sign-On (AWS SSO) credential provider. The ConfigProvider is expected to be configured
-// for the AWS Region where the AWS SSO user portal is located.
-func NewCredentials(configProvider client.ConfigProvider, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
- return NewCredentialsWithClient(sso.New(configProvider), accountID, roleName, startURL, optFns...)
-}
-
-// NewCredentialsWithClient returns a new AWS Single Sign-On (AWS SSO) credential provider. The provided client is expected to be configured
-// for the AWS Region where the AWS SSO user portal is located.
-func NewCredentialsWithClient(client ssoiface.SSOAPI, accountID, roleName, startURL string, optFns ...func(provider *Provider)) *credentials.Credentials {
- p := &Provider{
- Client: client,
- AccountID: accountID,
- RoleName: roleName,
- StartURL: startURL,
- }
-
- for _, fn := range optFns {
- fn(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// Retrieve retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
-// by exchanging the accessToken present in ~/.aws/sso/cache.
-func (p *Provider) Retrieve() (credentials.Value, error) {
- return p.RetrieveWithContext(aws.BackgroundContext())
-}
-
-// RetrieveWithContext retrieves temporary AWS credentials from the configured Amazon Single Sign-On (AWS SSO) user portal
-// by exchanging the accessToken present in ~/.aws/sso/cache.
-func (p *Provider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
- var accessToken *string
- if p.TokenProvider != nil {
- token, err := p.TokenProvider.RetrieveBearerToken(ctx)
- if err != nil {
- return credentials.Value{}, err
- }
- accessToken = &token.Value
- } else {
- if p.CachedTokenFilepath == "" {
- cachedTokenFilePath, err := getCachedFilePath(p.StartURL)
- if err != nil {
- return credentials.Value{}, err
- }
- p.CachedTokenFilepath = cachedTokenFilePath
- }
-
- tokenFile, err := loadTokenFile(p.CachedTokenFilepath)
- if err != nil {
- return credentials.Value{}, err
- }
- accessToken = &tokenFile.AccessToken
- }
-
- output, err := p.Client.GetRoleCredentialsWithContext(ctx, &sso.GetRoleCredentialsInput{
- AccessToken: accessToken,
- AccountId: &p.AccountID,
- RoleName: &p.RoleName,
- })
- if err != nil {
- return credentials.Value{}, err
- }
-
- expireTime := time.Unix(0, aws.Int64Value(output.RoleCredentials.Expiration)*int64(time.Millisecond)).UTC()
- p.SetExpiration(expireTime, 0)
-
- return credentials.Value{
- AccessKeyID: aws.StringValue(output.RoleCredentials.AccessKeyId),
- SecretAccessKey: aws.StringValue(output.RoleCredentials.SecretAccessKey),
- SessionToken: aws.StringValue(output.RoleCredentials.SessionToken),
- ProviderName: ProviderName,
- }, nil
-}
-
-func getCachedFilePath(startUrl string) (string, error) {
- hash := sha1.New()
- _, err := hash.Write([]byte(startUrl))
- if err != nil {
- return "", err
- }
- return filepath.Join(defaultCacheLocation(), strings.ToLower(hex.EncodeToString(hash.Sum(nil)))+".json"), nil
-}
-
-type token struct {
- AccessToken string `json:"accessToken"`
- ExpiresAt rfc3339 `json:"expiresAt"`
- Region string `json:"region,omitempty"`
- StartURL string `json:"startUrl,omitempty"`
-}
-
-func (t token) Expired() bool {
- return nowTime().Round(0).After(time.Time(t.ExpiresAt))
-}
-
-func loadTokenFile(cachedTokenPath string) (t token, err error) {
- fileBytes, err := ioutil.ReadFile(cachedTokenPath)
- if err != nil {
- return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
- }
-
- if err := json.Unmarshal(fileBytes, &t); err != nil {
- return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, err)
- }
-
- if len(t.AccessToken) == 0 {
- return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
- }
-
- if t.Expired() {
- return token{}, awserr.New(ErrCodeSSOProviderInvalidToken, invalidTokenMessage, nil)
- }
-
- return t, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
deleted file mode 100644
index f6fa88451..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/sso_cached_token.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package ssocreds
-
-import (
- "crypto/sha1"
- "encoding/hex"
- "encoding/json"
- "fmt"
- "github.com/aws/aws-sdk-go/internal/shareddefaults"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-)
-
-var resolvedOsUserHomeDir = shareddefaults.UserHomeDir
-
-// StandardCachedTokenFilepath returns the filepath for the cached SSO token file, or
-// error if unable get derive the path. Key that will be used to compute a SHA1
-// value that is hex encoded.
-//
-// Derives the filepath using the Key as:
-//
-// ~/.aws/sso/cache/.json
-func StandardCachedTokenFilepath(key string) (string, error) {
- homeDir := resolvedOsUserHomeDir()
- if len(homeDir) == 0 {
- return "", fmt.Errorf("unable to get USER's home directory for cached token")
- }
- hash := sha1.New()
- if _, err := hash.Write([]byte(key)); err != nil {
- return "", fmt.Errorf("unable to compute cached token filepath key SHA1 hash, %v", err)
- }
-
- cacheFilename := strings.ToLower(hex.EncodeToString(hash.Sum(nil))) + ".json"
-
- return filepath.Join(homeDir, ".aws", "sso", "cache", cacheFilename), nil
-}
-
-type tokenKnownFields struct {
- AccessToken string `json:"accessToken,omitempty"`
- ExpiresAt *rfc3339 `json:"expiresAt,omitempty"`
-
- RefreshToken string `json:"refreshToken,omitempty"`
- ClientID string `json:"clientId,omitempty"`
- ClientSecret string `json:"clientSecret,omitempty"`
-}
-
-type cachedToken struct {
- tokenKnownFields
- UnknownFields map[string]interface{} `json:"-"`
-}
-
-// MarshalJSON provides custom marshalling because the standard library Go marshaller ignores unknown/unspecified fields
-// when marshalling from a struct: https://pkg.go.dev/encoding/json#Marshal
-// This function adds some extra validation to the known fields and captures unknown fields.
-func (t cachedToken) MarshalJSON() ([]byte, error) {
- fields := map[string]interface{}{}
-
- setTokenFieldString(fields, "accessToken", t.AccessToken)
- setTokenFieldRFC3339(fields, "expiresAt", t.ExpiresAt)
-
- setTokenFieldString(fields, "refreshToken", t.RefreshToken)
- setTokenFieldString(fields, "clientId", t.ClientID)
- setTokenFieldString(fields, "clientSecret", t.ClientSecret)
-
- for k, v := range t.UnknownFields {
- if _, ok := fields[k]; ok {
- return nil, fmt.Errorf("unknown token field %v, duplicates known field", k)
- }
- fields[k] = v
- }
-
- return json.Marshal(fields)
-}
-
-func setTokenFieldString(fields map[string]interface{}, key, value string) {
- if value == "" {
- return
- }
- fields[key] = value
-}
-func setTokenFieldRFC3339(fields map[string]interface{}, key string, value *rfc3339) {
- if value == nil {
- return
- }
- fields[key] = value
-}
-
-// UnmarshalJSON provides custom unmarshalling because the standard library Go unmarshaller ignores unknown/unspecified
-// fields when unmarshalling from a struct: https://pkg.go.dev/encoding/json#Unmarshal
-// This function adds some extra validation to the known fields and captures unknown fields.
-func (t *cachedToken) UnmarshalJSON(b []byte) error {
- var fields map[string]interface{}
- if err := json.Unmarshal(b, &fields); err != nil {
- return nil
- }
-
- t.UnknownFields = map[string]interface{}{}
-
- for k, v := range fields {
- var err error
- switch k {
- case "accessToken":
- err = getTokenFieldString(v, &t.AccessToken)
- case "expiresAt":
- err = getTokenFieldRFC3339(v, &t.ExpiresAt)
- case "refreshToken":
- err = getTokenFieldString(v, &t.RefreshToken)
- case "clientId":
- err = getTokenFieldString(v, &t.ClientID)
- case "clientSecret":
- err = getTokenFieldString(v, &t.ClientSecret)
- default:
- t.UnknownFields[k] = v
- }
-
- if err != nil {
- return fmt.Errorf("field %q, %v", k, err)
- }
- }
-
- return nil
-}
-
-func getTokenFieldString(v interface{}, value *string) error {
- var ok bool
- *value, ok = v.(string)
- if !ok {
- return fmt.Errorf("expect value to be string, got %T", v)
- }
- return nil
-}
-
-func getTokenFieldRFC3339(v interface{}, value **rfc3339) error {
- var stringValue string
- if err := getTokenFieldString(v, &stringValue); err != nil {
- return err
- }
-
- timeValue, err := parseRFC3339(stringValue)
- if err != nil {
- return err
- }
-
- *value = &timeValue
- return nil
-}
-
-func loadCachedToken(filename string) (cachedToken, error) {
- fileBytes, err := ioutil.ReadFile(filename)
- if err != nil {
- return cachedToken{}, fmt.Errorf("failed to read cached SSO token file, %v", err)
- }
-
- var t cachedToken
- if err := json.Unmarshal(fileBytes, &t); err != nil {
- return cachedToken{}, fmt.Errorf("failed to parse cached SSO token file, %v", err)
- }
-
- if len(t.AccessToken) == 0 || t.ExpiresAt == nil || time.Time(*t.ExpiresAt).IsZero() {
- return cachedToken{}, fmt.Errorf(
- "cached SSO token must contain accessToken and expiresAt fields")
- }
-
- return t, nil
-}
-
-func storeCachedToken(filename string, t cachedToken, fileMode os.FileMode) (err error) {
- tmpFilename := filename + ".tmp-" + strconv.FormatInt(nowTime().UnixNano(), 10)
- if err := writeCacheFile(tmpFilename, fileMode, t); err != nil {
- return err
- }
-
- if err := os.Rename(tmpFilename, filename); err != nil {
- return fmt.Errorf("failed to replace old cached SSO token file, %v", err)
- }
-
- return nil
-}
-
-func writeCacheFile(filename string, fileMode os.FileMode, t cachedToken) (err error) {
- var f *os.File
- f, err = os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, fileMode)
- if err != nil {
- return fmt.Errorf("failed to create cached SSO token file %v", err)
- }
-
- defer func() {
- closeErr := f.Close()
- if err == nil && closeErr != nil {
- err = fmt.Errorf("failed to close cached SSO token file, %v", closeErr)
- }
- }()
-
- encoder := json.NewEncoder(f)
-
- if err = encoder.Encode(t); err != nil {
- return fmt.Errorf("failed to serialize cached SSO token, %v", err)
- }
-
- return nil
-}
-
-type rfc3339 time.Time
-
-// UnmarshalJSON decode rfc3339 from JSON format
-func (r *rfc3339) UnmarshalJSON(bytes []byte) error {
- var value string
- var err error
-
- if err = json.Unmarshal(bytes, &value); err != nil {
- return err
- }
-
- *r, err = parseRFC3339(value)
- return err
-}
-
-func parseRFC3339(v string) (rfc3339, error) {
- parsed, err := time.Parse(time.RFC3339, v)
- if err != nil {
- return rfc3339{}, fmt.Errorf("expected RFC3339 timestamp: %v", err)
- }
-
- return rfc3339(parsed), nil
-}
-
-// MarshalJSON encode rfc3339 to JSON format time
-func (r *rfc3339) MarshalJSON() ([]byte, error) {
- value := time.Time(*r).Format(time.RFC3339)
-
- // Use JSON unmarshal to unescape the quoted value making use of JSON's
- // quoting rules.
- return json.Marshal(value)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
deleted file mode 100644
index 3388b78b4..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ssocreds/token_provider.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package ssocreds
-
-import (
- "fmt"
- "os"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/auth/bearer"
- "github.com/aws/aws-sdk-go/service/ssooidc"
-)
-
-// CreateTokenAPIClient provides the interface for the SSOTokenProvider's API
-// client for calling CreateToken operation to refresh the SSO token.
-type CreateTokenAPIClient interface {
- CreateToken(input *ssooidc.CreateTokenInput) (*ssooidc.CreateTokenOutput, error)
-}
-
-// SSOTokenProviderOptions provides the options for configuring the
-// SSOTokenProvider.
-type SSOTokenProviderOptions struct {
- // Client that can be overridden
- Client CreateTokenAPIClient
-
- // The path the file containing the cached SSO token will be read from.
- // Initialized the NewSSOTokenProvider's cachedTokenFilepath parameter.
- CachedTokenFilepath string
-}
-
-// SSOTokenProvider provides a utility for refreshing SSO AccessTokens for
-// Bearer Authentication. The SSOTokenProvider can only be used to refresh
-// already cached SSO Tokens. This utility cannot perform the initial SSO
-// create token.
-//
-// The initial SSO create token should be preformed with the AWS CLI before the
-// Go application using the SSOTokenProvider will need to retrieve the SSO
-// token. If the AWS CLI has not created the token cache file, this provider
-// will return an error when attempting to retrieve the cached token.
-//
-// This provider will attempt to refresh the cached SSO token periodically if
-// needed when RetrieveBearerToken is called.
-//
-// A utility such as the AWS CLI must be used to initially create the SSO
-// session and cached token file.
-// https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
-type SSOTokenProvider struct {
- options SSOTokenProviderOptions
-}
-
-// NewSSOTokenProvider returns an initialized SSOTokenProvider that will
-// periodically refresh the SSO token cached stored in the cachedTokenFilepath.
-// The cachedTokenFilepath file's content will be rewritten by the token
-// provider when the token is refreshed.
-//
-// The client must be configured for the AWS region the SSO token was created for.
-func NewSSOTokenProvider(client CreateTokenAPIClient, cachedTokenFilepath string, optFns ...func(o *SSOTokenProviderOptions)) *SSOTokenProvider {
- options := SSOTokenProviderOptions{
- Client: client,
- CachedTokenFilepath: cachedTokenFilepath,
- }
- for _, fn := range optFns {
- fn(&options)
- }
-
- provider := &SSOTokenProvider{
- options: options,
- }
-
- return provider
-}
-
-// RetrieveBearerToken returns the SSO token stored in the cachedTokenFilepath
-// the SSOTokenProvider was created with. If the token has expired
-// RetrieveBearerToken will attempt to refresh it. If the token cannot be
-// refreshed or is not present an error will be returned.
-//
-// A utility such as the AWS CLI must be used to initially create the SSO
-// session and cached token file. https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-sso.html
-func (p *SSOTokenProvider) RetrieveBearerToken(ctx aws.Context) (bearer.Token, error) {
- cachedToken, err := loadCachedToken(p.options.CachedTokenFilepath)
- if err != nil {
- return bearer.Token{}, err
- }
-
- if cachedToken.ExpiresAt != nil && nowTime().After(time.Time(*cachedToken.ExpiresAt)) {
- cachedToken, err = p.refreshToken(cachedToken)
- if err != nil {
- return bearer.Token{}, fmt.Errorf("refresh cached SSO token failed, %v", err)
- }
- }
-
- expiresAt := toTime((*time.Time)(cachedToken.ExpiresAt))
- return bearer.Token{
- Value: cachedToken.AccessToken,
- CanExpire: !expiresAt.IsZero(),
- Expires: expiresAt,
- }, nil
-}
-
-func (p *SSOTokenProvider) refreshToken(token cachedToken) (cachedToken, error) {
- if token.ClientSecret == "" || token.ClientID == "" || token.RefreshToken == "" {
- return cachedToken{}, fmt.Errorf("cached SSO token is expired, or not present, and cannot be refreshed")
- }
-
- createResult, err := p.options.Client.CreateToken(&ssooidc.CreateTokenInput{
- ClientId: &token.ClientID,
- ClientSecret: &token.ClientSecret,
- RefreshToken: &token.RefreshToken,
- GrantType: aws.String("refresh_token"),
- })
- if err != nil {
- return cachedToken{}, fmt.Errorf("unable to refresh SSO token, %v", err)
- }
- if createResult.ExpiresIn == nil {
- return cachedToken{}, fmt.Errorf("missing required field ExpiresIn")
- }
- if createResult.AccessToken == nil {
- return cachedToken{}, fmt.Errorf("missing required field AccessToken")
- }
- if createResult.RefreshToken == nil {
- return cachedToken{}, fmt.Errorf("missing required field RefreshToken")
- }
-
- expiresAt := nowTime().Add(time.Duration(*createResult.ExpiresIn) * time.Second)
-
- token.AccessToken = *createResult.AccessToken
- token.ExpiresAt = (*rfc3339)(&expiresAt)
- token.RefreshToken = *createResult.RefreshToken
-
- fileInfo, err := os.Stat(p.options.CachedTokenFilepath)
- if err != nil {
- return cachedToken{}, fmt.Errorf("failed to stat cached SSO token file %v", err)
- }
-
- if err = storeCachedToken(p.options.CachedTokenFilepath, token, fileInfo.Mode()); err != nil {
- return cachedToken{}, fmt.Errorf("unable to cache refreshed SSO token, %v", err)
- }
-
- return token, nil
-}
-
-func toTime(p *time.Time) (v time.Time) {
- if p == nil {
- return v
- }
-
- return *p
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
deleted file mode 100644
index cbba1e3d5..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package credentials
-
-import (
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// StaticProviderName provides a name of Static provider
-const StaticProviderName = "StaticProvider"
-
-var (
- // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
- ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
-)
-
-// A StaticProvider is a set of credentials which are set programmatically,
-// and will never expire.
-type StaticProvider struct {
- Value
-}
-
-// NewStaticCredentials returns a pointer to a new Credentials object
-// wrapping a static credentials value provider. Token is only required
-// for temporary security credentials retrieved via STS, otherwise an empty
-// string can be passed for this parameter.
-func NewStaticCredentials(id, secret, token string) *Credentials {
- return NewCredentials(&StaticProvider{Value: Value{
- AccessKeyID: id,
- SecretAccessKey: secret,
- SessionToken: token,
- }})
-}
-
-// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object
-// wrapping the static credentials value provide. Same as NewStaticCredentials
-// but takes the creds Value instead of individual fields
-func NewStaticCredentialsFromCreds(creds Value) *Credentials {
- return NewCredentials(&StaticProvider{Value: creds})
-}
-
-// Retrieve returns the credentials or error if the credentials are invalid.
-func (s *StaticProvider) Retrieve() (Value, error) {
- if s.AccessKeyID == "" || s.SecretAccessKey == "" {
- return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
- }
-
- if len(s.Value.ProviderName) == 0 {
- s.Value.ProviderName = StaticProviderName
- }
- return s.Value, nil
-}
-
-// IsExpired returns if the credentials are expired.
-//
-// For StaticProvider, the credentials never expired.
-func (s *StaticProvider) IsExpired() bool {
- return false
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
deleted file mode 100644
index 86db488de..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ /dev/null
@@ -1,371 +0,0 @@
-/*
-Package stscreds are credential Providers to retrieve STS AWS credentials.
-
-STS provides multiple ways to retrieve credentials which can be used when making
-future AWS service API operation calls.
-
-The SDK will ensure that per instance of credentials.Credentials all requests
-to refresh the credentials will be synchronized. But, the SDK is unable to
-ensure synchronous usage of the AssumeRoleProvider if the value is shared
-between multiple Credentials, Sessions or service clients.
-
-# Assume Role
-
-To assume an IAM role using STS with the SDK you can create a new Credentials
-with the SDKs's stscreds package.
-
- // Initial credentials loaded from SDK's default credential chain. Such as
- // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
- // Role. These credentials will be used to to make the STS Assume Role API.
- sess := session.Must(session.NewSession())
-
- // Create the credentials from AssumeRoleProvider to assume the role
- // referenced by the "myRoleARN" ARN.
- creds := stscreds.NewCredentials(sess, "myRoleArn")
-
- // Create service client value configured for credentials
- // from assumed role.
- svc := s3.New(sess, &aws.Config{Credentials: creds})
-
-# Assume Role with static MFA Token
-
-To assume an IAM role with a MFA token you can either specify a MFA token code
-directly or provide a function to prompt the user each time the credentials
-need to refresh the role's credentials. Specifying the TokenCode should be used
-for short lived operations that will not need to be refreshed, and when you do
-not want to have direct control over the user provides their MFA token.
-
-With TokenCode the AssumeRoleProvider will be not be able to refresh the role's
-credentials.
-
- // Create the credentials from AssumeRoleProvider to assume the role
- // referenced by the "myRoleARN" ARN using the MFA token code provided.
- creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
- p.SerialNumber = aws.String("myTokenSerialNumber")
- p.TokenCode = aws.String("00000000")
- })
-
- // Create service client value configured for credentials
- // from assumed role.
- svc := s3.New(sess, &aws.Config{Credentials: creds})
-
-# Assume Role with MFA Token Provider
-
-To assume an IAM role with MFA for longer running tasks where the credentials
-may need to be refreshed setting the TokenProvider field of AssumeRoleProvider
-will allow the credential provider to prompt for new MFA token code when the
-role's credentials need to be refreshed.
-
-The StdinTokenProvider function is available to prompt on stdin to retrieve
-the MFA token code from the user. You can also implement custom prompts by
-satisfing the TokenProvider function signature.
-
-Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
-have undesirable results as the StdinTokenProvider will not be synchronized. A
-single Credentials with an AssumeRoleProvider can be shared safely.
-
- // Create the credentials from AssumeRoleProvider to assume the role
- // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
- creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
- p.SerialNumber = aws.String("myTokenSerialNumber")
- p.TokenProvider = stscreds.StdinTokenProvider
- })
-
- // Create service client value configured for credentials
- // from assumed role.
- svc := s3.New(sess, &aws.Config{Credentials: creds})
-*/
-package stscreds
-
-import (
- "fmt"
- "os"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/sdkrand"
- "github.com/aws/aws-sdk-go/service/sts"
-)
-
-// StdinTokenProvider will prompt on stderr and read from stdin for a string value.
-// An error is returned if reading from stdin fails.
-//
-// Use this function to read MFA tokens from stdin. The function makes no attempt
-// to make atomic prompts from stdin across multiple gorouties.
-//
-// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
-// have undesirable results as the StdinTokenProvider will not be synchronized. A
-// single Credentials with an AssumeRoleProvider can be shared safely
-//
-// Will wait forever until something is provided on the stdin.
-func StdinTokenProvider() (string, error) {
- var v string
- fmt.Fprintf(os.Stderr, "Assume Role MFA token code: ")
- _, err := fmt.Scanln(&v)
-
- return v, err
-}
-
-// ProviderName provides a name of AssumeRole provider
-const ProviderName = "AssumeRoleProvider"
-
-// AssumeRoler represents the minimal subset of the STS client API used by this provider.
-type AssumeRoler interface {
- AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
-}
-
-type assumeRolerWithContext interface {
- AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
-}
-
-// DefaultDuration is the default amount of time in minutes that the credentials
-// will be valid for.
-var DefaultDuration = time.Duration(15) * time.Minute
-
-// AssumeRoleProvider retrieves temporary credentials from the STS service, and
-// keeps track of their expiration time.
-//
-// This credential provider will be used by the SDKs default credential change
-// when shared configuration is enabled, and the shared config or shared credentials
-// file configure assume role. See Session docs for how to do this.
-//
-// AssumeRoleProvider does not provide any synchronization and it is not safe
-// to share this value across multiple Credentials, Sessions, or service clients
-// without also sharing the same Credentials instance.
-type AssumeRoleProvider struct {
- credentials.Expiry
-
- // STS client to make assume role request with.
- Client AssumeRoler
-
- // Role to be assumed.
- RoleARN string
-
- // Session name, if you wish to reuse the credentials elsewhere.
- RoleSessionName string
-
- // Optional, you can pass tag key-value pairs to your session. These tags are called session tags.
- Tags []*sts.Tag
-
- // A list of keys for session tags that you want to set as transitive.
- // If you set a tag key as transitive, the corresponding key and value passes to subsequent sessions in a role chain.
- TransitiveTagKeys []*string
-
- // Expiry duration of the STS credentials. Defaults to 15 minutes if not set.
- Duration time.Duration
-
- // Optional ExternalID to pass along, defaults to nil if not set.
- ExternalID *string
-
- // The policy plain text must be 2048 bytes or shorter. However, an internal
- // conversion compresses it into a packed binary format with a separate limit.
- // The PackedPolicySize response element indicates by percentage how close to
- // the upper size limit the policy is, with 100% equaling the maximum allowed
- // size.
- Policy *string
-
- // The ARNs of IAM managed policies you want to use as managed session policies.
- // The policies must exist in the same account as the role.
- //
- // This parameter is optional. You can provide up to 10 managed policy ARNs.
- // However, the plain text that you use for both inline and managed session
- // policies can't exceed 2,048 characters.
- //
- // An AWS conversion compresses the passed session policies and session tags
- // into a packed binary format that has a separate limit. Your request can fail
- // for this limit even if your plain text meets the other requirements. The
- // PackedPolicySize response element indicates by percentage how close the policies
- // and tags for your request are to the upper size limit.
- //
- // Passing policies to this operation returns new temporary credentials. The
- // resulting session's permissions are the intersection of the role's identity-based
- // policy and the session policies. You can use the role's temporary credentials
- // in subsequent AWS API calls to access resources in the account that owns
- // the role. You cannot use session policies to grant more permissions than
- // those allowed by the identity-based policy of the role that is being assumed.
- // For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- PolicyArns []*sts.PolicyDescriptorType
-
- // The identification number of the MFA device that is associated with the user
- // who is making the AssumeRole call. Specify this value if the trust policy
- // of the role being assumed includes a condition that requires MFA authentication.
- // The value is either the serial number for a hardware device (such as GAHT12345678)
- // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
- SerialNumber *string
-
- // The SourceIdentity which is used to identity a persistent identity through the whole session.
- // For more details see https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html
- SourceIdentity *string
-
- // The value provided by the MFA device, if the trust policy of the role being
- // assumed requires MFA (that is, if the policy includes a condition that tests
- // for MFA). If the role being assumed requires MFA and if the TokenCode value
- // is missing or expired, the AssumeRole call returns an "access denied" error.
- //
- // If SerialNumber is set and neither TokenCode nor TokenProvider are also
- // set an error will be returned.
- TokenCode *string
-
- // Async method of providing MFA token code for assuming an IAM role with MFA.
- // The value returned by the function will be used as the TokenCode in the Retrieve
- // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
- //
- // This token provider will be called when ever the assumed role's
- // credentials need to be refreshed when SerialNumber is also set and
- // TokenCode is not set.
- //
- // If both TokenCode and TokenProvider is set, TokenProvider will be used and
- // TokenCode is ignored.
- TokenProvider func() (string, error)
-
- // ExpiryWindow will allow the credentials to trigger refreshing prior to
- // the credentials actually expiring. This is beneficial so race conditions
- // with expiring credentials do not cause request to fail unexpectedly
- // due to ExpiredTokenException exceptions.
- //
- // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true
- // 10 seconds before the credentials are actually expired.
- //
- // If ExpiryWindow is 0 or less it will be ignored.
- ExpiryWindow time.Duration
-
- // MaxJitterFrac reduces the effective Duration of each credential requested
- // by a random percentage between 0 and MaxJitterFraction. MaxJitterFrac must
- // have a value between 0 and 1. Any other value may lead to expected behavior.
- // With a MaxJitterFrac value of 0, default) will no jitter will be used.
- //
- // For example, with a Duration of 30m and a MaxJitterFrac of 0.1, the
- // AssumeRole call will be made with an arbitrary Duration between 27m and
- // 30m.
- //
- // MaxJitterFrac should not be negative.
- MaxJitterFrac float64
-}
-
-// NewCredentials returns a pointer to a new Credentials value wrapping the
-// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation. The
-// Credentials value will attempt to refresh the credentials using the provider
-// when Credentials.Get is called, if the cached credentials are expiring.
-//
-// Takes a Config provider to create the STS client. The ConfigProvider is
-// satisfied by the session.Session type.
-//
-// It is safe to share the returned Credentials with multiple Sessions and
-// service clients. All access to the credentials and refreshing them
-// will be synchronized.
-func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
- p := &AssumeRoleProvider{
- Client: sts.New(c),
- RoleARN: roleARN,
- Duration: DefaultDuration,
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// NewCredentialsWithClient returns a pointer to a new Credentials value wrapping the
-// AssumeRoleProvider. The credentials will expire every 15 minutes and the
-// role will be named after a nanosecond timestamp of this operation. The
-// Credentials value will attempt to refresh the credentials using the provider
-// when Credentials.Get is called, if the cached credentials are expiring.
-//
-// Takes an AssumeRoler which can be satisfied by the STS client.
-//
-// It is safe to share the returned Credentials with multiple Sessions and
-// service clients. All access to the credentials and refreshing them
-// will be synchronized.
-func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
- p := &AssumeRoleProvider{
- Client: svc,
- RoleARN: roleARN,
- Duration: DefaultDuration,
- }
-
- for _, option := range options {
- option(p)
- }
-
- return credentials.NewCredentials(p)
-}
-
-// Retrieve generates a new set of temporary credentials using STS.
-func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
- return p.RetrieveWithContext(aws.BackgroundContext())
-}
-
-// RetrieveWithContext generates a new set of temporary credentials using STS.
-func (p *AssumeRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
- // Apply defaults where parameters are not set.
- if p.RoleSessionName == "" {
- // Try to work out a role name that will hopefully end up unique.
- p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano())
- }
- if p.Duration == 0 {
- // Expire as often as AWS permits.
- p.Duration = DefaultDuration
- }
- jitter := time.Duration(sdkrand.SeededRand.Float64() * p.MaxJitterFrac * float64(p.Duration))
- input := &sts.AssumeRoleInput{
- DurationSeconds: aws.Int64(int64((p.Duration - jitter) / time.Second)),
- RoleArn: aws.String(p.RoleARN),
- RoleSessionName: aws.String(p.RoleSessionName),
- ExternalId: p.ExternalID,
- Tags: p.Tags,
- PolicyArns: p.PolicyArns,
- TransitiveTagKeys: p.TransitiveTagKeys,
- SourceIdentity: p.SourceIdentity,
- }
- if p.Policy != nil {
- input.Policy = p.Policy
- }
- if p.SerialNumber != nil {
- if p.TokenCode != nil {
- input.SerialNumber = p.SerialNumber
- input.TokenCode = p.TokenCode
- } else if p.TokenProvider != nil {
- input.SerialNumber = p.SerialNumber
- code, err := p.TokenProvider()
- if err != nil {
- return credentials.Value{ProviderName: ProviderName}, err
- }
- input.TokenCode = aws.String(code)
- } else {
- return credentials.Value{ProviderName: ProviderName},
- awserr.New("AssumeRoleTokenNotAvailable",
- "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
- }
- }
-
- var roleOutput *sts.AssumeRoleOutput
- var err error
-
- if c, ok := p.Client.(assumeRolerWithContext); ok {
- roleOutput, err = c.AssumeRoleWithContext(ctx, input)
- } else {
- roleOutput, err = p.Client.AssumeRole(input)
- }
-
- if err != nil {
- return credentials.Value{ProviderName: ProviderName}, err
- }
-
- // We will proactively generate new credentials before they expire.
- p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow)
-
- return credentials.Value{
- AccessKeyID: *roleOutput.Credentials.AccessKeyId,
- SecretAccessKey: *roleOutput.Credentials.SecretAccessKey,
- SessionToken: *roleOutput.Credentials.SessionToken,
- ProviderName: ProviderName,
- }, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
deleted file mode 100644
index 19ad619aa..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/web_identity_provider.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package stscreds
-
-import (
- "fmt"
- "io/ioutil"
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/service/sts"
- "github.com/aws/aws-sdk-go/service/sts/stsiface"
-)
-
-const (
- // ErrCodeWebIdentity will be used as an error code when constructing
- // a new error to be returned during session creation or retrieval.
- ErrCodeWebIdentity = "WebIdentityErr"
-
- // WebIdentityProviderName is the web identity provider name
- WebIdentityProviderName = "WebIdentityCredentials"
-)
-
-// now is used to return a time.Time object representing
-// the current time. This can be used to easily test and
-// compare test values.
-var now = time.Now
-
-// TokenFetcher should return WebIdentity token bytes or an error
-type TokenFetcher interface {
- FetchToken(credentials.Context) ([]byte, error)
-}
-
-// FetchTokenPath is a path to a WebIdentity token file
-type FetchTokenPath string
-
-// FetchToken returns a token by reading from the filesystem
-func (f FetchTokenPath) FetchToken(ctx credentials.Context) ([]byte, error) {
- data, err := ioutil.ReadFile(string(f))
- if err != nil {
- errMsg := fmt.Sprintf("unable to read file at %s", f)
- return nil, awserr.New(ErrCodeWebIdentity, errMsg, err)
- }
- return data, nil
-}
-
-// WebIdentityRoleProvider is used to retrieve credentials using
-// an OIDC token.
-type WebIdentityRoleProvider struct {
- credentials.Expiry
-
- // The policy ARNs to use with the web identity assumed role.
- PolicyArns []*sts.PolicyDescriptorType
-
- // Duration the STS credentials will be valid for. Truncated to seconds.
- // If unset, the assumed role will use AssumeRoleWithWebIdentity's default
- // expiry duration. See
- // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
- // for more information.
- Duration time.Duration
-
- // The amount of time the credentials will be refreshed before they expire.
- // This is useful refresh credentials before they expire to reduce risk of
- // using credentials as they expire. If unset, will default to no expiry
- // window.
- ExpiryWindow time.Duration
-
- client stsiface.STSAPI
-
- tokenFetcher TokenFetcher
- roleARN string
- roleSessionName string
-}
-
-// NewWebIdentityCredentials will return a new set of credentials with a given
-// configuration, role arn, and token file path.
-//
-// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
-// functional options, and wrap with credentials.NewCredentials helper.
-func NewWebIdentityCredentials(c client.ConfigProvider, roleARN, roleSessionName, path string) *credentials.Credentials {
- svc := sts.New(c)
- p := NewWebIdentityRoleProvider(svc, roleARN, roleSessionName, path)
- return credentials.NewCredentials(p)
-}
-
-// NewWebIdentityRoleProvider will return a new WebIdentityRoleProvider with the
-// provided stsiface.STSAPI
-//
-// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
-// functional options.
-func NewWebIdentityRoleProvider(svc stsiface.STSAPI, roleARN, roleSessionName, path string) *WebIdentityRoleProvider {
- return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, FetchTokenPath(path))
-}
-
-// NewWebIdentityRoleProviderWithToken will return a new WebIdentityRoleProvider with the
-// provided stsiface.STSAPI and a TokenFetcher
-//
-// Deprecated: Use NewWebIdentityRoleProviderWithOptions for flexible
-// functional options.
-func NewWebIdentityRoleProviderWithToken(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher) *WebIdentityRoleProvider {
- return NewWebIdentityRoleProviderWithOptions(svc, roleARN, roleSessionName, tokenFetcher)
-}
-
-// NewWebIdentityRoleProviderWithOptions will return an initialize
-// WebIdentityRoleProvider with the provided stsiface.STSAPI, role ARN, and a
-// TokenFetcher. Additional options can be provided as functional options.
-//
-// TokenFetcher is the implementation that will retrieve the JWT token from to
-// assume the role with. Use the provided FetchTokenPath implementation to
-// retrieve the JWT token using a file system path.
-func NewWebIdentityRoleProviderWithOptions(svc stsiface.STSAPI, roleARN, roleSessionName string, tokenFetcher TokenFetcher, optFns ...func(*WebIdentityRoleProvider)) *WebIdentityRoleProvider {
- p := WebIdentityRoleProvider{
- client: svc,
- tokenFetcher: tokenFetcher,
- roleARN: roleARN,
- roleSessionName: roleSessionName,
- }
-
- for _, fn := range optFns {
- fn(&p)
- }
-
- return &p
-}
-
-// Retrieve will attempt to assume a role from a token which is located at
-// 'WebIdentityTokenFilePath' specified destination and if that is empty an
-// error will be returned.
-func (p *WebIdentityRoleProvider) Retrieve() (credentials.Value, error) {
- return p.RetrieveWithContext(aws.BackgroundContext())
-}
-
-// RetrieveWithContext will attempt to assume a role from a token which is
-// located at 'WebIdentityTokenFilePath' specified destination and if that is
-// empty an error will be returned.
-func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (credentials.Value, error) {
- b, err := p.tokenFetcher.FetchToken(ctx)
- if err != nil {
- return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed fetching WebIdentity token: ", err)
- }
-
- sessionName := p.roleSessionName
- if len(sessionName) == 0 {
- // session name is used to uniquely identify a session. This simply
- // uses unix time in nanoseconds to uniquely identify sessions.
- sessionName = strconv.FormatInt(now().UnixNano(), 10)
- }
-
- var duration *int64
- if p.Duration != 0 {
- duration = aws.Int64(int64(p.Duration / time.Second))
- }
-
- req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
- PolicyArns: p.PolicyArns,
- RoleArn: &p.roleARN,
- RoleSessionName: &sessionName,
- WebIdentityToken: aws.String(string(b)),
- DurationSeconds: duration,
- })
-
- req.SetContext(ctx)
-
- // InvalidIdentityToken error is a temporary error that can occur
- // when assuming an Role with a JWT web identity token.
- req.RetryErrorCodes = append(req.RetryErrorCodes, sts.ErrCodeInvalidIdentityTokenException)
- if err := req.Send(); err != nil {
- return credentials.Value{}, awserr.New(ErrCodeWebIdentity, "failed to retrieve credentials", err)
- }
-
- p.SetExpiration(aws.TimeValue(resp.Credentials.Expiration), p.ExpiryWindow)
-
- value := credentials.Value{
- AccessKeyID: aws.StringValue(resp.Credentials.AccessKeyId),
- SecretAccessKey: aws.StringValue(resp.Credentials.SecretAccessKey),
- SessionToken: aws.StringValue(resp.Credentials.SessionToken),
- ProviderName: WebIdentityProviderName,
- }
- return value, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
deleted file mode 100644
index eeb3bc0c5..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/crr/cache.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package crr
-
-import (
- "sync/atomic"
-)
-
-// EndpointCache is an LRU cache that holds a series of endpoints
-// based on some key. The datastructure makes use of a read write
-// mutex to enable asynchronous use.
-type EndpointCache struct {
- // size is used to count the number elements in the cache.
- // The atomic package is used to ensure this size is accurate when
- // using multiple goroutines.
- size int64
- endpoints syncMap
- endpointLimit int64
-}
-
-// NewEndpointCache will return a newly initialized cache with a limit
-// of endpointLimit entries.
-func NewEndpointCache(endpointLimit int64) *EndpointCache {
- return &EndpointCache{
- endpointLimit: endpointLimit,
- endpoints: newSyncMap(),
- }
-}
-
-// get is a concurrent safe get operation that will retrieve an endpoint
-// based on endpointKey. A boolean will also be returned to illustrate whether
-// or not the endpoint had been found.
-func (c *EndpointCache) get(endpointKey string) (Endpoint, bool) {
- endpoint, ok := c.endpoints.Load(endpointKey)
- if !ok {
- return Endpoint{}, false
- }
-
- ev := endpoint.(Endpoint)
- ev.Prune()
-
- c.endpoints.Store(endpointKey, ev)
- return endpoint.(Endpoint), true
-}
-
-// Has returns if the enpoint cache contains a valid entry for the endpoint key
-// provided.
-func (c *EndpointCache) Has(endpointKey string) bool {
- endpoint, ok := c.get(endpointKey)
- _, found := endpoint.GetValidAddress()
-
- return ok && found
-}
-
-// Get will retrieve a weighted address based off of the endpoint key. If an endpoint
-// should be retrieved, due to not existing or the current endpoint has expired
-// the Discoverer object that was passed in will attempt to discover a new endpoint
-// and add that to the cache.
-func (c *EndpointCache) Get(d Discoverer, endpointKey string, required bool) (WeightedAddress, error) {
- var err error
- endpoint, ok := c.get(endpointKey)
- weighted, found := endpoint.GetValidAddress()
- shouldGet := !ok || !found
-
- if required && shouldGet {
- if endpoint, err = c.discover(d, endpointKey); err != nil {
- return WeightedAddress{}, err
- }
-
- weighted, _ = endpoint.GetValidAddress()
- } else if shouldGet {
- go c.discover(d, endpointKey)
- }
-
- return weighted, nil
-}
-
-// Add is a concurrent safe operation that will allow new endpoints to be added
-// to the cache. If the cache is full, the number of endpoints equal endpointLimit,
-// then this will remove the oldest entry before adding the new endpoint.
-func (c *EndpointCache) Add(endpoint Endpoint) {
- // de-dups multiple adds of an endpoint with a pre-existing key
- if iface, ok := c.endpoints.Load(endpoint.Key); ok {
- e := iface.(Endpoint)
- if e.Len() > 0 {
- return
- }
- }
- c.endpoints.Store(endpoint.Key, endpoint)
-
- size := atomic.AddInt64(&c.size, 1)
- if size > 0 && size > c.endpointLimit {
- c.deleteRandomKey()
- }
-}
-
-// deleteRandomKey will delete a random key from the cache. If
-// no key was deleted false will be returned.
-func (c *EndpointCache) deleteRandomKey() bool {
- atomic.AddInt64(&c.size, -1)
- found := false
-
- c.endpoints.Range(func(key, value interface{}) bool {
- found = true
- c.endpoints.Delete(key)
-
- return false
- })
-
- return found
-}
-
-// discover will get and store and endpoint using the Discoverer.
-func (c *EndpointCache) discover(d Discoverer, endpointKey string) (Endpoint, error) {
- endpoint, err := d.Discover()
- if err != nil {
- return Endpoint{}, err
- }
-
- endpoint.Key = endpointKey
- c.Add(endpoint)
-
- return endpoint, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
deleted file mode 100644
index 2b088bdbc..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/crr/endpoint.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package crr
-
-import (
- "net/url"
- "sort"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
-)
-
-// Endpoint represents an endpoint used in endpoint discovery.
-type Endpoint struct {
- Key string
- Addresses WeightedAddresses
-}
-
-// WeightedAddresses represents a list of WeightedAddress.
-type WeightedAddresses []WeightedAddress
-
-// WeightedAddress represents an address with a given weight.
-type WeightedAddress struct {
- URL *url.URL
- Expired time.Time
-}
-
-// HasExpired will return whether or not the endpoint has expired with
-// the exception of a zero expiry meaning does not expire.
-func (e WeightedAddress) HasExpired() bool {
- return e.Expired.Before(time.Now())
-}
-
-// Add will add a given WeightedAddress to the address list of Endpoint.
-func (e *Endpoint) Add(addr WeightedAddress) {
- e.Addresses = append(e.Addresses, addr)
-}
-
-// Len returns the number of valid endpoints where valid means the endpoint
-// has not expired.
-func (e *Endpoint) Len() int {
- validEndpoints := 0
- for _, endpoint := range e.Addresses {
- if endpoint.HasExpired() {
- continue
- }
-
- validEndpoints++
- }
- return validEndpoints
-}
-
-// GetValidAddress will return a non-expired weight endpoint
-func (e *Endpoint) GetValidAddress() (WeightedAddress, bool) {
- for i := 0; i < len(e.Addresses); i++ {
- we := e.Addresses[i]
-
- if we.HasExpired() {
- e.Addresses = append(e.Addresses[:i], e.Addresses[i+1:]...)
- i--
- continue
- }
-
- we.URL = cloneURL(we.URL)
-
- return we, true
- }
-
- return WeightedAddress{}, false
-}
-
-// Prune will prune the expired addresses from the endpoint by allocating a new []WeightAddress.
-// This is not concurrent safe, and should be called from a single owning thread.
-func (e *Endpoint) Prune() bool {
- validLen := e.Len()
- if validLen == len(e.Addresses) {
- return false
- }
- wa := make([]WeightedAddress, 0, validLen)
- for i := range e.Addresses {
- if e.Addresses[i].HasExpired() {
- continue
- }
- wa = append(wa, e.Addresses[i])
- }
- e.Addresses = wa
- return true
-}
-
-// Discoverer is an interface used to discovery which endpoint hit. This
-// allows for specifics about what parameters need to be used to be contained
-// in the Discoverer implementor.
-type Discoverer interface {
- Discover() (Endpoint, error)
-}
-
-// BuildEndpointKey will sort the keys in alphabetical order and then retrieve
-// the values in that order. Those values are then concatenated together to form
-// the endpoint key.
-func BuildEndpointKey(params map[string]*string) string {
- keys := make([]string, len(params))
- i := 0
-
- for k := range params {
- keys[i] = k
- i++
- }
- sort.Strings(keys)
-
- values := make([]string, len(params))
- for i, k := range keys {
- if params[k] == nil {
- continue
- }
-
- values[i] = aws.StringValue(params[k])
- }
-
- return strings.Join(values, ".")
-}
-
-func cloneURL(u *url.URL) (clone *url.URL) {
- clone = &url.URL{}
-
- *clone = *u
-
- if u.User != nil {
- user := *u.User
- clone.User = &user
- }
-
- return clone
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go
deleted file mode 100644
index f7b65ac01..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build go1.9
-// +build go1.9
-
-package crr
-
-import (
- "sync"
-)
-
-type syncMap sync.Map
-
-func newSyncMap() syncMap {
- return syncMap{}
-}
-
-func (m *syncMap) Load(key interface{}) (interface{}, bool) {
- return (*sync.Map)(m).Load(key)
-}
-
-func (m *syncMap) Store(key interface{}, value interface{}) {
- (*sync.Map)(m).Store(key, value)
-}
-
-func (m *syncMap) Delete(key interface{}) {
- (*sync.Map)(m).Delete(key)
-}
-
-func (m *syncMap) Range(f func(interface{}, interface{}) bool) {
- (*sync.Map)(m).Range(f)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go
deleted file mode 100644
index eb4f6aca2..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/crr/sync_map_1_8.go
+++ /dev/null
@@ -1,49 +0,0 @@
-//go:build !go1.9
-// +build !go1.9
-
-package crr
-
-import (
- "sync"
-)
-
-type syncMap struct {
- container map[interface{}]interface{}
- lock sync.RWMutex
-}
-
-func newSyncMap() syncMap {
- return syncMap{
- container: map[interface{}]interface{}{},
- }
-}
-
-func (m *syncMap) Load(key interface{}) (interface{}, bool) {
- m.lock.RLock()
- defer m.lock.RUnlock()
-
- v, ok := m.container[key]
- return v, ok
-}
-
-func (m *syncMap) Store(key interface{}, value interface{}) {
- m.lock.Lock()
- defer m.lock.Unlock()
-
- m.container[key] = value
-}
-
-func (m *syncMap) Delete(key interface{}) {
- m.lock.Lock()
- defer m.lock.Unlock()
-
- delete(m.container, key)
-}
-
-func (m *syncMap) Range(f func(interface{}, interface{}) bool) {
- for k, v := range m.container {
- if !f(k, v) {
- return
- }
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
deleted file mode 100644
index 25a66d1dd..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/doc.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Package csm provides the Client Side Monitoring (CSM) client which enables
-// sending metrics via UDP connection to the CSM agent. This package provides
-// control options, and configuration for the CSM client. The client can be
-// controlled manually, or automatically via the SDK's Session configuration.
-//
-// Enabling CSM client via SDK's Session configuration
-//
-// The CSM client can be enabled automatically via SDK's Session configuration.
-// The SDK's session configuration enables the CSM client if the AWS_CSM_PORT
-// environment variable is set to a non-empty value.
-//
-// The configuration options for the CSM client via the SDK's session
-// configuration are:
-//
-// * AWS_CSM_PORT=
-// The port number the CSM agent will receive metrics on.
-//
-// * AWS_CSM_HOST=
-// The hostname, or IP address the CSM agent will receive metrics on.
-// Without port number.
-//
-// Manually enabling the CSM client
-//
-// The CSM client can be started, paused, and resumed manually. The Start
-// function will enable the CSM client to publish metrics to the CSM agent. It
-// is safe to call Start concurrently, but if Start is called additional times
-// with different ClientID or address it will panic.
-//
-// r, err := csm.Start("clientID", ":31000")
-// if err != nil {
-// panic(fmt.Errorf("failed starting CSM: %v", err))
-// }
-//
-// When controlling the CSM client manually, you must also inject its request
-// handlers into the SDK's Session configuration for the SDK's API clients to
-// publish metrics.
-//
-// sess, err := session.NewSession(&aws.Config{})
-// if err != nil {
-// panic(fmt.Errorf("failed loading session: %v", err))
-// }
-//
-// // Add CSM client's metric publishing request handlers to the SDK's
-// // Session Configuration.
-// r.InjectHandlers(&sess.Handlers)
-//
-// Controlling CSM client
-//
-// Once the CSM client has been enabled the Get function will return a Reporter
-// value that you can use to pause and resume the metrics published to the CSM
-// agent. If Get function is called before the reporter is enabled with the
-// Start function or via SDK's Session configuration nil will be returned.
-//
-// The Pause method can be called to stop the CSM client publishing metrics to
-// the CSM agent. The Continue method will resume metric publishing.
-//
-// // Get the CSM client Reporter.
-// r := csm.Get()
-//
-// // Will pause monitoring
-// r.Pause()
-// resp, err = client.GetObject(&s3.GetObjectInput{
-// Bucket: aws.String("bucket"),
-// Key: aws.String("key"),
-// })
-//
-// // Resume monitoring
-// r.Continue()
-package csm
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
deleted file mode 100644
index 4b19e2800..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/enable.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package csm
-
-import (
- "fmt"
- "strings"
- "sync"
-)
-
-var (
- lock sync.Mutex
-)
-
-const (
- // DefaultPort is used when no port is specified.
- DefaultPort = "31000"
-
- // DefaultHost is the host that will be used when none is specified.
- DefaultHost = "127.0.0.1"
-)
-
-// AddressWithDefaults returns a CSM address built from the host and port
-// values. If the host or port is not set, default values will be used
-// instead. If host is "localhost" it will be replaced with "127.0.0.1".
-func AddressWithDefaults(host, port string) string {
- if len(host) == 0 || strings.EqualFold(host, "localhost") {
- host = DefaultHost
- }
-
- if len(port) == 0 {
- port = DefaultPort
- }
-
- // Only IP6 host can contain a colon
- if strings.Contains(host, ":") {
- return "[" + host + "]:" + port
- }
-
- return host + ":" + port
-}
-
-// Start will start a long running go routine to capture
-// client side metrics. Calling start multiple time will only
-// start the metric listener once and will panic if a different
-// client ID or port is passed in.
-//
-// r, err := csm.Start("clientID", "127.0.0.1:31000")
-// if err != nil {
-// panic(fmt.Errorf("expected no error, but received %v", err))
-// }
-// sess := session.NewSession()
-// r.InjectHandlers(sess.Handlers)
-//
-// svc := s3.New(sess)
-// out, err := svc.GetObject(&s3.GetObjectInput{
-// Bucket: aws.String("bucket"),
-// Key: aws.String("key"),
-// })
-func Start(clientID string, url string) (*Reporter, error) {
- lock.Lock()
- defer lock.Unlock()
-
- if sender == nil {
- sender = newReporter(clientID, url)
- } else {
- if sender.clientID != clientID {
- panic(fmt.Errorf("inconsistent client IDs. %q was expected, but received %q", sender.clientID, clientID))
- }
-
- if sender.url != url {
- panic(fmt.Errorf("inconsistent URLs. %q was expected, but received %q", sender.url, url))
- }
- }
-
- if err := connect(url); err != nil {
- sender = nil
- return nil, err
- }
-
- return sender, nil
-}
-
-// Get will return a reporter if one exists, if one does not exist, nil will
-// be returned.
-func Get() *Reporter {
- lock.Lock()
- defer lock.Unlock()
-
- return sender
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
deleted file mode 100644
index 5bacc791a..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package csm
-
-import (
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
-)
-
-type metricTime time.Time
-
-func (t metricTime) MarshalJSON() ([]byte, error) {
- ns := time.Duration(time.Time(t).UnixNano())
- return []byte(strconv.FormatInt(int64(ns/time.Millisecond), 10)), nil
-}
-
-type metric struct {
- ClientID *string `json:"ClientId,omitempty"`
- API *string `json:"Api,omitempty"`
- Service *string `json:"Service,omitempty"`
- Timestamp *metricTime `json:"Timestamp,omitempty"`
- Type *string `json:"Type,omitempty"`
- Version *int `json:"Version,omitempty"`
-
- AttemptCount *int `json:"AttemptCount,omitempty"`
- Latency *int `json:"Latency,omitempty"`
-
- Fqdn *string `json:"Fqdn,omitempty"`
- UserAgent *string `json:"UserAgent,omitempty"`
- AttemptLatency *int `json:"AttemptLatency,omitempty"`
-
- SessionToken *string `json:"SessionToken,omitempty"`
- Region *string `json:"Region,omitempty"`
- AccessKey *string `json:"AccessKey,omitempty"`
- HTTPStatusCode *int `json:"HttpStatusCode,omitempty"`
- XAmzID2 *string `json:"XAmzId2,omitempty"`
- XAmzRequestID *string `json:"XAmznRequestId,omitempty"`
-
- AWSException *string `json:"AwsException,omitempty"`
- AWSExceptionMessage *string `json:"AwsExceptionMessage,omitempty"`
- SDKException *string `json:"SdkException,omitempty"`
- SDKExceptionMessage *string `json:"SdkExceptionMessage,omitempty"`
-
- FinalHTTPStatusCode *int `json:"FinalHttpStatusCode,omitempty"`
- FinalAWSException *string `json:"FinalAwsException,omitempty"`
- FinalAWSExceptionMessage *string `json:"FinalAwsExceptionMessage,omitempty"`
- FinalSDKException *string `json:"FinalSdkException,omitempty"`
- FinalSDKExceptionMessage *string `json:"FinalSdkExceptionMessage,omitempty"`
-
- DestinationIP *string `json:"DestinationIp,omitempty"`
- ConnectionReused *int `json:"ConnectionReused,omitempty"`
-
- AcquireConnectionLatency *int `json:"AcquireConnectionLatency,omitempty"`
- ConnectLatency *int `json:"ConnectLatency,omitempty"`
- RequestLatency *int `json:"RequestLatency,omitempty"`
- DNSLatency *int `json:"DnsLatency,omitempty"`
- TCPLatency *int `json:"TcpLatency,omitempty"`
- SSLLatency *int `json:"SslLatency,omitempty"`
-
- MaxRetriesExceeded *int `json:"MaxRetriesExceeded,omitempty"`
-}
-
-func (m *metric) TruncateFields() {
- m.ClientID = truncateString(m.ClientID, 255)
- m.UserAgent = truncateString(m.UserAgent, 256)
-
- m.AWSException = truncateString(m.AWSException, 128)
- m.AWSExceptionMessage = truncateString(m.AWSExceptionMessage, 512)
-
- m.SDKException = truncateString(m.SDKException, 128)
- m.SDKExceptionMessage = truncateString(m.SDKExceptionMessage, 512)
-
- m.FinalAWSException = truncateString(m.FinalAWSException, 128)
- m.FinalAWSExceptionMessage = truncateString(m.FinalAWSExceptionMessage, 512)
-
- m.FinalSDKException = truncateString(m.FinalSDKException, 128)
- m.FinalSDKExceptionMessage = truncateString(m.FinalSDKExceptionMessage, 512)
-}
-
-func truncateString(v *string, l int) *string {
- if v != nil && len(*v) > l {
- nv := (*v)[:l]
- return &nv
- }
-
- return v
-}
-
-func (m *metric) SetException(e metricException) {
- switch te := e.(type) {
- case awsException:
- m.AWSException = aws.String(te.exception)
- m.AWSExceptionMessage = aws.String(te.message)
- case sdkException:
- m.SDKException = aws.String(te.exception)
- m.SDKExceptionMessage = aws.String(te.message)
- }
-}
-
-func (m *metric) SetFinalException(e metricException) {
- switch te := e.(type) {
- case awsException:
- m.FinalAWSException = aws.String(te.exception)
- m.FinalAWSExceptionMessage = aws.String(te.message)
- case sdkException:
- m.FinalSDKException = aws.String(te.exception)
- m.FinalSDKExceptionMessage = aws.String(te.message)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
deleted file mode 100644
index 82a3e345e..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_chan.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package csm
-
-import (
- "sync/atomic"
-)
-
-const (
- runningEnum = iota
- pausedEnum
-)
-
-var (
- // MetricsChannelSize of metrics to hold in the channel
- MetricsChannelSize = 100
-)
-
-type metricChan struct {
- ch chan metric
- paused *int64
-}
-
-func newMetricChan(size int) metricChan {
- return metricChan{
- ch: make(chan metric, size),
- paused: new(int64),
- }
-}
-
-func (ch *metricChan) Pause() {
- atomic.StoreInt64(ch.paused, pausedEnum)
-}
-
-func (ch *metricChan) Continue() {
- atomic.StoreInt64(ch.paused, runningEnum)
-}
-
-func (ch *metricChan) IsPaused() bool {
- v := atomic.LoadInt64(ch.paused)
- return v == pausedEnum
-}
-
-// Push will push metrics to the metric channel if the channel
-// is not paused
-func (ch *metricChan) Push(m metric) bool {
- if ch.IsPaused() {
- return false
- }
-
- select {
- case ch.ch <- m:
- return true
- default:
- return false
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
deleted file mode 100644
index 54a99280c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/metric_exception.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package csm
-
-type metricException interface {
- Exception() string
- Message() string
-}
-
-type requestException struct {
- exception string
- message string
-}
-
-func (e requestException) Exception() string {
- return e.exception
-}
-func (e requestException) Message() string {
- return e.message
-}
-
-type awsException struct {
- requestException
-}
-
-type sdkException struct {
- requestException
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go b/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
deleted file mode 100644
index 835bcd49c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/csm/reporter.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package csm
-
-import (
- "encoding/json"
- "net"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// Reporter will gather metrics of API requests made and
-// send those metrics to the CSM endpoint.
-type Reporter struct {
- clientID string
- url string
- conn net.Conn
- metricsCh metricChan
- done chan struct{}
-}
-
-var (
- sender *Reporter
-)
-
-func connect(url string) error {
- const network = "udp"
- if err := sender.connect(network, url); err != nil {
- return err
- }
-
- if sender.done == nil {
- sender.done = make(chan struct{})
- go sender.start()
- }
-
- return nil
-}
-
-func newReporter(clientID, url string) *Reporter {
- return &Reporter{
- clientID: clientID,
- url: url,
- metricsCh: newMetricChan(MetricsChannelSize),
- }
-}
-
-func (rep *Reporter) sendAPICallAttemptMetric(r *request.Request) {
- if rep == nil {
- return
- }
-
- now := time.Now()
- creds, _ := r.Config.Credentials.Get()
-
- m := metric{
- ClientID: aws.String(rep.clientID),
- API: aws.String(r.Operation.Name),
- Service: aws.String(r.ClientInfo.ServiceID),
- Timestamp: (*metricTime)(&now),
- UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
- Region: r.Config.Region,
- Type: aws.String("ApiCallAttempt"),
- Version: aws.Int(1),
-
- XAmzRequestID: aws.String(r.RequestID),
-
- AttemptLatency: aws.Int(int(now.Sub(r.AttemptTime).Nanoseconds() / int64(time.Millisecond))),
- AccessKey: aws.String(creds.AccessKeyID),
- }
-
- if r.HTTPResponse != nil {
- m.HTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
- }
-
- if r.Error != nil {
- if awserr, ok := r.Error.(awserr.Error); ok {
- m.SetException(getMetricException(awserr))
- }
- }
-
- m.TruncateFields()
- rep.metricsCh.Push(m)
-}
-
-func getMetricException(err awserr.Error) metricException {
- msg := err.Error()
- code := err.Code()
-
- switch code {
- case request.ErrCodeRequestError,
- request.ErrCodeSerialization,
- request.CanceledErrorCode:
- return sdkException{
- requestException{exception: code, message: msg},
- }
- default:
- return awsException{
- requestException{exception: code, message: msg},
- }
- }
-}
-
-func (rep *Reporter) sendAPICallMetric(r *request.Request) {
- if rep == nil {
- return
- }
-
- now := time.Now()
- m := metric{
- ClientID: aws.String(rep.clientID),
- API: aws.String(r.Operation.Name),
- Service: aws.String(r.ClientInfo.ServiceID),
- Timestamp: (*metricTime)(&now),
- UserAgent: aws.String(r.HTTPRequest.Header.Get("User-Agent")),
- Type: aws.String("ApiCall"),
- AttemptCount: aws.Int(r.RetryCount + 1),
- Region: r.Config.Region,
- Latency: aws.Int(int(time.Since(r.Time) / time.Millisecond)),
- XAmzRequestID: aws.String(r.RequestID),
- MaxRetriesExceeded: aws.Int(boolIntValue(r.RetryCount >= r.MaxRetries())),
- }
-
- if r.HTTPResponse != nil {
- m.FinalHTTPStatusCode = aws.Int(r.HTTPResponse.StatusCode)
- }
-
- if r.Error != nil {
- if awserr, ok := r.Error.(awserr.Error); ok {
- m.SetFinalException(getMetricException(awserr))
- }
- }
-
- m.TruncateFields()
-
- // TODO: Probably want to figure something out for logging dropped
- // metrics
- rep.metricsCh.Push(m)
-}
-
-func (rep *Reporter) connect(network, url string) error {
- if rep.conn != nil {
- rep.conn.Close()
- }
-
- conn, err := net.Dial(network, url)
- if err != nil {
- return awserr.New("UDPError", "Could not connect", err)
- }
-
- rep.conn = conn
-
- return nil
-}
-
-func (rep *Reporter) close() {
- if rep.done != nil {
- close(rep.done)
- }
-
- rep.metricsCh.Pause()
-}
-
-func (rep *Reporter) start() {
- defer func() {
- rep.metricsCh.Pause()
- }()
-
- for {
- select {
- case <-rep.done:
- rep.done = nil
- return
- case m := <-rep.metricsCh.ch:
- // TODO: What to do with this error? Probably should just log
- b, err := json.Marshal(m)
- if err != nil {
- continue
- }
-
- rep.conn.Write(b)
- }
- }
-}
-
-// Pause will pause the metric channel preventing any new metrics from being
-// added. It is safe to call concurrently with other calls to Pause, but if
-// called concurently with Continue can lead to unexpected state.
-func (rep *Reporter) Pause() {
- lock.Lock()
- defer lock.Unlock()
-
- if rep == nil {
- return
- }
-
- rep.close()
-}
-
-// Continue will reopen the metric channel and allow for monitoring to be
-// resumed. It is safe to call concurrently with other calls to Continue, but
-// if called concurently with Pause can lead to unexpected state.
-func (rep *Reporter) Continue() {
- lock.Lock()
- defer lock.Unlock()
- if rep == nil {
- return
- }
-
- if !rep.metricsCh.IsPaused() {
- return
- }
-
- rep.metricsCh.Continue()
-}
-
-// Client side metric handler names
-const (
- APICallMetricHandlerName = "awscsm.SendAPICallMetric"
- APICallAttemptMetricHandlerName = "awscsm.SendAPICallAttemptMetric"
-)
-
-// InjectHandlers will will enable client side metrics and inject the proper
-// handlers to handle how metrics are sent.
-//
-// InjectHandlers is NOT safe to call concurrently. Calling InjectHandlers
-// multiple times may lead to unexpected behavior, (e.g. duplicate metrics).
-//
-// // Start must be called in order to inject the correct handlers
-// r, err := csm.Start("clientID", "127.0.0.1:8094")
-// if err != nil {
-// panic(fmt.Errorf("expected no error, but received %v", err))
-// }
-//
-// sess := session.NewSession()
-// r.InjectHandlers(&sess.Handlers)
-//
-// // create a new service client with our client side metric session
-// svc := s3.New(sess)
-func (rep *Reporter) InjectHandlers(handlers *request.Handlers) {
- if rep == nil {
- return
- }
-
- handlers.Complete.PushFrontNamed(request.NamedHandler{
- Name: APICallMetricHandlerName,
- Fn: rep.sendAPICallMetric,
- })
-
- handlers.CompleteAttempt.PushFrontNamed(request.NamedHandler{
- Name: APICallAttemptMetricHandlerName,
- Fn: rep.sendAPICallAttemptMetric,
- })
-}
-
-// boolIntValue return 1 for true and 0 for false.
-func boolIntValue(b bool) int {
- if b {
- return 1
- }
-
- return 0
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
deleted file mode 100644
index 1ba80b576..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ /dev/null
@@ -1,252 +0,0 @@
-// Package defaults is a collection of helpers to retrieve the SDK's default
-// configuration and handlers.
-//
-// Generally this package shouldn't be used directly, but session.Session
-// instead. This package is useful when you need to reset the defaults
-// of a session or service client to the SDK defaults before setting
-// additional parameters.
-package defaults
-
-import (
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/corehandlers"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
- "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
- "github.com/aws/aws-sdk-go/aws/ec2metadata"
- "github.com/aws/aws-sdk-go/aws/endpoints"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/shareddefaults"
-)
-
-// A Defaults provides a collection of default values for SDK clients.
-type Defaults struct {
- Config *aws.Config
- Handlers request.Handlers
-}
-
-// Get returns the SDK's default values with Config and handlers pre-configured.
-func Get() Defaults {
- cfg := Config()
- handlers := Handlers()
- cfg.Credentials = CredChain(cfg, handlers)
-
- return Defaults{
- Config: cfg,
- Handlers: handlers,
- }
-}
-
-// Config returns the default configuration without credentials.
-// To retrieve a config with credentials also included use
-// `defaults.Get().Config` instead.
-//
-// Generally you shouldn't need to use this method directly, but
-// is available if you need to reset the configuration of an
-// existing service client or session.
-func Config() *aws.Config {
- return aws.NewConfig().
- WithCredentials(credentials.AnonymousCredentials).
- WithRegion(os.Getenv("AWS_REGION")).
- WithHTTPClient(http.DefaultClient).
- WithMaxRetries(aws.UseServiceDefaultRetries).
- WithLogger(aws.NewDefaultLogger()).
- WithLogLevel(aws.LogOff).
- WithEndpointResolver(endpoints.DefaultResolver())
-}
-
-// Handlers returns the default request handlers.
-//
-// Generally you shouldn't need to use this method directly, but
-// is available if you need to reset the request handlers of an
-// existing service client or session.
-func Handlers() request.Handlers {
- var handlers request.Handlers
-
- handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
- handlers.Validate.AfterEachFn = request.HandlerListStopOnError
- handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
- handlers.Build.PushBackNamed(corehandlers.AddAwsInternal)
- handlers.Build.PushBackNamed(corehandlers.AddHostExecEnvUserAgentHander)
- handlers.Build.AfterEachFn = request.HandlerListStopOnError
- handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
- handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
- handlers.Send.PushBackNamed(corehandlers.SendHandler)
- handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
- handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
-
- return handlers
-}
-
-// CredChain returns the default credential chain.
-//
-// Generally you shouldn't need to use this method directly, but
-// is available if you need to reset the credentials of an
-// existing service client or session's Config.
-func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
- return credentials.NewCredentials(&credentials.ChainProvider{
- VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
- Providers: CredProviders(cfg, handlers),
- })
-}
-
-// CredProviders returns the slice of providers used in
-// the default credential chain.
-//
-// For applications that need to use some other provider (for example use
-// different environment variables for legacy reasons) but still fall back
-// on the default chain of providers. This allows that default chaint to be
-// automatically updated
-func CredProviders(cfg *aws.Config, handlers request.Handlers) []credentials.Provider {
- return []credentials.Provider{
- &credentials.EnvProvider{},
- &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
- RemoteCredProvider(*cfg, handlers),
- }
-}
-
-const (
- httpProviderAuthorizationEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN"
- httpProviderAuthFileEnvVar = "AWS_CONTAINER_AUTHORIZATION_TOKEN_FILE"
- httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
-)
-
-// direct representation of the IPv4 address for the ECS container
-// "169.254.170.2"
-var ecsContainerIPv4 net.IP = []byte{
- 169, 254, 170, 2,
-}
-
-// direct representation of the IPv4 address for the EKS container
-// "169.254.170.23"
-var eksContainerIPv4 net.IP = []byte{
- 169, 254, 170, 23,
-}
-
-// direct representation of the IPv6 address for the EKS container
-// "fd00:ec2::23"
-var eksContainerIPv6 net.IP = []byte{
- 0xFD, 0, 0xE, 0xC2,
- 0, 0, 0, 0,
- 0, 0, 0, 0,
- 0, 0, 0, 0x23,
-}
-
-// RemoteCredProvider returns a credentials provider for the default remote
-// endpoints such as EC2 or ECS Roles.
-func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
- if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
- return localHTTPCredProvider(cfg, handlers, u)
- }
-
- if uri := os.Getenv(shareddefaults.ECSCredsProviderEnvVar); len(uri) > 0 {
- u := fmt.Sprintf("%s%s", shareddefaults.ECSContainerCredentialsURI, uri)
- return httpCredProvider(cfg, handlers, u)
- }
-
- return ec2RoleProvider(cfg, handlers)
-}
-
-var lookupHostFn = net.LookupHost
-
-// isAllowedHost allows host to be loopback or known ECS/EKS container IPs
-//
-// host can either be an IP address OR an unresolved hostname - resolution will
-// be automatically performed in the latter case
-func isAllowedHost(host string) (bool, error) {
- if ip := net.ParseIP(host); ip != nil {
- return isIPAllowed(ip), nil
- }
-
- addrs, err := lookupHostFn(host)
- if err != nil {
- return false, err
- }
-
- for _, addr := range addrs {
- if ip := net.ParseIP(addr); ip == nil || !isIPAllowed(ip) {
- return false, nil
- }
- }
-
- return true, nil
-}
-
-func isIPAllowed(ip net.IP) bool {
- return ip.IsLoopback() ||
- ip.Equal(ecsContainerIPv4) ||
- ip.Equal(eksContainerIPv4) ||
- ip.Equal(eksContainerIPv6)
-}
-
-func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
- var errMsg string
-
- parsed, err := url.Parse(u)
- if err != nil {
- errMsg = fmt.Sprintf("invalid URL, %v", err)
- } else {
- host := aws.URLHostname(parsed)
- if len(host) == 0 {
- errMsg = "unable to parse host from local HTTP cred provider URL"
- } else if parsed.Scheme == "http" {
- if isAllowedHost, allowHostErr := isAllowedHost(host); allowHostErr != nil {
- errMsg = fmt.Sprintf("failed to resolve host %q, %v", host, allowHostErr)
- } else if !isAllowedHost {
- errMsg = fmt.Sprintf("invalid endpoint host, %q, only loopback/ecs/eks hosts are allowed.", host)
- }
- }
- }
-
- if len(errMsg) > 0 {
- if cfg.Logger != nil {
- cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
- }
- return credentials.ErrorProvider{
- Err: awserr.New("CredentialsEndpointError", errMsg, err),
- ProviderName: endpointcreds.ProviderName,
- }
- }
-
- return httpCredProvider(cfg, handlers, u)
-}
-
-func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
- return endpointcreds.NewProviderClient(cfg, handlers, u,
- func(p *endpointcreds.Provider) {
- p.ExpiryWindow = 5 * time.Minute
- p.AuthorizationToken = os.Getenv(httpProviderAuthorizationEnvVar)
- if authFilePath := os.Getenv(httpProviderAuthFileEnvVar); authFilePath != "" {
- p.AuthorizationTokenProvider = endpointcreds.TokenProviderFunc(func() (string, error) {
- if contents, err := ioutil.ReadFile(authFilePath); err != nil {
- return "", fmt.Errorf("failed to read authorization token from %v: %v", authFilePath, err)
- } else {
- return string(contents), nil
- }
- })
- }
- },
- )
-}
-
-func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
- resolver := cfg.EndpointResolver
- if resolver == nil {
- resolver = endpoints.DefaultResolver()
- }
-
- e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
- return &ec2rolecreds.EC2RoleProvider{
- Client: ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
- ExpiryWindow: 5 * time.Minute,
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
deleted file mode 100644
index ca0ee1dcc..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package defaults
-
-import (
- "github.com/aws/aws-sdk-go/internal/shareddefaults"
-)
-
-// SharedCredentialsFilename returns the SDK's default file path
-// for the shared credentials file.
-//
-// Builds the shared config file path based on the OS's platform.
-//
-// - Linux/Unix: $HOME/.aws/credentials
-// - Windows: %USERPROFILE%\.aws\credentials
-func SharedCredentialsFilename() string {
- return shareddefaults.SharedCredentialsFilename()
-}
-
-// SharedConfigFilename returns the SDK's default file path for
-// the shared config file.
-//
-// Builds the shared config file path based on the OS's platform.
-//
-// - Linux/Unix: $HOME/.aws/config
-// - Windows: %USERPROFILE%\.aws\config
-func SharedConfigFilename() string {
- return shareddefaults.SharedConfigFilename()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
deleted file mode 100644
index 4fcb61618..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/doc.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Package aws provides the core SDK's utilities and shared types. Use this package's
-// utilities to simplify setting and reading API operations parameters.
-//
-// Value and Pointer Conversion Utilities
-//
-// This package includes a helper conversion utility for each scalar type the SDK's
-// API use. These utilities make getting a pointer of the scalar, and dereferencing
-// a pointer easier.
-//
-// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
-// The Pointer to value will safely dereference the pointer and return its value.
-// If the pointer was nil, the scalar's zero value will be returned.
-//
-// The value to pointer functions will be named after the scalar type. So get a
-// *string from a string value use the "String" function. This makes it easy to
-// to get pointer of a literal string value, because getting the address of a
-// literal requires assigning the value to a variable first.
-//
-// var strPtr *string
-//
-// // Without the SDK's conversion functions
-// str := "my string"
-// strPtr = &str
-//
-// // With the SDK's conversion functions
-// strPtr = aws.String("my string")
-//
-// // Convert *string to string value
-// str = aws.StringValue(strPtr)
-//
-// In addition to scalars the aws package also includes conversion utilities for
-// map and slice for commonly types used in API parameters. The map and slice
-// conversion functions use similar naming pattern as the scalar conversion
-// functions.
-//
-// var strPtrs []*string
-// var strs []string = []string{"Go", "Gophers", "Go"}
-//
-// // Convert []string to []*string
-// strPtrs = aws.StringSlice(strs)
-//
-// // Convert []*string to []string
-// strs = aws.StringValueSlice(strPtrs)
-//
-// SDK Default HTTP Client
-//
-// The SDK will use the http.DefaultClient if a HTTP client is not provided to
-// the SDK's Session, or service client constructor. This means that if the
-// http.DefaultClient is modified by other components of your application the
-// modifications will be picked up by the SDK as well.
-//
-// In some cases this might be intended, but it is a better practice to create
-// a custom HTTP Client to share explicitly through your application. You can
-// configure the SDK to use the custom HTTP Client by setting the HTTPClient
-// value of the SDK's Config type when creating a Session or service client.
-package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
deleted file mode 100644
index 69fa63dc0..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package ec2metadata
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/sdkuri"
-)
-
-// getToken uses the duration to return a token for EC2 metadata service,
-// or an error if the request failed.
-func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOutput, error) {
- op := &request.Operation{
- Name: "GetToken",
- HTTPMethod: "PUT",
- HTTPPath: "/latest/api/token",
- }
-
- var output tokenOutput
- req := c.NewRequest(op, nil, &output)
- req.SetContext(ctx)
-
- // remove the fetch token handler from the request handlers to avoid infinite recursion
- req.Handlers.Sign.RemoveByName(fetchTokenHandlerName)
-
- // Swap the unmarshalMetadataHandler with unmarshalTokenHandler on this request.
- req.Handlers.Unmarshal.Swap(unmarshalMetadataHandlerName, unmarshalTokenHandler)
-
- ttl := strconv.FormatInt(int64(duration/time.Second), 10)
- req.HTTPRequest.Header.Set(ttlHeader, ttl)
-
- err := req.Send()
-
- // Errors with bad request status should be returned.
- if err != nil {
- err = awserr.NewRequestFailure(
- awserr.New(req.HTTPResponse.Status, http.StatusText(req.HTTPResponse.StatusCode), err),
- req.HTTPResponse.StatusCode, req.RequestID)
- }
-
- return output, err
-}
-
-// GetMetadata uses the path provided to request information from the EC2
-// instance metadata service. The content will be returned as a string, or
-// error if the request failed.
-func (c *EC2Metadata) GetMetadata(p string) (string, error) {
- return c.GetMetadataWithContext(aws.BackgroundContext(), p)
-}
-
-// GetMetadataWithContext uses the path provided to request information from the EC2
-// instance metadata service. The content will be returned as a string, or
-// error if the request failed.
-func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string, error) {
- op := &request.Operation{
- Name: "GetMetadata",
- HTTPMethod: "GET",
- HTTPPath: sdkuri.PathJoin("/latest/meta-data", p),
- }
- output := &metadataOutput{}
-
- req := c.NewRequest(op, nil, output)
-
- req.SetContext(ctx)
-
- err := req.Send()
- return output.Content, err
-}
-
-// GetUserData returns the userdata that was configured for the service. If
-// there is no user-data setup for the EC2 instance a "NotFoundError" error
-// code will be returned.
-func (c *EC2Metadata) GetUserData() (string, error) {
- return c.GetUserDataWithContext(aws.BackgroundContext())
-}
-
-// GetUserDataWithContext returns the userdata that was configured for the service. If
-// there is no user-data setup for the EC2 instance a "NotFoundError" error
-// code will be returned.
-func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) {
- op := &request.Operation{
- Name: "GetUserData",
- HTTPMethod: "GET",
- HTTPPath: "/latest/user-data",
- }
-
- output := &metadataOutput{}
- req := c.NewRequest(op, nil, output)
- req.SetContext(ctx)
-
- err := req.Send()
- return output.Content, err
-}
-
-// GetDynamicData uses the path provided to request information from the EC2
-// instance metadata service for dynamic data. The content will be returned
-// as a string, or error if the request failed.
-func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
- return c.GetDynamicDataWithContext(aws.BackgroundContext(), p)
-}
-
-// GetDynamicDataWithContext uses the path provided to request information from the EC2
-// instance metadata service for dynamic data. The content will be returned
-// as a string, or error if the request failed.
-func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (string, error) {
- op := &request.Operation{
- Name: "GetDynamicData",
- HTTPMethod: "GET",
- HTTPPath: sdkuri.PathJoin("/latest/dynamic", p),
- }
-
- output := &metadataOutput{}
- req := c.NewRequest(op, nil, output)
- req.SetContext(ctx)
-
- err := req.Send()
- return output.Content, err
-}
-
-// GetInstanceIdentityDocument retrieves an identity document describing an
-// instance. Error is returned if the request fails or is unable to parse
-// the response.
-func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
- return c.GetInstanceIdentityDocumentWithContext(aws.BackgroundContext())
-}
-
-// GetInstanceIdentityDocumentWithContext retrieves an identity document describing an
-// instance. Error is returned if the request fails or is unable to parse
-// the response.
-func (c *EC2Metadata) GetInstanceIdentityDocumentWithContext(ctx aws.Context) (EC2InstanceIdentityDocument, error) {
- resp, err := c.GetDynamicDataWithContext(ctx, "instance-identity/document")
- if err != nil {
- return EC2InstanceIdentityDocument{},
- awserr.New("EC2MetadataRequestError",
- "failed to get EC2 instance identity document", err)
- }
-
- doc := EC2InstanceIdentityDocument{}
- if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
- return EC2InstanceIdentityDocument{},
- awserr.New(request.ErrCodeSerialization,
- "failed to decode EC2 instance identity document", err)
- }
-
- return doc, nil
-}
-
-// IAMInfo retrieves IAM info from the metadata API
-func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
- return c.IAMInfoWithContext(aws.BackgroundContext())
-}
-
-// IAMInfoWithContext retrieves IAM info from the metadata API
-func (c *EC2Metadata) IAMInfoWithContext(ctx aws.Context) (EC2IAMInfo, error) {
- resp, err := c.GetMetadataWithContext(ctx, "iam/info")
- if err != nil {
- return EC2IAMInfo{},
- awserr.New("EC2MetadataRequestError",
- "failed to get EC2 IAM info", err)
- }
-
- info := EC2IAMInfo{}
- if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
- return EC2IAMInfo{},
- awserr.New(request.ErrCodeSerialization,
- "failed to decode EC2 IAM info", err)
- }
-
- if info.Code != "Success" {
- errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
- return EC2IAMInfo{},
- awserr.New("EC2MetadataError", errMsg, nil)
- }
-
- return info, nil
-}
-
-// Region returns the region the instance is running in.
-func (c *EC2Metadata) Region() (string, error) {
- return c.RegionWithContext(aws.BackgroundContext())
-}
-
-// RegionWithContext returns the region the instance is running in.
-func (c *EC2Metadata) RegionWithContext(ctx aws.Context) (string, error) {
- ec2InstanceIdentityDocument, err := c.GetInstanceIdentityDocumentWithContext(ctx)
- if err != nil {
- return "", err
- }
- // extract region from the ec2InstanceIdentityDocument
- region := ec2InstanceIdentityDocument.Region
- if len(region) == 0 {
- return "", awserr.New("EC2MetadataError", "invalid region received for ec2metadata instance", nil)
- }
- // returns region
- return region, nil
-}
-
-// Available returns if the application has access to the EC2 Metadata service.
-// Can be used to determine if application is running within an EC2 Instance and
-// the metadata service is available.
-func (c *EC2Metadata) Available() bool {
- return c.AvailableWithContext(aws.BackgroundContext())
-}
-
-// AvailableWithContext returns if the application has access to the EC2 Metadata service.
-// Can be used to determine if application is running within an EC2 Instance and
-// the metadata service is available.
-func (c *EC2Metadata) AvailableWithContext(ctx aws.Context) bool {
- if _, err := c.GetMetadataWithContext(ctx, "instance-id"); err != nil {
- return false
- }
-
- return true
-}
-
-// An EC2IAMInfo provides the shape for unmarshaling
-// an IAM info from the metadata API
-type EC2IAMInfo struct {
- Code string
- LastUpdated time.Time
- InstanceProfileArn string
- InstanceProfileID string
-}
-
-// An EC2InstanceIdentityDocument provides the shape for unmarshaling
-// an instance identity document
-type EC2InstanceIdentityDocument struct {
- DevpayProductCodes []string `json:"devpayProductCodes"`
- MarketplaceProductCodes []string `json:"marketplaceProductCodes"`
- AvailabilityZone string `json:"availabilityZone"`
- PrivateIP string `json:"privateIp"`
- Version string `json:"version"`
- Region string `json:"region"`
- InstanceID string `json:"instanceId"`
- BillingProducts []string `json:"billingProducts"`
- InstanceType string `json:"instanceType"`
- AccountID string `json:"accountId"`
- PendingTime time.Time `json:"pendingTime"`
- ImageID string `json:"imageId"`
- KernelID string `json:"kernelId"`
- RamdiskID string `json:"ramdiskId"`
- Architecture string `json:"architecture"`
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
deleted file mode 100644
index f4cc8751d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Package ec2metadata provides the client for making API calls to the
-// EC2 Metadata service.
-//
-// This package's client can be disabled completely by setting the environment
-// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
-// true instructs the SDK to disable the EC2 Metadata client. The client cannot
-// be used while the environment variable is set to true, (case insensitive).
-//
-// The endpoint of the EC2 IMDS client can be configured via the environment
-// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
-// Session. See aws/session#Options.EC2IMDSEndpoint for more details.
-package ec2metadata
-
-import (
- "bytes"
- "io"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/corehandlers"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-const (
- // ServiceName is the name of the service.
- ServiceName = "ec2metadata"
- disableServiceEnvVar = "AWS_EC2_METADATA_DISABLED"
-
- // Headers for Token and TTL
- ttlHeader = "x-aws-ec2-metadata-token-ttl-seconds"
- tokenHeader = "x-aws-ec2-metadata-token"
-
- // Named Handler constants
- fetchTokenHandlerName = "FetchTokenHandler"
- unmarshalMetadataHandlerName = "unmarshalMetadataHandler"
- unmarshalTokenHandlerName = "unmarshalTokenHandler"
- enableTokenProviderHandlerName = "enableTokenProviderHandler"
-
- // TTL constants
- defaultTTL = 21600 * time.Second
- ttlExpirationWindow = 30 * time.Second
-)
-
-// A EC2Metadata is an EC2 Metadata service Client.
-type EC2Metadata struct {
- *client.Client
-}
-
-// New creates a new instance of the EC2Metadata client with a session.
-// This client is safe to use across multiple goroutines.
-//
-// Example:
-//
-// // Create a EC2Metadata client from just a session.
-// svc := ec2metadata.New(mySession)
-//
-// // Create a EC2Metadata client with additional configuration
-// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
- c := p.ClientConfig(ServiceName, cfgs...)
- return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
-}
-
-// NewClient returns a new EC2Metadata client. Should be used to create
-// a client when not using a session. Generally using just New with a session
-// is preferred.
-//
-// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS
-// client is able to communicate with the EC2 IMDS API.
-//
-// If an unmodified HTTP client is provided from the stdlib default, or no client
-// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
-// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
-func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
- if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
- // If the http client is unmodified and this feature is not disabled
- // set custom timeouts for EC2Metadata requests.
- cfg.HTTPClient = &http.Client{
- // use a shorter timeout than default because the metadata
- // service is local if it is running, and to fail faster
- // if not running on an ec2 instance.
- Timeout: 1 * time.Second,
- }
- // max number of retries on the client operation
- cfg.MaxRetries = aws.Int(2)
- }
-
- if u, err := url.Parse(endpoint); err == nil {
- // Remove path from the endpoint since it will be added by requests.
- // This is an artifact of the SDK adding `/latest` to the endpoint for
- // EC2 IMDS, but this is now moved to the operation definition.
- u.Path = ""
- u.RawPath = ""
- endpoint = u.String()
- }
-
- svc := &EC2Metadata{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: ServiceName,
- ServiceID: ServiceName,
- Endpoint: endpoint,
- APIVersion: "latest",
- },
- handlers,
- ),
- }
-
- // token provider instance
- tp := newTokenProvider(svc, defaultTTL)
-
- // NamedHandler for fetching token
- svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
- Name: fetchTokenHandlerName,
- Fn: tp.fetchTokenHandler,
- })
- // NamedHandler for enabling token provider
- svc.Handlers.Complete.PushBackNamed(request.NamedHandler{
- Name: enableTokenProviderHandlerName,
- Fn: tp.enableTokenProviderHandler,
- })
-
- svc.Handlers.Unmarshal.PushBackNamed(unmarshalHandler)
- svc.Handlers.UnmarshalError.PushBack(unmarshalError)
- svc.Handlers.Validate.Clear()
- svc.Handlers.Validate.PushBack(validateEndpointHandler)
-
- // Disable the EC2 Metadata service if the environment variable is set.
- // This short-circuits the service's functionality to always fail to send
- // requests.
- if strings.ToLower(os.Getenv(disableServiceEnvVar)) == "true" {
- svc.Handlers.Send.SwapNamed(request.NamedHandler{
- Name: corehandlers.SendHandler.Name,
- Fn: func(r *request.Request) {
- r.HTTPResponse = &http.Response{
- Header: http.Header{},
- }
- r.Error = awserr.New(
- request.CanceledErrorCode,
- "EC2 IMDS access disabled via "+disableServiceEnvVar+" env var",
- nil)
- },
- })
- }
-
- // Add additional options to the service config
- for _, option := range opts {
- option(svc.Client)
- }
- return svc
-}
-
-func httpClientZero(c *http.Client) bool {
- return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
-}
-
-type metadataOutput struct {
- Content string
-}
-
-type tokenOutput struct {
- Token string
- TTL time.Duration
-}
-
-// unmarshal token handler is used to parse the response of a getToken operation
-var unmarshalTokenHandler = request.NamedHandler{
- Name: unmarshalTokenHandlerName,
- Fn: func(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
- var b bytes.Buffer
- if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
- r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
- "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
- return
- }
-
- v := r.HTTPResponse.Header.Get(ttlHeader)
- data, ok := r.Data.(*tokenOutput)
- if !ok {
- return
- }
-
- data.Token = b.String()
- // TTL is in seconds
- i, err := strconv.ParseInt(v, 10, 64)
- if err != nil {
- r.Error = awserr.NewRequestFailure(awserr.New(request.ParamFormatErrCode,
- "unable to parse EC2 token TTL response", err), r.HTTPResponse.StatusCode, r.RequestID)
- return
- }
- t := time.Duration(i) * time.Second
- data.TTL = t
- },
-}
-
-var unmarshalHandler = request.NamedHandler{
- Name: unmarshalMetadataHandlerName,
- Fn: func(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
- var b bytes.Buffer
- if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
- r.Error = awserr.NewRequestFailure(awserr.New(request.ErrCodeSerialization,
- "unable to unmarshal EC2 metadata response", err), r.HTTPResponse.StatusCode, r.RequestID)
- return
- }
-
- if data, ok := r.Data.(*metadataOutput); ok {
- data.Content = b.String()
- }
- },
-}
-
-func unmarshalError(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
- var b bytes.Buffer
-
- if _, err := io.Copy(&b, r.HTTPResponse.Body); err != nil {
- r.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization, "unable to unmarshal EC2 metadata error response", err),
- r.HTTPResponse.StatusCode, r.RequestID)
- return
- }
-
- // Response body format is not consistent between metadata endpoints.
- // Grab the error message as a string and include that as the source error
- r.Error = awserr.NewRequestFailure(
- awserr.New("EC2MetadataError", "failed to make EC2Metadata request\n"+b.String(), nil),
- r.HTTPResponse.StatusCode, r.RequestID)
-}
-
-func validateEndpointHandler(r *request.Request) {
- if r.ClientInfo.Endpoint == "" {
- r.Error = aws.ErrMissingEndpoint
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
deleted file mode 100644
index f1f9ba4ec..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/token_provider.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package ec2metadata
-
-import (
- "fmt"
- "github.com/aws/aws-sdk-go/aws"
- "net/http"
- "sync/atomic"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// A tokenProvider struct provides access to EC2Metadata client
-// and atomic instance of a token, along with configuredTTL for it.
-// tokenProvider also provides an atomic flag to disable the
-// fetch token operation.
-// The disabled member will use 0 as false, and 1 as true.
-type tokenProvider struct {
- client *EC2Metadata
- token atomic.Value
- configuredTTL time.Duration
- disabled uint32
-}
-
-// A ec2Token struct helps use of token in EC2 Metadata service ops
-type ec2Token struct {
- token string
- credentials.Expiry
-}
-
-// newTokenProvider provides a pointer to a tokenProvider instance
-func newTokenProvider(c *EC2Metadata, duration time.Duration) *tokenProvider {
- return &tokenProvider{client: c, configuredTTL: duration}
-}
-
-// check if fallback is enabled
-func (t *tokenProvider) fallbackEnabled() bool {
- return t.client.Config.EC2MetadataEnableFallback == nil || *t.client.Config.EC2MetadataEnableFallback
-}
-
-// fetchTokenHandler fetches token for EC2Metadata service client by default.
-func (t *tokenProvider) fetchTokenHandler(r *request.Request) {
- // short-circuits to insecure data flow if tokenProvider is disabled.
- if v := atomic.LoadUint32(&t.disabled); v == 1 && t.fallbackEnabled() {
- return
- }
-
- if ec2Token, ok := t.token.Load().(ec2Token); ok && !ec2Token.IsExpired() {
- r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
- return
- }
-
- output, err := t.client.getToken(r.Context(), t.configuredTTL)
-
- if err != nil {
- // only attempt fallback to insecure data flow if IMDSv1 is enabled
- if !t.fallbackEnabled() {
- r.Error = awserr.New("EC2MetadataError", "failed to get IMDSv2 token and fallback to IMDSv1 is disabled", err)
- return
- }
-
- // change the disabled flag on token provider to true and fallback
- if requestFailureError, ok := err.(awserr.RequestFailure); ok {
- switch requestFailureError.StatusCode() {
- case http.StatusForbidden, http.StatusNotFound, http.StatusMethodNotAllowed:
- atomic.StoreUint32(&t.disabled, 1)
- if t.client.Config.LogLevel.Matches(aws.LogDebugWithDeprecated) {
- t.client.Config.Logger.Log(fmt.Sprintf("WARN: failed to get session token, falling back to IMDSv1: %v", requestFailureError))
- }
- case http.StatusBadRequest:
- r.Error = requestFailureError
- }
- }
- return
- }
-
- newToken := ec2Token{
- token: output.Token,
- }
- newToken.SetExpiration(time.Now().Add(output.TTL), ttlExpirationWindow)
- t.token.Store(newToken)
-
- // Inject token header to the request.
- if ec2Token, ok := t.token.Load().(ec2Token); ok {
- r.HTTPRequest.Header.Set(tokenHeader, ec2Token.token)
- }
-}
-
-// enableTokenProviderHandler enables the token provider
-func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
- // If the error code status is 401, we enable the token provider
- if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
- e.StatusCode() == http.StatusUnauthorized {
- t.token.Store(ec2Token{})
- atomic.StoreUint32(&t.disabled, 0)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
deleted file mode 100644
index cad3b9a48..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package endpoints
-
-import (
- "encoding/json"
- "fmt"
- "io"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-type modelDefinition map[string]json.RawMessage
-
-// A DecodeModelOptions are the options for how the endpoints model definition
-// are decoded.
-type DecodeModelOptions struct {
- SkipCustomizations bool
-}
-
-// Set combines all of the option functions together.
-func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
- for _, fn := range optFns {
- fn(d)
- }
-}
-
-// DecodeModel unmarshals a Regions and Endpoint model definition file into
-// a endpoint Resolver. If the file format is not supported, or an error occurs
-// when unmarshaling the model an error will be returned.
-//
-// Casting the return value of this func to a EnumPartitions will
-// allow you to get a list of the partitions in the order the endpoints
-// will be resolved in.
-//
-// resolver, err := endpoints.DecodeModel(reader)
-//
-// partitions := resolver.(endpoints.EnumPartitions).Partitions()
-// for _, p := range partitions {
-// // ... inspect partitions
-// }
-func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
- var opts DecodeModelOptions
- opts.Set(optFns...)
-
- // Get the version of the partition file to determine what
- // unmarshaling model to use.
- modelDef := modelDefinition{}
- if err := json.NewDecoder(r).Decode(&modelDef); err != nil {
- return nil, newDecodeModelError("failed to decode endpoints model", err)
- }
-
- var version string
- if b, ok := modelDef["version"]; ok {
- version = string(b)
- } else {
- return nil, newDecodeModelError("endpoints version not found in model", nil)
- }
-
- if version == "3" {
- return decodeV3Endpoints(modelDef, opts)
- }
-
- return nil, newDecodeModelError(
- fmt.Sprintf("endpoints version %s, not supported", version), nil)
-}
-
-func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) {
- b, ok := modelDef["partitions"]
- if !ok {
- return nil, newDecodeModelError("endpoints model missing partitions", nil)
- }
-
- ps := partitions{}
- if err := json.Unmarshal(b, &ps); err != nil {
- return nil, newDecodeModelError("failed to decode endpoints model", err)
- }
-
- if opts.SkipCustomizations {
- return ps, nil
- }
-
- // Customization
- for i := 0; i < len(ps); i++ {
- p := &ps[i]
- custRegionalS3(p)
- custRmIotDataService(p)
- custFixAppAutoscalingChina(p)
- custFixAppAutoscalingUsGov(p)
- }
-
- return ps, nil
-}
-
-func custRegionalS3(p *partition) {
- if p.ID != "aws" {
- return
- }
-
- service, ok := p.Services["s3"]
- if !ok {
- return
- }
-
- const awsGlobal = "aws-global"
- const usEast1 = "us-east-1"
-
- // If global endpoint already exists no customization needed.
- if _, ok := service.Endpoints[endpointKey{Region: awsGlobal}]; ok {
- return
- }
-
- service.PartitionEndpoint = awsGlobal
- if _, ok := service.Endpoints[endpointKey{Region: usEast1}]; !ok {
- service.Endpoints[endpointKey{Region: usEast1}] = endpoint{}
- }
- service.Endpoints[endpointKey{Region: awsGlobal}] = endpoint{
- Hostname: "s3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: usEast1,
- },
- }
-
- p.Services["s3"] = service
-}
-
-func custRmIotDataService(p *partition) {
- delete(p.Services, "data.iot")
-}
-
-func custFixAppAutoscalingChina(p *partition) {
- if p.ID != "aws-cn" {
- return
- }
-
- const serviceName = "application-autoscaling"
- s, ok := p.Services[serviceName]
- if !ok {
- return
- }
-
- const expectHostname = `autoscaling.{region}.amazonaws.com`
- serviceDefault := s.Defaults[defaultKey{}]
- if e, a := expectHostname, serviceDefault.Hostname; e != a {
- fmt.Printf("custFixAppAutoscalingChina: ignoring customization, expected %s, got %s\n", e, a)
- return
- }
- serviceDefault.Hostname = expectHostname + ".cn"
- s.Defaults[defaultKey{}] = serviceDefault
- p.Services[serviceName] = s
-}
-
-func custFixAppAutoscalingUsGov(p *partition) {
- if p.ID != "aws-us-gov" {
- return
- }
-
- const serviceName = "application-autoscaling"
- s, ok := p.Services[serviceName]
- if !ok {
- return
- }
-
- serviceDefault := s.Defaults[defaultKey{}]
- if a := serviceDefault.CredentialScope.Service; a != "" {
- fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty credential scope service, got %s\n", a)
- return
- }
-
- if a := serviceDefault.Hostname; a != "" {
- fmt.Printf("custFixAppAutoscalingUsGov: ignoring customization, expected empty hostname, got %s\n", a)
- return
- }
-
- serviceDefault.CredentialScope.Service = "application-autoscaling"
- serviceDefault.Hostname = "autoscaling.{region}.amazonaws.com"
-
- if s.Defaults == nil {
- s.Defaults = make(endpointDefaults)
- }
-
- s.Defaults[defaultKey{}] = serviceDefault
-
- p.Services[serviceName] = s
-}
-
-type decodeModelError struct {
- awsError
-}
-
-func newDecodeModelError(msg string, err error) decodeModelError {
- return decodeModelError{
- awsError: awserr.New("DecodeEndpointsModelError", msg, err),
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
deleted file mode 100644
index c3516e018..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ /dev/null
@@ -1,48609 +0,0 @@
-// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
-
-package endpoints
-
-import (
- "regexp"
-)
-
-// Partition identifiers
-const (
- AwsPartitionID = "aws" // AWS Standard partition.
- AwsCnPartitionID = "aws-cn" // AWS China partition.
- AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition.
- AwsIsoPartitionID = "aws-iso" // AWS ISO (US) partition.
- AwsIsoBPartitionID = "aws-iso-b" // AWS ISOB (US) partition.
- AwsIsoEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition.
- AwsIsoFPartitionID = "aws-iso-f" // AWS ISOF partition.
-)
-
-// AWS Standard partition's regions.
-const (
- AfSouth1RegionID = "af-south-1" // Africa (Cape Town).
- ApEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong).
- ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo).
- ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul).
- ApNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka).
- ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai).
- ApSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad).
- ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore).
- ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney).
- ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta).
- ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne).
- CaCentral1RegionID = "ca-central-1" // Canada (Central).
- CaWest1RegionID = "ca-west-1" // Canada West (Calgary).
- EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt).
- EuCentral2RegionID = "eu-central-2" // Europe (Zurich).
- EuNorth1RegionID = "eu-north-1" // Europe (Stockholm).
- EuSouth1RegionID = "eu-south-1" // Europe (Milan).
- EuSouth2RegionID = "eu-south-2" // Europe (Spain).
- EuWest1RegionID = "eu-west-1" // Europe (Ireland).
- EuWest2RegionID = "eu-west-2" // Europe (London).
- EuWest3RegionID = "eu-west-3" // Europe (Paris).
- IlCentral1RegionID = "il-central-1" // Israel (Tel Aviv).
- MeCentral1RegionID = "me-central-1" // Middle East (UAE).
- MeSouth1RegionID = "me-south-1" // Middle East (Bahrain).
- SaEast1RegionID = "sa-east-1" // South America (Sao Paulo).
- UsEast1RegionID = "us-east-1" // US East (N. Virginia).
- UsEast2RegionID = "us-east-2" // US East (Ohio).
- UsWest1RegionID = "us-west-1" // US West (N. California).
- UsWest2RegionID = "us-west-2" // US West (Oregon).
-)
-
-// AWS China partition's regions.
-const (
- CnNorth1RegionID = "cn-north-1" // China (Beijing).
- CnNorthwest1RegionID = "cn-northwest-1" // China (Ningxia).
-)
-
-// AWS GovCloud (US) partition's regions.
-const (
- UsGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East).
- UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West).
-)
-
-// AWS ISO (US) partition's regions.
-const (
- UsIsoEast1RegionID = "us-iso-east-1" // US ISO East.
- UsIsoWest1RegionID = "us-iso-west-1" // US ISO WEST.
-)
-
-// AWS ISOB (US) partition's regions.
-const (
- UsIsobEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio).
-)
-
-// AWS ISOE (Europe) partition's regions.
-const (
- EuIsoeWest1RegionID = "eu-isoe-west-1" // EU ISOE West.
-)
-
-// AWS ISOF partition's regions.
-const ()
-
-// DefaultResolver returns an Endpoint resolver that will be able
-// to resolve endpoints for: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF.
-//
-// Use DefaultPartitions() to get the list of the default partitions.
-func DefaultResolver() Resolver {
- return defaultPartitions
-}
-
-// DefaultPartitions returns a list of the partitions the SDK is bundled
-// with. The available partitions are: AWS Standard, AWS China, AWS GovCloud (US), AWS ISO (US), AWS ISOB (US), AWS ISOE (Europe), and AWS ISOF.
-//
-// partitions := endpoints.DefaultPartitions
-// for _, p := range partitions {
-// // ... inspect partitions
-// }
-func DefaultPartitions() []Partition {
- return defaultPartitions.Partitions()
-}
-
-var defaultPartitions = partitions{
- awsPartition,
- awscnPartition,
- awsusgovPartition,
- awsisoPartition,
- awsisobPartition,
- awsisoePartition,
- awsisofPartition,
-}
-
-// AwsPartition returns the Resolver for AWS Standard.
-func AwsPartition() Partition {
- return awsPartition.Partition()
-}
-
-var awsPartition = partition{
- ID: "aws",
- Name: "AWS Standard",
- DNSSuffix: "amazonaws.com",
- RegionRegex: regionRegex{
- Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^(us|eu|ap|sa|ca|me|af|il)\\-\\w+\\-\\d+$")
- return reg
- }(),
- },
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- },
- Regions: regions{
- "af-south-1": region{
- Description: "Africa (Cape Town)",
- },
- "ap-east-1": region{
- Description: "Asia Pacific (Hong Kong)",
- },
- "ap-northeast-1": region{
- Description: "Asia Pacific (Tokyo)",
- },
- "ap-northeast-2": region{
- Description: "Asia Pacific (Seoul)",
- },
- "ap-northeast-3": region{
- Description: "Asia Pacific (Osaka)",
- },
- "ap-south-1": region{
- Description: "Asia Pacific (Mumbai)",
- },
- "ap-south-2": region{
- Description: "Asia Pacific (Hyderabad)",
- },
- "ap-southeast-1": region{
- Description: "Asia Pacific (Singapore)",
- },
- "ap-southeast-2": region{
- Description: "Asia Pacific (Sydney)",
- },
- "ap-southeast-3": region{
- Description: "Asia Pacific (Jakarta)",
- },
- "ap-southeast-4": region{
- Description: "Asia Pacific (Melbourne)",
- },
- "ca-central-1": region{
- Description: "Canada (Central)",
- },
- "ca-west-1": region{
- Description: "Canada West (Calgary)",
- },
- "eu-central-1": region{
- Description: "Europe (Frankfurt)",
- },
- "eu-central-2": region{
- Description: "Europe (Zurich)",
- },
- "eu-north-1": region{
- Description: "Europe (Stockholm)",
- },
- "eu-south-1": region{
- Description: "Europe (Milan)",
- },
- "eu-south-2": region{
- Description: "Europe (Spain)",
- },
- "eu-west-1": region{
- Description: "Europe (Ireland)",
- },
- "eu-west-2": region{
- Description: "Europe (London)",
- },
- "eu-west-3": region{
- Description: "Europe (Paris)",
- },
- "il-central-1": region{
- Description: "Israel (Tel Aviv)",
- },
- "me-central-1": region{
- Description: "Middle East (UAE)",
- },
- "me-south-1": region{
- Description: "Middle East (Bahrain)",
- },
- "sa-east-1": region{
- Description: "South America (Sao Paulo)",
- },
- "us-east-1": region{
- Description: "US East (N. Virginia)",
- },
- "us-east-2": region{
- Description: "US East (Ohio)",
- },
- "us-west-1": region{
- Description: "US West (N. California)",
- },
- "us-west-2": region{
- Description: "US West (Oregon)",
- },
- },
- Services: services{
- "access-analyzer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "access-analyzer-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "access-analyzer-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "access-analyzer-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "access-analyzer-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "access-analyzer-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "access-analyzer-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "access-analyzer-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "access-analyzer-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "access-analyzer-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "account": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "account.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "acm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "acm-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
- Hostname: "acm-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "acm-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "acm-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "acm-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "acm-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "acm-pca": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "acm-pca-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "acm-pca-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "acm-pca-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "acm-pca-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "acm-pca-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "acm-pca-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "agreement-marketplace": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "airflow": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "amplify": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "amplifybackend": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "amplifyuibuilder": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "aoss": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "api.detective": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "api.detective-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.detective-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "api.detective-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.detective-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "api.detective-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.detective-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "api.detective-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.detective-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "api.detective-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "api.ecr": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "api.ecr.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "api.ecr.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "api.ecr.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "api.ecr.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "api.ecr.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "api.ecr.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "api.ecr.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "api.ecr.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "api.ecr.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "api.ecr.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "api.ecr.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "api.ecr.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "api.ecr.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "dkr-us-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-east-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-west-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "api.ecr.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "api.ecr.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "api.ecr.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "api.ecr.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "api.ecr.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "api.ecr.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "api.ecr.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "api.ecr.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "fips-dkr-us-east-1",
- }: endpoint{
- Hostname: "ecr-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-dkr-us-east-2",
- }: endpoint{
- Hostname: "ecr-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-dkr-us-west-1",
- }: endpoint{
- Hostname: "ecr-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-dkr-us-west-2",
- }: endpoint{
- Hostname: "ecr-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ecr-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ecr-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ecr-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ecr-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "api.ecr.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "api.ecr.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "api.ecr.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "api.ecr.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "api.ecr.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "api.ecr.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "api.ecr.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "api.ecr.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "api.ecr-public": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "api.ecr-public.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "api.ecr-public.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "api.elastic-inference": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "api.elastic-inference.ap-northeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "api.elastic-inference.ap-northeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "api.elastic-inference.eu-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "api.elastic-inference.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "api.elastic-inference.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "api.elastic-inference.us-west-2.amazonaws.com",
- },
- },
- },
- "api.fleethub.iot": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "api.fleethub.iot-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.fleethub.iot-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.fleethub.iot-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.fleethub.iot-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "api.iotdeviceadvisor": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "api.iotdeviceadvisor.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "api.iotdeviceadvisor.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "api.iotdeviceadvisor.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "api.iotdeviceadvisor.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "api.iotwireless": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "api.iotwireless.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "api.iotwireless.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "api.iotwireless.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "api.iotwireless.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "api.iotwireless.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "api.mediatailor": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "api.pricing": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "pricing",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "api.sagemaker": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "api.tunneling.iot": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "apigateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apigateway-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apigateway-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "apigateway-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "apigateway-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "apigateway-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "apigateway-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "apigateway-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "apigateway-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apigateway-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apigateway-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apigateway-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apigateway-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "app-integrations": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "appconfig": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "appconfigdata": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "appflow": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "appflow-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "appflow-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "appflow-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "appflow-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appflow-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appflow-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appflow-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appflow-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "application-autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "applicationinsights": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "appmesh": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appmesh-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "appmesh-fips.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "appmesh-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appmesh-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "appmesh-fips.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "appmesh-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appmesh-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "appmesh-fips.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "appmesh-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appmesh-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "appmesh-fips.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "appmesh-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appmesh-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "appmesh-fips.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "appmesh-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "apprunner": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "apprunner-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "apprunner-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "apprunner-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apprunner-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apprunner-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "apprunner-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "appstream2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- CredentialScope: credentialScope{
- Service: "appstream",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "appstream2-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appstream2-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "appstream2-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appstream2-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "appstream2-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "appsync": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "aps": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "arc-zonal-shift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "athena": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "athena-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "athena-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "athena-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "athena-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "athena-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "athena-fips.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "athena-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "athena-fips.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "athena-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "athena-fips.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "athena-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "athena-fips.us-west-2.api.aws",
- },
- },
- },
- "auditmanager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "auditmanager-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "auditmanager-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "auditmanager-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "auditmanager-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "auditmanager-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "auditmanager-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "auditmanager-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "auditmanager-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "autoscaling-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "autoscaling-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "autoscaling-plans": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "backup": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "backup-gateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "batch": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.batch.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "fips.batch.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "fips.batch.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "fips.batch.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "fips.batch.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.batch.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.batch.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.batch.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.batch.us-west-2.amazonaws.com",
- },
- },
- },
- "bedrock": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "bedrock-ap-northeast-1",
- }: endpoint{
- Hostname: "bedrock.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "bedrock-ap-south-1",
- }: endpoint{
- Hostname: "bedrock.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "bedrock-ap-southeast-1",
- }: endpoint{
- Hostname: "bedrock.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "bedrock-ap-southeast-2",
- }: endpoint{
- Hostname: "bedrock.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "bedrock-ca-central-1",
- }: endpoint{
- Hostname: "bedrock.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "bedrock-eu-central-1",
- }: endpoint{
- Hostname: "bedrock.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "bedrock-eu-west-1",
- }: endpoint{
- Hostname: "bedrock.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "bedrock-eu-west-2",
- }: endpoint{
- Hostname: "bedrock.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "bedrock-eu-west-3",
- }: endpoint{
- Hostname: "bedrock.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "bedrock-fips-ca-central-1",
- }: endpoint{
- Hostname: "bedrock-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "bedrock-fips-us-east-1",
- }: endpoint{
- Hostname: "bedrock-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "bedrock-fips-us-west-2",
- }: endpoint{
- Hostname: "bedrock-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-ap-northeast-1",
- }: endpoint{
- Hostname: "bedrock-runtime.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-ap-south-1",
- }: endpoint{
- Hostname: "bedrock-runtime.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-ap-southeast-1",
- }: endpoint{
- Hostname: "bedrock-runtime.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-ap-southeast-2",
- }: endpoint{
- Hostname: "bedrock-runtime.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-ca-central-1",
- }: endpoint{
- Hostname: "bedrock-runtime.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-eu-central-1",
- }: endpoint{
- Hostname: "bedrock-runtime.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-eu-west-1",
- }: endpoint{
- Hostname: "bedrock-runtime.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-eu-west-2",
- }: endpoint{
- Hostname: "bedrock-runtime.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-eu-west-3",
- }: endpoint{
- Hostname: "bedrock-runtime.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-fips-ca-central-1",
- }: endpoint{
- Hostname: "bedrock-runtime-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-fips-us-east-1",
- }: endpoint{
- Hostname: "bedrock-runtime-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-fips-us-west-2",
- }: endpoint{
- Hostname: "bedrock-runtime-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-sa-east-1",
- }: endpoint{
- Hostname: "bedrock-runtime.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-us-east-1",
- }: endpoint{
- Hostname: "bedrock-runtime.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-us-west-2",
- }: endpoint{
- Hostname: "bedrock-runtime.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "bedrock-sa-east-1",
- }: endpoint{
- Hostname: "bedrock.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "bedrock-us-east-1",
- }: endpoint{
- Hostname: "bedrock.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "bedrock-us-west-2",
- }: endpoint{
- Hostname: "bedrock.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "billingconductor": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "billingconductor.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "braket": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "budgets": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "budgets.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "cases": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{},
- },
- },
- "cassandra": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "cassandra-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "cassandra-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cassandra-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cassandra-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "catalog.marketplace": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "ce": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "ce.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "chime": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "chime.us-east-1.amazonaws.com",
- Protocols: []string{"https"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "cleanrooms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "cloud9": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloud9-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloud9-fips.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "cloud9-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "cloud9-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "cloud9-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "cloud9-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "cloud9-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloud9-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloud9-fips.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloud9-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloud9-fips.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloud9-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloud9-fips.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloud9-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloud9-fips.us-west-2.api.aws",
- },
- },
- },
- "cloudcontrolapi": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-west-2.api.aws",
- },
- },
- },
- "clouddirectory": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "cloudformation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudformation-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "cloudformation-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudformation-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "cloudformation-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudformation-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "cloudformation-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudformation-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "cloudformation-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "cloudfront": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "cloudfront.amazonaws.com",
- Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "cloudhsm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "cloudhsmv2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "cloudhsm",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "cloudsearch": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "cloudtrail": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "cloudtrail-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "cloudtrail-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "cloudtrail-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "cloudtrail-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudtrail-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudtrail-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudtrail-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudtrail-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "cloudtrail-data": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "codeartifact": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "codebuild": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codebuild-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "codebuild-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codebuild-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "codebuild-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codebuild-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "codebuild-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codebuild-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "codebuild-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "codecatalyst": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "codecatalyst.global.api.aws",
- },
- },
- },
- "codecommit": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "codecommit-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codecommit-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "codecommit-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codecommit-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "codecommit-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codecommit-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "codecommit-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codecommit-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "codecommit-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "codedeploy": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "codedeploy-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "codedeploy-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "codedeploy-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "codedeploy-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "codeguru-reviewer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "codepipeline": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codepipeline-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "codepipeline-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "codepipeline-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "codepipeline-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "codepipeline-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "codepipeline-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codepipeline-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codepipeline-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codepipeline-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codepipeline-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "codestar": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "codestar-connections": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "codestar-notifications": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "cognito-identity": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "cognito-identity-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "cognito-identity-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "cognito-identity-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "cognito-identity-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-identity-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-identity-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-identity-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-identity-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "cognito-idp": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "cognito-idp-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "cognito-idp-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "cognito-idp-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "cognito-idp-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-idp-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-idp-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-idp-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-idp-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "cognito-sync": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "comprehend": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "comprehend-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "comprehend-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "comprehend-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehend-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehend-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehend-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "comprehendmedical": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "comprehendmedical-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehendmedical-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehendmedical-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehendmedical-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "compute-optimizer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "compute-optimizer.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "compute-optimizer.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "compute-optimizer.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "compute-optimizer.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "compute-optimizer.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "compute-optimizer.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "compute-optimizer.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "compute-optimizer.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "compute-optimizer.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "compute-optimizer.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "compute-optimizer.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "compute-optimizer.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "compute-optimizer.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "compute-optimizer.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "compute-optimizer.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "compute-optimizer.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "compute-optimizer.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "compute-optimizer.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "compute-optimizer.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "compute-optimizer.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "compute-optimizer.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "compute-optimizer.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "compute-optimizer.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "compute-optimizer.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "compute-optimizer.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "compute-optimizer.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "compute-optimizer.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "compute-optimizer.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "config": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "config-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "config-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "config-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "config-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "config-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "config-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "config-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "config-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "connect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "connect-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "connect-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "connect-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "connect-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "connect-campaigns": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "connect-campaigns-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "connect-campaigns-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "contact-lens": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "controltower": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "controltower-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "controltower-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "controltower-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
- Hostname: "controltower-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "controltower-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "controltower-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "controltower-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "controltower-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "controltower-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "controltower-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "controltower-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "controltower-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "cost-optimization-hub": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "cost-optimization-hub.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "cur": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "data-ats.iot": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iot-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "data.iot-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "data.iot-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "data.iot-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "data.iot-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "data.iot-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iot-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iot-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iot-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iot-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "data.jobs.iot": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "data.jobs.iot-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "data.mediastore": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "databrew": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "databrew-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "databrew-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "databrew-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "databrew-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "databrew-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "databrew-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "databrew-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "databrew-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "dataexchange": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "datapipeline": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "datasync": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "datasync-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "datasync-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "datasync-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "datasync-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "datasync-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "datasync-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "datazone": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "datazone.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "datazone.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "datazone.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "datazone.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "datazone.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "datazone.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "datazone.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "datazone.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "datazone.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "datazone.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "datazone.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "datazone.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datazone-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "datazone.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "datazone.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "datazone.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "datazone.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "datazone.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "datazone.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "datazone.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "datazone.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "datazone.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "datazone.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "datazone.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "datazone.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "datazone.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "datazone.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datazone-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "datazone.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datazone-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "datazone.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "datazone.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datazone-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "dax": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "devicefarm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "devops-guru": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "devops-guru-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "devops-guru-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "devops-guru-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "devops-guru-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "devops-guru-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "devops-guru-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "devops-guru-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "devops-guru-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "devops-guru-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "devops-guru-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "directconnect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "directconnect-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "directconnect-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "directconnect-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "directconnect-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "directconnect-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "directconnect-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "directconnect-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "directconnect-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "directconnect-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "directconnect-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "directconnect-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "directconnect-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "discovery": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "dlm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "dms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "dms",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dms",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dms-fips",
- }: endpoint{
- Hostname: "dms-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "dms-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "dms-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "dms-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "dms-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "docdb": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "rds.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "rds.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "rds.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "rds.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "rds.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "rds.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "rds.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "rds.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "rds.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "rds.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "rds.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "rds.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "rds.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "rds.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "drs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "drs-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "drs-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "drs-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "drs-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "drs-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "drs-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "drs-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "drs-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "ds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ds-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "ds-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ds-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ds-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ds-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ds-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "local",
- }: endpoint{
- Hostname: "localhost:8000",
- Protocols: []string{"http"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "dynamodb-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "ebs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ebs-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ebs-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ebs-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "ebs-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ebs-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ebs-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ebs-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ebs-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ebs-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ebs-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ebs-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ebs-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "ec2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ec2-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ec2-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ec2-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "ec2-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ec2-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ec2-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ec2-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ec2-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ec2-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ec2-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ec2-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ec2-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "ecs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ecs-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ecs-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ecs-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ecs-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecs-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecs-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecs-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecs-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "edge.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "eks": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.eks.{region}.{dnsSuffix}",
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "fips.eks.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "fips.eks.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "fips.eks.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "fips.eks.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.eks.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.eks.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.eks.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.eks.us-west-2.amazonaws.com",
- },
- },
- },
- "eks-auth": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "eks-auth.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "eks-auth.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "eks-auth.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "eks-auth.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "eks-auth.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "eks-auth.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "eks-auth.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "eks-auth.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "eks-auth.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "eks-auth.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "eks-auth.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "eks-auth.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "eks-auth.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "eks-auth.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "eks-auth.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "eks-auth.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "eks-auth.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "eks-auth.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "eks-auth.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "eks-auth.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "eks-auth.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "eks-auth.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "eks-auth.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "eks-auth.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "eks-auth.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "eks-auth.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "eks-auth.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "eks-auth.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "eks-auth.us-west-2.api.aws",
- },
- },
- },
- "elasticache": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "elasticache-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticache-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "elasticache-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticache-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "elasticache-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticache-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "elasticache-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticache-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "elasticache-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "elasticbeanstalk": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticbeanstalk-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticbeanstalk-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticbeanstalk-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticbeanstalk-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "elasticfilesystem": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
- },
- endpointKey{
- Region: "fips-af-south-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-east-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-3",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-3",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-4",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-central-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-central-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-north-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-south-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-south-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-3",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-il-central-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-me-central-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-me-south-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-sa-east-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.il-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.me-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.me-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.sa-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "elasticloadbalancing": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticloadbalancing-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticloadbalancing-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticloadbalancing-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticloadbalancing-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "elasticmapreduce": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "{region}.{service}.{dnsSuffix}",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- SSLCommonName: "{service}.{region}.{dnsSuffix}",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- SSLCommonName: "{service}.{region}.{dnsSuffix}",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce-fips.us-east-1.amazonaws.com",
- SSLCommonName: "{service}.{region}.{dnsSuffix}",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "elastictranscoder": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "email": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "email-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "email-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "email-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "email-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "email-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "email-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "email-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "email-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "email-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "email-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "emr-containers": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "emr-containers-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-containers-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-containers-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-containers-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-containers-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "emr-serverless": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "emr-serverless-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "emr-serverless-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "emr-serverless-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "emr-serverless-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "emr-serverless-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-serverless-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-serverless-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-serverless-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-serverless-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "entitlement.marketplace": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "aws-marketplace",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "es": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "es-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "es-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "es-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "es-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "es-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "es-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "es-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "es-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "es-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "events": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "events-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "events-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "events-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "events-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "events-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "events-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "events-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "events-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "evidently": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "evidently.ap-northeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "evidently.ap-southeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "evidently.ap-southeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "evidently.eu-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "evidently.eu-north-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "evidently.eu-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "evidently.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "evidently.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "evidently.us-west-2.amazonaws.com",
- },
- },
- },
- "finspace": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "finspace-api": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "firehose": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "firehose-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "firehose-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "firehose-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "firehose-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "firehose-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "firehose-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "firehose-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "firehose-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "fms": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.af-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-northeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-southeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.eu-west-3.amazonaws.com",
- },
- endpointKey{
- Region: "fips-af-south-1",
- }: endpoint{
- Hostname: "fms-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-east-1",
- }: endpoint{
- Hostname: "fms-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-1",
- }: endpoint{
- Hostname: "fms-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-2",
- }: endpoint{
- Hostname: "fms-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-1",
- }: endpoint{
- Hostname: "fms-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-1",
- }: endpoint{
- Hostname: "fms-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-2",
- }: endpoint{
- Hostname: "fms-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "fms-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "fms-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-central-1",
- }: endpoint{
- Hostname: "fms-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-south-1",
- }: endpoint{
- Hostname: "fms-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-1",
- }: endpoint{
- Hostname: "fms-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-2",
- }: endpoint{
- Hostname: "fms-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-3",
- }: endpoint{
- Hostname: "fms-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-me-south-1",
- }: endpoint{
- Hostname: "fms-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-sa-east-1",
- }: endpoint{
- Hostname: "fms-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "fms-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "fms-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "fms-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "fms-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.me-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.sa-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "forecast": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "forecast-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "forecast-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "forecast-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "forecast-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "forecast-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "forecast-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "forecastquery": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "forecastquery-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "forecastquery-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "forecastquery-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "forecastquery-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "forecastquery-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "forecastquery-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "frauddetector": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "fsx": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "fsx-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "fsx-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-prod-ca-central-1",
- }: endpoint{
- Hostname: "fsx-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-prod-ca-west-1",
- }: endpoint{
- Hostname: "fsx-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-prod-us-east-1",
- }: endpoint{
- Hostname: "fsx-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-prod-us-east-2",
- }: endpoint{
- Hostname: "fsx-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-prod-us-west-1",
- }: endpoint{
- Hostname: "fsx-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-prod-us-west-2",
- }: endpoint{
- Hostname: "fsx-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "fsx-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "fsx-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "fsx-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "fsx-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "prod-ca-central-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-ca-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-east-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-west-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "gamelift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "geo": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "glacier": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glacier-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "glacier-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "glacier-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "glacier-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "glacier-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "glacier-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glacier-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glacier-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glacier-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glacier-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "globalaccelerator": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "globalaccelerator-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "glue": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "glue-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "glue-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "glue-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "glue-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glue-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glue-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glue-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glue-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "grafana": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "grafana.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "grafana.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "grafana.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "grafana.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "grafana.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "grafana.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "grafana.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "grafana.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "grafana.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "grafana.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "greengrass": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "greengrass-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "greengrass-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "greengrass-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "greengrass-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "greengrass-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "greengrass-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "greengrass-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "greengrass-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "groundstation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "groundstation-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "groundstation-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "groundstation-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "groundstation-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "groundstation-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "groundstation-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "guardduty": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "guardduty-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "guardduty-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "guardduty-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "guardduty-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "guardduty-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "guardduty-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "guardduty-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "guardduty-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "health": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "health.us-east-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "global.health.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "health-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "health-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "healthlake": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "iam": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "iam.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "aws-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iam-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "aws-global-fips",
- }: endpoint{
- Hostname: "iam-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "iam",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "iam",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iam-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "iam-fips",
- }: endpoint{
- Hostname: "iam-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "identity-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "identity-chime-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "identity-chime-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "identitystore": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "importexport": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "importexport.amazonaws.com",
- SignatureVersions: []string{"v2", "v4"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- Service: "IngestionService",
- },
- },
- },
- },
- "ingest.timestream": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ingest-fips-us-east-1",
- }: endpoint{
- Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ingest-fips-us-east-2",
- }: endpoint{
- Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ingest-fips-us-west-2",
- }: endpoint{
- Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ingest-us-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ingest-us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ingest.timestream-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ingest-us-east-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ingest-us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ingest.timestream-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ingest-us-west-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ingest-us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ingest.timestream-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "inspector": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "inspector-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "inspector-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "inspector-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "inspector-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "inspector2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "inspector2-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "inspector2-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "inspector2-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "inspector2-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector2-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector2-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector2-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector2-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "internetmonitor": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "internetmonitor.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "internetmonitor.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "internetmonitor.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "internetmonitor.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "internetmonitor.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "internetmonitor.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "internetmonitor.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "internetmonitor.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "internetmonitor.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "internetmonitor.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "internetmonitor.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "internetmonitor.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "internetmonitor.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "internetmonitor.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "internetmonitor.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "internetmonitor.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "internetmonitor.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "internetmonitor.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "internetmonitor.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "internetmonitor.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "internetmonitor.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "internetmonitor.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "internetmonitor.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "internetmonitor.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "internetmonitor.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "internetmonitor.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "internetmonitor-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "internetmonitor.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "internetmonitor-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "internetmonitor.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "internetmonitor-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "internetmonitor.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "internetmonitor-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "iot": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iot-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "iot-fips.ca-central-1.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "iot-fips.us-east-1.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "iot-fips.us-east-2.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "iot-fips.us-west-1.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "iot-fips.us-west-2.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iot-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iot-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iot-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iot-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "iotanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "iotevents": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotevents-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "iotevents-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "iotevents-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "iotevents-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "iotevents-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotevents-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotevents-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotevents-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "ioteventsdata": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "data.iotevents.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "data.iotevents.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "data.iotevents.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "data.iotevents.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "data.iotevents.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "data.iotevents.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "data.iotevents.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "data.iotevents.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "data.iotevents.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "data.iotevents-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "data.iotevents-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "data.iotevents-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "data.iotevents-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "data.iotevents.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "data.iotevents.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "data.iotevents.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "iotfleetwise": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "iotsecuredtunneling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "iotsitewise": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "iotsitewise-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "iotsitewise-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "iotsitewise-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "iotsitewise-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotsitewise-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotsitewise-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotsitewise-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "iotthingsgraph": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "iotthingsgraph",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "iottwinmaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "api-ap-northeast-1",
- }: endpoint{
- Hostname: "api.iottwinmaker.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "api-ap-northeast-2",
- }: endpoint{
- Hostname: "api.iottwinmaker.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "api-ap-south-1",
- }: endpoint{
- Hostname: "api.iottwinmaker.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "api-ap-southeast-1",
- }: endpoint{
- Hostname: "api.iottwinmaker.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "api-ap-southeast-2",
- }: endpoint{
- Hostname: "api.iottwinmaker.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "api-eu-central-1",
- }: endpoint{
- Hostname: "api.iottwinmaker.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "api-eu-west-1",
- }: endpoint{
- Hostname: "api.iottwinmaker.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "api-us-east-1",
- }: endpoint{
- Hostname: "api.iottwinmaker.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "api-us-west-2",
- }: endpoint{
- Hostname: "api.iottwinmaker.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "data-ap-northeast-1",
- }: endpoint{
- Hostname: "data.iottwinmaker.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "data-ap-northeast-2",
- }: endpoint{
- Hostname: "data.iottwinmaker.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "data-ap-south-1",
- }: endpoint{
- Hostname: "data.iottwinmaker.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "data-ap-southeast-1",
- }: endpoint{
- Hostname: "data.iottwinmaker.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "data-ap-southeast-2",
- }: endpoint{
- Hostname: "data.iottwinmaker.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "data-eu-central-1",
- }: endpoint{
- Hostname: "data.iottwinmaker.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "data-eu-west-1",
- }: endpoint{
- Hostname: "data.iottwinmaker.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "data-us-east-1",
- }: endpoint{
- Hostname: "data.iottwinmaker.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "data-us-west-2",
- }: endpoint{
- Hostname: "data.iottwinmaker.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "fips-api-us-east-1",
- }: endpoint{
- Hostname: "api.iottwinmaker-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "fips-api-us-west-2",
- }: endpoint{
- Hostname: "api.iottwinmaker-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "fips-data-us-east-1",
- }: endpoint{
- Hostname: "data.iottwinmaker-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "fips-data-us-west-2",
- }: endpoint{
- Hostname: "data.iottwinmaker-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iottwinmaker-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iottwinmaker-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "iotwireless": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "api.iotwireless.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "api.iotwireless.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "api.iotwireless.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "api.iotwireless.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "api.iotwireless.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "ivs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "ivschat": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "ivsrealtime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "kafka": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kafka-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kafka-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "kafka-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "kafka-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "kafka-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "kafka-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "kafka-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "kafka-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kafka-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kafka-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kafka-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kafka-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "kafkaconnect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "kendra": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "kendra-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "kendra-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "kendra-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "kendra-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "kendra-ranking": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "kendra-ranking.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "kendra-ranking.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "kendra-ranking.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "kendra-ranking.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "kendra-ranking.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "kendra-ranking.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "kendra-ranking.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "kendra-ranking.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "kendra-ranking.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "kendra-ranking.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "kendra-ranking.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "kendra-ranking.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-ranking-fips.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "kendra-ranking.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "kendra-ranking.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "kendra-ranking.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "kendra-ranking.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "kendra-ranking.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "kendra-ranking.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "kendra-ranking.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "kendra-ranking.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "kendra-ranking.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "kendra-ranking.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "kendra-ranking.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "kendra-ranking.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-ranking-fips.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "kendra-ranking.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-ranking-fips.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "kendra-ranking.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "kendra-ranking.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-ranking-fips.us-west-2.api.aws",
- },
- },
- },
- "kinesis": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "kinesis-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "kinesis-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "kinesis-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "kinesis-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kinesis-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kinesis-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kinesis-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kinesis-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "kinesisanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "kinesisvideo": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "kms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ProdFips",
- }: endpoint{
- Hostname: "kms-fips.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.af-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "af-south-1-fips",
- }: endpoint{
- Hostname: "kms-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-east-1-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-northeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-1-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-northeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-2-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-northeast-3.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-3-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-south-1-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-south-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-south-2-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-southeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-1-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-southeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-2-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-southeast-3.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-3-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ap-southeast-4.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-4-fips",
- }: endpoint{
- Hostname: "kms-fips.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "kms-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
- Hostname: "kms-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-central-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-2-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-north-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-north-1-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-south-1-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-south-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-south-2-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-2-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.eu-west-3.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-3-fips",
- }: endpoint{
- Hostname: "kms-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.il-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "il-central-1-fips",
- }: endpoint{
- Hostname: "kms-fips.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.me-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "me-central-1-fips",
- }: endpoint{
- Hostname: "kms-fips.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.me-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "me-south-1-fips",
- }: endpoint{
- Hostname: "kms-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.sa-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "sa-east-1-fips",
- }: endpoint{
- Hostname: "kms-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "kms-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "kms-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "kms-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "kms-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "lakeformation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "lakeformation-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "lakeformation-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "lakeformation-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "lakeformation-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lakeformation-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lakeformation-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lakeformation-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lakeformation-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "lambda": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "lambda-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "lambda-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "lambda-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "lambda-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lambda-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lambda-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lambda-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lambda-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "license-manager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "license-manager-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "license-manager-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "license-manager-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "license-manager-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "license-manager-linux-subscriptions": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-linux-subscriptions-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-linux-subscriptions-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-linux-subscriptions-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-linux-subscriptions-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "license-manager-user-subscriptions": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-user-subscriptions-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "lightsail": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "logs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "logs-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "logs-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "logs-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "logs-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "logs-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "logs-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "logs-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "logs-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "logs-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "logs-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "logs-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "logs.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "logs-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "lookoutequipment": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "lookoutmetrics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "lookoutvision": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "m2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{},
- },
- },
- "machinelearning": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "macie2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "macie2-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "macie2-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "macie2-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "macie2-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie2-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie2-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie2-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "macie2-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "managedblockchain": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "managedblockchain-query": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "marketplacecommerceanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "media-pipelines-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "media-pipelines-chime-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "media-pipelines-chime-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "mediaconnect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mediaconvert": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "mediaconvert-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "mediaconvert-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "mediaconvert-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "mediaconvert-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "mediaconvert-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mediaconvert-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mediaconvert-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mediaconvert-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mediaconvert-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "medialive": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "medialive-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "medialive-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "medialive-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "medialive-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "medialive-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "medialive-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "mediapackage": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mediapackage-vod": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mediapackagev2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mediastore": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "meetings-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "meetings-chime-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "meetings-chime-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "meetings-chime-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "meetings-chime-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "meetings-chime-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "memory-db": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "memory-db-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "messaging-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "messaging-chime-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "messaging-chime-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "metering.marketplace": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "aws-marketplace",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "metrics.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mgh": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mgn": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "mgn-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "mgn-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "mgn-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "mgn-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mgn-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mgn-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mgn-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mgn-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "migrationhub-orchestrator": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "migrationhub-strategy": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "mobileanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "models-v2-lex": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "models.lex": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "models-fips.lex.{region}.{dnsSuffix}",
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "models-fips.lex.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "models-fips.lex.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "models-fips.lex.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "models-fips.lex.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "monitoring": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "monitoring-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "monitoring-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "monitoring-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "monitoring-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "monitoring-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "monitoring-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "monitoring-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "monitoring-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "mq": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "mq-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "mq-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "mq-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "mq-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mq-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mq-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mq-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mq-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "mturk-requester": service{
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "sandbox",
- }: endpoint{
- Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "neptune": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "rds.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "rds.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "rds.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "rds.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "rds.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "rds.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "rds.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "rds.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "rds.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "rds.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "rds.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "rds.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "rds.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "rds.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "rds.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "rds.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "rds.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "rds.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "network-firewall": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "network-firewall-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "networkmanager": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "networkmanager.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "aws-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "networkmanager-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "fips-aws-global",
- }: endpoint{
- Hostname: "networkmanager-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "nimble": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "oam": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "oidc": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "oidc.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "oidc.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "oidc.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "oidc.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "oidc.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "oidc.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "oidc.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "oidc.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "oidc.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "oidc.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "oidc.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "oidc.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "oidc.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "oidc.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "oidc.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "oidc.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "oidc.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "oidc.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "oidc.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "oidc.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "oidc.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "oidc.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "oidc.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "oidc.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "oidc.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "oidc.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "oidc.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "oidc.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "oidc.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "omics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "omics.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "omics.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "omics.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "omics.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "omics-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "omics-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "omics.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "omics.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "omics-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "omics.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "omics-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "opsworks": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "opsworks-cm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "organizations": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "organizations.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "aws-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "organizations-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "fips-aws-global",
- }: endpoint{
- Hostname: "organizations-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "osis": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "outposts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "outposts-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "outposts-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "outposts-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "outposts-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "outposts-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "outposts-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "outposts-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "outposts-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "outposts-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "outposts-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "participant.connect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "participant.connect-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "participant.connect-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "participant.connect-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "participant.connect-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "personalize": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "pi": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.af-south-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-east-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-northeast-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-northeast-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-northeast-3.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-south-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-south-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-southeast-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-southeast-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-southeast-3.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ap-southeast-4.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ca-central-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pi-fips.ca-central-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "pi-fips.ca-central-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.ca-west-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pi-fips.ca-west-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "pi-fips.ca-west-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.eu-central-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.eu-central-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.eu-north-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.eu-south-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.eu-south-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.eu-west-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.eu-west-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.eu-west-3.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "pi-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "pi-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "pi-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "pi-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "pi-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "pi-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.il-central-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.me-central-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.me-south-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.sa-east-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.us-east-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pi-fips.us-east-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "pi-fips.us-east-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.us-east-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pi-fips.us-east-2.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "pi-fips.us-east-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.us-west-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pi-fips.us-west-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "pi-fips.us-west-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.us-west-2.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pi-fips.us-west-2.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "pi-fips.us-west-2.api.aws",
- Protocols: []string{"https"},
- },
- },
- },
- "pinpoint": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "mobiletargeting",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "pinpoint.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pinpoint-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "pinpoint-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "pinpoint-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "pinpoint.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pinpoint-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "pinpoint.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pinpoint-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "pinpoint.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pinpoint-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "pipes": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "polly": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "polly-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "polly-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "polly-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "polly-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "polly-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "polly-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "polly-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "polly-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "portal.sso": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "portal.sso.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "portal.sso.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "portal.sso.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "portal.sso.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "portal.sso.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "portal.sso.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "portal.sso.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "portal.sso.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "portal.sso.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "portal.sso.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "portal.sso.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "portal.sso.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "portal.sso.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "portal.sso.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "portal.sso.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "portal.sso.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "portal.sso.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "portal.sso.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "portal.sso.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "portal.sso.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "portal.sso.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "portal.sso.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "portal.sso.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "portal.sso.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "portal.sso.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "portal.sso.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "portal.sso.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "portal.sso.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "portal.sso.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "private-networks": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "profile": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "profile-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "profile-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "profile-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "profile-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "profile-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "profile-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "projects.iot1click": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "proton": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "qbusiness": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "qbusiness.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "qbusiness.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "qbusiness.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "qbusiness.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "qbusiness.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "qbusiness.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "qbusiness.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "qbusiness.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "qbusiness.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "qbusiness.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "qbusiness.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "qbusiness.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "qbusiness.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "qbusiness.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "qbusiness.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "qbusiness.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "qbusiness.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "qbusiness.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "qbusiness.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "qbusiness.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "qbusiness.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "qbusiness.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "qbusiness.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "qbusiness.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "qbusiness.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "qbusiness.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "qbusiness.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "qbusiness.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "qbusiness.us-west-2.api.aws",
- },
- },
- },
- "qldb": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "qldb-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "qldb-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "qldb-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "qldb-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "qldb-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "qldb-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "qldb-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "qldb-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "quicksight": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "api",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "ram": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ram-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "ram-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ram-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ram-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ram-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ram-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "rbin": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "rbin-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "rbin-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "rbin-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "rbin-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "rbin-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "rbin-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "rds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "rds-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
- Hostname: "rds-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "rds-fips.ca-central-1",
- }: endpoint{
- Hostname: "rds-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds-fips.ca-west-1",
- }: endpoint{
- Hostname: "rds-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds-fips.us-east-1",
- }: endpoint{
- Hostname: "rds-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds-fips.us-east-2",
- }: endpoint{
- Hostname: "rds-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds-fips.us-west-1",
- }: endpoint{
- Hostname: "rds-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds-fips.us-west-2",
- }: endpoint{
- Hostname: "rds-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.ca-central-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.ca-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-east-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-west-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- SSLCommonName: "{service}.{dnsSuffix}",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.us-east-1.amazonaws.com",
- SSLCommonName: "{service}.{dnsSuffix}",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "rds-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "rds-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "rds-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "rds-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "rds-data": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "rds-data-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "rds-data-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "rds-data-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "rds-data-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-data-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-data-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-data-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds-data-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "redshift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "redshift-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "redshift-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "redshift-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "redshift-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "redshift-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "redshift-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "redshift-serverless": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "redshift-serverless-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-serverless-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-serverless-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-serverless-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "redshift-serverless-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "rekognition": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "rekognition-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "rekognition-fips.ca-central-1",
- }: endpoint{
- Hostname: "rekognition-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition-fips.us-east-1",
- }: endpoint{
- Hostname: "rekognition-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition-fips.us-east-2",
- }: endpoint{
- Hostname: "rekognition-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition-fips.us-west-1",
- }: endpoint{
- Hostname: "rekognition-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition-fips.us-west-2",
- }: endpoint{
- Hostname: "rekognition-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.ca-central-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-east-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-west-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "rekognition-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "rekognition-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "rekognition-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "rekognition-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "resiliencehub": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "resource-explorer-2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "resource-groups": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "resource-groups-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resource-groups-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resource-groups-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resource-groups-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resource-groups-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "robomaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "rolesanywhere": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "route53": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "route53.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "aws-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "route53-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "fips-aws-global",
- }: endpoint{
- Hostname: "route53-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "route53-recovery-control-config": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "route53-recovery-control-config.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "route53domains": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- },
- },
- "route53resolver": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "rum": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "runtime-v2-lex": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "runtime.lex": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.lex.{region}.{dnsSuffix}",
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.lex.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "runtime-fips.lex.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "runtime-fips.lex.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "runtime.sagemaker": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "runtime-fips.sagemaker.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "s3": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- defaultKey{
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.af-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "s3.ap-northeast-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-northeast-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-northeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-northeast-3.amazonaws.com",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-south-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "s3.ap-southeast-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-southeast-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "s3.ap-southeast-2.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-southeast-2.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-southeast-3.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ap-southeast-4.amazonaws.com",
- },
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "s3.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.eu-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.eu-central-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.eu-north-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.eu-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.eu-south-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "s3.eu-west-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.eu-west-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.eu-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.eu-west-3.amazonaws.com",
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "s3-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "s3-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "s3-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "s3-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "s3-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "s3-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.il-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.me-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.me-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "s3-external-1",
- }: endpoint{
- Hostname: "s3-external-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "s3.sa-east-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.sa-east-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "s3.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "s3.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "s3.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- },
- },
- "s3-control": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- defaultKey{
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "s3-control.af-south-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.af-south-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "s3-control.ap-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "s3-control.ap-northeast-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-northeast-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "s3-control.ap-northeast-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-northeast-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "s3-control.ap-northeast-3.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-northeast-3.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "s3-control.ap-south-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-south-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "s3-control.ap-south-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-south-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "s3-control.ap-southeast-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-southeast-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "s3-control.ap-southeast-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-southeast-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "s3-control.ap-southeast-3.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-southeast-3.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "s3-control.ap-southeast-4.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ap-southeast-4.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "s3-control.ca-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ca-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.ca-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.ca-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "s3-control.ca-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.ca-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.ca-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.ca-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "s3-control.eu-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.eu-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "s3-control.eu-central-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.eu-central-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "s3-control.eu-north-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.eu-north-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "s3-control.eu-south-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.eu-south-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "s3-control.eu-south-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.eu-south-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "s3-control.eu-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.eu-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "s3-control.eu-west-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.eu-west-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "s3-control.eu-west-3.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.eu-west-3.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "s3-control.il-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.il-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "s3-control.me-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.me-central-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "s3-control.me-south-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.me-south-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "s3-control.sa-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.sa-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "s3-control.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "s3-control.us-east-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-east-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-east-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-east-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-east-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "s3-control.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "s3-control.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-west-2.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "s3-outposts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- },
- },
- "sagemaker-geospatial": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "savingsplans": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "savingsplans.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "scheduler": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "schemas": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "sdb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"v2"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "sdb.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "secretsmanager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- },
- },
- "securityhub": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "securityhub-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "securityhub-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "securityhub-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "securityhub-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securityhub-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securityhub-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securityhub-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securityhub-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "securitylake": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "securitylake-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "securitylake-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "securitylake-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "securitylake-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securitylake-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securitylake-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securitylake-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securitylake-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "serverlessrepo": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "serverlessrepo-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "serverlessrepo-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "serverlessrepo-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "serverlessrepo-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "servicecatalog": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "servicecatalog-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "servicecatalog-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "servicecatalog-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "servicecatalog-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "servicecatalog-appregistry": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-appregistry-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "servicediscovery": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "af-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.af-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-east-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-northeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-northeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-south-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-southeast-3.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ap-southeast-4.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.ca-west-1.api.aws",
- },
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.eu-central-2.api.aws",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.eu-north-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.eu-south-1.api.aws",
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.eu-south-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.il-central-1.api.aws",
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.me-central-1.api.aws",
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.me-south-1.api.aws",
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.sa-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "servicequotas": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "session.qldb": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "session.qldb-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "session.qldb-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "session.qldb-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "session.qldb-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "session.qldb-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "session.qldb-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "shield": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "shield.us-east-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "shield.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "aws-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "shield-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "fips-aws-global",
- }: endpoint{
- Hostname: "shield-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "signer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "signer-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "signer-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "signer-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "signer-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-verification-us-east-1",
- }: endpoint{
- Hostname: "verification.signer-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "fips-verification-us-east-2",
- }: endpoint{
- Hostname: "verification.signer-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "fips-verification-us-west-1",
- }: endpoint{
- Hostname: "verification.signer-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "fips-verification-us-west-2",
- }: endpoint{
- Hostname: "verification.signer-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "signer-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "signer-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "signer-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "signer-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "verification-af-south-1",
- }: endpoint{
- Hostname: "verification.signer.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "verification-ap-east-1",
- }: endpoint{
- Hostname: "verification.signer.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "verification-ap-northeast-1",
- }: endpoint{
- Hostname: "verification.signer.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "verification-ap-northeast-2",
- }: endpoint{
- Hostname: "verification.signer.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "verification-ap-south-1",
- }: endpoint{
- Hostname: "verification.signer.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "verification-ap-southeast-1",
- }: endpoint{
- Hostname: "verification.signer.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "verification-ap-southeast-2",
- }: endpoint{
- Hostname: "verification.signer.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "verification-ca-central-1",
- }: endpoint{
- Hostname: "verification.signer.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "verification-eu-central-1",
- }: endpoint{
- Hostname: "verification.signer.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "verification-eu-north-1",
- }: endpoint{
- Hostname: "verification.signer.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "verification-eu-south-1",
- }: endpoint{
- Hostname: "verification.signer.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "verification-eu-west-1",
- }: endpoint{
- Hostname: "verification.signer.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "verification-eu-west-2",
- }: endpoint{
- Hostname: "verification.signer.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "verification-eu-west-3",
- }: endpoint{
- Hostname: "verification.signer.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "verification-me-south-1",
- }: endpoint{
- Hostname: "verification.signer.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "verification-sa-east-1",
- }: endpoint{
- Hostname: "verification.signer.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "verification-us-east-1",
- }: endpoint{
- Hostname: "verification.signer.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "verification-us-east-2",
- }: endpoint{
- Hostname: "verification.signer.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "verification-us-west-1",
- }: endpoint{
- Hostname: "verification.signer.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "verification-us-west-2",
- }: endpoint{
- Hostname: "verification.signer.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "simspaceweaver": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "sms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "sms-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "sms-voice": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "sms-voice-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "sms-voice-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-voice-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-voice-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-voice-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-voice-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "snowball": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.ap-northeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.ap-northeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.ap-northeast-3.amazonaws.com",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.ap-south-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.ap-southeast-1.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.ap-southeast-2.amazonaws.com",
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.eu-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.eu-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.eu-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.eu-west-3.amazonaws.com",
- },
- endpointKey{
- Region: "fips-ap-northeast-1",
- }: endpoint{
- Hostname: "snowball-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-2",
- }: endpoint{
- Hostname: "snowball-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-3",
- }: endpoint{
- Hostname: "snowball-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-1",
- }: endpoint{
- Hostname: "snowball-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-1",
- }: endpoint{
- Hostname: "snowball-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-2",
- }: endpoint{
- Hostname: "snowball-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "snowball-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-central-1",
- }: endpoint{
- Hostname: "snowball-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-1",
- }: endpoint{
- Hostname: "snowball-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-2",
- }: endpoint{
- Hostname: "snowball-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-3",
- }: endpoint{
- Hostname: "snowball-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-sa-east-1",
- }: endpoint{
- Hostname: "snowball-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "snowball-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "snowball-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "snowball-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "snowball-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.sa-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "sns": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sns-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "sns-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "sns-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "sns-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "sns-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "sns-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sns-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sns-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sns-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sns-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "sqs": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "{region}.queue.{dnsSuffix}",
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "sqs-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "sqs-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "sqs-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "sqs-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- SSLCommonName: "queue.{dnsSuffix}",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sqs-fips.us-east-1.amazonaws.com",
- SSLCommonName: "queue.{dnsSuffix}",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sqs-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sqs-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sqs-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "ssm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ssm-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "ssm-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ssm-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ssm-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ssm-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ssm-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "ssm-contacts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-contacts-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-contacts-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-contacts-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-contacts-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "ssm-incidents": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ssm-incidents-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-incidents-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-incidents-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-incidents-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-incidents-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "ssm-sap": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "ssm-sap-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "ssm-sap-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "ssm-sap-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "ssm-sap-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "ssm-sap-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-sap-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-sap-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-sap-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm-sap-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "sso": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "states": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "states-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "states-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "states-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "states-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "states-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "states-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "states-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "states-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "storagegateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "streams.dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "dynamodb",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "local",
- }: endpoint{
- Hostname: "localhost:8000",
- Protocols: []string{"http"},
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "sts": service{
- PartitionEndpoint: "aws-global",
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "sts.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sts-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "sts-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sts-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "sts-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sts-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "sts-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sts-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "sts-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "support": service{
- PartitionEndpoint: "aws-global",
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "support.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "supportapp": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "swf": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "swf-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "swf-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "swf-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "swf-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "swf-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "swf-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "swf-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "swf-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "synthetics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "synthetics-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "synthetics-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "synthetics-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "synthetics-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "synthetics-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "synthetics-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "synthetics-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "synthetics-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "tagging": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "tax": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "tax.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- },
- },
- "textract": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.ap-northeast-2.api.aws",
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.ap-south-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.ap-southeast-1.api.aws",
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.ap-southeast-2.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "textract-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "textract-fips.ca-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.eu-central-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.eu-west-1.api.aws",
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.eu-west-2.api.aws",
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.eu-west-3.api.aws",
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "textract-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "textract-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "textract-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "textract-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "textract-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "textract-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "textract-fips.us-east-1.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "textract-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "textract-fips.us-east-2.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "textract-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "textract-fips.us-west-1.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.us-west-2.api.aws",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "textract-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "textract-fips.us-west-2.api.aws",
- },
- },
- },
- "thinclient": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "tnb": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "transcribe": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "fips.transcribe.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "fips.transcribe.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "fips.transcribe.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "fips.transcribe.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "fips.transcribe.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.us-west-2.amazonaws.com",
- },
- },
- },
- "transcribestreaming": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "transcribestreaming-ca-central-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-fips-ca-central-1",
- }: endpoint{
- Hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-fips-us-east-1",
- }: endpoint{
- Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-fips-us-east-2",
- }: endpoint{
- Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-fips-us-west-2",
- }: endpoint{
- Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-us-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transcribestreaming-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-us-east-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transcribestreaming-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-us-west-2",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "transcribestreaming-us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transcribestreaming-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "transfer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transfer-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transfer-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "transfer-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "transfer-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "transfer-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "transfer-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "transfer-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "transfer-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transfer-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transfer-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transfer-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transfer-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "translate": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "translate-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "translate-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "translate-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2-fips",
- }: endpoint{
- Hostname: "translate-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "translate-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1-fips",
- }: endpoint{
- Hostname: "translate-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "translate-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "translate-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "verifiedpermissions": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "verifiedpermissions-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "verifiedpermissions-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "voice-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "voice-chime-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "ca-central-1-fips",
- }: endpoint{
- Hostname: "voice-chime-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "voice-chime-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-1-fips",
- }: endpoint{
- Hostname: "voice-chime-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "voice-chime-fips.us-west-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2-fips",
- }: endpoint{
- Hostname: "voice-chime-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "voiceid": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "voiceid-fips.ca-central-1.amazonaws.com",
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "voiceid-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "voiceid-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "voiceid-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "voiceid-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "voiceid-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "vpc-lattice": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "waf": service{
- PartitionEndpoint: "aws-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "aws",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "aws-fips",
- }: endpoint{
- Hostname: "waf-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "aws-global",
- }: endpoint{
- Hostname: "waf.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "aws-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "aws-global-fips",
- }: endpoint{
- Hostname: "waf-fips.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "waf-regional": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "waf-regional.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "af-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "waf-regional.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "waf-regional.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "waf-regional.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "waf-regional.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "waf-regional.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "waf-regional.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "waf-regional.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "waf-regional.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "waf-regional.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "waf-regional.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "waf-regional.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "waf-regional.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "waf-regional.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "waf-regional.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "waf-regional.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "waf-regional.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "waf-regional.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "waf-regional.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "waf-regional.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "fips-af-south-1",
- }: endpoint{
- Hostname: "waf-regional-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-east-1",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-1",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-2",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-3",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-1",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-2",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-1",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-2",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-3",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-4",
- }: endpoint{
- Hostname: "waf-regional-fips.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "waf-regional-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-central-1",
- }: endpoint{
- Hostname: "waf-regional-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-central-2",
- }: endpoint{
- Hostname: "waf-regional-fips.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-north-1",
- }: endpoint{
- Hostname: "waf-regional-fips.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-south-1",
- }: endpoint{
- Hostname: "waf-regional-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-south-2",
- }: endpoint{
- Hostname: "waf-regional-fips.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-1",
- }: endpoint{
- Hostname: "waf-regional-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-2",
- }: endpoint{
- Hostname: "waf-regional-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-3",
- }: endpoint{
- Hostname: "waf-regional-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-il-central-1",
- }: endpoint{
- Hostname: "waf-regional-fips.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-me-central-1",
- }: endpoint{
- Hostname: "waf-regional-fips.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-me-south-1",
- }: endpoint{
- Hostname: "waf-regional-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-sa-east-1",
- }: endpoint{
- Hostname: "waf-regional-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "waf-regional-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "waf-regional-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "waf-regional-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "waf-regional-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "waf-regional.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "il-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "waf-regional.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "waf-regional.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "waf-regional.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "waf-regional.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "waf-regional.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "waf-regional.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "waf-regional.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "wafv2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{
- Hostname: "wafv2.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "af-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{
- Hostname: "wafv2.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{
- Hostname: "wafv2.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{
- Hostname: "wafv2.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{
- Hostname: "wafv2.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-northeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{
- Hostname: "wafv2.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{
- Hostname: "wafv2.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-south-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{
- Hostname: "wafv2.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{
- Hostname: "wafv2.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{
- Hostname: "wafv2.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{
- Hostname: "wafv2.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ap-southeast-4",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{
- Hostname: "wafv2.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- },
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{
- Hostname: "wafv2.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "ca-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{
- Hostname: "wafv2.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{
- Hostname: "wafv2.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-central-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{
- Hostname: "wafv2.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-north-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{
- Hostname: "wafv2.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{
- Hostname: "wafv2.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-south-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{
- Hostname: "wafv2.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{
- Hostname: "wafv2.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{
- Hostname: "wafv2.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "eu-west-3",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- },
- endpointKey{
- Region: "fips-af-south-1",
- }: endpoint{
- Hostname: "wafv2-fips.af-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "af-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-east-1",
- }: endpoint{
- Hostname: "wafv2-fips.ap-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-1",
- }: endpoint{
- Hostname: "wafv2-fips.ap-northeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-2",
- }: endpoint{
- Hostname: "wafv2-fips.ap-northeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-northeast-3",
- }: endpoint{
- Hostname: "wafv2-fips.ap-northeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-northeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-1",
- }: endpoint{
- Hostname: "wafv2-fips.ap-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-south-2",
- }: endpoint{
- Hostname: "wafv2-fips.ap-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-south-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-1",
- }: endpoint{
- Hostname: "wafv2-fips.ap-southeast-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-2",
- }: endpoint{
- Hostname: "wafv2-fips.ap-southeast-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-3",
- }: endpoint{
- Hostname: "wafv2-fips.ap-southeast-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ap-southeast-4",
- }: endpoint{
- Hostname: "wafv2-fips.ap-southeast-4.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ap-southeast-4",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-central-1",
- }: endpoint{
- Hostname: "wafv2-fips.ca-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-ca-west-1",
- }: endpoint{
- Hostname: "wafv2-fips.ca-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "ca-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-central-1",
- }: endpoint{
- Hostname: "wafv2-fips.eu-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-central-2",
- }: endpoint{
- Hostname: "wafv2-fips.eu-central-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-central-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-north-1",
- }: endpoint{
- Hostname: "wafv2-fips.eu-north-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-north-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-south-1",
- }: endpoint{
- Hostname: "wafv2-fips.eu-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-south-2",
- }: endpoint{
- Hostname: "wafv2-fips.eu-south-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-south-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-1",
- }: endpoint{
- Hostname: "wafv2-fips.eu-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-2",
- }: endpoint{
- Hostname: "wafv2-fips.eu-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-eu-west-3",
- }: endpoint{
- Hostname: "wafv2-fips.eu-west-3.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "eu-west-3",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-il-central-1",
- }: endpoint{
- Hostname: "wafv2-fips.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-me-central-1",
- }: endpoint{
- Hostname: "wafv2-fips.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-me-south-1",
- }: endpoint{
- Hostname: "wafv2-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-sa-east-1",
- }: endpoint{
- Hostname: "wafv2-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "wafv2-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "wafv2-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "wafv2-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "wafv2-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{
- Hostname: "wafv2.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "il-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.il-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "il-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- }: endpoint{
- Hostname: "wafv2.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-central-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.me-central-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-central-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- }: endpoint{
- Hostname: "wafv2.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "me-south-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.me-south-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "me-south-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{
- Hostname: "wafv2.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "sa-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.sa-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "sa-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{
- Hostname: "wafv2.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{
- Hostname: "wafv2.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{
- Hostname: "wafv2.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{
- Hostname: "wafv2.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- },
- },
- },
- "wellarchitected": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "wisdom": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "ui-ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ui-ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ui-ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ui-ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ui-ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ui-eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ui-eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "ui-us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ui-us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{},
- },
- },
- "workdocs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "workdocs-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "workdocs-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "workdocs-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "workdocs-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "workmail": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "workspaces": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "workspaces-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "workspaces-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "workspaces-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "workspaces-fips.us-west-2.amazonaws.com",
- },
- },
- },
- "workspaces-web": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- },
- },
- "xray": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "af-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-east-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-northeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-south-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-1",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-2",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-3",
- }: endpoint{},
- endpointKey{
- Region: "ap-southeast-4",
- }: endpoint{},
- endpointKey{
- Region: "ca-central-1",
- }: endpoint{},
- endpointKey{
- Region: "ca-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-central-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-north-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-south-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-1",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-2",
- }: endpoint{},
- endpointKey{
- Region: "eu-west-3",
- }: endpoint{},
- endpointKey{
- Region: "fips-us-east-1",
- }: endpoint{
- Hostname: "xray-fips.us-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-east-2",
- }: endpoint{
- Hostname: "xray-fips.us-east-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-east-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-1",
- }: endpoint{
- Hostname: "xray-fips.us-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-west-2",
- }: endpoint{
- Hostname: "xray-fips.us-west-2.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-west-2",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "il-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-central-1",
- }: endpoint{},
- endpointKey{
- Region: "me-south-1",
- }: endpoint{},
- endpointKey{
- Region: "sa-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "xray-fips.us-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-east-2",
- }: endpoint{},
- endpointKey{
- Region: "us-east-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "xray-fips.us-east-2.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "xray-fips.us-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-west-2",
- }: endpoint{},
- endpointKey{
- Region: "us-west-2",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "xray-fips.us-west-2.amazonaws.com",
- },
- },
- },
- },
-}
-
-// AwsCnPartition returns the Resolver for AWS China.
-func AwsCnPartition() Partition {
- return awscnPartition.Partition()
-}
-
-var awscnPartition = partition{
- ID: "aws-cn",
- Name: "AWS China",
- DNSSuffix: "amazonaws.com.cn",
- RegionRegex: regionRegex{
- Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$")
- return reg
- }(),
- },
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- DNSSuffix: "api.amazonwebservices.com.cn",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com.cn",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.amazonwebservices.com.cn",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- },
- Regions: regions{
- "cn-north-1": region{
- Description: "China (Beijing)",
- },
- "cn-northwest-1": region{
- Description: "China (Ningxia)",
- },
- },
- Services: services{
- "access-analyzer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "account": service{
- PartitionEndpoint: "aws-cn-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "account.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "acm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "acm-pca": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "airflow": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "api.ecr": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "api.ecr.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "api.ecr.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "api.pricing": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "pricing",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "api.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "api.tunneling.iot": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "apigateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "appconfig": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "appconfigdata": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "application-autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "applicationinsights": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "appmesh": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "appmesh.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "appsync": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "arc-zonal-shift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "athena": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "autoscaling-plans": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "backup": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "batch": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "budgets": service{
- PartitionEndpoint: "aws-cn-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "budgets.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "cassandra": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "ce": service{
- PartitionEndpoint: "aws-cn-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "ce.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "cloudcontrolapi": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "cloudformation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "cloudfront": service{
- PartitionEndpoint: "aws-cn-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "cloudfront.cn-northwest-1.amazonaws.com.cn",
- Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "cloudtrail": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "codebuild": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "codecommit": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "codedeploy": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "codepipeline": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "cognito-identity": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "compute-optimizer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "compute-optimizer.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "compute-optimizer.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "config": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "cur": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "data-ats.iot": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "data.ats.iot.cn-north-1.amazonaws.com.cn",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "data.jobs.iot": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "databrew": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "datasync": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "datazone": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "datazone.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "datazone.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "dax": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "directconnect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "dlm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "dms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "docdb": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "rds.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "ds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "ebs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "ec2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "ecs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "eks": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "eks-auth": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "elasticache": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "elasticbeanstalk": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "elasticfilesystem": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn",
- },
- endpointKey{
- Region: "fips-cn-north-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-cn-northwest-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "elasticloadbalancing": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "elasticmapreduce": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "emr-containers": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "emr-serverless": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "entitlement.marketplace": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "entitlement-marketplace.cn-northwest-1.amazonaws.com.cn",
- Protocols: []string{"https"},
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "es": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "events": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "firehose": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "firehose.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "firehose.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "fms": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "fsx": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "gamelift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "glacier": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "glue": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "greengrass": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "guardduty": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "health": service{
- PartitionEndpoint: "aws-cn-global",
- IsRegionalized: boxedFalse,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "health.cn-northwest-1.amazonaws.com.cn",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "global.health.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "iam": service{
- PartitionEndpoint: "aws-cn-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "iam.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- },
- },
- "identitystore": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "inspector2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "internetmonitor": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "internetmonitor.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "internetmonitor.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "iot": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "iotanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "iotevents": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "ioteventsdata": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "data.iotevents.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- },
- },
- "iotsecuredtunneling": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "iotsitewise": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "iottwinmaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "api-cn-north-1",
- }: endpoint{
- Hostname: "api.iottwinmaker.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "data-cn-north-1",
- }: endpoint{
- Hostname: "data.iottwinmaker.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- },
- },
- "kafka": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "kendra-ranking": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "kendra-ranking.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "kendra-ranking.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "kinesis": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "kinesisanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "kinesisvideo": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "kms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "lakeformation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "lambda": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "license-manager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "license-manager-linux-subscriptions": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "logs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "mediaconvert": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "mediaconvert.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "memory-db": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "metrics.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "monitoring": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "mq": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "neptune": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "rds.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "rds.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "network-firewall": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "oam": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "oidc": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "oidc.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "oidc.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "organizations": service{
- PartitionEndpoint: "aws-cn-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "organizations.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "personalize": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "pi": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.cn-north-1.api.amazonwebservices.com.cn",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.cn-northwest-1.api.amazonwebservices.com.cn",
- Protocols: []string{"https"},
- },
- },
- },
- "pipes": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "polly": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "portal.sso": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "portal.sso.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "portal.sso.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "qbusiness": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.amazonwebservices.com.cn",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "qbusiness.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "qbusiness.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "quicksight": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "ram": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "rbin": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "rds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "redshift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "redshift-serverless": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "resource-groups": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "rolesanywhere": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "route53": service{
- PartitionEndpoint: "aws-cn-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "route53.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "route53resolver": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "runtime.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "s3": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com.cn",
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.cn-north-1.amazonaws.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.cn-northwest-1.amazonaws.com.cn",
- },
- },
- },
- "s3-control": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com.cn",
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "s3-control.cn-north-1.amazonaws.com.cn",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.cn-north-1.amazonaws.com.cn",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "s3-control.cn-northwest-1.amazonaws.com.cn",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.cn-northwest-1.amazonaws.com.cn",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "savingsplans": service{
- IsRegionalized: boxedTrue,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "savingsplans.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "savingsplans.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "schemas": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "secretsmanager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{},
- },
- },
- "securityhub": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "serverlessrepo": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- },
- },
- "servicecatalog": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "servicediscovery": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "servicequotas": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "signer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "verification-cn-north-1",
- }: endpoint{
- Hostname: "verification.signer.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "verification-cn-northwest-1",
- }: endpoint{
- Hostname: "verification.signer.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "sms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- },
- },
- "snowball": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn",
- },
- endpointKey{
- Region: "fips-cn-north-1",
- }: endpoint{
- Hostname: "snowball-fips.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-cn-northwest-1",
- }: endpoint{
- Hostname: "snowball-fips.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "sns": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "sqs": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "{region}.queue.{dnsSuffix}",
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "ssm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "sso": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "states": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-north-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "states.cn-north-1.api.amazonwebservices.com.cn",
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "states.cn-northwest-1.api.amazonwebservices.com.cn",
- },
- },
- },
- "storagegateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "streams.dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "dynamodb",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "sts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "support": service{
- PartitionEndpoint: "aws-cn-global",
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-cn-global",
- }: endpoint{
- Hostname: "support.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- },
- },
- "swf": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "synthetics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "tagging": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "transcribe": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "cn.transcribe.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "cn.transcribe.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- },
- },
- "transcribestreaming": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "transfer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "waf-regional": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "waf-regional.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-north-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "waf-regional.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- endpointKey{
- Region: "fips-cn-north-1",
- }: endpoint{
- Hostname: "waf-regional-fips.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-cn-northwest-1",
- }: endpoint{
- Hostname: "waf-regional-fips.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "wafv2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{
- Hostname: "wafv2.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-north-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{
- Hostname: "wafv2.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- endpointKey{
- Region: "cn-northwest-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- },
- endpointKey{
- Region: "fips-cn-north-1",
- }: endpoint{
- Hostname: "wafv2-fips.cn-north-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-north-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-cn-northwest-1",
- }: endpoint{
- Hostname: "wafv2-fips.cn-northwest-1.amazonaws.com.cn",
- CredentialScope: credentialScope{
- Region: "cn-northwest-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "workspaces": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- "xray": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "cn-north-1",
- }: endpoint{},
- endpointKey{
- Region: "cn-northwest-1",
- }: endpoint{},
- },
- },
- },
-}
-
-// AwsUsGovPartition returns the Resolver for AWS GovCloud (US).
-func AwsUsGovPartition() Partition {
- return awsusgovPartition.Partition()
-}
-
-var awsusgovPartition = partition{
- ID: "aws-us-gov",
- Name: "AWS GovCloud (US)",
- DNSSuffix: "amazonaws.com",
- RegionRegex: regionRegex{
- Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$")
- return reg
- }(),
- },
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- },
- Regions: regions{
- "us-gov-east-1": region{
- Description: "AWS GovCloud (US-East)",
- },
- "us-gov-west-1": region{
- Description: "AWS GovCloud (US-West)",
- },
- },
- Services: services{
- "access-analyzer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "access-analyzer.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "access-analyzer.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "acm": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "acm.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "acm.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "acm-pca": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "acm-pca.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "acm-pca.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "acm-pca.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "api.detective": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "api.detective-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "api.detective-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "api.ecr": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "dkr-us-gov-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-gov-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dkr-us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-dkr-us-gov-east-1",
- }: endpoint{
- Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-dkr-us-gov-west-1",
- }: endpoint{
- Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "api.ecr.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "api.ecr.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecr-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "api.sagemaker": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api-fips.sagemaker.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "api-fips.sagemaker.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1-fips-secondary",
- }: endpoint{
- Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1-secondary",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1-secondary",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.sagemaker.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "api.tunneling.iot": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "apigateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "appconfig": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "appconfig.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "appconfig.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appconfig.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appconfig.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "appconfigdata": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appconfigdata.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appconfigdata.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "application-autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Hostname: "autoscaling.{region}.amazonaws.com",
- Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "application-autoscaling",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "application-autoscaling.us-gov-east-1.amazonaws.com",
- Protocols: []string{"http", "https"},
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "application-autoscaling.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
-
- Deprecated: boxedTrue,
- },
- },
- },
- "applicationinsights": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "applicationinsights.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "applicationinsights.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "appstream2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- CredentialScope: credentialScope{
- Service: "appstream",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "appstream2-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "appstream2-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "arc-zonal-shift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "athena": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "athena-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "athena-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "athena-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "athena-fips.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "athena.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "athena-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "athena-fips.us-gov-west-1.api.aws",
- },
- },
- },
- "autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- },
- "autoscaling-plans": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com",
- Protocols: []string{"http", "https"},
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
-
- Deprecated: boxedTrue,
- },
- },
- },
- "backup": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "backup-gateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "batch": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "batch.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "batch.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "batch.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "batch.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "batch.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "bedrock": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "bedrock-fips-us-gov-west-1",
- }: endpoint{
- Hostname: "bedrock-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-fips-us-gov-west-1",
- }: endpoint{
- Hostname: "bedrock-runtime-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "bedrock-runtime-us-gov-west-1",
- }: endpoint{
- Hostname: "bedrock-runtime.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "bedrock-us-gov-west-1",
- }: endpoint{
- Hostname: "bedrock.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "cassandra": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "cassandra.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cassandra.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "cassandra.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "cassandra.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cassandra.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "cassandra.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "cloudcontrolapi": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "cloudcontrolapi-fips.us-gov-west-1.api.aws",
- },
- },
- },
- "clouddirectory": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "clouddirectory.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "clouddirectory.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "cloudformation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "cloudformation.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "cloudformation.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "cloudhsm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "cloudhsmv2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "cloudhsm",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "cloudtrail": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "cloudtrail.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudtrail.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cloudtrail.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "codebuild": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "codebuild-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "codebuild-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "codecommit": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "codecommit-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "codecommit-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "codedeploy": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "codedeploy-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "codedeploy-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "codepipeline": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codepipeline-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "codepipeline-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "codestar-connections": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- },
- },
- "cognito-identity": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-identity-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "cognito-idp": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "cognito-idp-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "comprehend": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehend-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "comprehendmedical": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "comprehendmedical-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "compute-optimizer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "compute-optimizer-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "compute-optimizer-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "config": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "config.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "config.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "config.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "config.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "config.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "connect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "connect.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "connect.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "controltower": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "controltower-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "controltower-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "data-ats.iot": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Service: "iotdata",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iot-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iot-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "data.jobs.iot": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.jobs.iot-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "databrew": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "databrew.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "databrew.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "datasync": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "datasync-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "datasync-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "datazone": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "datazone.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "datazone.us-gov-west-1.api.aws",
- },
- },
- },
- "directconnect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "dlm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dlm.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "dlm.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dlm.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "dlm.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "dms": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "dms",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dms",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dms-fips",
- }: endpoint{
- Hostname: "dms.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "dms.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "dms.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "docdb": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "rds.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "drs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "drs-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "drs-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "ds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "ds-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "ds-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ds-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "dynamodb.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "dynamodb.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "ebs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "ec2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ec2.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "ec2.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.us-gov-east-1.api.aws",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "ec2.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "ec2.us-gov-west-1.api.aws",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "ecs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "ecs-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "ecs-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecs-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ecs-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "eks": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "eks.{region}.{dnsSuffix}",
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "eks.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "eks.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "eks.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "eks.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "eks-auth": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "eks-auth.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "eks-auth.us-gov-west-1.api.aws",
- },
- },
- },
- "elasticache": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticache.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "elasticache.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticache.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "elasticache.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "elasticbeanstalk": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "elasticbeanstalk.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "elasticbeanstalk.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "elasticfilesystem": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "elasticloadbalancing": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticloadbalancing.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticloadbalancing.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticloadbalancing.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- },
- },
- "elasticmapreduce": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.us-gov-west-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.us-gov-west-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- },
- },
- "email": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "email-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "email-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "email-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "email-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "emr-containers": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "emr-containers.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "emr-containers.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-containers.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-containers.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "emr-serverless": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "emr-serverless.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "emr-serverless.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-serverless.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "emr-serverless.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "es": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "es-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "es-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "es-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "aos.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "es-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "es-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "events": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "events.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "events.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "events.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "events.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "firehose": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "firehose-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "firehose-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "firehose-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "firehose-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "fms": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "fms-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "fms-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fms-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "fsx": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-prod-us-gov-east-1",
- }: endpoint{
- Hostname: "fsx-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-prod-us-gov-west-1",
- }: endpoint{
- Hostname: "fsx-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "fsx-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "fsx-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-gov-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-gov-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "geo": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "geo-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "geo-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "glacier": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "glacier.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "glacier.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glacier.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glacier.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- },
- },
- "glue": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "glue-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "glue-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "glue.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glue-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "glue-fips.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "glue.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "glue-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "glue-fips.us-gov-west-1.api.aws",
- },
- },
- },
- "greengrass": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "dataplane-us-gov-east-1",
- }: endpoint{
- Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "dataplane-us-gov-west-1",
- }: endpoint{
- Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "greengrass.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "greengrass.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "greengrass.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "greengrass.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "guardduty": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "guardduty.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "guardduty.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "guardduty.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "guardduty.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "guardduty.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "health": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "health.us-gov-west-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-us-gov-global",
- }: endpoint{
- Hostname: "global.health.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "health-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "health-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "iam": service{
- PartitionEndpoint: "aws-us-gov-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-us-gov-global",
- }: endpoint{
- Hostname: "iam.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "aws-us-gov-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iam.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "aws-us-gov-global-fips",
- }: endpoint{
- Hostname: "iam.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "iam-govcloud",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "iam-govcloud",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iam.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "iam-govcloud-fips",
- }: endpoint{
- Hostname: "iam.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "identitystore": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "identitystore.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "identitystore.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "identitystore.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "identitystore.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "identitystore.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "ingest.timestream": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "ingest.timestream.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "inspector": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "inspector-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "inspector-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "inspector2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector2-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "inspector2-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "internetmonitor": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "internetmonitor.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "internetmonitor.us-gov-west-1.api.aws",
- },
- },
- },
- "iot": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "iot-fips.us-gov-east-1.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "iot-fips.us-gov-west-1.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iot-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iot-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "iotevents": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotevents-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "ioteventsdata": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "data.iotevents.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "data.iotevents-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "iotsecuredtunneling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "api.tunneling.iot-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "iotsitewise": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iotsitewise-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "iottwinmaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "api-us-gov-west-1",
- }: endpoint{
- Hostname: "api.iottwinmaker.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "data-us-gov-west-1",
- }: endpoint{
- Hostname: "data.iottwinmaker.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-api-us-gov-west-1",
- }: endpoint{
- Hostname: "api.iottwinmaker-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-data-us-gov-west-1",
- }: endpoint{
- Hostname: "data.iottwinmaker-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "iottwinmaker-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "kafka": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "kafka.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kafka.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "kafka.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "kafka.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kafka.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "kafka.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "kendra": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "kendra-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kendra-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "kendra-ranking": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "kendra-ranking.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "kendra-ranking.us-gov-west-1.api.aws",
- },
- },
- },
- "kinesis": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "kinesis.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "kinesis.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "kinesis.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kinesis.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "kinesis.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kinesis.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "kinesisanalytics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "kinesisvideo": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kinesisvideo-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kinesisvideo-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "kms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ProdFips",
- }: endpoint{
- Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "kms-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "kms-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "lakeformation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lakeformation.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lakeformation-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "lakeformation-fips.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lakeformation.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lakeformation-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "lakeformation-fips.us-gov-west-1.api.aws",
- },
- },
- },
- "lambda": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "lambda-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "lambda-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lambda-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "lambda.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "lambda-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "license-manager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "license-manager-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "license-manager-linux-subscriptions": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "license-manager-user-subscriptions": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "logs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "logs.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "logs.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "logs.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "logs.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "m2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{},
- },
- },
- "managedblockchain": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "mediaconvert": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mediaconvert.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "meetings-chime": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "meetings-chime-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "meetings-chime-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "metering.marketplace": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "aws-marketplace",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "metrics.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "mgn": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "mgn-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "mgn-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mgn-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mgn-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "models-v2-lex": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "models.lex": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "models-fips.lex.{region}.{dnsSuffix}",
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "models-fips.lex.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "monitoring": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "monitoring.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "monitoring.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "monitoring.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "monitoring.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "monitoring.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "mq": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "mq-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "mq-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mq-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "mq-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "neptune": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "rds.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "rds.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "network-firewall": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "network-firewall-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "networkmanager": service{
- PartitionEndpoint: "aws-us-gov-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-us-gov-global",
- }: endpoint{
- Hostname: "networkmanager.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "aws-us-gov-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "networkmanager.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-aws-us-gov-global",
- }: endpoint{
- Hostname: "networkmanager.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "oidc": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "oidc.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "oidc.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "organizations": service{
- PartitionEndpoint: "aws-us-gov-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-us-gov-global",
- }: endpoint{
- Hostname: "organizations.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "aws-us-gov-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "organizations.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-aws-us-gov-global",
- }: endpoint{
- Hostname: "organizations.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "outposts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "outposts.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "outposts.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "outposts.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "outposts.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "participant.connect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "participant.connect.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "participant.connect.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "pi": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "pi-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "pi-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.us-gov-east-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pi-fips.us-gov-east-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "pi-fips.us-gov-east-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "pi.us-gov-west-1.api.aws",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pi-fips.us-gov-west-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "pi-fips.us-gov-west-1.api.aws",
- Protocols: []string{"https"},
- },
- },
- },
- "pinpoint": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "mobiletargeting",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "pinpoint.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "pinpoint-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "polly": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "polly-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "polly-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "portal.sso": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "portal.sso.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "portal.sso.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "qbusiness": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- DNSSuffix: "api.aws",
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "api.aws",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "qbusiness.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "qbusiness.us-gov-west-1.api.aws",
- },
- },
- },
- "quicksight": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "api",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "ram": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "ram.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "ram.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "ram.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ram.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "ram.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "rbin": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "rbin-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "rbin-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "rds": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "rds.us-gov-east-1",
- }: endpoint{
- Hostname: "rds.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-gov-west-1",
- }: endpoint{
- Hostname: "rds.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "rds.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "rds.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "redshift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "redshift.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "redshift.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "rekognition": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "rekognition-fips.us-gov-west-1",
- }: endpoint{
- Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-gov-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rekognition.us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "rekognition-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "resiliencehub": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resiliencehub-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resiliencehub-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "resource-groups": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resource-groups.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "resource-groups.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "resource-groups.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resource-groups.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "resource-groups.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "robomaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "rolesanywhere": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "route53": service{
- PartitionEndpoint: "aws-us-gov-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-us-gov-global",
- }: endpoint{
- Hostname: "route53.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "aws-us-gov-global",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "route53.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-aws-us-gov-global",
- }: endpoint{
- Hostname: "route53.us-gov.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "route53resolver": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "route53resolver.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "route53resolver.us-gov-east-1.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "route53resolver.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "route53resolver.us-gov-west-1.amazonaws.com",
-
- Deprecated: boxedTrue,
- },
- },
- },
- "runtime-v2-lex": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "runtime.lex": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.lex.{region}.{dnsSuffix}",
- CredentialScope: credentialScope{
- Service: "lex",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "runtime-fips.lex.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "runtime.sagemaker": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime.sagemaker.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "runtime.sagemaker.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "s3": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SignatureVersions: []string{"s3", "s3v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- defaultKey{
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- SignatureVersions: []string{"s3", "s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "s3-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "s3.us-gov-east-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.us-gov-east-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-gov-east-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "s3.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3.dualstack.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-gov-west-1.amazonaws.com",
- Protocols: []string{"http", "https"},
- },
- },
- },
- "s3-control": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- defaultKey{
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "{service}.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- defaultKey{
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "{service}-fips.dualstack.{region}.{dnsSuffix}",
- DNSSuffix: "amazonaws.com",
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "s3-control.us-gov-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-gov-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-gov-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-gov-east-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "s3-control.us-gov-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-gov-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-gov-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-gov-west-1.amazonaws.com",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "s3-outposts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- },
- },
- "secretsmanager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- },
- },
- "securityhub": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securityhub-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securityhub-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "securitylake": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securitylake.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "securitylake.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "securitylake.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "securitylake.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "serverlessrepo": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "serverlessrepo.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "serverlessrepo.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "servicecatalog": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "servicecatalog-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "servicecatalog-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "servicecatalog-appregistry": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicecatalog-appregistry.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "servicediscovery": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "servicediscovery",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "servicediscovery",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "servicediscovery-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "servicediscovery-fips.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "servicediscovery-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "servicequotas": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicequotas.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "servicequotas.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "servicequotas.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicequotas.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "servicequotas.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "signer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "signer-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "signer-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-verification-us-gov-east-1",
- }: endpoint{
- Hostname: "verification.signer-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "fips-verification-us-gov-west-1",
- }: endpoint{
- Hostname: "verification.signer-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "signer-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "signer-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "verification-us-gov-east-1",
- }: endpoint{
- Hostname: "verification.signer.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "verification-us-gov-west-1",
- }: endpoint{
- Hostname: "verification.signer.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "simspaceweaver": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "simspaceweaver.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "simspaceweaver.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "sms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "sms-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "sms-voice": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-voice-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sms-voice-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "snowball": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "snowball-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "snowball-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "snowball-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "sns": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "sns.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "sns.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sns.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sns.us-gov-west-1.amazonaws.com",
- Protocols: []string{"https"},
- },
- },
- },
- "sqs": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sqs.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "sqs.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "sqs.us-gov-west-1.amazonaws.com",
- SSLCommonName: "{region}.queue.{dnsSuffix}",
- Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "ssm": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "ssm.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "ssm.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "ssm.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "sso": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "sso.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sso.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "sso.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "sso.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sso.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "sso.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "states": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "states-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "states.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "states-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "states.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "storagegateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "streams.dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "dynamodb",
- },
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "streams.dynamodb.{region}.{dnsSuffix}",
- CredentialScope: credentialScope{
- Service: "dynamodb",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "streams.dynamodb.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "streams.dynamodb.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "sts": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sts.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sts.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "sts.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "sts.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "sts.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "support": service{
- PartitionEndpoint: "aws-us-gov-global",
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-us-gov-global",
- }: endpoint{
- Hostname: "support.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "support.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "support.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "swf": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "swf.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "swf.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1-fips",
- }: endpoint{
- Hostname: "swf.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "swf.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "swf.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "swf.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "synthetics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "synthetics-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "synthetics-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "tagging": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "textract": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "textract-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "textract-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "textract-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "textract-fips.us-gov-east-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "textract.us-gov-west-1.api.aws",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "textract-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "textract-fips.us-gov-west-1.api.aws",
- },
- },
- },
- "transcribe": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fips.transcribe.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "transcribestreaming": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "transfer": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "transfer-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "transfer-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transfer-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "transfer-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "translate": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1-fips",
- }: endpoint{
- Hostname: "translate-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "verifiedpermissions": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "verifiedpermissions-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "waf-regional": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "waf-regional.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "waf-regional.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "wafv2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{
- Hostname: "wafv2.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{
- Hostname: "wafv2.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "wafv2-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- },
- },
- },
- "wellarchitected": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- },
- },
- "workspaces": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "workspaces-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "workspaces-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- "xray": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-gov-east-1",
- }: endpoint{
- Hostname: "xray-fips.us-gov-east-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-gov-west-1",
- }: endpoint{
- Hostname: "xray-fips.us-gov-west-1.amazonaws.com",
- CredentialScope: credentialScope{
- Region: "us-gov-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-gov-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "xray-fips.us-gov-east-1.amazonaws.com",
- },
- endpointKey{
- Region: "us-gov-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-gov-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "xray-fips.us-gov-west-1.amazonaws.com",
- },
- },
- },
- },
-}
-
-// AwsIsoPartition returns the Resolver for AWS ISO (US).
-func AwsIsoPartition() Partition {
- return awsisoPartition.Partition()
-}
-
-var awsisoPartition = partition{
- ID: "aws-iso",
- Name: "AWS ISO (US)",
- DNSSuffix: "c2s.ic.gov",
- RegionRegex: regionRegex{
- Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^us\\-iso\\-\\w+\\-\\d+$")
- return reg
- }(),
- },
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "c2s.ic.gov",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- },
- Regions: regions{
- "us-iso-east-1": region{
- Description: "US ISO East",
- },
- "us-iso-west-1": region{
- Description: "US ISO WEST",
- },
- },
- Services: services{
- "api.ecr": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Hostname: "api.ecr.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{
- Hostname: "api.ecr.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- },
- },
- },
- "api.pricing": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "pricing",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "api.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "apigateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "appconfig": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "appconfigdata": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "application-autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "arc-zonal-shift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "athena": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "autoscaling": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "cloudcontrolapi": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "cloudformation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "cloudtrail": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "codedeploy": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "comprehend": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "config": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "datapipeline": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "datasync": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-iso-east-1",
- }: endpoint{
- Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-iso-west-1",
- }: endpoint{
- Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.us-iso-east-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "datasync-fips.us-iso-west-1.c2s.ic.gov",
- },
- },
- },
- "directconnect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "dlm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "dms": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "dms",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dms",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dms-fips",
- }: endpoint{
- Hostname: "dms.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.us-iso-east-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-east-1-fips",
- }: endpoint{
- Hostname: "dms.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.us-iso-west-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-west-1-fips",
- }: endpoint{
- Hostname: "dms.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "ds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "dynamodb": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "ebs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "ec2": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "ecs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "eks": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "elasticache": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "elasticfilesystem": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-iso-east-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-iso-west-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-iso-east-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-iso-west-1.c2s.ic.gov",
- },
- },
- },
- "elasticloadbalancing": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "elasticmapreduce": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-iso-east-1",
- }: endpoint{
- Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-iso-west-1",
- }: endpoint{
- Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.us-iso-east-1.c2s.ic.gov",
- Protocols: []string{"https"},
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.us-iso-west-1.c2s.ic.gov",
- },
- },
- },
- "es": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "events": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "firehose": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "fsx": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-prod-us-iso-east-1",
- }: endpoint{
- Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-iso-east-1",
- }: endpoint{
- Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-iso-east-1",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "prod-us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "fsx-fips.us-iso-east-1.c2s.ic.gov",
- },
- },
- },
- "glacier": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "glue": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "guardduty": service{
- IsRegionalized: boxedTrue,
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "health": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "iam": service{
- PartitionEndpoint: "aws-iso-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-iso-global",
- }: endpoint{
- Hostname: "iam.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- },
- },
- "kinesis": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "kms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ProdFips",
- }: endpoint{
- Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-east-1-fips",
- }: endpoint{
- Hostname: "kms-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-west-1-fips",
- }: endpoint{
- Hostname: "kms-fips.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "lambda": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "license-manager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "logs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "medialive": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "mediapackage": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "metrics.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "monitoring": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "outposts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "ram": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "rbin": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-iso-east-1",
- }: endpoint{
- Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-iso-west-1",
- }: endpoint{
- Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-iso-east-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-iso-west-1.c2s.ic.gov",
- },
- },
- },
- "rds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "rds.us-iso-east-1",
- }: endpoint{
- Hostname: "rds.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "rds.us-iso-west-1",
- }: endpoint{
- Hostname: "rds.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds.us-iso-east-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-east-1-fips",
- }: endpoint{
- Hostname: "rds.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds.us-iso-west-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-west-1-fips",
- }: endpoint{
- Hostname: "rds.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "redshift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Hostname: "redshift.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{
- Hostname: "redshift.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- },
- },
- },
- "resource-groups": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "route53": service{
- PartitionEndpoint: "aws-iso-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-iso-global",
- }: endpoint{
- Hostname: "route53.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- },
- },
- "route53resolver": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "runtime.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "s3": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-iso-east-1",
- }: endpoint{
- Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "fips-us-iso-west-1",
- }: endpoint{
- Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov",
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.us-iso-east-1.c2s.ic.gov",
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov",
- },
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov",
- },
- },
- },
- "s3-control": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Hostname: "s3-control.us-iso-east-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- endpointKey{
- Region: "us-iso-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-iso-east-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-iso-east-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- endpointKey{
- Region: "us-iso-east-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-iso-east-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{
- Hostname: "s3-control.us-iso-west-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- },
- endpointKey{
- Region: "us-iso-west-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-iso-west-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- },
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- },
- endpointKey{
- Region: "us-iso-west-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-iso-west-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- },
- endpointKey{
- Region: "us-iso-west-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-iso-west-1.c2s.ic.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-iso-west-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "s3-outposts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-iso-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- },
- },
- "secretsmanager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "snowball": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "sns": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "sqs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{
- Protocols: []string{"http", "https"},
- },
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "ssm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "states": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "streams.dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "dynamodb",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "sts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "support": service{
- PartitionEndpoint: "aws-iso-global",
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-iso-global",
- }: endpoint{
- Hostname: "support.us-iso-east-1.c2s.ic.gov",
- CredentialScope: credentialScope{
- Region: "us-iso-east-1",
- },
- },
- },
- },
- "swf": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "synthetics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "tagging": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- "textract": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "transcribe": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "transcribestreaming": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "translate": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- },
- },
- "workspaces": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-iso-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-iso-west-1",
- }: endpoint{},
- },
- },
- },
-}
-
-// AwsIsoBPartition returns the Resolver for AWS ISOB (US).
-func AwsIsoBPartition() Partition {
- return awsisobPartition.Partition()
-}
-
-var awsisobPartition = partition{
- ID: "aws-iso-b",
- Name: "AWS ISOB (US)",
- DNSSuffix: "sc2s.sgov.gov",
- RegionRegex: regionRegex{
- Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^us\\-isob\\-\\w+\\-\\d+$")
- return reg
- }(),
- },
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "sc2s.sgov.gov",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- },
- Regions: regions{
- "us-isob-east-1": region{
- Description: "US ISOB East (Ohio)",
- },
- },
- Services: services{
- "api.ecr": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{
- Hostname: "api.ecr.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- },
- },
- "api.pricing": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "pricing",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "api.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "apigateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "appconfig": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "appconfigdata": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "application-autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "arc-zonal-shift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "autoscaling": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "cloudcontrolapi": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "cloudformation": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "cloudtrail": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "codedeploy": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "config": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "directconnect": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "dlm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "dms": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{},
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.{region}.{dnsSuffix}",
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "dms",
- }: endpoint{
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dms",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "dms-fips",
- }: endpoint{
- Hostname: "dms.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "dms.us-isob-east-1.sc2s.sgov.gov",
- },
- endpointKey{
- Region: "us-isob-east-1-fips",
- }: endpoint{
- Hostname: "dms.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "ds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "ebs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "ec2": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "ecs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "eks": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "elasticache": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "elasticfilesystem": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-isob-east-1",
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticfilesystem-fips.us-isob-east-1.sc2s.sgov.gov",
- },
- },
- },
- "elasticloadbalancing": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{
- Protocols: []string{"https"},
- },
- },
- },
- "elasticmapreduce": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-isob-east-1",
- }: endpoint{
- Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "elasticmapreduce.us-isob-east-1.sc2s.sgov.gov",
- },
- },
- },
- "es": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "events": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "firehose": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "glacier": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "health": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "iam": service{
- PartitionEndpoint: "aws-iso-b-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-iso-b-global",
- }: endpoint{
- Hostname: "iam.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- },
- },
- "kinesis": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "kms": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "ProdFips",
- }: endpoint{
- Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov",
- },
- endpointKey{
- Region: "us-isob-east-1-fips",
- }: endpoint{
- Hostname: "kms-fips.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "lambda": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "license-manager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "logs": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "medialive": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "mediapackage": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "metering.marketplace": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- CredentialScope: credentialScope{
- Service: "aws-marketplace",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "metrics.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "monitoring": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "outposts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "ram": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "rbin": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-isob-east-1",
- }: endpoint{
- Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rbin-fips.us-isob-east-1.sc2s.sgov.gov",
- },
- },
- },
- "rds": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "rds.us-isob-east-1",
- }: endpoint{
- Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
- },
- endpointKey{
- Region: "us-isob-east-1-fips",
- }: endpoint{
- Hostname: "rds.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "redshift": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{
- Hostname: "redshift.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- },
- },
- "resource-groups": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "route53": service{
- PartitionEndpoint: "aws-iso-b-global",
- IsRegionalized: boxedFalse,
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-iso-b-global",
- }: endpoint{
- Hostname: "route53.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- },
- },
- "route53resolver": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "runtime.sagemaker": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "s3": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-isob-east-1",
- }: endpoint{
- Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov",
- },
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
- },
- },
- },
- "s3-control": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"https"},
- SignatureVersions: []string{"s3v4"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{
- Hostname: "s3-control.us-isob-east-1.sc2s.sgov.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- endpointKey{
- Region: "us-isob-east-1",
- Variant: dualStackVariant,
- }: endpoint{
- Hostname: "s3-control.dualstack.us-isob-east-1.sc2s.sgov.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant | dualStackVariant,
- }: endpoint{
- Hostname: "s3-control-fips.dualstack.us-isob-east-1.sc2s.sgov.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- endpointKey{
- Region: "us-isob-east-1-fips",
- }: endpoint{
- Hostname: "s3-control-fips.us-isob-east-1.sc2s.sgov.gov",
- SignatureVersions: []string{"s3v4"},
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "s3-outposts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips-us-isob-east-1",
- }: endpoint{
-
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{},
- },
- },
- "secretsmanager": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "snowball": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "sns": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "sqs": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- SSLCommonName: "{region}.queue.{dnsSuffix}",
- Protocols: []string{"http", "https"},
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "ssm": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "states": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "storagegateway": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- endpointKey{
- Region: "us-isob-east-1",
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
- },
- endpointKey{
- Region: "us-isob-east-1-fips",
- }: endpoint{
- Hostname: "storagegateway-fips.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- Deprecated: boxedTrue,
- },
- },
- },
- "streams.dynamodb": service{
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Protocols: []string{"http", "https"},
- CredentialScope: credentialScope{
- Service: "dynamodb",
- },
- },
- },
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "sts": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "support": service{
- PartitionEndpoint: "aws-iso-b-global",
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "aws-iso-b-global",
- }: endpoint{
- Hostname: "support.us-isob-east-1.sc2s.sgov.gov",
- CredentialScope: credentialScope{
- Region: "us-isob-east-1",
- },
- },
- },
- },
- "swf": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "synthetics": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "tagging": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- "workspaces": service{
- Endpoints: serviceEndpoints{
- endpointKey{
- Region: "us-isob-east-1",
- }: endpoint{},
- },
- },
- },
-}
-
-// AwsIsoEPartition returns the Resolver for AWS ISOE (Europe).
-func AwsIsoEPartition() Partition {
- return awsisoePartition.Partition()
-}
-
-var awsisoePartition = partition{
- ID: "aws-iso-e",
- Name: "AWS ISOE (Europe)",
- DNSSuffix: "cloud.adc-e.uk",
- RegionRegex: regionRegex{
- Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^eu\\-isoe\\-\\w+\\-\\d+$")
- return reg
- }(),
- },
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "cloud.adc-e.uk",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- },
- Regions: regions{
- "eu-isoe-west-1": region{
- Description: "EU ISOE West",
- },
- },
- Services: services{},
-}
-
-// AwsIsoFPartition returns the Resolver for AWS ISOF.
-func AwsIsoFPartition() Partition {
- return awsisofPartition.Partition()
-}
-
-var awsisofPartition = partition{
- ID: "aws-iso-f",
- Name: "AWS ISOF",
- DNSSuffix: "csp.hci.ic.gov",
- RegionRegex: regionRegex{
- Regexp: func() *regexp.Regexp {
- reg, _ := regexp.Compile("^us\\-isof\\-\\w+\\-\\d+$")
- return reg
- }(),
- },
- Defaults: endpointDefaults{
- defaultKey{}: endpoint{
- Hostname: "{service}.{region}.{dnsSuffix}",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- defaultKey{
- Variant: fipsVariant,
- }: endpoint{
- Hostname: "{service}-fips.{region}.{dnsSuffix}",
- DNSSuffix: "csp.hci.ic.gov",
- Protocols: []string{"https"},
- SignatureVersions: []string{"v4"},
- },
- },
- Regions: regions{},
- Services: services{},
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
deleted file mode 100644
index ca8fc828e..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/dep_service_ids.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package endpoints
-
-// Service identifiers
-//
-// Deprecated: Use client package's EndpointsID value instead of these
-// ServiceIDs. These IDs are not maintained, and are out of date.
-const (
- A4bServiceID = "a4b" // A4b.
- AcmServiceID = "acm" // Acm.
- AcmPcaServiceID = "acm-pca" // AcmPca.
- ApiMediatailorServiceID = "api.mediatailor" // ApiMediatailor.
- ApiPricingServiceID = "api.pricing" // ApiPricing.
- ApiSagemakerServiceID = "api.sagemaker" // ApiSagemaker.
- ApigatewayServiceID = "apigateway" // Apigateway.
- ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
- Appstream2ServiceID = "appstream2" // Appstream2.
- AppsyncServiceID = "appsync" // Appsync.
- AthenaServiceID = "athena" // Athena.
- AutoscalingServiceID = "autoscaling" // Autoscaling.
- AutoscalingPlansServiceID = "autoscaling-plans" // AutoscalingPlans.
- BatchServiceID = "batch" // Batch.
- BudgetsServiceID = "budgets" // Budgets.
- CeServiceID = "ce" // Ce.
- ChimeServiceID = "chime" // Chime.
- Cloud9ServiceID = "cloud9" // Cloud9.
- ClouddirectoryServiceID = "clouddirectory" // Clouddirectory.
- CloudformationServiceID = "cloudformation" // Cloudformation.
- CloudfrontServiceID = "cloudfront" // Cloudfront.
- CloudhsmServiceID = "cloudhsm" // Cloudhsm.
- Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
- CloudsearchServiceID = "cloudsearch" // Cloudsearch.
- CloudtrailServiceID = "cloudtrail" // Cloudtrail.
- CodebuildServiceID = "codebuild" // Codebuild.
- CodecommitServiceID = "codecommit" // Codecommit.
- CodedeployServiceID = "codedeploy" // Codedeploy.
- CodepipelineServiceID = "codepipeline" // Codepipeline.
- CodestarServiceID = "codestar" // Codestar.
- CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
- CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
- CognitoSyncServiceID = "cognito-sync" // CognitoSync.
- ComprehendServiceID = "comprehend" // Comprehend.
- ConfigServiceID = "config" // Config.
- CurServiceID = "cur" // Cur.
- DatapipelineServiceID = "datapipeline" // Datapipeline.
- DaxServiceID = "dax" // Dax.
- DevicefarmServiceID = "devicefarm" // Devicefarm.
- DirectconnectServiceID = "directconnect" // Directconnect.
- DiscoveryServiceID = "discovery" // Discovery.
- DmsServiceID = "dms" // Dms.
- DsServiceID = "ds" // Ds.
- DynamodbServiceID = "dynamodb" // Dynamodb.
- Ec2ServiceID = "ec2" // Ec2.
- Ec2metadataServiceID = "ec2metadata" // Ec2metadata.
- EcrServiceID = "ecr" // Ecr.
- EcsServiceID = "ecs" // Ecs.
- ElasticacheServiceID = "elasticache" // Elasticache.
- ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk.
- ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem.
- ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing.
- ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
- ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
- EmailServiceID = "email" // Email.
- EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
- EsServiceID = "es" // Es.
- EventsServiceID = "events" // Events.
- FirehoseServiceID = "firehose" // Firehose.
- FmsServiceID = "fms" // Fms.
- GameliftServiceID = "gamelift" // Gamelift.
- GlacierServiceID = "glacier" // Glacier.
- GlueServiceID = "glue" // Glue.
- GreengrassServiceID = "greengrass" // Greengrass.
- GuarddutyServiceID = "guardduty" // Guardduty.
- HealthServiceID = "health" // Health.
- IamServiceID = "iam" // Iam.
- ImportexportServiceID = "importexport" // Importexport.
- InspectorServiceID = "inspector" // Inspector.
- IotServiceID = "iot" // Iot.
- IotanalyticsServiceID = "iotanalytics" // Iotanalytics.
- KinesisServiceID = "kinesis" // Kinesis.
- KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics.
- KinesisvideoServiceID = "kinesisvideo" // Kinesisvideo.
- KmsServiceID = "kms" // Kms.
- LambdaServiceID = "lambda" // Lambda.
- LightsailServiceID = "lightsail" // Lightsail.
- LogsServiceID = "logs" // Logs.
- MachinelearningServiceID = "machinelearning" // Machinelearning.
- MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
- MediaconvertServiceID = "mediaconvert" // Mediaconvert.
- MedialiveServiceID = "medialive" // Medialive.
- MediapackageServiceID = "mediapackage" // Mediapackage.
- MediastoreServiceID = "mediastore" // Mediastore.
- MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
- MghServiceID = "mgh" // Mgh.
- MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
- ModelsLexServiceID = "models.lex" // ModelsLex.
- MonitoringServiceID = "monitoring" // Monitoring.
- MturkRequesterServiceID = "mturk-requester" // MturkRequester.
- NeptuneServiceID = "neptune" // Neptune.
- OpsworksServiceID = "opsworks" // Opsworks.
- OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
- OrganizationsServiceID = "organizations" // Organizations.
- PinpointServiceID = "pinpoint" // Pinpoint.
- PollyServiceID = "polly" // Polly.
- RdsServiceID = "rds" // Rds.
- RedshiftServiceID = "redshift" // Redshift.
- RekognitionServiceID = "rekognition" // Rekognition.
- ResourceGroupsServiceID = "resource-groups" // ResourceGroups.
- Route53ServiceID = "route53" // Route53.
- Route53domainsServiceID = "route53domains" // Route53domains.
- RuntimeLexServiceID = "runtime.lex" // RuntimeLex.
- RuntimeSagemakerServiceID = "runtime.sagemaker" // RuntimeSagemaker.
- S3ServiceID = "s3" // S3.
- S3ControlServiceID = "s3-control" // S3Control.
- SagemakerServiceID = "api.sagemaker" // Sagemaker.
- SdbServiceID = "sdb" // Sdb.
- SecretsmanagerServiceID = "secretsmanager" // Secretsmanager.
- ServerlessrepoServiceID = "serverlessrepo" // Serverlessrepo.
- ServicecatalogServiceID = "servicecatalog" // Servicecatalog.
- ServicediscoveryServiceID = "servicediscovery" // Servicediscovery.
- ShieldServiceID = "shield" // Shield.
- SmsServiceID = "sms" // Sms.
- SnowballServiceID = "snowball" // Snowball.
- SnsServiceID = "sns" // Sns.
- SqsServiceID = "sqs" // Sqs.
- SsmServiceID = "ssm" // Ssm.
- StatesServiceID = "states" // States.
- StoragegatewayServiceID = "storagegateway" // Storagegateway.
- StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb.
- StsServiceID = "sts" // Sts.
- SupportServiceID = "support" // Support.
- SwfServiceID = "swf" // Swf.
- TaggingServiceID = "tagging" // Tagging.
- TransferServiceID = "transfer" // Transfer.
- TranslateServiceID = "translate" // Translate.
- WafServiceID = "waf" // Waf.
- WafRegionalServiceID = "waf-regional" // WafRegional.
- WorkdocsServiceID = "workdocs" // Workdocs.
- WorkmailServiceID = "workmail" // Workmail.
- WorkspacesServiceID = "workspaces" // Workspaces.
- XrayServiceID = "xray" // Xray.
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
deleted file mode 100644
index 66dec6beb..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Package endpoints provides the types and functionality for defining regions
-// and endpoints, as well as querying those definitions.
-//
-// The SDK's Regions and Endpoints metadata is code generated into the endpoints
-// package, and is accessible via the DefaultResolver function. This function
-// returns a endpoint Resolver will search the metadata and build an associated
-// endpoint if one is found. The default resolver will search all partitions
-// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and
-// AWS GovCloud (US) (aws-us-gov).
-// .
-//
-// # Enumerating Regions and Endpoint Metadata
-//
-// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface
-// will allow you to get access to the list of underlying Partitions with the
-// Partitions method. This is helpful if you want to limit the SDK's endpoint
-// resolving to a single partition, or enumerate regions, services, and endpoints
-// in the partition.
-//
-// resolver := endpoints.DefaultResolver()
-// partitions := resolver.(endpoints.EnumPartitions).Partitions()
-//
-// for _, p := range partitions {
-// fmt.Println("Regions for", p.ID())
-// for id, _ := range p.Regions() {
-// fmt.Println("*", id)
-// }
-//
-// fmt.Println("Services for", p.ID())
-// for id, _ := range p.Services() {
-// fmt.Println("*", id)
-// }
-// }
-//
-// # Using Custom Endpoints
-//
-// The endpoints package also gives you the ability to use your own logic how
-// endpoints are resolved. This is a great way to define a custom endpoint
-// for select services, without passing that logic down through your code.
-//
-// If a type implements the Resolver interface it can be used to resolve
-// endpoints. To use this with the SDK's Session and Config set the value
-// of the type to the EndpointsResolver field of aws.Config when initializing
-// the session, or service client.
-//
-// In addition the ResolverFunc is a wrapper for a func matching the signature
-// of Resolver.EndpointFor, converting it to a type that satisfies the
-// Resolver interface.
-//
-// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
-// if service == endpoints.S3ServiceID {
-// return endpoints.ResolvedEndpoint{
-// URL: "s3.custom.endpoint.com",
-// SigningRegion: "custom-signing-region",
-// }, nil
-// }
-//
-// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
-// }
-//
-// sess := session.Must(session.NewSession(&aws.Config{
-// Region: aws.String("us-west-2"),
-// EndpointResolver: endpoints.ResolverFunc(myCustomResolver),
-// }))
-package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
deleted file mode 100644
index a686a48fa..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
+++ /dev/null
@@ -1,708 +0,0 @@
-package endpoints
-
-import (
- "fmt"
- "regexp"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// A Logger is a minimalistic interface for the SDK to log messages to.
-type Logger interface {
- Log(...interface{})
-}
-
-// DualStackEndpointState is a constant to describe the dual-stack endpoint resolution
-// behavior.
-type DualStackEndpointState uint
-
-const (
- // DualStackEndpointStateUnset is the default value behavior for dual-stack endpoint
- // resolution.
- DualStackEndpointStateUnset DualStackEndpointState = iota
-
- // DualStackEndpointStateEnabled enable dual-stack endpoint resolution for endpoints.
- DualStackEndpointStateEnabled
-
- // DualStackEndpointStateDisabled disables dual-stack endpoint resolution for endpoints.
- DualStackEndpointStateDisabled
-)
-
-// FIPSEndpointState is a constant to describe the FIPS endpoint resolution behavior.
-type FIPSEndpointState uint
-
-const (
- // FIPSEndpointStateUnset is the default value behavior for FIPS endpoint resolution.
- FIPSEndpointStateUnset FIPSEndpointState = iota
-
- // FIPSEndpointStateEnabled enables FIPS endpoint resolution for service endpoints.
- FIPSEndpointStateEnabled
-
- // FIPSEndpointStateDisabled disables FIPS endpoint resolution for endpoints.
- FIPSEndpointStateDisabled
-)
-
-// Options provide the configuration needed to direct how the
-// endpoints will be resolved.
-type Options struct {
- // DisableSSL forces the endpoint to be resolved as HTTP.
- // instead of HTTPS if the service supports it.
- DisableSSL bool
-
- // Sets the resolver to resolve the endpoint as a dualstack endpoint
- // for the service. If dualstack support for a service is not known and
- // StrictMatching is not enabled a dualstack endpoint for the service will
- // be returned. This endpoint may not be valid. If StrictMatching is
- // enabled only services that are known to support dualstack will return
- // dualstack endpoints.
- //
- // Deprecated: This option will continue to function for S3 and S3 Control for backwards compatibility.
- // UseDualStackEndpoint should be used to enable usage of a service's dual-stack endpoint for all service clients
- // moving forward. For S3 and S3 Control, when UseDualStackEndpoint is set to a non-zero value it takes higher
- // precedence then this option.
- UseDualStack bool
-
- // Sets the resolver to resolve a dual-stack endpoint for the service.
- UseDualStackEndpoint DualStackEndpointState
-
- // UseFIPSEndpoint specifies the resolver must resolve a FIPS endpoint.
- UseFIPSEndpoint FIPSEndpointState
-
- // Enables strict matching of services and regions resolved endpoints.
- // If the partition doesn't enumerate the exact service and region an
- // error will be returned. This option will prevent returning endpoints
- // that look valid, but may not resolve to any real endpoint.
- StrictMatching bool
-
- // Enables resolving a service endpoint based on the region provided if the
- // service does not exist. The service endpoint ID will be used as the service
- // domain name prefix. By default the endpoint resolver requires the service
- // to be known when resolving endpoints.
- //
- // If resolving an endpoint on the partition list the provided region will
- // be used to determine which partition's domain name pattern to the service
- // endpoint ID with. If both the service and region are unknown and resolving
- // the endpoint on partition list an UnknownEndpointError error will be returned.
- //
- // If resolving and endpoint on a partition specific resolver that partition's
- // domain name pattern will be used with the service endpoint ID. If both
- // region and service do not exist when resolving an endpoint on a specific
- // partition the partition's domain pattern will be used to combine the
- // endpoint and region together.
- //
- // This option is ignored if StrictMatching is enabled.
- ResolveUnknownService bool
-
- // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
- EC2MetadataEndpointMode EC2IMDSEndpointModeState
-
- // STS Regional Endpoint flag helps with resolving the STS endpoint
- STSRegionalEndpoint STSRegionalEndpoint
-
- // S3 Regional Endpoint flag helps with resolving the S3 endpoint
- S3UsEast1RegionalEndpoint S3UsEast1RegionalEndpoint
-
- // ResolvedRegion is the resolved region string. If provided (non-zero length) it takes priority
- // over the region name passed to the ResolveEndpoint call.
- ResolvedRegion string
-
- // Logger is the logger that will be used to log messages.
- Logger Logger
-
- // Determines whether logging of deprecated endpoints usage is enabled.
- LogDeprecated bool
-}
-
-func (o Options) getEndpointVariant(service string) (v endpointVariant) {
- const s3 = "s3"
- const s3Control = "s3-control"
-
- if (o.UseDualStackEndpoint == DualStackEndpointStateEnabled) ||
- ((service == s3 || service == s3Control) && (o.UseDualStackEndpoint == DualStackEndpointStateUnset && o.UseDualStack)) {
- v |= dualStackVariant
- }
- if o.UseFIPSEndpoint == FIPSEndpointStateEnabled {
- v |= fipsVariant
- }
- return v
-}
-
-// EC2IMDSEndpointModeState is an enum configuration variable describing the client endpoint mode.
-type EC2IMDSEndpointModeState uint
-
-// Enumeration values for EC2IMDSEndpointModeState
-const (
- EC2IMDSEndpointModeStateUnset EC2IMDSEndpointModeState = iota
- EC2IMDSEndpointModeStateIPv4
- EC2IMDSEndpointModeStateIPv6
-)
-
-// SetFromString sets the EC2IMDSEndpointModeState based on the provided string value. Unknown values will default to EC2IMDSEndpointModeStateUnset
-func (e *EC2IMDSEndpointModeState) SetFromString(v string) error {
- v = strings.TrimSpace(v)
-
- switch {
- case len(v) == 0:
- *e = EC2IMDSEndpointModeStateUnset
- case strings.EqualFold(v, "IPv6"):
- *e = EC2IMDSEndpointModeStateIPv6
- case strings.EqualFold(v, "IPv4"):
- *e = EC2IMDSEndpointModeStateIPv4
- default:
- return fmt.Errorf("unknown EC2 IMDS endpoint mode, must be either IPv6 or IPv4")
- }
- return nil
-}
-
-// STSRegionalEndpoint is an enum for the states of the STS Regional Endpoint
-// options.
-type STSRegionalEndpoint int
-
-func (e STSRegionalEndpoint) String() string {
- switch e {
- case LegacySTSEndpoint:
- return "legacy"
- case RegionalSTSEndpoint:
- return "regional"
- case UnsetSTSEndpoint:
- return ""
- default:
- return "unknown"
- }
-}
-
-const (
-
- // UnsetSTSEndpoint represents that STS Regional Endpoint flag is not specified.
- UnsetSTSEndpoint STSRegionalEndpoint = iota
-
- // LegacySTSEndpoint represents when STS Regional Endpoint flag is specified
- // to use legacy endpoints.
- LegacySTSEndpoint
-
- // RegionalSTSEndpoint represents when STS Regional Endpoint flag is specified
- // to use regional endpoints.
- RegionalSTSEndpoint
-)
-
-// GetSTSRegionalEndpoint function returns the STSRegionalEndpointFlag based
-// on the input string provided in env config or shared config by the user.
-//
-// `legacy`, `regional` are the only case-insensitive valid strings for
-// resolving the STS regional Endpoint flag.
-func GetSTSRegionalEndpoint(s string) (STSRegionalEndpoint, error) {
- switch {
- case strings.EqualFold(s, "legacy"):
- return LegacySTSEndpoint, nil
- case strings.EqualFold(s, "regional"):
- return RegionalSTSEndpoint, nil
- default:
- return UnsetSTSEndpoint, fmt.Errorf("unable to resolve the value of STSRegionalEndpoint for %v", s)
- }
-}
-
-// S3UsEast1RegionalEndpoint is an enum for the states of the S3 us-east-1
-// Regional Endpoint options.
-type S3UsEast1RegionalEndpoint int
-
-func (e S3UsEast1RegionalEndpoint) String() string {
- switch e {
- case LegacyS3UsEast1Endpoint:
- return "legacy"
- case RegionalS3UsEast1Endpoint:
- return "regional"
- case UnsetS3UsEast1Endpoint:
- return ""
- default:
- return "unknown"
- }
-}
-
-const (
-
- // UnsetS3UsEast1Endpoint represents that S3 Regional Endpoint flag is not
- // specified.
- UnsetS3UsEast1Endpoint S3UsEast1RegionalEndpoint = iota
-
- // LegacyS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
- // specified to use legacy endpoints.
- LegacyS3UsEast1Endpoint
-
- // RegionalS3UsEast1Endpoint represents when S3 Regional Endpoint flag is
- // specified to use regional endpoints.
- RegionalS3UsEast1Endpoint
-)
-
-// GetS3UsEast1RegionalEndpoint function returns the S3UsEast1RegionalEndpointFlag based
-// on the input string provided in env config or shared config by the user.
-//
-// `legacy`, `regional` are the only case-insensitive valid strings for
-// resolving the S3 regional Endpoint flag.
-func GetS3UsEast1RegionalEndpoint(s string) (S3UsEast1RegionalEndpoint, error) {
- switch {
- case strings.EqualFold(s, "legacy"):
- return LegacyS3UsEast1Endpoint, nil
- case strings.EqualFold(s, "regional"):
- return RegionalS3UsEast1Endpoint, nil
- default:
- return UnsetS3UsEast1Endpoint,
- fmt.Errorf("unable to resolve the value of S3UsEast1RegionalEndpoint for %v", s)
- }
-}
-
-// Set combines all of the option functions together.
-func (o *Options) Set(optFns ...func(*Options)) {
- for _, fn := range optFns {
- fn(o)
- }
-}
-
-// DisableSSLOption sets the DisableSSL options. Can be used as a functional
-// option when resolving endpoints.
-func DisableSSLOption(o *Options) {
- o.DisableSSL = true
-}
-
-// UseDualStackOption sets the UseDualStack option. Can be used as a functional
-// option when resolving endpoints.
-//
-// Deprecated: UseDualStackEndpointOption should be used to enable usage of a service's dual-stack endpoint.
-// When DualStackEndpointState is set to a non-zero value it takes higher precedence then this option.
-func UseDualStackOption(o *Options) {
- o.UseDualStack = true
-}
-
-// UseDualStackEndpointOption sets the UseDualStackEndpoint option to enabled. Can be used as a functional
-// option when resolving endpoints.
-func UseDualStackEndpointOption(o *Options) {
- o.UseDualStackEndpoint = DualStackEndpointStateEnabled
-}
-
-// UseFIPSEndpointOption sets the UseFIPSEndpoint option to enabled. Can be used as a functional
-// option when resolving endpoints.
-func UseFIPSEndpointOption(o *Options) {
- o.UseFIPSEndpoint = FIPSEndpointStateEnabled
-}
-
-// StrictMatchingOption sets the StrictMatching option. Can be used as a functional
-// option when resolving endpoints.
-func StrictMatchingOption(o *Options) {
- o.StrictMatching = true
-}
-
-// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used
-// as a functional option when resolving endpoints.
-func ResolveUnknownServiceOption(o *Options) {
- o.ResolveUnknownService = true
-}
-
-// STSRegionalEndpointOption enables the STS endpoint resolver behavior to resolve
-// STS endpoint to their regional endpoint, instead of the global endpoint.
-func STSRegionalEndpointOption(o *Options) {
- o.STSRegionalEndpoint = RegionalSTSEndpoint
-}
-
-// A Resolver provides the interface for functionality to resolve endpoints.
-// The build in Partition and DefaultResolver return value satisfy this interface.
-type Resolver interface {
- EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
-}
-
-// ResolverFunc is a helper utility that wraps a function so it satisfies the
-// Resolver interface. This is useful when you want to add additional endpoint
-// resolving logic, or stub out specific endpoints with custom values.
-type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error)
-
-// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface.
-func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
- return fn(service, region, opts...)
-}
-
-var schemeRE = regexp.MustCompile("^([^:]+)://")
-
-// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no
-// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS.
-//
-// If disableSSL is set, it will only set the URL's scheme if the URL does not
-// contain a scheme.
-func AddScheme(endpoint string, disableSSL bool) string {
- if !schemeRE.MatchString(endpoint) {
- scheme := "https"
- if disableSSL {
- scheme = "http"
- }
- endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
- }
-
- return endpoint
-}
-
-// EnumPartitions a provides a way to retrieve the underlying partitions that
-// make up the SDK's default Resolver, or any resolver decoded from a model
-// file.
-//
-// Use this interface with DefaultResolver and DecodeModels to get the list of
-// Partitions.
-type EnumPartitions interface {
- Partitions() []Partition
-}
-
-// RegionsForService returns a map of regions for the partition and service.
-// If either the partition or service does not exist false will be returned
-// as the second parameter.
-//
-// This example shows how to get the regions for DynamoDB in the AWS partition.
-//
-// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
-//
-// This is equivalent to using the partition directly.
-//
-// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
-func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
- for _, p := range ps {
- if p.ID() != partitionID {
- continue
- }
- if _, ok := p.p.Services[serviceID]; !(ok || serviceID == Ec2metadataServiceID) {
- break
- }
-
- s := Service{
- id: serviceID,
- p: p.p,
- }
- return s.Regions(), true
- }
-
- return map[string]Region{}, false
-}
-
-// PartitionForRegion returns the first partition which includes the region
-// passed in. This includes both known regions and regions which match
-// a pattern supported by the partition which may include regions that are
-// not explicitly known by the partition. Use the Regions method of the
-// returned Partition if explicit support is needed.
-func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
- for _, p := range ps {
- if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
- return p, true
- }
- }
-
- return Partition{}, false
-}
-
-// A Partition provides the ability to enumerate the partition's regions
-// and services.
-type Partition struct {
- id, dnsSuffix string
- p *partition
-}
-
-// DNSSuffix returns the base domain name of the partition.
-func (p Partition) DNSSuffix() string { return p.dnsSuffix }
-
-// ID returns the identifier of the partition.
-func (p Partition) ID() string { return p.id }
-
-// EndpointFor attempts to resolve the endpoint based on service and region.
-// See Options for information on configuring how the endpoint is resolved.
-//
-// If the service cannot be found in the metadata the UnknownServiceError
-// error will be returned. This validation will occur regardless if
-// StrictMatching is enabled. To enable resolving unknown services set the
-// "ResolveUnknownService" option to true. When StrictMatching is disabled
-// this option allows the partition resolver to resolve a endpoint based on
-// the service endpoint ID provided.
-//
-// When resolving endpoints you can choose to enable StrictMatching. This will
-// require the provided service and region to be known by the partition.
-// If the endpoint cannot be strictly resolved an error will be returned. This
-// mode is useful to ensure the endpoint resolved is valid. Without
-// StrictMatching enabled the endpoint returned may look valid but may not work.
-// StrictMatching requires the SDK to be updated if you want to take advantage
-// of new regions and services expansions.
-//
-// Errors that can be returned.
-// - UnknownServiceError
-// - UnknownEndpointError
-func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
- return p.p.EndpointFor(service, region, opts...)
-}
-
-// Regions returns a map of Regions indexed by their ID. This is useful for
-// enumerating over the regions in a partition.
-func (p Partition) Regions() map[string]Region {
- rs := make(map[string]Region, len(p.p.Regions))
- for id, r := range p.p.Regions {
- rs[id] = Region{
- id: id,
- desc: r.Description,
- p: p.p,
- }
- }
-
- return rs
-}
-
-// Services returns a map of Service indexed by their ID. This is useful for
-// enumerating over the services in a partition.
-func (p Partition) Services() map[string]Service {
- ss := make(map[string]Service, len(p.p.Services))
-
- for id := range p.p.Services {
- ss[id] = Service{
- id: id,
- p: p.p,
- }
- }
-
- // Since we have removed the customization that injected this into the model
- // we still need to pretend that this is a modeled service.
- if _, ok := ss[Ec2metadataServiceID]; !ok {
- ss[Ec2metadataServiceID] = Service{
- id: Ec2metadataServiceID,
- p: p.p,
- }
- }
-
- return ss
-}
-
-// A Region provides information about a region, and ability to resolve an
-// endpoint from the context of a region, given a service.
-type Region struct {
- id, desc string
- p *partition
-}
-
-// ID returns the region's identifier.
-func (r Region) ID() string { return r.id }
-
-// Description returns the region's description. The region description
-// is free text, it can be empty, and it may change between SDK releases.
-func (r Region) Description() string { return r.desc }
-
-// ResolveEndpoint resolves an endpoint from the context of the region given
-// a service. See Partition.EndpointFor for usage and errors that can be returned.
-func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
- return r.p.EndpointFor(service, r.id, opts...)
-}
-
-// Services returns a list of all services that are known to be in this region.
-func (r Region) Services() map[string]Service {
- ss := map[string]Service{}
- for id, s := range r.p.Services {
- if _, ok := s.Endpoints[endpointKey{Region: r.id}]; ok {
- ss[id] = Service{
- id: id,
- p: r.p,
- }
- }
- }
-
- return ss
-}
-
-// A Service provides information about a service, and ability to resolve an
-// endpoint from the context of a service, given a region.
-type Service struct {
- id string
- p *partition
-}
-
-// ID returns the identifier for the service.
-func (s Service) ID() string { return s.id }
-
-// ResolveEndpoint resolves an endpoint from the context of a service given
-// a region. See Partition.EndpointFor for usage and errors that can be returned.
-func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
- return s.p.EndpointFor(s.id, region, opts...)
-}
-
-// Regions returns a map of Regions that the service is present in.
-//
-// A region is the AWS region the service exists in. Whereas a Endpoint is
-// an URL that can be resolved to a instance of a service.
-func (s Service) Regions() map[string]Region {
- rs := map[string]Region{}
-
- service, ok := s.p.Services[s.id]
-
- // Since ec2metadata customization has been removed we need to check
- // if it was defined in non-standard endpoints.json file. If it's not
- // then we can return the empty map as there is no regional-endpoints for IMDS.
- // Otherwise, we iterate need to iterate the non-standard model.
- if s.id == Ec2metadataServiceID && !ok {
- return rs
- }
-
- for id := range service.Endpoints {
- if id.Variant != 0 {
- continue
- }
- if r, ok := s.p.Regions[id.Region]; ok {
- rs[id.Region] = Region{
- id: id.Region,
- desc: r.Description,
- p: s.p,
- }
- }
- }
-
- return rs
-}
-
-// Endpoints returns a map of Endpoints indexed by their ID for all known
-// endpoints for a service.
-//
-// A region is the AWS region the service exists in. Whereas a Endpoint is
-// an URL that can be resolved to a instance of a service.
-func (s Service) Endpoints() map[string]Endpoint {
- es := make(map[string]Endpoint, len(s.p.Services[s.id].Endpoints))
- for id := range s.p.Services[s.id].Endpoints {
- if id.Variant != 0 {
- continue
- }
- es[id.Region] = Endpoint{
- id: id.Region,
- serviceID: s.id,
- p: s.p,
- }
- }
-
- return es
-}
-
-// A Endpoint provides information about endpoints, and provides the ability
-// to resolve that endpoint for the service, and the region the endpoint
-// represents.
-type Endpoint struct {
- id string
- serviceID string
- p *partition
-}
-
-// ID returns the identifier for an endpoint.
-func (e Endpoint) ID() string { return e.id }
-
-// ServiceID returns the identifier the endpoint belongs to.
-func (e Endpoint) ServiceID() string { return e.serviceID }
-
-// ResolveEndpoint resolves an endpoint from the context of a service and
-// region the endpoint represents. See Partition.EndpointFor for usage and
-// errors that can be returned.
-func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
- return e.p.EndpointFor(e.serviceID, e.id, opts...)
-}
-
-// A ResolvedEndpoint is an endpoint that has been resolved based on a partition
-// service, and region.
-type ResolvedEndpoint struct {
- // The endpoint URL
- URL string
-
- // The endpoint partition
- PartitionID string
-
- // The region that should be used for signing requests.
- SigningRegion string
-
- // The service name that should be used for signing requests.
- SigningName string
-
- // States that the signing name for this endpoint was derived from metadata
- // passed in, but was not explicitly modeled.
- SigningNameDerived bool
-
- // The signing method that should be used for signing requests.
- SigningMethod string
-}
-
-// So that the Error interface type can be included as an anonymous field
-// in the requestError struct and not conflict with the error.Error() method.
-type awsError awserr.Error
-
-// A EndpointNotFoundError is returned when in StrictMatching mode, and the
-// endpoint for the service and region cannot be found in any of the partitions.
-type EndpointNotFoundError struct {
- awsError
- Partition string
- Service string
- Region string
-}
-
-// A UnknownServiceError is returned when the service does not resolve to an
-// endpoint. Includes a list of all known services for the partition. Returned
-// when a partition does not support the service.
-type UnknownServiceError struct {
- awsError
- Partition string
- Service string
- Known []string
-}
-
-// NewUnknownServiceError builds and returns UnknownServiceError.
-func NewUnknownServiceError(p, s string, known []string) UnknownServiceError {
- return UnknownServiceError{
- awsError: awserr.New("UnknownServiceError",
- "could not resolve endpoint for unknown service", nil),
- Partition: p,
- Service: s,
- Known: known,
- }
-}
-
-// String returns the string representation of the error.
-func (e UnknownServiceError) Error() string {
- extra := fmt.Sprintf("partition: %q, service: %q",
- e.Partition, e.Service)
- if len(e.Known) > 0 {
- extra += fmt.Sprintf(", known: %v", e.Known)
- }
- return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
-}
-
-// String returns the string representation of the error.
-func (e UnknownServiceError) String() string {
- return e.Error()
-}
-
-// A UnknownEndpointError is returned when in StrictMatching mode and the
-// service is valid, but the region does not resolve to an endpoint. Includes
-// a list of all known endpoints for the service.
-type UnknownEndpointError struct {
- awsError
- Partition string
- Service string
- Region string
- Known []string
-}
-
-// NewUnknownEndpointError builds and returns UnknownEndpointError.
-func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError {
- return UnknownEndpointError{
- awsError: awserr.New("UnknownEndpointError",
- "could not resolve endpoint", nil),
- Partition: p,
- Service: s,
- Region: r,
- Known: known,
- }
-}
-
-// String returns the string representation of the error.
-func (e UnknownEndpointError) Error() string {
- extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
- e.Partition, e.Service, e.Region)
- if len(e.Known) > 0 {
- extra += fmt.Sprintf(", known: %v", e.Known)
- }
- return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
-}
-
-// String returns the string representation of the error.
-func (e UnknownEndpointError) String() string {
- return e.Error()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
deleted file mode 100644
index df75e899a..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/legacy_regions.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package endpoints
-
-var legacyGlobalRegions = map[string]map[string]struct{}{
- "sts": {
- "ap-northeast-1": {},
- "ap-south-1": {},
- "ap-southeast-1": {},
- "ap-southeast-2": {},
- "ca-central-1": {},
- "eu-central-1": {},
- "eu-north-1": {},
- "eu-west-1": {},
- "eu-west-2": {},
- "eu-west-3": {},
- "sa-east-1": {},
- "us-east-1": {},
- "us-east-2": {},
- "us-west-1": {},
- "us-west-2": {},
- },
- "s3": {
- "us-east-1": {},
- },
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
deleted file mode 100644
index 89f6627dc..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go
+++ /dev/null
@@ -1,594 +0,0 @@
-package endpoints
-
-import (
- "encoding/json"
- "fmt"
- "regexp"
- "strconv"
- "strings"
-)
-
-const (
- ec2MetadataEndpointIPv6 = "http://[fd00:ec2::254]/latest"
- ec2MetadataEndpointIPv4 = "http://169.254.169.254/latest"
-)
-
-const dnsSuffixTemplateKey = "{dnsSuffix}"
-
-// defaultKey is a compound map key of a variant and other values.
-type defaultKey struct {
- Variant endpointVariant
- ServiceVariant serviceVariant
-}
-
-// endpointKey is a compound map key of a region and associated variant value.
-type endpointKey struct {
- Region string
- Variant endpointVariant
-}
-
-// endpointVariant is a bit field to describe the endpoints attributes.
-type endpointVariant uint64
-
-// serviceVariant is a bit field to describe the service endpoint attributes.
-type serviceVariant uint64
-
-const (
- // fipsVariant indicates that the endpoint is FIPS capable.
- fipsVariant endpointVariant = 1 << (64 - 1 - iota)
-
- // dualStackVariant indicates that the endpoint is DualStack capable.
- dualStackVariant
-)
-
-var regionValidationRegex = regexp.MustCompile(`^[[:alnum:]]([[:alnum:]\-]*[[:alnum:]])?$`)
-
-type partitions []partition
-
-func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
- var opt Options
- opt.Set(opts...)
-
- if len(opt.ResolvedRegion) > 0 {
- region = opt.ResolvedRegion
- }
-
- for i := 0; i < len(ps); i++ {
- if !ps[i].canResolveEndpoint(service, region, opt) {
- continue
- }
-
- return ps[i].EndpointFor(service, region, opts...)
- }
-
- // If loose matching fallback to first partition format to use
- // when resolving the endpoint.
- if !opt.StrictMatching && len(ps) > 0 {
- return ps[0].EndpointFor(service, region, opts...)
- }
-
- return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{})
-}
-
-// Partitions satisfies the EnumPartitions interface and returns a list
-// of Partitions representing each partition represented in the SDK's
-// endpoints model.
-func (ps partitions) Partitions() []Partition {
- parts := make([]Partition, 0, len(ps))
- for i := 0; i < len(ps); i++ {
- parts = append(parts, ps[i].Partition())
- }
-
- return parts
-}
-
-type endpointWithVariants struct {
- endpoint
- Variants []endpointWithTags `json:"variants"`
-}
-
-type endpointWithTags struct {
- endpoint
- Tags []string `json:"tags"`
-}
-
-type endpointDefaults map[defaultKey]endpoint
-
-func (p *endpointDefaults) UnmarshalJSON(data []byte) error {
- if *p == nil {
- *p = make(endpointDefaults)
- }
-
- var e endpointWithVariants
- if err := json.Unmarshal(data, &e); err != nil {
- return err
- }
-
- (*p)[defaultKey{Variant: 0}] = e.endpoint
-
- e.Hostname = ""
- e.DNSSuffix = ""
-
- for _, variant := range e.Variants {
- endpointVariant, unknown := parseVariantTags(variant.Tags)
- if unknown {
- continue
- }
-
- var ve endpoint
- ve.mergeIn(e.endpoint)
- ve.mergeIn(variant.endpoint)
-
- (*p)[defaultKey{Variant: endpointVariant}] = ve
- }
-
- return nil
-}
-
-func parseVariantTags(tags []string) (ev endpointVariant, unknown bool) {
- if len(tags) == 0 {
- unknown = true
- return
- }
-
- for _, tag := range tags {
- switch {
- case strings.EqualFold("fips", tag):
- ev |= fipsVariant
- case strings.EqualFold("dualstack", tag):
- ev |= dualStackVariant
- default:
- unknown = true
- }
- }
- return ev, unknown
-}
-
-type partition struct {
- ID string `json:"partition"`
- Name string `json:"partitionName"`
- DNSSuffix string `json:"dnsSuffix"`
- RegionRegex regionRegex `json:"regionRegex"`
- Defaults endpointDefaults `json:"defaults"`
- Regions regions `json:"regions"`
- Services services `json:"services"`
-}
-
-func (p partition) Partition() Partition {
- return Partition{
- dnsSuffix: p.DNSSuffix,
- id: p.ID,
- p: &p,
- }
-}
-
-func (p partition) canResolveEndpoint(service, region string, options Options) bool {
- s, hasService := p.Services[service]
- _, hasEndpoint := s.Endpoints[endpointKey{
- Region: region,
- Variant: options.getEndpointVariant(service),
- }]
-
- if hasEndpoint && hasService {
- return true
- }
-
- if options.StrictMatching {
- return false
- }
-
- return p.RegionRegex.MatchString(region)
-}
-
-func allowLegacyEmptyRegion(service string) bool {
- legacy := map[string]struct{}{
- "budgets": {},
- "ce": {},
- "chime": {},
- "cloudfront": {},
- "ec2metadata": {},
- "iam": {},
- "importexport": {},
- "organizations": {},
- "route53": {},
- "sts": {},
- "support": {},
- "waf": {},
- }
-
- _, allowed := legacy[service]
- return allowed
-}
-
-func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) {
- var opt Options
- opt.Set(opts...)
-
- if len(opt.ResolvedRegion) > 0 {
- region = opt.ResolvedRegion
- }
-
- s, hasService := p.Services[service]
-
- if service == Ec2metadataServiceID && !hasService {
- endpoint := getEC2MetadataEndpoint(p.ID, service, opt.EC2MetadataEndpointMode)
- return endpoint, nil
- }
-
- if len(service) == 0 || !(hasService || opt.ResolveUnknownService) {
- // Only return error if the resolver will not fallback to creating
- // endpoint based on service endpoint ID passed in.
- return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services))
- }
-
- if len(region) == 0 && allowLegacyEmptyRegion(service) && len(s.PartitionEndpoint) != 0 {
- region = s.PartitionEndpoint
- }
-
- if r, ok := isLegacyGlobalRegion(service, region, opt); ok {
- region = r
- }
-
- variant := opt.getEndpointVariant(service)
-
- endpoints := s.Endpoints
-
- serviceDefaults, hasServiceDefault := s.Defaults[defaultKey{Variant: variant}]
- // If we searched for a variant which may have no explicit service defaults,
- // then we need to inherit the standard service defaults except the hostname and dnsSuffix
- if variant != 0 && !hasServiceDefault {
- serviceDefaults = s.Defaults[defaultKey{}]
- serviceDefaults.Hostname = ""
- serviceDefaults.DNSSuffix = ""
- }
-
- partitionDefaults, hasPartitionDefault := p.Defaults[defaultKey{Variant: variant}]
-
- var dnsSuffix string
- if len(serviceDefaults.DNSSuffix) > 0 {
- dnsSuffix = serviceDefaults.DNSSuffix
- } else if variant == 0 {
- // For legacy reasons the partition dnsSuffix is not in the defaults, so if we looked for
- // a non-variant endpoint then we need to set the dnsSuffix.
- dnsSuffix = p.DNSSuffix
- }
-
- noDefaults := !hasServiceDefault && !hasPartitionDefault
-
- e, hasEndpoint := s.endpointForRegion(region, endpoints, variant)
- if len(region) == 0 || (!hasEndpoint && (opt.StrictMatching || noDefaults)) {
- return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(endpoints, variant))
- }
-
- defs := []endpoint{partitionDefaults, serviceDefaults}
-
- return e.resolve(service, p.ID, region, dnsSuffixTemplateKey, dnsSuffix, defs, opt)
-}
-
-func getEC2MetadataEndpoint(partitionID, service string, mode EC2IMDSEndpointModeState) ResolvedEndpoint {
- switch mode {
- case EC2IMDSEndpointModeStateIPv6:
- return ResolvedEndpoint{
- URL: ec2MetadataEndpointIPv6,
- PartitionID: partitionID,
- SigningRegion: "aws-global",
- SigningName: service,
- SigningNameDerived: true,
- SigningMethod: "v4",
- }
- case EC2IMDSEndpointModeStateIPv4:
- fallthrough
- default:
- return ResolvedEndpoint{
- URL: ec2MetadataEndpointIPv4,
- PartitionID: partitionID,
- SigningRegion: "aws-global",
- SigningName: service,
- SigningNameDerived: true,
- SigningMethod: "v4",
- }
- }
-}
-
-func isLegacyGlobalRegion(service string, region string, opt Options) (string, bool) {
- if opt.getEndpointVariant(service) != 0 {
- return "", false
- }
-
- const (
- sts = "sts"
- s3 = "s3"
- awsGlobal = "aws-global"
- )
-
- switch {
- case service == sts && opt.STSRegionalEndpoint == RegionalSTSEndpoint:
- return region, false
- case service == s3 && opt.S3UsEast1RegionalEndpoint == RegionalS3UsEast1Endpoint:
- return region, false
- default:
- if _, ok := legacyGlobalRegions[service][region]; ok {
- return awsGlobal, true
- }
- }
-
- return region, false
-}
-
-func serviceList(ss services) []string {
- list := make([]string, 0, len(ss))
- for k := range ss {
- list = append(list, k)
- }
- return list
-}
-func endpointList(es serviceEndpoints, variant endpointVariant) []string {
- list := make([]string, 0, len(es))
- for k := range es {
- if k.Variant != variant {
- continue
- }
- list = append(list, k.Region)
- }
- return list
-}
-
-type regionRegex struct {
- *regexp.Regexp
-}
-
-func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) {
- // Strip leading and trailing quotes
- regex, err := strconv.Unquote(string(b))
- if err != nil {
- return fmt.Errorf("unable to strip quotes from regex, %v", err)
- }
-
- rr.Regexp, err = regexp.Compile(regex)
- if err != nil {
- return fmt.Errorf("unable to unmarshal region regex, %v", err)
- }
- return nil
-}
-
-type regions map[string]region
-
-type region struct {
- Description string `json:"description"`
-}
-
-type services map[string]service
-
-type service struct {
- PartitionEndpoint string `json:"partitionEndpoint"`
- IsRegionalized boxedBool `json:"isRegionalized,omitempty"`
- Defaults endpointDefaults `json:"defaults"`
- Endpoints serviceEndpoints `json:"endpoints"`
-}
-
-func (s *service) endpointForRegion(region string, endpoints serviceEndpoints, variant endpointVariant) (endpoint, bool) {
- if e, ok := endpoints[endpointKey{Region: region, Variant: variant}]; ok {
- return e, true
- }
-
- if s.IsRegionalized == boxedFalse {
- return endpoints[endpointKey{Region: s.PartitionEndpoint, Variant: variant}], region == s.PartitionEndpoint
- }
-
- // Unable to find any matching endpoint, return
- // blank that will be used for generic endpoint creation.
- return endpoint{}, false
-}
-
-type serviceEndpoints map[endpointKey]endpoint
-
-func (s *serviceEndpoints) UnmarshalJSON(data []byte) error {
- if *s == nil {
- *s = make(serviceEndpoints)
- }
-
- var regionToEndpoint map[string]endpointWithVariants
-
- if err := json.Unmarshal(data, ®ionToEndpoint); err != nil {
- return err
- }
-
- for region, e := range regionToEndpoint {
- (*s)[endpointKey{Region: region}] = e.endpoint
-
- e.Hostname = ""
- e.DNSSuffix = ""
-
- for _, variant := range e.Variants {
- endpointVariant, unknown := parseVariantTags(variant.Tags)
- if unknown {
- continue
- }
-
- var ve endpoint
- ve.mergeIn(e.endpoint)
- ve.mergeIn(variant.endpoint)
-
- (*s)[endpointKey{Region: region, Variant: endpointVariant}] = ve
- }
- }
-
- return nil
-}
-
-type endpoint struct {
- Hostname string `json:"hostname"`
- Protocols []string `json:"protocols"`
- CredentialScope credentialScope `json:"credentialScope"`
-
- DNSSuffix string `json:"dnsSuffix"`
-
- // Signature Version not used
- SignatureVersions []string `json:"signatureVersions"`
-
- // SSLCommonName not used.
- SSLCommonName string `json:"sslCommonName"`
-
- Deprecated boxedBool `json:"deprecated"`
-}
-
-// isZero returns whether the endpoint structure is an empty (zero) value.
-func (e endpoint) isZero() bool {
- switch {
- case len(e.Hostname) != 0:
- return false
- case len(e.Protocols) != 0:
- return false
- case e.CredentialScope != (credentialScope{}):
- return false
- case len(e.SignatureVersions) != 0:
- return false
- case len(e.SSLCommonName) != 0:
- return false
- }
- return true
-}
-
-const (
- defaultProtocol = "https"
- defaultSigner = "v4"
-)
-
-var (
- protocolPriority = []string{"https", "http"}
- signerPriority = []string{"v4", "v2"}
-)
-
-func getByPriority(s []string, p []string, def string) string {
- if len(s) == 0 {
- return def
- }
-
- for i := 0; i < len(p); i++ {
- for j := 0; j < len(s); j++ {
- if s[j] == p[i] {
- return s[j]
- }
- }
- }
-
- return s[0]
-}
-
-func (e endpoint) resolve(service, partitionID, region, dnsSuffixTemplateVariable, dnsSuffix string, defs []endpoint, opts Options) (ResolvedEndpoint, error) {
- var merged endpoint
- for _, def := range defs {
- merged.mergeIn(def)
- }
- merged.mergeIn(e)
- e = merged
-
- signingRegion := e.CredentialScope.Region
- if len(signingRegion) == 0 {
- signingRegion = region
- }
-
- signingName := e.CredentialScope.Service
- var signingNameDerived bool
- if len(signingName) == 0 {
- signingName = service
- signingNameDerived = true
- }
-
- hostname := e.Hostname
-
- if !validateInputRegion(region) {
- return ResolvedEndpoint{}, fmt.Errorf("invalid region identifier format provided")
- }
-
- if len(merged.DNSSuffix) > 0 {
- dnsSuffix = merged.DNSSuffix
- }
-
- u := strings.Replace(hostname, "{service}", service, 1)
- u = strings.Replace(u, "{region}", region, 1)
- u = strings.Replace(u, dnsSuffixTemplateVariable, dnsSuffix, 1)
-
- scheme := getEndpointScheme(e.Protocols, opts.DisableSSL)
- u = fmt.Sprintf("%s://%s", scheme, u)
-
- if e.Deprecated == boxedTrue && opts.LogDeprecated && opts.Logger != nil {
- opts.Logger.Log(fmt.Sprintf("endpoint identifier %q, url %q marked as deprecated", region, u))
- }
-
- return ResolvedEndpoint{
- URL: u,
- PartitionID: partitionID,
- SigningRegion: signingRegion,
- SigningName: signingName,
- SigningNameDerived: signingNameDerived,
- SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner),
- }, nil
-}
-
-func getEndpointScheme(protocols []string, disableSSL bool) string {
- if disableSSL {
- return "http"
- }
-
- return getByPriority(protocols, protocolPriority, defaultProtocol)
-}
-
-func (e *endpoint) mergeIn(other endpoint) {
- if len(other.Hostname) > 0 {
- e.Hostname = other.Hostname
- }
- if len(other.Protocols) > 0 {
- e.Protocols = other.Protocols
- }
- if len(other.SignatureVersions) > 0 {
- e.SignatureVersions = other.SignatureVersions
- }
- if len(other.CredentialScope.Region) > 0 {
- e.CredentialScope.Region = other.CredentialScope.Region
- }
- if len(other.CredentialScope.Service) > 0 {
- e.CredentialScope.Service = other.CredentialScope.Service
- }
- if len(other.SSLCommonName) > 0 {
- e.SSLCommonName = other.SSLCommonName
- }
- if len(other.DNSSuffix) > 0 {
- e.DNSSuffix = other.DNSSuffix
- }
- if other.Deprecated != boxedBoolUnset {
- e.Deprecated = other.Deprecated
- }
-}
-
-type credentialScope struct {
- Region string `json:"region"`
- Service string `json:"service"`
-}
-
-type boxedBool int
-
-func (b *boxedBool) UnmarshalJSON(buf []byte) error {
- v, err := strconv.ParseBool(string(buf))
- if err != nil {
- return err
- }
-
- if v {
- *b = boxedTrue
- } else {
- *b = boxedFalse
- }
-
- return nil
-}
-
-const (
- boxedBoolUnset boxedBool = iota
- boxedFalse
- boxedTrue
-)
-
-func validateInputRegion(region string) bool {
- return regionValidationRegex.MatchString(region)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
deleted file mode 100644
index 84922bca8..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
+++ /dev/null
@@ -1,412 +0,0 @@
-//go:build codegen
-// +build codegen
-
-package endpoints
-
-import (
- "fmt"
- "io"
- "reflect"
- "strings"
- "text/template"
- "unicode"
-)
-
-// A CodeGenOptions are the options for code generating the endpoints into
-// Go code from the endpoints model definition.
-type CodeGenOptions struct {
- // Options for how the model will be decoded.
- DecodeModelOptions DecodeModelOptions
-
- // Disables code generation of the service endpoint prefix IDs defined in
- // the model.
- DisableGenerateServiceIDs bool
-}
-
-// Set combines all of the option functions together
-func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) {
- for _, fn := range optFns {
- fn(d)
- }
-}
-
-// CodeGenModel given a endpoints model file will decode it and attempt to
-// generate Go code from the model definition. Error will be returned if
-// the code is unable to be generated, or decoded.
-func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error {
- var opts CodeGenOptions
- opts.Set(optFns...)
-
- resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) {
- *d = opts.DecodeModelOptions
- })
- if err != nil {
- return err
- }
-
- v := struct {
- Resolver
- CodeGenOptions
- }{
- Resolver: resolver,
- CodeGenOptions: opts,
- }
-
- tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl))
- if err := tmpl.ExecuteTemplate(outFile, "defaults", v); err != nil {
- return fmt.Errorf("failed to execute template, %v", err)
- }
-
- return nil
-}
-
-func toSymbol(v string) string {
- out := []rune{}
- for _, c := range strings.Title(v) {
- if !(unicode.IsNumber(c) || unicode.IsLetter(c)) {
- continue
- }
-
- out = append(out, c)
- }
-
- return string(out)
-}
-
-func quoteString(v string) string {
- return fmt.Sprintf("%q", v)
-}
-
-func regionConstName(p, r string) string {
- return toSymbol(p) + toSymbol(r)
-}
-
-func partitionGetter(id string) string {
- return fmt.Sprintf("%sPartition", toSymbol(id))
-}
-
-func partitionVarName(id string) string {
- return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id)))
-}
-
-func listPartitionNames(ps partitions) string {
- names := []string{}
- switch len(ps) {
- case 1:
- return ps[0].Name
- case 2:
- return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name)
- default:
- for i, p := range ps {
- if i == len(ps)-1 {
- names = append(names, "and "+p.Name)
- } else {
- names = append(names, p.Name)
- }
- }
- return strings.Join(names, ", ")
- }
-}
-
-func boxedBoolIfSet(msg string, v boxedBool) string {
- switch v {
- case boxedTrue:
- return fmt.Sprintf(msg, "boxedTrue")
- case boxedFalse:
- return fmt.Sprintf(msg, "boxedFalse")
- default:
- return ""
- }
-}
-
-func stringIfSet(msg, v string) string {
- if len(v) == 0 {
- return ""
- }
-
- return fmt.Sprintf(msg, v)
-}
-
-func stringSliceIfSet(msg string, vs []string) string {
- if len(vs) == 0 {
- return ""
- }
-
- names := []string{}
- for _, v := range vs {
- names = append(names, `"`+v+`"`)
- }
-
- return fmt.Sprintf(msg, strings.Join(names, ","))
-}
-
-func endpointIsSet(v endpoint) bool {
- return !reflect.DeepEqual(v, endpoint{})
-}
-
-func serviceSet(ps partitions) map[string]struct{} {
- set := map[string]struct{}{}
- for _, p := range ps {
- for id := range p.Services {
- set[id] = struct{}{}
- }
- }
-
- return set
-}
-
-func endpointVariantSetter(variant endpointVariant) (string, error) {
- if variant == 0 {
- return "0", nil
- }
-
- if variant > (fipsVariant | dualStackVariant) {
- return "", fmt.Errorf("unknown endpoint variant")
- }
-
- var symbols []string
- if variant&fipsVariant != 0 {
- symbols = append(symbols, "fipsVariant")
- }
- if variant&dualStackVariant != 0 {
- symbols = append(symbols, "dualStackVariant")
- }
- v := strings.Join(symbols, "|")
-
- return v, nil
-}
-
-func endpointKeySetter(e endpointKey) (string, error) {
- var sb strings.Builder
- sb.WriteString("endpointKey{\n")
- sb.WriteString(fmt.Sprintf("Region: %q,\n", e.Region))
- if e.Variant != 0 {
- variantSetter, err := endpointVariantSetter(e.Variant)
- if err != nil {
- return "", err
- }
- sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter))
- }
- sb.WriteString("}")
- return sb.String(), nil
-}
-
-func defaultKeySetter(e defaultKey) (string, error) {
- var sb strings.Builder
- sb.WriteString("defaultKey{\n")
- if e.Variant != 0 {
- variantSetter, err := endpointVariantSetter(e.Variant)
- if err != nil {
- return "", err
- }
- sb.WriteString(fmt.Sprintf("Variant: %s,\n", variantSetter))
- }
- sb.WriteString("}")
- return sb.String(), nil
-}
-
-var funcMap = template.FuncMap{
- "ToSymbol": toSymbol,
- "QuoteString": quoteString,
- "RegionConst": regionConstName,
- "PartitionGetter": partitionGetter,
- "PartitionVarName": partitionVarName,
- "ListPartitionNames": listPartitionNames,
- "BoxedBoolIfSet": boxedBoolIfSet,
- "StringIfSet": stringIfSet,
- "StringSliceIfSet": stringSliceIfSet,
- "EndpointIsSet": endpointIsSet,
- "ServicesSet": serviceSet,
- "EndpointVariantSetter": endpointVariantSetter,
- "EndpointKeySetter": endpointKeySetter,
- "DefaultKeySetter": defaultKeySetter,
-}
-
-const v3Tmpl = `
-{{ define "defaults" -}}
-// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
-
-package endpoints
-
-import (
- "regexp"
-)
-
- {{ template "partition consts" $.Resolver }}
-
- {{ range $_, $partition := $.Resolver }}
- {{ template "partition region consts" $partition }}
- {{ end }}
-
- {{ if not $.DisableGenerateServiceIDs -}}
- {{ template "service consts" $.Resolver }}
- {{- end }}
-
- {{ template "endpoint resolvers" $.Resolver }}
-{{- end }}
-
-{{ define "partition consts" }}
- // Partition identifiers
- const (
- {{ range $_, $p := . -}}
- {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition.
- {{ end -}}
- )
-{{- end }}
-
-{{ define "partition region consts" }}
- // {{ .Name }} partition's regions.
- const (
- {{ range $id, $region := .Regions -}}
- {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}.
- {{ end -}}
- )
-{{- end }}
-
-{{ define "service consts" }}
- // Service identifiers
- const (
- {{ $serviceSet := ServicesSet . -}}
- {{ range $id, $_ := $serviceSet -}}
- {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}.
- {{ end -}}
- )
-{{- end }}
-
-{{ define "endpoint resolvers" }}
- // DefaultResolver returns an Endpoint resolver that will be able
- // to resolve endpoints for: {{ ListPartitionNames . }}.
- //
- // Use DefaultPartitions() to get the list of the default partitions.
- func DefaultResolver() Resolver {
- return defaultPartitions
- }
-
- // DefaultPartitions returns a list of the partitions the SDK is bundled
- // with. The available partitions are: {{ ListPartitionNames . }}.
- //
- // partitions := endpoints.DefaultPartitions
- // for _, p := range partitions {
- // // ... inspect partitions
- // }
- func DefaultPartitions() []Partition {
- return defaultPartitions.Partitions()
- }
-
- var defaultPartitions = partitions{
- {{ range $_, $partition := . -}}
- {{ PartitionVarName $partition.ID }},
- {{ end }}
- }
-
- {{ range $_, $partition := . -}}
- {{ $name := PartitionGetter $partition.ID -}}
- // {{ $name }} returns the Resolver for {{ $partition.Name }}.
- func {{ $name }}() Partition {
- return {{ PartitionVarName $partition.ID }}.Partition()
- }
- var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }}
- {{ end }}
-{{ end }}
-
-{{ define "default partitions" }}
- func DefaultPartitions() []Partition {
- return []partition{
- {{ range $_, $partition := . -}}
- // {{ ToSymbol $partition.ID}}Partition(),
- {{ end }}
- }
- }
-{{ end }}
-
-{{ define "gocode Partition" -}}
-partition{
- {{ StringIfSet "ID: %q,\n" .ID -}}
- {{ StringIfSet "Name: %q,\n" .Name -}}
- {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
- RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }},
- {{ if (gt (len .Defaults) 0) -}}
- Defaults: {{ template "gocode Defaults" .Defaults -}},
- {{ end -}}
- Regions: {{ template "gocode Regions" .Regions }},
- Services: {{ template "gocode Services" .Services }},
-}
-{{- end }}
-
-{{ define "gocode RegionRegex" -}}
-regionRegex{
- Regexp: func() *regexp.Regexp{
- reg, _ := regexp.Compile({{ QuoteString .Regexp.String }})
- return reg
- }(),
-}
-{{- end }}
-
-{{ define "gocode Regions" -}}
-regions{
- {{ range $id, $region := . -}}
- "{{ $id }}": {{ template "gocode Region" $region }},
- {{ end -}}
-}
-{{- end }}
-
-{{ define "gocode Region" -}}
-region{
- {{ StringIfSet "Description: %q,\n" .Description -}}
-}
-{{- end }}
-
-{{ define "gocode Services" -}}
-services{
- {{ range $id, $service := . -}}
- "{{ $id }}": {{ template "gocode Service" $service }},
- {{ end }}
-}
-{{- end }}
-
-{{ define "gocode Service" -}}
-service{
- {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}}
- {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}}
- {{ if (gt (len .Defaults) 0) -}}
- Defaults: {{ template "gocode Defaults" .Defaults -}},
- {{ end -}}
- {{ if .Endpoints -}}
- Endpoints: {{ template "gocode Endpoints" .Endpoints }},
- {{- end }}
-}
-{{- end }}
-
-{{ define "gocode Defaults" -}}
-endpointDefaults{
- {{ range $id, $endpoint := . -}}
- {{ DefaultKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }},
- {{ end }}
-}
-{{- end }}
-
-{{ define "gocode Endpoints" -}}
-serviceEndpoints{
- {{ range $id, $endpoint := . -}}
- {{ EndpointKeySetter $id }}: {{ template "gocode Endpoint" $endpoint }},
- {{ end }}
-}
-{{- end }}
-
-{{ define "gocode Endpoint" -}}
-endpoint{
- {{ StringIfSet "Hostname: %q,\n" .Hostname -}}
- {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}}
- {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}}
- {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}}
- {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}}
- {{ if or .CredentialScope.Region .CredentialScope.Service -}}
- CredentialScope: credentialScope{
- {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}}
- {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}}
- },
- {{- end }}
- {{ BoxedBoolIfSet "Deprecated: %s,\n" .Deprecated -}}
-}
-{{- end }}
-`
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
deleted file mode 100644
index fa06f7a8f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/errors.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package aws
-
-import "github.com/aws/aws-sdk-go/aws/awserr"
-
-var (
- // ErrMissingRegion is an error that is returned if region configuration is
- // not found.
- ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
-
- // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
- // resolved for a service.
- ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
deleted file mode 100644
index 91a6f277a..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package aws
-
-// JSONValue is a representation of a grab bag type that will be marshaled
-// into a json string. This type can be used just like any other map.
-//
-// Example:
-//
-// values := aws.JSONValue{
-// "Foo": "Bar",
-// }
-// values["Baz"] = "Qux"
-type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
deleted file mode 100644
index 49674cc79..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package aws
-
-import (
- "log"
- "os"
-)
-
-// A LogLevelType defines the level logging should be performed at. Used to instruct
-// the SDK which statements should be logged.
-type LogLevelType uint
-
-// LogLevel returns the pointer to a LogLevel. Should be used to workaround
-// not being able to take the address of a non-composite literal.
-func LogLevel(l LogLevelType) *LogLevelType {
- return &l
-}
-
-// Value returns the LogLevel value or the default value LogOff if the LogLevel
-// is nil. Safe to use on nil value LogLevelTypes.
-func (l *LogLevelType) Value() LogLevelType {
- if l != nil {
- return *l
- }
- return LogOff
-}
-
-// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
-// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
-// LogLevel is nil, will default to LogOff comparison.
-func (l *LogLevelType) Matches(v LogLevelType) bool {
- c := l.Value()
- return c&v == v
-}
-
-// AtLeast returns true if this LogLevel is at least high enough to satisfies v.
-// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
-// to LogOff comparison.
-func (l *LogLevelType) AtLeast(v LogLevelType) bool {
- c := l.Value()
- return c >= v
-}
-
-const (
- // LogOff states that no logging should be performed by the SDK. This is the
- // default state of the SDK, and should be use to disable all logging.
- LogOff LogLevelType = iota * 0x1000
-
- // LogDebug state that debug output should be logged by the SDK. This should
- // be used to inspect request made and responses received.
- LogDebug
-)
-
-// Debug Logging Sub Levels
-const (
- // LogDebugWithSigning states that the SDK should log request signing and
- // presigning events. This should be used to log the signing details of
- // requests for debugging. Will also enable LogDebug.
- LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
-
- // LogDebugWithHTTPBody states the SDK should log HTTP request and response
- // HTTP bodys in addition to the headers and path. This should be used to
- // see the body content of requests and responses made while using the SDK
- // Will also enable LogDebug.
- LogDebugWithHTTPBody
-
- // LogDebugWithRequestRetries states the SDK should log when service requests will
- // be retried. This should be used to log when you want to log when service
- // requests are being retried. Will also enable LogDebug.
- LogDebugWithRequestRetries
-
- // LogDebugWithRequestErrors states the SDK should log when service requests fail
- // to build, send, validate, or unmarshal.
- LogDebugWithRequestErrors
-
- // LogDebugWithEventStreamBody states the SDK should log EventStream
- // request and response bodys. This should be used to log the EventStream
- // wire unmarshaled message content of requests and responses made while
- // using the SDK Will also enable LogDebug.
- LogDebugWithEventStreamBody
-
- // LogDebugWithDeprecated states the SDK should log details about deprecated functionality.
- LogDebugWithDeprecated
-)
-
-// A Logger is a minimalistic interface for the SDK to log messages to. Should
-// be used to provide custom logging writers for the SDK to use.
-type Logger interface {
- Log(...interface{})
-}
-
-// A LoggerFunc is a convenience type to convert a function taking a variadic
-// list of arguments and wrap it so the Logger interface can be used.
-//
-// Example:
-// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
-// fmt.Fprintln(os.Stdout, args...)
-// })})
-type LoggerFunc func(...interface{})
-
-// Log calls the wrapped function with the arguments provided
-func (f LoggerFunc) Log(args ...interface{}) {
- f(args...)
-}
-
-// NewDefaultLogger returns a Logger which will write log messages to stdout, and
-// use same formatting runes as the stdlib log.Logger
-func NewDefaultLogger() Logger {
- return &defaultLogger{
- logger: log.New(os.Stdout, "", log.LstdFlags),
- }
-}
-
-// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
-type defaultLogger struct {
- logger *log.Logger
-}
-
-// Log logs the parameters to the stdlib logger. See log.Println.
-func (l defaultLogger) Log(args ...interface{}) {
- l.logger.Println(args...)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
deleted file mode 100644
index 2ba3c56c1..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package request
-
-import (
- "strings"
-)
-
-func isErrConnectionReset(err error) bool {
- if strings.Contains(err.Error(), "read: connection reset") {
- return false
- }
-
- if strings.Contains(err.Error(), "use of closed network connection") ||
- strings.Contains(err.Error(), "connection reset") ||
- strings.Contains(err.Error(), "broken pipe") {
- return true
- }
-
- return false
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
deleted file mode 100644
index 9556332b6..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package request
-
-import (
- "fmt"
- "strings"
-)
-
-// A Handlers provides a collection of request handlers for various
-// stages of handling requests.
-type Handlers struct {
- Validate HandlerList
- Build HandlerList
- BuildStream HandlerList
- Sign HandlerList
- Send HandlerList
- ValidateResponse HandlerList
- Unmarshal HandlerList
- UnmarshalStream HandlerList
- UnmarshalMeta HandlerList
- UnmarshalError HandlerList
- Retry HandlerList
- AfterRetry HandlerList
- CompleteAttempt HandlerList
- Complete HandlerList
-}
-
-// Copy returns a copy of this handler's lists.
-func (h *Handlers) Copy() Handlers {
- return Handlers{
- Validate: h.Validate.copy(),
- Build: h.Build.copy(),
- BuildStream: h.BuildStream.copy(),
- Sign: h.Sign.copy(),
- Send: h.Send.copy(),
- ValidateResponse: h.ValidateResponse.copy(),
- Unmarshal: h.Unmarshal.copy(),
- UnmarshalStream: h.UnmarshalStream.copy(),
- UnmarshalError: h.UnmarshalError.copy(),
- UnmarshalMeta: h.UnmarshalMeta.copy(),
- Retry: h.Retry.copy(),
- AfterRetry: h.AfterRetry.copy(),
- CompleteAttempt: h.CompleteAttempt.copy(),
- Complete: h.Complete.copy(),
- }
-}
-
-// Clear removes callback functions for all handlers.
-func (h *Handlers) Clear() {
- h.Validate.Clear()
- h.Build.Clear()
- h.BuildStream.Clear()
- h.Send.Clear()
- h.Sign.Clear()
- h.Unmarshal.Clear()
- h.UnmarshalStream.Clear()
- h.UnmarshalMeta.Clear()
- h.UnmarshalError.Clear()
- h.ValidateResponse.Clear()
- h.Retry.Clear()
- h.AfterRetry.Clear()
- h.CompleteAttempt.Clear()
- h.Complete.Clear()
-}
-
-// IsEmpty returns if there are no handlers in any of the handlerlists.
-func (h *Handlers) IsEmpty() bool {
- if h.Validate.Len() != 0 {
- return false
- }
- if h.Build.Len() != 0 {
- return false
- }
- if h.BuildStream.Len() != 0 {
- return false
- }
- if h.Send.Len() != 0 {
- return false
- }
- if h.Sign.Len() != 0 {
- return false
- }
- if h.Unmarshal.Len() != 0 {
- return false
- }
- if h.UnmarshalStream.Len() != 0 {
- return false
- }
- if h.UnmarshalMeta.Len() != 0 {
- return false
- }
- if h.UnmarshalError.Len() != 0 {
- return false
- }
- if h.ValidateResponse.Len() != 0 {
- return false
- }
- if h.Retry.Len() != 0 {
- return false
- }
- if h.AfterRetry.Len() != 0 {
- return false
- }
- if h.CompleteAttempt.Len() != 0 {
- return false
- }
- if h.Complete.Len() != 0 {
- return false
- }
-
- return true
-}
-
-// A HandlerListRunItem represents an entry in the HandlerList which
-// is being run.
-type HandlerListRunItem struct {
- Index int
- Handler NamedHandler
- Request *Request
-}
-
-// A HandlerList manages zero or more handlers in a list.
-type HandlerList struct {
- list []NamedHandler
-
- // Called after each request handler in the list is called. If set
- // and the func returns true the HandlerList will continue to iterate
- // over the request handlers. If false is returned the HandlerList
- // will stop iterating.
- //
- // Should be used if extra logic to be performed between each handler
- // in the list. This can be used to terminate a list's iteration
- // based on a condition such as error like, HandlerListStopOnError.
- // Or for logging like HandlerListLogItem.
- AfterEachFn func(item HandlerListRunItem) bool
-}
-
-// A NamedHandler is a struct that contains a name and function callback.
-type NamedHandler struct {
- Name string
- Fn func(*Request)
-}
-
-// copy creates a copy of the handler list.
-func (l *HandlerList) copy() HandlerList {
- n := HandlerList{
- AfterEachFn: l.AfterEachFn,
- }
- if len(l.list) == 0 {
- return n
- }
-
- n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
- return n
-}
-
-// Clear clears the handler list.
-func (l *HandlerList) Clear() {
- l.list = l.list[0:0]
-}
-
-// Len returns the number of handlers in the list.
-func (l *HandlerList) Len() int {
- return len(l.list)
-}
-
-// PushBack pushes handler f to the back of the handler list.
-func (l *HandlerList) PushBack(f func(*Request)) {
- l.PushBackNamed(NamedHandler{"__anonymous", f})
-}
-
-// PushBackNamed pushes named handler f to the back of the handler list.
-func (l *HandlerList) PushBackNamed(n NamedHandler) {
- if cap(l.list) == 0 {
- l.list = make([]NamedHandler, 0, 5)
- }
- l.list = append(l.list, n)
-}
-
-// PushFront pushes handler f to the front of the handler list.
-func (l *HandlerList) PushFront(f func(*Request)) {
- l.PushFrontNamed(NamedHandler{"__anonymous", f})
-}
-
-// PushFrontNamed pushes named handler f to the front of the handler list.
-func (l *HandlerList) PushFrontNamed(n NamedHandler) {
- if cap(l.list) == len(l.list) {
- // Allocating new list required
- l.list = append([]NamedHandler{n}, l.list...)
- } else {
- // Enough room to prepend into list.
- l.list = append(l.list, NamedHandler{})
- copy(l.list[1:], l.list)
- l.list[0] = n
- }
-}
-
-// Remove removes a NamedHandler n
-func (l *HandlerList) Remove(n NamedHandler) {
- l.RemoveByName(n.Name)
-}
-
-// RemoveByName removes a NamedHandler by name.
-func (l *HandlerList) RemoveByName(name string) {
- for i := 0; i < len(l.list); i++ {
- m := l.list[i]
- if m.Name == name {
- // Shift array preventing creating new arrays
- copy(l.list[i:], l.list[i+1:])
- l.list[len(l.list)-1] = NamedHandler{}
- l.list = l.list[:len(l.list)-1]
-
- // decrement list so next check to length is correct
- i--
- }
- }
-}
-
-// SwapNamed will swap out any existing handlers with the same name as the
-// passed in NamedHandler returning true if handlers were swapped. False is
-// returned otherwise.
-func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
- for i := 0; i < len(l.list); i++ {
- if l.list[i].Name == n.Name {
- l.list[i].Fn = n.Fn
- swapped = true
- }
- }
-
- return swapped
-}
-
-// Swap will swap out all handlers matching the name passed in. The matched
-// handlers will be swapped in. True is returned if the handlers were swapped.
-func (l *HandlerList) Swap(name string, replace NamedHandler) bool {
- var swapped bool
-
- for i := 0; i < len(l.list); i++ {
- if l.list[i].Name == name {
- l.list[i] = replace
- swapped = true
- }
- }
-
- return swapped
-}
-
-// SetBackNamed will replace the named handler if it exists in the handler list.
-// If the handler does not exist the handler will be added to the end of the list.
-func (l *HandlerList) SetBackNamed(n NamedHandler) {
- if !l.SwapNamed(n) {
- l.PushBackNamed(n)
- }
-}
-
-// SetFrontNamed will replace the named handler if it exists in the handler list.
-// If the handler does not exist the handler will be added to the beginning of
-// the list.
-func (l *HandlerList) SetFrontNamed(n NamedHandler) {
- if !l.SwapNamed(n) {
- l.PushFrontNamed(n)
- }
-}
-
-// Run executes all handlers in the list with a given request object.
-func (l *HandlerList) Run(r *Request) {
- for i, h := range l.list {
- h.Fn(r)
- item := HandlerListRunItem{
- Index: i, Handler: h, Request: r,
- }
- if l.AfterEachFn != nil && !l.AfterEachFn(item) {
- return
- }
- }
-}
-
-// HandlerListLogItem logs the request handler and the state of the
-// request's Error value. Always returns true to continue iterating
-// request handlers in a HandlerList.
-func HandlerListLogItem(item HandlerListRunItem) bool {
- if item.Request.Config.Logger == nil {
- return true
- }
- item.Request.Config.Logger.Log("DEBUG: RequestHandler",
- item.Index, item.Handler.Name, item.Request.Error)
-
- return true
-}
-
-// HandlerListStopOnError returns false to stop the HandlerList iterating
-// over request handlers if Request.Error is not nil. True otherwise
-// to continue iterating.
-func HandlerListStopOnError(item HandlerListRunItem) bool {
- return item.Request.Error == nil
-}
-
-// WithAppendUserAgent will add a string to the user agent prefixed with a
-// single white space.
-func WithAppendUserAgent(s string) Option {
- return func(r *Request) {
- r.Handlers.Build.PushBack(func(r2 *Request) {
- AddToUserAgent(r, s)
- })
- }
-}
-
-// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
-// header. If the extra parameters are provided they will be added as metadata to the
-// name/version pair resulting in the following format.
-// "name/version (extra0; extra1; ...)"
-// The user agent part will be concatenated with this current request's user agent string.
-func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
- ua := fmt.Sprintf("%s/%s", name, version)
- if len(extra) > 0 {
- ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
- }
- return func(r *Request) {
- AddToUserAgent(r, ua)
- }
-}
-
-// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
-// The input string will be concatenated with the current request's user agent string.
-func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
- return func(r *Request) {
- AddToUserAgent(r, s)
- }
-}
-
-// WithSetRequestHeaders updates the operation request's HTTP header to contain
-// the header key value pairs provided. If the header key already exists in the
-// request's HTTP header set, the existing value(s) will be replaced.
-//
-// Header keys added will be added as canonical format with title casing
-// applied via http.Header.Set method.
-func WithSetRequestHeaders(h map[string]string) Option {
- return withRequestHeader(h).SetRequestHeaders
-}
-
-type withRequestHeader map[string]string
-
-func (h withRequestHeader) SetRequestHeaders(r *Request) {
- for k, v := range h {
- r.HTTPRequest.Header.Set(k, v)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
deleted file mode 100644
index 79f79602b..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package request
-
-import (
- "io"
- "net/http"
- "net/url"
-)
-
-func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
- req := new(http.Request)
- *req = *r
- req.URL = &url.URL{}
- *req.URL = *r.URL
- req.Body = body
-
- req.Header = http.Header{}
- for k, v := range r.Header {
- for _, vv := range v {
- req.Header.Add(k, vv)
- }
- }
-
- return req
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
deleted file mode 100644
index 9370fa50c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package request
-
-import (
- "io"
- "sync"
-
- "github.com/aws/aws-sdk-go/internal/sdkio"
-)
-
-// offsetReader is a thread-safe io.ReadCloser to prevent racing
-// with retrying requests
-type offsetReader struct {
- buf io.ReadSeeker
- lock sync.Mutex
- closed bool
-}
-
-func newOffsetReader(buf io.ReadSeeker, offset int64) (*offsetReader, error) {
- reader := &offsetReader{}
- _, err := buf.Seek(offset, sdkio.SeekStart)
- if err != nil {
- return nil, err
- }
-
- reader.buf = buf
- return reader, nil
-}
-
-// Close will close the instance of the offset reader's access to
-// the underlying io.ReadSeeker.
-func (o *offsetReader) Close() error {
- o.lock.Lock()
- defer o.lock.Unlock()
- o.closed = true
- return nil
-}
-
-// Read is a thread-safe read of the underlying io.ReadSeeker
-func (o *offsetReader) Read(p []byte) (int, error) {
- o.lock.Lock()
- defer o.lock.Unlock()
-
- if o.closed {
- return 0, io.EOF
- }
-
- return o.buf.Read(p)
-}
-
-// Seek is a thread-safe seeking operation.
-func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
- o.lock.Lock()
- defer o.lock.Unlock()
-
- return o.buf.Seek(offset, whence)
-}
-
-// CloseAndCopy will return a new offsetReader with a copy of the old buffer
-// and close the old buffer.
-func (o *offsetReader) CloseAndCopy(offset int64) (*offsetReader, error) {
- if err := o.Close(); err != nil {
- return nil, err
- }
- return newOffsetReader(o.buf, offset)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
deleted file mode 100644
index 636d9ec94..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ /dev/null
@@ -1,722 +0,0 @@
-package request
-
-import (
- "bytes"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "reflect"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/internal/sdkio"
-)
-
-const (
- // ErrCodeSerialization is the serialization error code that is received
- // during protocol unmarshaling.
- ErrCodeSerialization = "SerializationError"
-
- // ErrCodeRead is an error that is returned during HTTP reads.
- ErrCodeRead = "ReadError"
-
- // ErrCodeResponseTimeout is the connection timeout error that is received
- // during body reads.
- ErrCodeResponseTimeout = "ResponseTimeout"
-
- // ErrCodeInvalidPresignExpire is returned when the expire time provided to
- // presign is invalid
- ErrCodeInvalidPresignExpire = "InvalidPresignExpireError"
-
- // CanceledErrorCode is the error code that will be returned by an
- // API request that was canceled. Requests given a aws.Context may
- // return this error when canceled.
- CanceledErrorCode = "RequestCanceled"
-
- // ErrCodeRequestError is an error preventing the SDK from continuing to
- // process the request.
- ErrCodeRequestError = "RequestError"
-)
-
-// A Request is the service request to be made.
-type Request struct {
- Config aws.Config
- ClientInfo metadata.ClientInfo
- Handlers Handlers
-
- Retryer
- AttemptTime time.Time
- Time time.Time
- Operation *Operation
- HTTPRequest *http.Request
- HTTPResponse *http.Response
- Body io.ReadSeeker
- streamingBody io.ReadCloser
- BodyStart int64 // offset from beginning of Body that the request body starts
- Params interface{}
- Error error
- Data interface{}
- RequestID string
- RetryCount int
- Retryable *bool
- RetryDelay time.Duration
- NotHoist bool
- SignedHeaderVals http.Header
- LastSignedAt time.Time
- DisableFollowRedirects bool
-
- // Additional API error codes that should be retried. IsErrorRetryable
- // will consider these codes in addition to its built in cases.
- RetryErrorCodes []string
-
- // Additional API error codes that should be retried with throttle backoff
- // delay. IsErrorThrottle will consider these codes in addition to its
- // built in cases.
- ThrottleErrorCodes []string
-
- // A value greater than 0 instructs the request to be signed as Presigned URL
- // You should not set this field directly. Instead use Request's
- // Presign or PresignRequest methods.
- ExpireTime time.Duration
-
- context aws.Context
-
- built bool
-
- // Need to persist an intermediate body between the input Body and HTTP
- // request body because the HTTP Client's transport can maintain a reference
- // to the HTTP request's body after the client has returned. This value is
- // safe to use concurrently and wrap the input Body for each HTTP request.
- safeBody *offsetReader
-}
-
-// An Operation is the service API operation to be made.
-type Operation struct {
- Name string
- HTTPMethod string
- HTTPPath string
- *Paginator
-
- BeforePresignFn func(r *Request) error
-}
-
-// New returns a new Request pointer for the service API operation and
-// parameters.
-//
-// A Retryer should be provided to direct how the request is retried. If
-// Retryer is nil, a default no retry value will be used. You can use
-// NoOpRetryer in the Client package to disable retry behavior directly.
-//
-// Params is any value of input parameters to be the request payload.
-// Data is pointer value to an object which the request's response
-// payload will be deserialized to.
-func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
- retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
-
- if retryer == nil {
- retryer = noOpRetryer{}
- }
-
- method := operation.HTTPMethod
- if method == "" {
- method = "POST"
- }
-
- httpReq, _ := http.NewRequest(method, "", nil)
-
- var err error
- httpReq.URL, err = url.Parse(clientInfo.Endpoint)
- if err != nil {
- httpReq.URL = &url.URL{}
- err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
- }
-
- if len(operation.HTTPPath) != 0 {
- opHTTPPath := operation.HTTPPath
- var opQueryString string
- if idx := strings.Index(opHTTPPath, "?"); idx >= 0 {
- opQueryString = opHTTPPath[idx+1:]
- opHTTPPath = opHTTPPath[:idx]
- }
-
- if strings.HasSuffix(httpReq.URL.Path, "/") && strings.HasPrefix(opHTTPPath, "/") {
- opHTTPPath = opHTTPPath[1:]
- }
- httpReq.URL.Path += opHTTPPath
- httpReq.URL.RawQuery = opQueryString
- }
-
- r := &Request{
- Config: cfg,
- ClientInfo: clientInfo,
- Handlers: handlers.Copy(),
-
- Retryer: retryer,
- Time: time.Now(),
- ExpireTime: 0,
- Operation: operation,
- HTTPRequest: httpReq,
- Body: nil,
- Params: params,
- Error: err,
- Data: data,
- }
- r.SetBufferBody([]byte{})
-
- return r
-}
-
-// A Option is a functional option that can augment or modify a request when
-// using a WithContext API operation method.
-type Option func(*Request)
-
-// WithGetResponseHeader builds a request Option which will retrieve a single
-// header value from the HTTP Response. If there are multiple values for the
-// header key use WithGetResponseHeaders instead to access the http.Header
-// map directly. The passed in val pointer must be non-nil.
-//
-// This Option can be used multiple times with a single API operation.
-//
-// var id2, versionID string
-// svc.PutObjectWithContext(ctx, params,
-// request.WithGetResponseHeader("x-amz-id-2", &id2),
-// request.WithGetResponseHeader("x-amz-version-id", &versionID),
-// )
-func WithGetResponseHeader(key string, val *string) Option {
- return func(r *Request) {
- r.Handlers.Complete.PushBack(func(req *Request) {
- *val = req.HTTPResponse.Header.Get(key)
- })
- }
-}
-
-// WithGetResponseHeaders builds a request Option which will retrieve the
-// headers from the HTTP response and assign them to the passed in headers
-// variable. The passed in headers pointer must be non-nil.
-//
-// var headers http.Header
-// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
-func WithGetResponseHeaders(headers *http.Header) Option {
- return func(r *Request) {
- r.Handlers.Complete.PushBack(func(req *Request) {
- *headers = req.HTTPResponse.Header
- })
- }
-}
-
-// WithLogLevel is a request option that will set the request to use a specific
-// log level when the request is made.
-//
-// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)
-func WithLogLevel(l aws.LogLevelType) Option {
- return func(r *Request) {
- r.Config.LogLevel = aws.LogLevel(l)
- }
-}
-
-// ApplyOptions will apply each option to the request calling them in the order
-// the were provided.
-func (r *Request) ApplyOptions(opts ...Option) {
- for _, opt := range opts {
- opt(r)
- }
-}
-
-// Context will always returns a non-nil context. If Request does not have a
-// context aws.BackgroundContext will be returned.
-func (r *Request) Context() aws.Context {
- if r.context != nil {
- return r.context
- }
- return aws.BackgroundContext()
-}
-
-// SetContext adds a Context to the current request that can be used to cancel
-// a in-flight request. The Context value must not be nil, or this method will
-// panic.
-//
-// Unlike http.Request.WithContext, SetContext does not return a copy of the
-// Request. It is not safe to use use a single Request value for multiple
-// requests. A new Request should be created for each API operation request.
-//
-// Go 1.6 and below:
-// The http.Request's Cancel field will be set to the Done() value of
-// the context. This will overwrite the Cancel field's value.
-//
-// Go 1.7 and above:
-// The http.Request.WithContext will be used to set the context on the underlying
-// http.Request. This will create a shallow copy of the http.Request. The SDK
-// may create sub contexts in the future for nested requests such as retries.
-func (r *Request) SetContext(ctx aws.Context) {
- if ctx == nil {
- panic("context cannot be nil")
- }
- setRequestContext(r, ctx)
-}
-
-// WillRetry returns if the request's can be retried.
-func (r *Request) WillRetry() bool {
- if !aws.IsReaderSeekable(r.Body) && r.HTTPRequest.Body != NoBody {
- return false
- }
- return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
-}
-
-func fmtAttemptCount(retryCount, maxRetries int) string {
- return fmt.Sprintf("attempt %v/%v", retryCount, maxRetries)
-}
-
-// ParamsFilled returns if the request's parameters have been populated
-// and the parameters are valid. False is returned if no parameters are
-// provided or invalid.
-func (r *Request) ParamsFilled() bool {
- return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
-}
-
-// DataFilled returns true if the request's data for response deserialization
-// target has been set and is a valid. False is returned if data is not
-// set, or is invalid.
-func (r *Request) DataFilled() bool {
- return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
-}
-
-// SetBufferBody will set the request's body bytes that will be sent to
-// the service API.
-func (r *Request) SetBufferBody(buf []byte) {
- r.SetReaderBody(bytes.NewReader(buf))
-}
-
-// SetStringBody sets the body of the request to be backed by a string.
-func (r *Request) SetStringBody(s string) {
- r.SetReaderBody(strings.NewReader(s))
-}
-
-// SetReaderBody will set the request's body reader.
-func (r *Request) SetReaderBody(reader io.ReadSeeker) {
- r.Body = reader
-
- if aws.IsReaderSeekable(reader) {
- var err error
- // Get the Bodies current offset so retries will start from the same
- // initial position.
- r.BodyStart, err = reader.Seek(0, sdkio.SeekCurrent)
- if err != nil {
- r.Error = awserr.New(ErrCodeSerialization,
- "failed to determine start of request body", err)
- return
- }
- }
- r.ResetBody()
-}
-
-// SetStreamingBody set the reader to be used for the request that will stream
-// bytes to the server. Request's Body must not be set to any reader.
-func (r *Request) SetStreamingBody(reader io.ReadCloser) {
- r.streamingBody = reader
- r.SetReaderBody(aws.ReadSeekCloser(reader))
-}
-
-// Presign returns the request's signed URL. Error will be returned
-// if the signing fails. The expire parameter is only used for presigned Amazon
-// S3 API requests. All other AWS services will use a fixed expiration
-// time of 15 minutes.
-//
-// It is invalid to create a presigned URL with a expire duration 0 or less. An
-// error is returned if expire duration is 0 or less.
-func (r *Request) Presign(expire time.Duration) (string, error) {
- r = r.copy()
-
- // Presign requires all headers be hoisted. There is no way to retrieve
- // the signed headers not hoisted without this. Making the presigned URL
- // useless.
- r.NotHoist = false
-
- u, _, err := getPresignedURL(r, expire)
- return u, err
-}
-
-// PresignRequest behaves just like presign, with the addition of returning a
-// set of headers that were signed. The expire parameter is only used for
-// presigned Amazon S3 API requests. All other AWS services will use a fixed
-// expiration time of 15 minutes.
-//
-// It is invalid to create a presigned URL with a expire duration 0 or less. An
-// error is returned if expire duration is 0 or less.
-//
-// Returns the URL string for the API operation with signature in the query string,
-// and the HTTP headers that were included in the signature. These headers must
-// be included in any HTTP request made with the presigned URL.
-//
-// To prevent hoisting any headers to the query string set NotHoist to true on
-// this Request value prior to calling PresignRequest.
-func (r *Request) PresignRequest(expire time.Duration) (string, http.Header, error) {
- r = r.copy()
- return getPresignedURL(r, expire)
-}
-
-// IsPresigned returns true if the request represents a presigned API url.
-func (r *Request) IsPresigned() bool {
- return r.ExpireTime != 0
-}
-
-func getPresignedURL(r *Request, expire time.Duration) (string, http.Header, error) {
- if expire <= 0 {
- return "", nil, awserr.New(
- ErrCodeInvalidPresignExpire,
- "presigned URL requires an expire duration greater than 0",
- nil,
- )
- }
-
- r.ExpireTime = expire
-
- if r.Operation.BeforePresignFn != nil {
- if err := r.Operation.BeforePresignFn(r); err != nil {
- return "", nil, err
- }
- }
-
- if err := r.Sign(); err != nil {
- return "", nil, err
- }
-
- return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
-}
-
-const (
- notRetrying = "not retrying"
-)
-
-func debugLogReqError(r *Request, stage, retryStr string, err error) {
- if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
- return
- }
-
- r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
- stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
-}
-
-// Build will build the request's object so it can be signed and sent
-// to the service. Build will also validate all the request's parameters.
-// Any additional build Handlers set on this request will be run
-// in the order they were set.
-//
-// The request will only be built once. Multiple calls to build will have
-// no effect.
-//
-// If any Validate or Build errors occur the build will stop and the error
-// which occurred will be returned.
-func (r *Request) Build() error {
- if !r.built {
- r.Handlers.Validate.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Validate Request", notRetrying, r.Error)
- return r.Error
- }
- r.Handlers.Build.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Build Request", notRetrying, r.Error)
- return r.Error
- }
- r.built = true
- }
-
- return r.Error
-}
-
-// Sign will sign the request, returning error if errors are encountered.
-//
-// Sign will build the request prior to signing. All Sign Handlers will
-// be executed in the order they were set.
-func (r *Request) Sign() error {
- r.Build()
- if r.Error != nil {
- debugLogReqError(r, "Build Request", notRetrying, r.Error)
- return r.Error
- }
-
- SanitizeHostForHeader(r.HTTPRequest)
-
- r.Handlers.Sign.Run(r)
- return r.Error
-}
-
-func (r *Request) getNextRequestBody() (body io.ReadCloser, err error) {
- if r.streamingBody != nil {
- return r.streamingBody, nil
- }
-
- if r.safeBody != nil {
- r.safeBody.Close()
- }
-
- r.safeBody, err = newOffsetReader(r.Body, r.BodyStart)
- if err != nil {
- return nil, awserr.New(ErrCodeSerialization,
- "failed to get next request body reader", err)
- }
-
- // Go 1.8 tightened and clarified the rules code needs to use when building
- // requests with the http package. Go 1.8 removed the automatic detection
- // of if the Request.Body was empty, or actually had bytes in it. The SDK
- // always sets the Request.Body even if it is empty and should not actually
- // be sent. This is incorrect.
- //
- // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http
- // client that the request really should be sent without a body. The
- // Request.Body cannot be set to nil, which is preferable, because the
- // field is exported and could introduce nil pointer dereferences for users
- // of the SDK if they used that field.
- //
- // Related golang/go#18257
- l, err := aws.SeekerLen(r.Body)
- if err != nil {
- return nil, awserr.New(ErrCodeSerialization,
- "failed to compute request body size", err)
- }
-
- if l == 0 {
- body = NoBody
- } else if l > 0 {
- body = r.safeBody
- } else {
- // Hack to prevent sending bodies for methods where the body
- // should be ignored by the server. Sending bodies on these
- // methods without an associated ContentLength will cause the
- // request to socket timeout because the server does not handle
- // Transfer-Encoding: chunked bodies for these methods.
- //
- // This would only happen if a aws.ReaderSeekerCloser was used with
- // a io.Reader that was not also an io.Seeker, or did not implement
- // Len() method.
- switch r.Operation.HTTPMethod {
- case "GET", "HEAD", "DELETE":
- body = NoBody
- default:
- body = r.safeBody
- }
- }
-
- return body, nil
-}
-
-// GetBody will return an io.ReadSeeker of the Request's underlying
-// input body with a concurrency safe wrapper.
-func (r *Request) GetBody() io.ReadSeeker {
- return r.safeBody
-}
-
-// Send will send the request, returning error if errors are encountered.
-//
-// Send will sign the request prior to sending. All Send Handlers will
-// be executed in the order they were set.
-//
-// Canceling a request is non-deterministic. If a request has been canceled,
-// then the transport will choose, randomly, one of the state channels during
-// reads or getting the connection.
-//
-// readLoop() and getConn(req *Request, cm connectMethod)
-// https://github.com/golang/go/blob/master/src/net/http/transport.go
-//
-// Send will not close the request.Request's body.
-func (r *Request) Send() error {
- defer func() {
- // Ensure a non-nil HTTPResponse parameter is set to ensure handlers
- // checking for HTTPResponse values, don't fail.
- if r.HTTPResponse == nil {
- r.HTTPResponse = &http.Response{
- Header: http.Header{},
- Body: ioutil.NopCloser(&bytes.Buffer{}),
- }
- }
- // Regardless of success or failure of the request trigger the Complete
- // request handlers.
- r.Handlers.Complete.Run(r)
- }()
-
- if err := r.Error; err != nil {
- return err
- }
-
- for {
- r.Error = nil
- r.AttemptTime = time.Now()
-
- if err := r.Sign(); err != nil {
- debugLogReqError(r, "Sign Request", notRetrying, err)
- return err
- }
-
- if err := r.sendRequest(); err == nil {
- return nil
- }
- r.Handlers.Retry.Run(r)
- r.Handlers.AfterRetry.Run(r)
-
- if r.Error != nil || !aws.BoolValue(r.Retryable) {
- return r.Error
- }
-
- if err := r.prepareRetry(); err != nil {
- r.Error = err
- return err
- }
- }
-}
-
-func (r *Request) prepareRetry() error {
- if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
- r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
- r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
- }
-
- // The previous http.Request will have a reference to the r.Body
- // and the HTTP Client's Transport may still be reading from
- // the request's body even though the Client's Do returned.
- r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
- r.ResetBody()
- if err := r.Error; err != nil {
- return awserr.New(ErrCodeSerialization,
- "failed to prepare body for retry", err)
-
- }
-
- // Closing response body to ensure that no response body is leaked
- // between retry attempts.
- if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
- r.HTTPResponse.Body.Close()
- }
-
- return nil
-}
-
-func (r *Request) sendRequest() (sendErr error) {
- defer r.Handlers.CompleteAttempt.Run(r)
-
- r.Retryable = nil
- r.Handlers.Send.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Send Request",
- fmtAttemptCount(r.RetryCount, r.MaxRetries()),
- r.Error)
- return r.Error
- }
-
- r.Handlers.UnmarshalMeta.Run(r)
- r.Handlers.ValidateResponse.Run(r)
- if r.Error != nil {
- r.Handlers.UnmarshalError.Run(r)
- debugLogReqError(r, "Validate Response",
- fmtAttemptCount(r.RetryCount, r.MaxRetries()),
- r.Error)
- return r.Error
- }
-
- r.Handlers.Unmarshal.Run(r)
- if r.Error != nil {
- debugLogReqError(r, "Unmarshal Response",
- fmtAttemptCount(r.RetryCount, r.MaxRetries()),
- r.Error)
- return r.Error
- }
-
- return nil
-}
-
-// copy will copy a request which will allow for local manipulation of the
-// request.
-func (r *Request) copy() *Request {
- req := &Request{}
- *req = *r
- req.Handlers = r.Handlers.Copy()
- op := *r.Operation
- req.Operation = &op
- return req
-}
-
-// AddToUserAgent adds the string to the end of the request's current user agent.
-func AddToUserAgent(r *Request, s string) {
- curUA := r.HTTPRequest.Header.Get("User-Agent")
- if len(curUA) > 0 {
- s = curUA + " " + s
- }
- r.HTTPRequest.Header.Set("User-Agent", s)
-}
-
-// SanitizeHostForHeader removes default port from host and updates request.Host
-func SanitizeHostForHeader(r *http.Request) {
- host := getHost(r)
- port := portOnly(host)
- if port != "" && isDefaultPort(r.URL.Scheme, port) {
- r.Host = stripPort(host)
- }
-}
-
-// Returns host from request
-func getHost(r *http.Request) string {
- if r.Host != "" {
- return r.Host
- }
-
- if r.URL == nil {
- return ""
- }
-
- return r.URL.Host
-}
-
-// Hostname returns u.Host, without any port number.
-//
-// If Host is an IPv6 literal with a port number, Hostname returns the
-// IPv6 literal without the square brackets. IPv6 literals may include
-// a zone identifier.
-//
-// Copied from the Go 1.8 standard library (net/url)
-func stripPort(hostport string) string {
- colon := strings.IndexByte(hostport, ':')
- if colon == -1 {
- return hostport
- }
- if i := strings.IndexByte(hostport, ']'); i != -1 {
- return strings.TrimPrefix(hostport[:i], "[")
- }
- return hostport[:colon]
-}
-
-// Port returns the port part of u.Host, without the leading colon.
-// If u.Host doesn't contain a port, Port returns an empty string.
-//
-// Copied from the Go 1.8 standard library (net/url)
-func portOnly(hostport string) string {
- colon := strings.IndexByte(hostport, ':')
- if colon == -1 {
- return ""
- }
- if i := strings.Index(hostport, "]:"); i != -1 {
- return hostport[i+len("]:"):]
- }
- if strings.Contains(hostport, "]") {
- return ""
- }
- return hostport[colon+len(":"):]
-}
-
-// Returns true if the specified URI is using the standard port
-// (i.e. port 80 for HTTP URIs or 443 for HTTPS URIs)
-func isDefaultPort(scheme, port string) bool {
- if port == "" {
- return true
- }
-
- lowerCaseScheme := strings.ToLower(scheme)
- if (lowerCaseScheme == "http" && port == "80") || (lowerCaseScheme == "https" && port == "443") {
- return true
- }
-
- return false
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
deleted file mode 100644
index 5921b8ff2..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
+++ /dev/null
@@ -1,40 +0,0 @@
-//go:build !go1.8
-// +build !go1.8
-
-package request
-
-import "io"
-
-// NoBody is an io.ReadCloser with no bytes. Read always returns EOF
-// and Close always returns nil. It can be used in an outgoing client
-// request to explicitly signal that a request has zero bytes.
-// An alternative, however, is to simply set Request.Body to nil.
-//
-// Copy of Go 1.8 NoBody type from net/http/http.go
-type noBody struct{}
-
-func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
-func (noBody) Close() error { return nil }
-func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
-
-// NoBody is an empty reader that will trigger the Go HTTP client to not include
-// and body in the HTTP request.
-var NoBody = noBody{}
-
-// ResetBody rewinds the request body back to its starting position, and
-// sets the HTTP Request body reference. When the body is read prior
-// to being sent in the HTTP request it will need to be rewound.
-//
-// ResetBody will automatically be called by the SDK's build handler, but if
-// the request is being used directly ResetBody must be called before the request
-// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
-// call ResetBody.
-func (r *Request) ResetBody() {
- body, err := r.getNextRequestBody()
- if err != nil {
- r.Error = err
- return
- }
-
- r.HTTPRequest.Body = body
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
deleted file mode 100644
index ea643c9c4..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
+++ /dev/null
@@ -1,37 +0,0 @@
-//go:build go1.8
-// +build go1.8
-
-package request
-
-import (
- "net/http"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// NoBody is a http.NoBody reader instructing Go HTTP client to not include
-// and body in the HTTP request.
-var NoBody = http.NoBody
-
-// ResetBody rewinds the request body back to its starting position, and
-// sets the HTTP Request body reference. When the body is read prior
-// to being sent in the HTTP request it will need to be rewound.
-//
-// ResetBody will automatically be called by the SDK's build handler, but if
-// the request is being used directly ResetBody must be called before the request
-// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
-// call ResetBody.
-//
-// Will also set the Go 1.8's http.Request.GetBody member to allow retrying
-// PUT/POST redirects.
-func (r *Request) ResetBody() {
- body, err := r.getNextRequestBody()
- if err != nil {
- r.Error = awserr.New(ErrCodeSerialization,
- "failed to reset request body", err)
- return
- }
-
- r.HTTPRequest.Body = body
- r.HTTPRequest.GetBody = r.getNextRequestBody
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
deleted file mode 100644
index d8c505302..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build go1.7
-// +build go1.7
-
-package request
-
-import "github.com/aws/aws-sdk-go/aws"
-
-// setContext updates the Request to use the passed in context for cancellation.
-// Context will also be used for request retry delay.
-//
-// Creates shallow copy of the http.Request with the WithContext method.
-func setRequestContext(r *Request, ctx aws.Context) {
- r.context = ctx
- r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
deleted file mode 100644
index 49a243ef2..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//go:build !go1.7
-// +build !go1.7
-
-package request
-
-import "github.com/aws/aws-sdk-go/aws"
-
-// setContext updates the Request to use the passed in context for cancellation.
-// Context will also be used for request retry delay.
-//
-// Creates shallow copy of the http.Request with the WithContext method.
-func setRequestContext(r *Request, ctx aws.Context) {
- r.context = ctx
- r.HTTPRequest.Cancel = ctx.Done()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
deleted file mode 100644
index 64784e16f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
+++ /dev/null
@@ -1,266 +0,0 @@
-package request
-
-import (
- "reflect"
- "sync/atomic"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
-)
-
-// A Pagination provides paginating of SDK API operations which are paginatable.
-// Generally you should not use this type directly, but use the "Pages" API
-// operations method to automatically perform pagination for you. Such as,
-// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
-//
-// Pagination differs from a Paginator type in that pagination is the type that
-// does the pagination between API operations, and Paginator defines the
-// configuration that will be used per page request.
-//
-// for p.Next() {
-// data := p.Page().(*s3.ListObjectsOutput)
-// // process the page's data
-// // ...
-// // break out of loop to stop fetching additional pages
-// }
-//
-// return p.Err()
-//
-// See service client API operation Pages methods for examples how the SDK will
-// use the Pagination type.
-type Pagination struct {
- // Function to return a Request value for each pagination request.
- // Any configuration or handlers that need to be applied to the request
- // prior to getting the next page should be done here before the request
- // returned.
- //
- // NewRequest should always be built from the same API operations. It is
- // undefined if different API operations are returned on subsequent calls.
- NewRequest func() (*Request, error)
- // EndPageOnSameToken, when enabled, will allow the paginator to stop on
- // token that are the same as its previous tokens.
- EndPageOnSameToken bool
-
- started bool
- prevTokens []interface{}
- nextTokens []interface{}
-
- err error
- curPage interface{}
-}
-
-// HasNextPage will return true if Pagination is able to determine that the API
-// operation has additional pages. False will be returned if there are no more
-// pages remaining.
-//
-// Will always return true if Next has not been called yet.
-func (p *Pagination) HasNextPage() bool {
- if !p.started {
- return true
- }
-
- hasNextPage := len(p.nextTokens) != 0
- if p.EndPageOnSameToken {
- return hasNextPage && !awsutil.DeepEqual(p.nextTokens, p.prevTokens)
- }
- return hasNextPage
-}
-
-// Err returns the error Pagination encountered when retrieving the next page.
-func (p *Pagination) Err() error {
- return p.err
-}
-
-// Page returns the current page. Page should only be called after a successful
-// call to Next. It is undefined what Page will return if Page is called after
-// Next returns false.
-func (p *Pagination) Page() interface{} {
- return p.curPage
-}
-
-// Next will attempt to retrieve the next page for the API operation. When a page
-// is retrieved true will be returned. If the page cannot be retrieved, or there
-// are no more pages false will be returned.
-//
-// Use the Page method to retrieve the current page data. The data will need
-// to be cast to the API operation's output type.
-//
-// Use the Err method to determine if an error occurred if Page returns false.
-func (p *Pagination) Next() bool {
- if !p.HasNextPage() {
- return false
- }
-
- req, err := p.NewRequest()
- if err != nil {
- p.err = err
- return false
- }
-
- if p.started {
- for i, intok := range req.Operation.InputTokens {
- awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
- }
- }
- p.started = true
-
- err = req.Send()
- if err != nil {
- p.err = err
- return false
- }
-
- p.prevTokens = p.nextTokens
- p.nextTokens = req.nextPageTokens()
- p.curPage = req.Data
-
- return true
-}
-
-// A Paginator is the configuration data that defines how an API operation
-// should be paginated. This type is used by the API service models to define
-// the generated pagination config for service APIs.
-//
-// The Pagination type is what provides iterating between pages of an API. It
-// is only used to store the token metadata the SDK should use for performing
-// pagination.
-type Paginator struct {
- InputTokens []string
- OutputTokens []string
- LimitToken string
- TruncationToken string
-}
-
-// nextPageTokens returns the tokens to use when asking for the next page of data.
-func (r *Request) nextPageTokens() []interface{} {
- if r.Operation.Paginator == nil {
- return nil
- }
- if r.Operation.TruncationToken != "" {
- tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
- if len(tr) == 0 {
- return nil
- }
-
- switch v := tr[0].(type) {
- case *bool:
- if !aws.BoolValue(v) {
- return nil
- }
- case bool:
- if !v {
- return nil
- }
- }
- }
-
- tokens := []interface{}{}
- tokenAdded := false
- for _, outToken := range r.Operation.OutputTokens {
- vs, _ := awsutil.ValuesAtPath(r.Data, outToken)
- if len(vs) == 0 {
- tokens = append(tokens, nil)
- continue
- }
- v := vs[0]
-
- switch tv := v.(type) {
- case *string:
- if len(aws.StringValue(tv)) == 0 {
- tokens = append(tokens, nil)
- continue
- }
- case string:
- if len(tv) == 0 {
- tokens = append(tokens, nil)
- continue
- }
- }
-
- tokenAdded = true
- tokens = append(tokens, v)
- }
- if !tokenAdded {
- return nil
- }
-
- return tokens
-}
-
-// Ensure a deprecated item is only logged once instead of each time its used.
-func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
- if logger == nil {
- return
- }
- if atomic.CompareAndSwapInt32(flag, 0, 1) {
- logger.Log(msg)
- }
-}
-
-var (
- logDeprecatedHasNextPage int32
- logDeprecatedNextPage int32
- logDeprecatedEachPage int32
-)
-
-// HasNextPage returns true if this request has more pages of data available.
-//
-// Deprecated Use Pagination type for configurable pagination of API operations
-func (r *Request) HasNextPage() bool {
- logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
- "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
-
- return len(r.nextPageTokens()) > 0
-}
-
-// NextPage returns a new Request that can be executed to return the next
-// page of result data. Call .Send() on this request to execute it.
-//
-// Deprecated Use Pagination type for configurable pagination of API operations
-func (r *Request) NextPage() *Request {
- logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
- "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
-
- tokens := r.nextPageTokens()
- if len(tokens) == 0 {
- return nil
- }
-
- data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
- nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
- for i, intok := range nr.Operation.InputTokens {
- awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
- }
- return nr
-}
-
-// EachPage iterates over each page of a paginated request object. The fn
-// parameter should be a function with the following sample signature:
-//
-// func(page *T, lastPage bool) bool {
-// return true // return false to stop iterating
-// }
-//
-// Where "T" is the structure type matching the output structure of the given
-// operation. For example, a request object generated by
-// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
-// as the structure "T". The lastPage value represents whether the page is
-// the last page of data or not. The return value of this function should
-// return true to keep iterating or false to stop.
-//
-// Deprecated Use Pagination type for configurable pagination of API operations
-func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
- logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
- "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
-
- for page := r; page != nil; page = page.NextPage() {
- if err := page.Send(); err != nil {
- return err
- }
- if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
- return page.Error
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
deleted file mode 100644
index 3f0001f91..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package request
-
-import (
- "net"
- "net/url"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// Retryer provides the interface drive the SDK's request retry behavior. The
-// Retryer implementation is responsible for implementing exponential backoff,
-// and determine if a request API error should be retried.
-//
-// client.DefaultRetryer is the SDK's default implementation of the Retryer. It
-// uses the Request.IsErrorRetryable and Request.IsErrorThrottle methods to
-// determine if the request is retried.
-type Retryer interface {
- // RetryRules return the retry delay that should be used by the SDK before
- // making another request attempt for the failed request.
- RetryRules(*Request) time.Duration
-
- // ShouldRetry returns if the failed request is retryable.
- //
- // Implementations may consider request attempt count when determining if a
- // request is retryable, but the SDK will use MaxRetries to limit the
- // number of attempts a request are made.
- ShouldRetry(*Request) bool
-
- // MaxRetries is the number of times a request may be retried before
- // failing.
- MaxRetries() int
-}
-
-// WithRetryer sets a Retryer value to the given Config returning the Config
-// value for chaining. The value must not be nil.
-func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
- if retryer == nil {
- if cfg.Logger != nil {
- cfg.Logger.Log("ERROR: Request.WithRetryer called with nil retryer. Replacing with retry disabled Retryer.")
- }
- retryer = noOpRetryer{}
- }
- cfg.Retryer = retryer
- return cfg
-
-}
-
-// noOpRetryer is a internal no op retryer used when a request is created
-// without a retryer.
-//
-// Provides a retryer that performs no retries.
-// It should be used when we do not want retries to be performed.
-type noOpRetryer struct{}
-
-// MaxRetries returns the number of maximum returns the service will use to make
-// an individual API; For NoOpRetryer the MaxRetries will always be zero.
-func (d noOpRetryer) MaxRetries() int {
- return 0
-}
-
-// ShouldRetry will always return false for NoOpRetryer, as it should never retry.
-func (d noOpRetryer) ShouldRetry(_ *Request) bool {
- return false
-}
-
-// RetryRules returns the delay duration before retrying this request again;
-// since NoOpRetryer does not retry, RetryRules always returns 0.
-func (d noOpRetryer) RetryRules(_ *Request) time.Duration {
- return 0
-}
-
-// retryableCodes is a collection of service response codes which are retry-able
-// without any further action.
-var retryableCodes = map[string]struct{}{
- ErrCodeRequestError: {},
- "RequestTimeout": {},
- ErrCodeResponseTimeout: {},
- "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
-}
-
-var throttleCodes = map[string]struct{}{
- "ProvisionedThroughputExceededException": {},
- "ThrottledException": {}, // SNS, XRay, ResourceGroupsTagging API
- "Throttling": {},
- "ThrottlingException": {},
- "RequestLimitExceeded": {},
- "RequestThrottled": {},
- "RequestThrottledException": {},
- "TooManyRequestsException": {}, // Lambda functions
- "PriorRequestNotComplete": {}, // Route53
- "TransactionInProgressException": {},
- "EC2ThrottledException": {}, // EC2
-}
-
-// credsExpiredCodes is a collection of error codes which signify the credentials
-// need to be refreshed. Expired tokens require refreshing of credentials, and
-// resigning before the request can be retried.
-var credsExpiredCodes = map[string]struct{}{
- "ExpiredToken": {},
- "ExpiredTokenException": {},
- "RequestExpired": {}, // EC2 Only
-}
-
-func isCodeThrottle(code string) bool {
- _, ok := throttleCodes[code]
- return ok
-}
-
-func isCodeRetryable(code string) bool {
- if _, ok := retryableCodes[code]; ok {
- return true
- }
-
- return isCodeExpiredCreds(code)
-}
-
-func isCodeExpiredCreds(code string) bool {
- _, ok := credsExpiredCodes[code]
- return ok
-}
-
-var validParentCodes = map[string]struct{}{
- ErrCodeSerialization: {},
- ErrCodeRead: {},
-}
-
-func isNestedErrorRetryable(parentErr awserr.Error) bool {
- if parentErr == nil {
- return false
- }
-
- if _, ok := validParentCodes[parentErr.Code()]; !ok {
- return false
- }
-
- err := parentErr.OrigErr()
- if err == nil {
- return false
- }
-
- if aerr, ok := err.(awserr.Error); ok {
- return isCodeRetryable(aerr.Code())
- }
-
- if t, ok := err.(temporary); ok {
- return t.Temporary() || isErrConnectionReset(err)
- }
-
- return isErrConnectionReset(err)
-}
-
-// IsErrorRetryable returns whether the error is retryable, based on its Code.
-// Returns false if error is nil.
-func IsErrorRetryable(err error) bool {
- if err == nil {
- return false
- }
- return shouldRetryError(err)
-}
-
-type temporary interface {
- Temporary() bool
-}
-
-func shouldRetryError(origErr error) bool {
- switch err := origErr.(type) {
- case awserr.Error:
- if err.Code() == CanceledErrorCode {
- return false
- }
- if isNestedErrorRetryable(err) {
- return true
- }
-
- origErr := err.OrigErr()
- var shouldRetry bool
- if origErr != nil {
- shouldRetry = shouldRetryError(origErr)
- if err.Code() == ErrCodeRequestError && !shouldRetry {
- return false
- }
- }
- if isCodeRetryable(err.Code()) {
- return true
- }
- return shouldRetry
-
- case *url.Error:
- if strings.Contains(err.Error(), "connection refused") {
- // Refused connections should be retried as the service may not yet
- // be running on the port. Go TCP dial considers refused
- // connections as not temporary.
- return true
- }
- // *url.Error only implements Temporary after golang 1.6 but since
- // url.Error only wraps the error:
- return shouldRetryError(err.Err)
-
- case temporary:
- if netErr, ok := err.(*net.OpError); ok && netErr.Op == "dial" {
- return true
- }
- // If the error is temporary, we want to allow continuation of the
- // retry process
- return err.Temporary() || isErrConnectionReset(origErr)
-
- case nil:
- // `awserr.Error.OrigErr()` can be nil, meaning there was an error but
- // because we don't know the cause, it is marked as retryable. See
- // TestRequest4xxUnretryable for an example.
- return true
-
- default:
- switch err.Error() {
- case "net/http: request canceled",
- "net/http: request canceled while waiting for connection":
- // known 1.5 error case when an http request is cancelled
- return false
- }
- // here we don't know the error; so we allow a retry.
- return true
- }
-}
-
-// IsErrorThrottle returns whether the error is to be throttled based on its code.
-// Returns false if error is nil.
-func IsErrorThrottle(err error) bool {
- if aerr, ok := err.(awserr.Error); ok && aerr != nil {
- return isCodeThrottle(aerr.Code())
- }
- return false
-}
-
-// IsErrorExpiredCreds returns whether the error code is a credential expiry
-// error. Returns false if error is nil.
-func IsErrorExpiredCreds(err error) bool {
- if aerr, ok := err.(awserr.Error); ok && aerr != nil {
- return isCodeExpiredCreds(aerr.Code())
- }
- return false
-}
-
-// IsErrorRetryable returns whether the error is retryable, based on its Code.
-// Returns false if the request has no Error set.
-//
-// Alias for the utility function IsErrorRetryable
-func (r *Request) IsErrorRetryable() bool {
- if isErrCode(r.Error, r.RetryErrorCodes) {
- return true
- }
-
- // HTTP response status code 501 should not be retried.
- // 501 represents Not Implemented which means the request method is not
- // supported by the server and cannot be handled.
- if r.HTTPResponse != nil {
- // HTTP response status code 500 represents internal server error and
- // should be retried without any throttle.
- if r.HTTPResponse.StatusCode == 500 {
- return true
- }
- }
- return IsErrorRetryable(r.Error)
-}
-
-// IsErrorThrottle returns whether the error is to be throttled based on its
-// code. Returns false if the request has no Error set.
-//
-// Alias for the utility function IsErrorThrottle
-func (r *Request) IsErrorThrottle() bool {
- if isErrCode(r.Error, r.ThrottleErrorCodes) {
- return true
- }
-
- if r.HTTPResponse != nil {
- switch r.HTTPResponse.StatusCode {
- case
- 429, // error caused due to too many requests
- 502, // Bad Gateway error should be throttled
- 503, // caused when service is unavailable
- 504: // error occurred due to gateway timeout
- return true
- }
- }
-
- return IsErrorThrottle(r.Error)
-}
-
-func isErrCode(err error, codes []string) bool {
- if aerr, ok := err.(awserr.Error); ok && aerr != nil {
- for _, code := range codes {
- if code == aerr.Code() {
- return true
- }
- }
- }
-
- return false
-}
-
-// IsErrorExpired returns whether the error code is a credential expiry error.
-// Returns false if the request has no Error set.
-//
-// Alias for the utility function IsErrorExpiredCreds
-func (r *Request) IsErrorExpired() bool {
- return IsErrorExpiredCreds(r.Error)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
deleted file mode 100644
index 09a44eb98..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package request
-
-import (
- "io"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-var timeoutErr = awserr.New(
- ErrCodeResponseTimeout,
- "read on body has reached the timeout limit",
- nil,
-)
-
-type readResult struct {
- n int
- err error
-}
-
-// timeoutReadCloser will handle body reads that take too long.
-// We will return a ErrReadTimeout error if a timeout occurs.
-type timeoutReadCloser struct {
- reader io.ReadCloser
- duration time.Duration
-}
-
-// Read will spin off a goroutine to call the reader's Read method. We will
-// select on the timer's channel or the read's channel. Whoever completes first
-// will be returned.
-func (r *timeoutReadCloser) Read(b []byte) (int, error) {
- timer := time.NewTimer(r.duration)
- c := make(chan readResult, 1)
-
- go func() {
- n, err := r.reader.Read(b)
- timer.Stop()
- c <- readResult{n: n, err: err}
- }()
-
- select {
- case data := <-c:
- return data.n, data.err
- case <-timer.C:
- return 0, timeoutErr
- }
-}
-
-func (r *timeoutReadCloser) Close() error {
- return r.reader.Close()
-}
-
-const (
- // HandlerResponseTimeout is what we use to signify the name of the
- // response timeout handler.
- HandlerResponseTimeout = "ResponseTimeoutHandler"
-)
-
-// adaptToResponseTimeoutError is a handler that will replace any top level error
-// to a ErrCodeResponseTimeout, if its child is that.
-func adaptToResponseTimeoutError(req *Request) {
- if err, ok := req.Error.(awserr.Error); ok {
- aerr, ok := err.OrigErr().(awserr.Error)
- if ok && aerr.Code() == ErrCodeResponseTimeout {
- req.Error = aerr
- }
- }
-}
-
-// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer.
-// This will allow for per read timeouts. If a timeout occurred, we will return the
-// ErrCodeResponseTimeout.
-//
-// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second)
-func WithResponseReadTimeout(duration time.Duration) Option {
- return func(r *Request) {
-
- var timeoutHandler = NamedHandler{
- HandlerResponseTimeout,
- func(req *Request) {
- req.HTTPResponse.Body = &timeoutReadCloser{
- reader: req.HTTPResponse.Body,
- duration: duration,
- }
- }}
-
- // remove the handler so we are not stomping over any new durations.
- r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
- r.Handlers.Send.PushBackNamed(timeoutHandler)
-
- r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
- r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
deleted file mode 100644
index 8630683f3..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
+++ /dev/null
@@ -1,286 +0,0 @@
-package request
-
-import (
- "bytes"
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-const (
- // InvalidParameterErrCode is the error code for invalid parameters errors
- InvalidParameterErrCode = "InvalidParameter"
- // ParamRequiredErrCode is the error code for required parameter errors
- ParamRequiredErrCode = "ParamRequiredError"
- // ParamMinValueErrCode is the error code for fields with too low of a
- // number value.
- ParamMinValueErrCode = "ParamMinValueError"
- // ParamMinLenErrCode is the error code for fields without enough elements.
- ParamMinLenErrCode = "ParamMinLenError"
- // ParamMaxLenErrCode is the error code for value being too long.
- ParamMaxLenErrCode = "ParamMaxLenError"
-
- // ParamFormatErrCode is the error code for a field with invalid
- // format or characters.
- ParamFormatErrCode = "ParamFormatInvalidError"
-)
-
-// Validator provides a way for types to perform validation logic on their
-// input values that external code can use to determine if a type's values
-// are valid.
-type Validator interface {
- Validate() error
-}
-
-// An ErrInvalidParams provides wrapping of invalid parameter errors found when
-// validating API operation input parameters.
-type ErrInvalidParams struct {
- // Context is the base context of the invalid parameter group.
- Context string
- errs []ErrInvalidParam
-}
-
-// Add adds a new invalid parameter error to the collection of invalid
-// parameters. The context of the invalid parameter will be updated to reflect
-// this collection.
-func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
- err.SetContext(e.Context)
- e.errs = append(e.errs, err)
-}
-
-// AddNested adds the invalid parameter errors from another ErrInvalidParams
-// value into this collection. The nested errors will have their nested context
-// updated and base context to reflect the merging.
-//
-// Use for nested validations errors.
-func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
- for _, err := range nested.errs {
- err.SetContext(e.Context)
- err.AddNestedContext(nestedCtx)
- e.errs = append(e.errs, err)
- }
-}
-
-// Len returns the number of invalid parameter errors
-func (e ErrInvalidParams) Len() int {
- return len(e.errs)
-}
-
-// Code returns the code of the error
-func (e ErrInvalidParams) Code() string {
- return InvalidParameterErrCode
-}
-
-// Message returns the message of the error
-func (e ErrInvalidParams) Message() string {
- return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
-}
-
-// Error returns the string formatted form of the invalid parameters.
-func (e ErrInvalidParams) Error() string {
- w := &bytes.Buffer{}
- fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
-
- for _, err := range e.errs {
- fmt.Fprintf(w, "- %s\n", err.Message())
- }
-
- return w.String()
-}
-
-// OrigErr returns the invalid parameters as a awserr.BatchedErrors value
-func (e ErrInvalidParams) OrigErr() error {
- return awserr.NewBatchError(
- InvalidParameterErrCode, e.Message(), e.OrigErrs())
-}
-
-// OrigErrs returns a slice of the invalid parameters
-func (e ErrInvalidParams) OrigErrs() []error {
- errs := make([]error, len(e.errs))
- for i := 0; i < len(errs); i++ {
- errs[i] = e.errs[i]
- }
-
- return errs
-}
-
-// An ErrInvalidParam represents an invalid parameter error type.
-type ErrInvalidParam interface {
- awserr.Error
-
- // Field name the error occurred on.
- Field() string
-
- // SetContext updates the context of the error.
- SetContext(string)
-
- // AddNestedContext updates the error's context to include a nested level.
- AddNestedContext(string)
-}
-
-type errInvalidParam struct {
- context string
- nestedContext string
- field string
- code string
- msg string
-}
-
-// Code returns the error code for the type of invalid parameter.
-func (e *errInvalidParam) Code() string {
- return e.code
-}
-
-// Message returns the reason the parameter was invalid, and its context.
-func (e *errInvalidParam) Message() string {
- return fmt.Sprintf("%s, %s.", e.msg, e.Field())
-}
-
-// Error returns the string version of the invalid parameter error.
-func (e *errInvalidParam) Error() string {
- return fmt.Sprintf("%s: %s", e.code, e.Message())
-}
-
-// OrigErr returns nil, Implemented for awserr.Error interface.
-func (e *errInvalidParam) OrigErr() error {
- return nil
-}
-
-// Field Returns the field and context the error occurred.
-func (e *errInvalidParam) Field() string {
- field := e.context
- if len(field) > 0 {
- field += "."
- }
- if len(e.nestedContext) > 0 {
- field += fmt.Sprintf("%s.", e.nestedContext)
- }
- field += e.field
-
- return field
-}
-
-// SetContext updates the base context of the error.
-func (e *errInvalidParam) SetContext(ctx string) {
- e.context = ctx
-}
-
-// AddNestedContext prepends a context to the field's path.
-func (e *errInvalidParam) AddNestedContext(ctx string) {
- if len(e.nestedContext) == 0 {
- e.nestedContext = ctx
- } else {
- e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
- }
-
-}
-
-// An ErrParamRequired represents an required parameter error.
-type ErrParamRequired struct {
- errInvalidParam
-}
-
-// NewErrParamRequired creates a new required parameter error.
-func NewErrParamRequired(field string) *ErrParamRequired {
- return &ErrParamRequired{
- errInvalidParam{
- code: ParamRequiredErrCode,
- field: field,
- msg: fmt.Sprintf("missing required field"),
- },
- }
-}
-
-// An ErrParamMinValue represents a minimum value parameter error.
-type ErrParamMinValue struct {
- errInvalidParam
- min float64
-}
-
-// NewErrParamMinValue creates a new minimum value parameter error.
-func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
- return &ErrParamMinValue{
- errInvalidParam: errInvalidParam{
- code: ParamMinValueErrCode,
- field: field,
- msg: fmt.Sprintf("minimum field value of %v", min),
- },
- min: min,
- }
-}
-
-// MinValue returns the field's require minimum value.
-//
-// float64 is returned for both int and float min values.
-func (e *ErrParamMinValue) MinValue() float64 {
- return e.min
-}
-
-// An ErrParamMinLen represents a minimum length parameter error.
-type ErrParamMinLen struct {
- errInvalidParam
- min int
-}
-
-// NewErrParamMinLen creates a new minimum length parameter error.
-func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
- return &ErrParamMinLen{
- errInvalidParam: errInvalidParam{
- code: ParamMinLenErrCode,
- field: field,
- msg: fmt.Sprintf("minimum field size of %v", min),
- },
- min: min,
- }
-}
-
-// MinLen returns the field's required minimum length.
-func (e *ErrParamMinLen) MinLen() int {
- return e.min
-}
-
-// An ErrParamMaxLen represents a maximum length parameter error.
-type ErrParamMaxLen struct {
- errInvalidParam
- max int
-}
-
-// NewErrParamMaxLen creates a new maximum length parameter error.
-func NewErrParamMaxLen(field string, max int, value string) *ErrParamMaxLen {
- return &ErrParamMaxLen{
- errInvalidParam: errInvalidParam{
- code: ParamMaxLenErrCode,
- field: field,
- msg: fmt.Sprintf("maximum size of %v, %v", max, value),
- },
- max: max,
- }
-}
-
-// MaxLen returns the field's required minimum length.
-func (e *ErrParamMaxLen) MaxLen() int {
- return e.max
-}
-
-// An ErrParamFormat represents a invalid format parameter error.
-type ErrParamFormat struct {
- errInvalidParam
- format string
-}
-
-// NewErrParamFormat creates a new invalid format parameter error.
-func NewErrParamFormat(field string, format, value string) *ErrParamFormat {
- return &ErrParamFormat{
- errInvalidParam: errInvalidParam{
- code: ParamFormatErrCode,
- field: field,
- msg: fmt.Sprintf("format %v, %v", format, value),
- },
- format: format,
- }
-}
-
-// Format returns the field's required format.
-func (e *ErrParamFormat) Format() string {
- return e.format
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
deleted file mode 100644
index 992ed0464..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package request
-
-import (
- "fmt"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
-)
-
-// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
-// the waiter's max attempts have been exhausted.
-const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
-
-// A WaiterOption is a function that will update the Waiter value's fields to
-// configure the waiter.
-type WaiterOption func(*Waiter)
-
-// WithWaiterMaxAttempts returns the maximum number of times the waiter should
-// attempt to check the resource for the target state.
-func WithWaiterMaxAttempts(max int) WaiterOption {
- return func(w *Waiter) {
- w.MaxAttempts = max
- }
-}
-
-// WaiterDelay will return a delay the waiter should pause between attempts to
-// check the resource state. The passed in attempt is the number of times the
-// Waiter has checked the resource state.
-//
-// Attempt is the number of attempts the Waiter has made checking the resource
-// state.
-type WaiterDelay func(attempt int) time.Duration
-
-// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
-// delay the waiter should use between attempts. It ignores the number of
-// attempts made.
-func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
- return func(attempt int) time.Duration {
- return delay
- }
-}
-
-// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
-func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
- return func(w *Waiter) {
- w.Delay = delayer
- }
-}
-
-// WithWaiterLogger returns a waiter option to set the logger a waiter
-// should use to log warnings and errors to.
-func WithWaiterLogger(logger aws.Logger) WaiterOption {
- return func(w *Waiter) {
- w.Logger = logger
- }
-}
-
-// WithWaiterRequestOptions returns a waiter option setting the request
-// options for each request the waiter makes. Appends to waiter's request
-// options already set.
-func WithWaiterRequestOptions(opts ...Option) WaiterOption {
- return func(w *Waiter) {
- w.RequestOptions = append(w.RequestOptions, opts...)
- }
-}
-
-// A Waiter provides the functionality to perform a blocking call which will
-// wait for a resource state to be satisfied by a service.
-//
-// This type should not be used directly. The API operations provided in the
-// service packages prefixed with "WaitUntil" should be used instead.
-type Waiter struct {
- Name string
- Acceptors []WaiterAcceptor
- Logger aws.Logger
-
- MaxAttempts int
- Delay WaiterDelay
-
- RequestOptions []Option
- NewRequest func([]Option) (*Request, error)
- SleepWithContext func(aws.Context, time.Duration) error
-}
-
-// ApplyOptions updates the waiter with the list of waiter options provided.
-func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
- for _, fn := range opts {
- fn(w)
- }
-}
-
-// WaiterState are states the waiter uses based on WaiterAcceptor definitions
-// to identify if the resource state the waiter is waiting on has occurred.
-type WaiterState int
-
-// String returns the string representation of the waiter state.
-func (s WaiterState) String() string {
- switch s {
- case SuccessWaiterState:
- return "success"
- case FailureWaiterState:
- return "failure"
- case RetryWaiterState:
- return "retry"
- default:
- return "unknown waiter state"
- }
-}
-
-// States the waiter acceptors will use to identify target resource states.
-const (
- SuccessWaiterState WaiterState = iota // waiter successful
- FailureWaiterState // waiter failed
- RetryWaiterState // waiter needs to be retried
-)
-
-// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
-// definition's Expected attribute.
-type WaiterMatchMode int
-
-// Modes the waiter will use when inspecting API response to identify target
-// resource states.
-const (
- PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
- PathWaiterMatch // match on specific path
- PathAnyWaiterMatch // match on any path
- PathListWaiterMatch // match on list of paths
- StatusWaiterMatch // match on status code
- ErrorWaiterMatch // match on error
-)
-
-// String returns the string representation of the waiter match mode.
-func (m WaiterMatchMode) String() string {
- switch m {
- case PathAllWaiterMatch:
- return "pathAll"
- case PathWaiterMatch:
- return "path"
- case PathAnyWaiterMatch:
- return "pathAny"
- case PathListWaiterMatch:
- return "pathList"
- case StatusWaiterMatch:
- return "status"
- case ErrorWaiterMatch:
- return "error"
- default:
- return "unknown waiter match mode"
- }
-}
-
-// WaitWithContext will make requests for the API operation using NewRequest to
-// build API requests. The request's response will be compared against the
-// Waiter's Acceptors to determine the successful state of the resource the
-// waiter is inspecting.
-//
-// The passed in context must not be nil. If it is nil a panic will occur. The
-// Context will be used to cancel the waiter's pending requests and retry delays.
-// Use aws.BackgroundContext if no context is available.
-//
-// The waiter will continue until the target state defined by the Acceptors,
-// or the max attempts expires.
-//
-// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's
-// retryer ShouldRetry returns false. This normally will happen when the max
-// wait attempts expires.
-func (w Waiter) WaitWithContext(ctx aws.Context) error {
-
- for attempt := 1; ; attempt++ {
- req, err := w.NewRequest(w.RequestOptions)
- if err != nil {
- waiterLogf(w.Logger, "unable to create request %v", err)
- return err
- }
- req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
- err = req.Send()
-
- // See if any of the acceptors match the request's response, or error
- for _, a := range w.Acceptors {
- if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
- return matchErr
- }
- }
-
- // The Waiter should only check the resource state MaxAttempts times
- // This is here instead of in the for loop above to prevent delaying
- // unnecessary when the waiter will not retry.
- if attempt == w.MaxAttempts {
- break
- }
-
- // Delay to wait before inspecting the resource again
- delay := w.Delay(attempt)
- if sleepFn := req.Config.SleepDelay; sleepFn != nil {
- // Support SleepDelay for backwards compatibility and testing
- sleepFn(delay)
- } else {
- sleepCtxFn := w.SleepWithContext
- if sleepCtxFn == nil {
- sleepCtxFn = aws.SleepWithContext
- }
-
- if err := sleepCtxFn(ctx, delay); err != nil {
- return awserr.New(CanceledErrorCode, "waiter context canceled", err)
- }
- }
- }
-
- return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
-}
-
-// A WaiterAcceptor provides the information needed to wait for an API operation
-// to complete.
-type WaiterAcceptor struct {
- State WaiterState
- Matcher WaiterMatchMode
- Argument string
- Expected interface{}
-}
-
-// match returns if the acceptor found a match with the passed in request
-// or error. True is returned if the acceptor made a match, error is returned
-// if there was an error attempting to perform the match.
-func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
- result := false
- var vals []interface{}
-
- switch a.Matcher {
- case PathAllWaiterMatch, PathWaiterMatch:
- // Require all matches to be equal for result to match
- vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
- if len(vals) == 0 {
- break
- }
- result = true
- for _, val := range vals {
- if !awsutil.DeepEqual(val, a.Expected) {
- result = false
- break
- }
- }
- case PathAnyWaiterMatch:
- // Only a single match needs to equal for the result to match
- vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
- for _, val := range vals {
- if awsutil.DeepEqual(val, a.Expected) {
- result = true
- break
- }
- }
- case PathListWaiterMatch:
- // ignored matcher
- case StatusWaiterMatch:
- s := a.Expected.(int)
- result = s == req.HTTPResponse.StatusCode
- case ErrorWaiterMatch:
- switch ex := a.Expected.(type) {
- case string:
- if aerr, ok := err.(awserr.Error); ok {
- result = aerr.Code() == ex
- }
- case bool:
- if ex {
- result = err != nil
- } else {
- result = err == nil
- }
- }
- default:
- waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
- name, a.Matcher)
- }
-
- if !result {
- // If there was no matching result found there is nothing more to do
- // for this response, retry the request.
- return false, nil
- }
-
- switch a.State {
- case SuccessWaiterState:
- // waiter completed
- return true, nil
- case FailureWaiterState:
- // Waiter failure state triggered
- return true, awserr.New(WaiterResourceNotReadyErrorCode,
- "failed waiting for successful resource state", err)
- case RetryWaiterState:
- // clear the error and retry the operation
- return false, nil
- default:
- waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
- name, a.State)
- return false, nil
- }
-}
-
-func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
- if logger != nil {
- logger.Log(fmt.Sprintf(msg, args...))
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
deleted file mode 100644
index ea8e35376..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/credentials.go
+++ /dev/null
@@ -1,333 +0,0 @@
-package session
-
-import (
- "fmt"
- "os"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/credentials/processcreds"
- "github.com/aws/aws-sdk-go/aws/credentials/ssocreds"
- "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
- "github.com/aws/aws-sdk-go/aws/defaults"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/shareddefaults"
- "github.com/aws/aws-sdk-go/service/ssooidc"
- "github.com/aws/aws-sdk-go/service/sts"
-)
-
-// CredentialsProviderOptions specifies additional options for configuring
-// credentials providers.
-type CredentialsProviderOptions struct {
- // WebIdentityRoleProviderOptions configures a WebIdentityRoleProvider,
- // such as setting its ExpiryWindow.
- WebIdentityRoleProviderOptions func(*stscreds.WebIdentityRoleProvider)
-
- // ProcessProviderOptions configures a ProcessProvider,
- // such as setting its Timeout.
- ProcessProviderOptions func(*processcreds.ProcessProvider)
-}
-
-func resolveCredentials(cfg *aws.Config,
- envCfg envConfig, sharedCfg sharedConfig,
- handlers request.Handlers,
- sessOpts Options,
-) (*credentials.Credentials, error) {
-
- switch {
- case len(sessOpts.Profile) != 0:
- // User explicitly provided a Profile in the session's configuration
- // so load that profile from shared config first.
- // Github(aws/aws-sdk-go#2727)
- return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
-
- case envCfg.Creds.HasKeys():
- // Environment credentials
- return credentials.NewStaticCredentialsFromCreds(envCfg.Creds), nil
-
- case len(envCfg.WebIdentityTokenFilePath) != 0:
- // Web identity token from environment, RoleARN required to also be
- // set.
- return assumeWebIdentity(cfg, handlers,
- envCfg.WebIdentityTokenFilePath,
- envCfg.RoleARN,
- envCfg.RoleSessionName,
- sessOpts.CredentialsProviderOptions,
- )
-
- default:
- // Fallback to the "default" credential resolution chain.
- return resolveCredsFromProfile(cfg, envCfg, sharedCfg, handlers, sessOpts)
- }
-}
-
-// WebIdentityEmptyRoleARNErr will occur if 'AWS_WEB_IDENTITY_TOKEN_FILE' was set but
-// 'AWS_ROLE_ARN' was not set.
-var WebIdentityEmptyRoleARNErr = awserr.New(stscreds.ErrCodeWebIdentity, "role ARN is not set", nil)
-
-// WebIdentityEmptyTokenFilePathErr will occur if 'AWS_ROLE_ARN' was set but
-// 'AWS_WEB_IDENTITY_TOKEN_FILE' was not set.
-var WebIdentityEmptyTokenFilePathErr = awserr.New(stscreds.ErrCodeWebIdentity, "token file path is not set", nil)
-
-func assumeWebIdentity(cfg *aws.Config, handlers request.Handlers,
- filepath string,
- roleARN, sessionName string,
- credOptions *CredentialsProviderOptions,
-) (*credentials.Credentials, error) {
-
- if len(filepath) == 0 {
- return nil, WebIdentityEmptyTokenFilePathErr
- }
-
- if len(roleARN) == 0 {
- return nil, WebIdentityEmptyRoleARNErr
- }
-
- svc := sts.New(&Session{
- Config: cfg,
- Handlers: handlers.Copy(),
- })
-
- var optFns []func(*stscreds.WebIdentityRoleProvider)
- if credOptions != nil && credOptions.WebIdentityRoleProviderOptions != nil {
- optFns = append(optFns, credOptions.WebIdentityRoleProviderOptions)
- }
-
- p := stscreds.NewWebIdentityRoleProviderWithOptions(svc, roleARN, sessionName, stscreds.FetchTokenPath(filepath), optFns...)
- return credentials.NewCredentials(p), nil
-}
-
-func resolveCredsFromProfile(cfg *aws.Config,
- envCfg envConfig, sharedCfg sharedConfig,
- handlers request.Handlers,
- sessOpts Options,
-) (creds *credentials.Credentials, err error) {
-
- switch {
- case sharedCfg.SourceProfile != nil:
- // Assume IAM role with credentials source from a different profile.
- creds, err = resolveCredsFromProfile(cfg, envCfg,
- *sharedCfg.SourceProfile, handlers, sessOpts,
- )
-
- case sharedCfg.Creds.HasKeys():
- // Static Credentials from Shared Config/Credentials file.
- creds = credentials.NewStaticCredentialsFromCreds(
- sharedCfg.Creds,
- )
-
- case len(sharedCfg.CredentialSource) != 0:
- creds, err = resolveCredsFromSource(cfg, envCfg,
- sharedCfg, handlers, sessOpts,
- )
-
- case len(sharedCfg.WebIdentityTokenFile) != 0:
- // Credentials from Assume Web Identity token require an IAM Role, and
- // that roll will be assumed. May be wrapped with another assume role
- // via SourceProfile.
- return assumeWebIdentity(cfg, handlers,
- sharedCfg.WebIdentityTokenFile,
- sharedCfg.RoleARN,
- sharedCfg.RoleSessionName,
- sessOpts.CredentialsProviderOptions,
- )
-
- case sharedCfg.hasSSOConfiguration():
- creds, err = resolveSSOCredentials(cfg, sharedCfg, handlers)
-
- case len(sharedCfg.CredentialProcess) != 0:
- // Get credentials from CredentialProcess
- var optFns []func(*processcreds.ProcessProvider)
- if sessOpts.CredentialsProviderOptions != nil && sessOpts.CredentialsProviderOptions.ProcessProviderOptions != nil {
- optFns = append(optFns, sessOpts.CredentialsProviderOptions.ProcessProviderOptions)
- }
- creds = processcreds.NewCredentials(sharedCfg.CredentialProcess, optFns...)
-
- default:
- // Fallback to default credentials provider, include mock errors for
- // the credential chain so user can identify why credentials failed to
- // be retrieved.
- creds = credentials.NewCredentials(&credentials.ChainProvider{
- VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
- Providers: []credentials.Provider{
- &credProviderError{
- Err: awserr.New("EnvAccessKeyNotFound",
- "failed to find credentials in the environment.", nil),
- },
- &credProviderError{
- Err: awserr.New("SharedCredsLoad",
- fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil),
- },
- defaults.RemoteCredProvider(*cfg, handlers),
- },
- })
- }
- if err != nil {
- return nil, err
- }
-
- if len(sharedCfg.RoleARN) > 0 {
- cfgCp := *cfg
- cfgCp.Credentials = creds
- return credsFromAssumeRole(cfgCp, handlers, sharedCfg, sessOpts)
- }
-
- return creds, nil
-}
-
-func resolveSSOCredentials(cfg *aws.Config, sharedCfg sharedConfig, handlers request.Handlers) (*credentials.Credentials, error) {
- if err := sharedCfg.validateSSOConfiguration(); err != nil {
- return nil, err
- }
-
- var optFns []func(provider *ssocreds.Provider)
- cfgCopy := cfg.Copy()
-
- if sharedCfg.SSOSession != nil {
- cfgCopy.Region = &sharedCfg.SSOSession.SSORegion
- cachedPath, err := ssocreds.StandardCachedTokenFilepath(sharedCfg.SSOSession.Name)
- if err != nil {
- return nil, err
- }
- // create oidcClient with AnonymousCredentials to avoid recursively resolving credentials
- mySession := Must(NewSession(&aws.Config{
- Credentials: credentials.AnonymousCredentials,
- }))
- oidcClient := ssooidc.New(mySession, cfgCopy)
- tokenProvider := ssocreds.NewSSOTokenProvider(oidcClient, cachedPath)
- optFns = append(optFns, func(p *ssocreds.Provider) {
- p.TokenProvider = tokenProvider
- p.CachedTokenFilepath = cachedPath
- })
- } else {
- cfgCopy.Region = &sharedCfg.SSORegion
- }
-
- return ssocreds.NewCredentials(
- &Session{
- Config: cfgCopy,
- Handlers: handlers.Copy(),
- },
- sharedCfg.SSOAccountID,
- sharedCfg.SSORoleName,
- sharedCfg.SSOStartURL,
- optFns...,
- ), nil
-}
-
-// valid credential source values
-const (
- credSourceEc2Metadata = "Ec2InstanceMetadata"
- credSourceEnvironment = "Environment"
- credSourceECSContainer = "EcsContainer"
-)
-
-func resolveCredsFromSource(cfg *aws.Config,
- envCfg envConfig, sharedCfg sharedConfig,
- handlers request.Handlers,
- sessOpts Options,
-) (creds *credentials.Credentials, err error) {
-
- switch sharedCfg.CredentialSource {
- case credSourceEc2Metadata:
- p := defaults.RemoteCredProvider(*cfg, handlers)
- creds = credentials.NewCredentials(p)
-
- case credSourceEnvironment:
- creds = credentials.NewStaticCredentialsFromCreds(envCfg.Creds)
-
- case credSourceECSContainer:
- if len(os.Getenv(shareddefaults.ECSCredsProviderEnvVar)) == 0 {
- return nil, ErrSharedConfigECSContainerEnvVarEmpty
- }
-
- p := defaults.RemoteCredProvider(*cfg, handlers)
- creds = credentials.NewCredentials(p)
-
- default:
- return nil, ErrSharedConfigInvalidCredSource
- }
-
- return creds, nil
-}
-
-func credsFromAssumeRole(cfg aws.Config,
- handlers request.Handlers,
- sharedCfg sharedConfig,
- sessOpts Options,
-) (*credentials.Credentials, error) {
-
- if len(sharedCfg.MFASerial) != 0 && sessOpts.AssumeRoleTokenProvider == nil {
- // AssumeRole Token provider is required if doing Assume Role
- // with MFA.
- return nil, AssumeRoleTokenProviderNotSetError{}
- }
-
- return stscreds.NewCredentials(
- &Session{
- Config: &cfg,
- Handlers: handlers.Copy(),
- },
- sharedCfg.RoleARN,
- func(opt *stscreds.AssumeRoleProvider) {
- opt.RoleSessionName = sharedCfg.RoleSessionName
-
- if sessOpts.AssumeRoleDuration == 0 &&
- sharedCfg.AssumeRoleDuration != nil &&
- *sharedCfg.AssumeRoleDuration/time.Minute > 15 {
- opt.Duration = *sharedCfg.AssumeRoleDuration
- } else if sessOpts.AssumeRoleDuration != 0 {
- opt.Duration = sessOpts.AssumeRoleDuration
- }
-
- // Assume role with external ID
- if len(sharedCfg.ExternalID) > 0 {
- opt.ExternalID = aws.String(sharedCfg.ExternalID)
- }
-
- // Assume role with MFA
- if len(sharedCfg.MFASerial) > 0 {
- opt.SerialNumber = aws.String(sharedCfg.MFASerial)
- opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
- }
- },
- ), nil
-}
-
-// AssumeRoleTokenProviderNotSetError is an error returned when creating a
-// session when the MFAToken option is not set when shared config is configured
-// load assume a role with an MFA token.
-type AssumeRoleTokenProviderNotSetError struct{}
-
-// Code is the short id of the error.
-func (e AssumeRoleTokenProviderNotSetError) Code() string {
- return "AssumeRoleTokenProviderNotSetError"
-}
-
-// Message is the description of the error
-func (e AssumeRoleTokenProviderNotSetError) Message() string {
- return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
- return nil
-}
-
-// Error satisfies the error interface.
-func (e AssumeRoleTokenProviderNotSetError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", nil)
-}
-
-type credProviderError struct {
- Err error
-}
-
-func (c credProviderError) Retrieve() (credentials.Value, error) {
- return credentials.Value{}, c.Err
-}
-func (c credProviderError) IsExpired() bool {
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
deleted file mode 100644
index 4390ad52f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
+++ /dev/null
@@ -1,28 +0,0 @@
-//go:build go1.13
-// +build go1.13
-
-package session
-
-import (
- "net"
- "net/http"
- "time"
-)
-
-// Transport that should be used when a custom CA bundle is specified with the
-// SDK.
-func getCustomTransport() *http.Transport {
- return &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }).DialContext,
- ForceAttemptHTTP2: true,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
deleted file mode 100644
index 668565bea..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go
+++ /dev/null
@@ -1,27 +0,0 @@
-//go:build !go1.13 && go1.7
-// +build !go1.13,go1.7
-
-package session
-
-import (
- "net"
- "net/http"
- "time"
-)
-
-// Transport that should be used when a custom CA bundle is specified with the
-// SDK.
-func getCustomTransport() *http.Transport {
- return &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- DialContext: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }).DialContext,
- MaxIdleConns: 100,
- IdleConnTimeout: 90 * time.Second,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
deleted file mode 100644
index e101aa6b6..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go
+++ /dev/null
@@ -1,23 +0,0 @@
-//go:build !go1.6 && go1.5
-// +build !go1.6,go1.5
-
-package session
-
-import (
- "net"
- "net/http"
- "time"
-)
-
-// Transport that should be used when a custom CA bundle is specified with the
-// SDK.
-func getCustomTransport() *http.Transport {
- return &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
deleted file mode 100644
index b5fcbe0d1..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go
+++ /dev/null
@@ -1,24 +0,0 @@
-//go:build !go1.7 && go1.6
-// +build !go1.7,go1.6
-
-package session
-
-import (
- "net"
- "net/http"
- "time"
-)
-
-// Transport that should be used when a custom CA bundle is specified with the
-// SDK.
-func getCustomTransport() *http.Transport {
- return &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
- ExpectContinueTimeout: 1 * time.Second,
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
deleted file mode 100644
index ff3cc012a..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ /dev/null
@@ -1,367 +0,0 @@
-/*
-Package session provides configuration for the SDK's service clients. Sessions
-can be shared across service clients that share the same base configuration.
-
-Sessions are safe to use concurrently as long as the Session is not being
-modified. Sessions should be cached when possible, because creating a new
-Session will load all configuration values from the environment, and config
-files each time the Session is created. Sharing the Session value across all of
-your service clients will ensure the configuration is loaded the fewest number
-of times possible.
-
-Sessions options from Shared Config
-
-By default NewSession will only load credentials from the shared credentials
-file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is
-set to a truthy value the Session will be created from the configuration
-values from the shared config (~/.aws/config) and shared credentials
-(~/.aws/credentials) files. Using the NewSessionWithOptions with
-SharedConfigState set to SharedConfigEnable will create the session as if the
-AWS_SDK_LOAD_CONFIG environment variable was set.
-
-Credential and config loading order
-
-The Session will attempt to load configuration and credentials from the
-environment, configuration files, and other credential sources. The order
-configuration is loaded in is:
-
- * Environment Variables
- * Shared Credentials file
- * Shared Configuration file (if SharedConfig is enabled)
- * EC2 Instance Metadata (credentials only)
-
-The Environment variables for credentials will have precedence over shared
-config even if SharedConfig is enabled. To override this behavior, and use
-shared config credentials instead specify the session.Options.Profile, (e.g.
-when using credential_source=Environment to assume a role).
-
- sess, err := session.NewSessionWithOptions(session.Options{
- Profile: "myProfile",
- })
-
-Creating Sessions
-
-Creating a Session without additional options will load credentials region, and
-profile loaded from the environment and shared config automatically. See,
-"Environment Variables" section for information on environment variables used
-by Session.
-
- // Create Session
- sess, err := session.NewSession()
-
-
-When creating Sessions optional aws.Config values can be passed in that will
-override the default, or loaded, config values the Session is being created
-with. This allows you to provide additional, or case based, configuration
-as needed.
-
- // Create a Session with a custom region
- sess, err := session.NewSession(&aws.Config{
- Region: aws.String("us-west-2"),
- })
-
-Use NewSessionWithOptions to provide additional configuration driving how the
-Session's configuration will be loaded. Such as, specifying shared config
-profile, or override the shared config state, (AWS_SDK_LOAD_CONFIG).
-
- // Equivalent to session.NewSession()
- sess, err := session.NewSessionWithOptions(session.Options{
- // Options
- })
-
- sess, err := session.NewSessionWithOptions(session.Options{
- // Specify profile to load for the session's config
- Profile: "profile_name",
-
- // Provide SDK Config options, such as Region.
- Config: aws.Config{
- Region: aws.String("us-west-2"),
- },
-
- // Force enable Shared Config support
- SharedConfigState: session.SharedConfigEnable,
- })
-
-Adding Handlers
-
-You can add handlers to a session to decorate API operation, (e.g. adding HTTP
-headers). All clients that use the Session receive a copy of the Session's
-handlers. For example, the following request handler added to the Session logs
-every requests made.
-
- // Create a session, and add additional handlers for all service
- // clients created with the Session to inherit. Adds logging handler.
- sess := session.Must(session.NewSession())
-
- sess.Handlers.Send.PushFront(func(r *request.Request) {
- // Log every request made and its payload
- logger.Printf("Request: %s/%s, Params: %s",
- r.ClientInfo.ServiceName, r.Operation, r.Params)
- })
-
-Shared Config Fields
-
-By default the SDK will only load the shared credentials file's
-(~/.aws/credentials) credentials values, and all other config is provided by
-the environment variables, SDK defaults, and user provided aws.Config values.
-
-If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable
-option is used to create the Session the full shared config values will be
-loaded. This includes credentials, region, and support for assume role. In
-addition the Session will load its configuration from both the shared config
-file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
-files have the same format.
-
-If both config files are present the configuration from both files will be
-read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared config file
-(~/.aws/config).
-
-Credentials are the values the SDK uses to authenticating requests with AWS
-Services. When specified in a file, both aws_access_key_id and
-aws_secret_access_key must be provided together in the same file to be
-considered valid. They will be ignored if both are not present.
-aws_session_token is an optional field that can be provided in addition to the
-other two fields.
-
- aws_access_key_id = AKID
- aws_secret_access_key = SECRET
- aws_session_token = TOKEN
-
- ; region only supported if SharedConfigEnabled.
- region = us-east-1
-
-Assume Role configuration
-
-The role_arn field allows you to configure the SDK to assume an IAM role using
-a set of credentials from another source. Such as when paired with static
-credentials, "profile_source", "credential_process", or "credential_source"
-fields. If "role_arn" is provided, a source of credentials must also be
-specified, such as "source_profile", "credential_source", or
-"credential_process".
-
- role_arn = arn:aws:iam:::role/
- source_profile = profile_with_creds
- external_id = 1234
- mfa_serial =
- role_session_name = session_name
-
-
-The SDK supports assuming a role with MFA token. If "mfa_serial" is set, you
-must also set the Session Option.AssumeRoleTokenProvider. The Session will fail
-to load if the AssumeRoleTokenProvider is not specified.
-
- sess := session.Must(session.NewSessionWithOptions(session.Options{
- AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
- }))
-
-To setup Assume Role outside of a session see the stscreds.AssumeRoleProvider
-documentation.
-
-Environment Variables
-
-When a Session is created several environment variables can be set to adjust
-how the SDK functions, and what configuration data it loads when creating
-Sessions. All environment values are optional, but some values like credentials
-require multiple of the values to set or the partial values will be ignored.
-All environment variable values are strings unless otherwise noted.
-
-Environment configuration values. If set both Access Key ID and Secret Access
-Key must be provided. Session Token and optionally also be provided, but is
-not required.
-
- # Access Key ID
- AWS_ACCESS_KEY_ID=AKID
- AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
-
- # Secret Access Key
- AWS_SECRET_ACCESS_KEY=SECRET
- AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
-
- # Session Token
- AWS_SESSION_TOKEN=TOKEN
-
-Region value will instruct the SDK where to make service API requests to. If is
-not provided in the environment the region must be provided before a service
-client request is made.
-
- AWS_REGION=us-east-1
-
- # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
- # and AWS_REGION is not also set.
- AWS_DEFAULT_REGION=us-east-1
-
-Profile name the SDK should load use when loading shared config from the
-configuration files. If not provided "default" will be used as the profile name.
-
- AWS_PROFILE=my_profile
-
- # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
- # and AWS_PROFILE is not also set.
- AWS_DEFAULT_PROFILE=my_profile
-
-SDK load config instructs the SDK to load the shared config in addition to
-shared credentials. This also expands the configuration loaded so the shared
-credentials will have parity with the shared config file. This also enables
-Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
-env values as well.
-
- AWS_SDK_LOAD_CONFIG=1
-
-Custom Shared Config and Credential Files
-
-Shared credentials file path can be set to instruct the SDK to use an alternative
-file for the shared credentials. If not set the file will be loaded from
-$HOME/.aws/credentials on Linux/Unix based systems, and
-%USERPROFILE%\.aws\credentials on Windows.
-
- AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
-
-Shared config file path can be set to instruct the SDK to use an alternative
-file for the shared config. If not set the file will be loaded from
-$HOME/.aws/config on Linux/Unix based systems, and
-%USERPROFILE%\.aws\config on Windows.
-
- AWS_CONFIG_FILE=$HOME/my_shared_config
-
-Custom CA Bundle
-
-Path to a custom Credentials Authority (CA) bundle PEM file that the SDK
-will use instead of the default system's root CA bundle. Use this only
-if you want to replace the CA bundle the SDK uses for TLS requests.
-
- AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
-
-Enabling this option will attempt to merge the Transport into the SDK's HTTP
-client. If the client's Transport is not a http.Transport an error will be
-returned. If the Transport's TLS config is set this option will cause the SDK
-to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file
-contains multiple certificates all of them will be loaded.
-
-The Session option CustomCABundle is also available when creating sessions
-to also enable this feature. CustomCABundle session option field has priority
-over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
-
-Setting a custom HTTPClient in the aws.Config options will override this setting.
-To use this option and custom HTTP client, the HTTP client needs to be provided
-when creating the session. Not the service client.
-
-Custom Client TLS Certificate
-
-The SDK supports the environment and session option being configured with
-Client TLS certificates that are sent as a part of the client's TLS handshake
-for client authentication. If used, both Cert and Key values are required. If
-one is missing, or either fail to load the contents of the file an error will
-be returned.
-
-HTTP Client's Transport concrete implementation must be a http.Transport
-or creating the session will fail.
-
- AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
- AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
-
-This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
-
- sess, err := session.NewSessionWithOptions(session.Options{
- ClientTLSCert: myCertFile,
- ClientTLSKey: myKeyFile,
- })
-
-Custom EC2 IMDS Endpoint
-
-The endpoint of the EC2 IMDS client can be configured via the environment
-variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
-Session. See Options.EC2IMDSEndpoint for more details.
-
- AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
-
-If using an URL with an IPv6 address literal, the IPv6 address
-component must be enclosed in square brackets.
-
- AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
-
-The custom EC2 IMDS endpoint can also be specified via the Session options.
-
- sess, err := session.NewSessionWithOptions(session.Options{
- EC2MetadataEndpoint: "http://[::1]",
- })
-
-FIPS and DualStack Endpoints
-
-The SDK can be configured to resolve an endpoint with certain capabilities such as FIPS and DualStack.
-
-You can configure a FIPS endpoint using an environment variable, shared config ($HOME/.aws/config),
-or programmatically.
-
-To configure a FIPS endpoint set the environment variable set the AWS_USE_FIPS_ENDPOINT to true or false to enable
-or disable FIPS endpoint resolution.
-
- AWS_USE_FIPS_ENDPOINT=true
-
-To configure a FIPS endpoint using shared config, set use_fips_endpoint to true or false to enable
-or disable FIPS endpoint resolution.
-
- [profile myprofile]
- region=us-west-2
- use_fips_endpoint=true
-
-To configure a FIPS endpoint programmatically
-
- // Option 1: Configure it on a session for all clients
- sess, err := session.NewSessionWithOptions(session.Options{
- UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
- })
- if err != nil {
- // handle error
- }
-
- client := s3.New(sess)
-
- // Option 2: Configure it per client
- sess, err := session.NewSession()
- if err != nil {
- // handle error
- }
-
- client := s3.New(sess, &aws.Config{
- UseFIPSEndpoint: endpoints.FIPSEndpointStateEnabled,
- })
-
-You can configure a DualStack endpoint using an environment variable, shared config ($HOME/.aws/config),
-or programmatically.
-
-To configure a DualStack endpoint set the environment variable set the AWS_USE_DUALSTACK_ENDPOINT to true or false to
-enable or disable DualStack endpoint resolution.
-
- AWS_USE_DUALSTACK_ENDPOINT=true
-
-To configure a DualStack endpoint using shared config, set use_dualstack_endpoint to true or false to enable
-or disable DualStack endpoint resolution.
-
- [profile myprofile]
- region=us-west-2
- use_dualstack_endpoint=true
-
-To configure a DualStack endpoint programmatically
-
- // Option 1: Configure it on a session for all clients
- sess, err := session.NewSessionWithOptions(session.Options{
- UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
- })
- if err != nil {
- // handle error
- }
-
- client := s3.New(sess)
-
- // Option 2: Configure it per client
- sess, err := session.NewSession()
- if err != nil {
- // handle error
- }
-
- client := s3.New(sess, &aws.Config{
- UseDualStackEndpoint: endpoints.DualStackEndpointStateEnabled,
- })
-*/
-package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
deleted file mode 100644
index 93bb5de64..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ /dev/null
@@ -1,499 +0,0 @@
-package session
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/defaults"
- "github.com/aws/aws-sdk-go/aws/endpoints"
-)
-
-// EnvProviderName provides a name of the provider when config is loaded from environment.
-const EnvProviderName = "EnvConfigCredentials"
-
-// envConfig is a collection of environment values the SDK will read
-// setup config from. All environment values are optional. But some values
-// such as credentials require multiple values to be complete or the values
-// will be ignored.
-type envConfig struct {
- // Environment configuration values. If set both Access Key ID and Secret Access
- // Key must be provided. Session Token and optionally also be provided, but is
- // not required.
- //
- // # Access Key ID
- // AWS_ACCESS_KEY_ID=AKID
- // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
- //
- // # Secret Access Key
- // AWS_SECRET_ACCESS_KEY=SECRET
- // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
- //
- // # Session Token
- // AWS_SESSION_TOKEN=TOKEN
- Creds credentials.Value
-
- // Region value will instruct the SDK where to make service API requests to. If is
- // not provided in the environment the region must be provided before a service
- // client request is made.
- //
- // AWS_REGION=us-east-1
- //
- // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
- // # and AWS_REGION is not also set.
- // AWS_DEFAULT_REGION=us-east-1
- Region string
-
- // Profile name the SDK should load use when loading shared configuration from the
- // shared configuration files. If not provided "default" will be used as the
- // profile name.
- //
- // AWS_PROFILE=my_profile
- //
- // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set,
- // # and AWS_PROFILE is not also set.
- // AWS_DEFAULT_PROFILE=my_profile
- Profile string
-
- // SDK load config instructs the SDK to load the shared config in addition to
- // shared credentials. This also expands the configuration loaded from the shared
- // credentials to have parity with the shared config file. This also enables
- // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE
- // env values as well.
- //
- // AWS_SDK_LOAD_CONFIG=1
- EnableSharedConfig bool
-
- // Shared credentials file path can be set to instruct the SDK to use an alternate
- // file for the shared credentials. If not set the file will be loaded from
- // $HOME/.aws/credentials on Linux/Unix based systems, and
- // %USERPROFILE%\.aws\credentials on Windows.
- //
- // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials
- SharedCredentialsFile string
-
- // Shared config file path can be set to instruct the SDK to use an alternate
- // file for the shared config. If not set the file will be loaded from
- // $HOME/.aws/config on Linux/Unix based systems, and
- // %USERPROFILE%\.aws\config on Windows.
- //
- // AWS_CONFIG_FILE=$HOME/my_shared_config
- SharedConfigFile string
-
- // Sets the path to a custom Credentials Authority (CA) Bundle PEM file
- // that the SDK will use instead of the system's root CA bundle.
- // Only use this if you want to configure the SDK to use a custom set
- // of CAs.
- //
- // Enabling this option will attempt to merge the Transport
- // into the SDK's HTTP client. If the client's Transport is
- // not a http.Transport an error will be returned. If the
- // Transport's TLS config is set this option will cause the
- // SDK to overwrite the Transport's TLS config's RootCAs value.
- //
- // Setting a custom HTTPClient in the aws.Config options will override this setting.
- // To use this option and custom HTTP client, the HTTP client needs to be provided
- // when creating the session. Not the service client.
- //
- // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
- CustomCABundle string
-
- // Sets the TLC client certificate that should be used by the SDK's HTTP transport
- // when making requests. The certificate must be paired with a TLS client key file.
- //
- // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
- ClientTLSCert string
-
- // Sets the TLC client key that should be used by the SDK's HTTP transport
- // when making requests. The key must be paired with a TLS client certificate file.
- //
- // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
- ClientTLSKey string
-
- csmEnabled string
- CSMEnabled *bool
- CSMPort string
- CSMHost string
- CSMClientID string
-
- // Enables endpoint discovery via environment variables.
- //
- // AWS_ENABLE_ENDPOINT_DISCOVERY=true
- EnableEndpointDiscovery *bool
- enableEndpointDiscovery string
-
- // Specifies the WebIdentity token the SDK should use to assume a role
- // with.
- //
- // AWS_WEB_IDENTITY_TOKEN_FILE=file_path
- WebIdentityTokenFilePath string
-
- // Specifies the IAM role arn to use when assuming an role.
- //
- // AWS_ROLE_ARN=role_arn
- RoleARN string
-
- // Specifies the IAM role session name to use when assuming a role.
- //
- // AWS_ROLE_SESSION_NAME=session_name
- RoleSessionName string
-
- // Specifies the STS Regional Endpoint flag for the SDK to resolve the endpoint
- // for a service.
- //
- // AWS_STS_REGIONAL_ENDPOINTS=regional
- // This can take value as `regional` or `legacy`
- STSRegionalEndpoint endpoints.STSRegionalEndpoint
-
- // Specifies the S3 Regional Endpoint flag for the SDK to resolve the
- // endpoint for a service.
- //
- // AWS_S3_US_EAST_1_REGIONAL_ENDPOINT=regional
- // This can take value as `regional` or `legacy`
- S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
-
- // Specifies if the S3 service should allow ARNs to direct the region
- // the client's requests are sent to.
- //
- // AWS_S3_USE_ARN_REGION=true
- S3UseARNRegion bool
-
- // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
- //
- // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
- EC2IMDSEndpoint string
-
- // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
- //
- // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
- EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
-
- // Specifies that IMDS clients should not fallback to IMDSv1 if token
- // requests fail.
- //
- // AWS_EC2_METADATA_V1_DISABLED=true
- EC2IMDSv1Disabled *bool
-
- // Specifies that SDK clients must resolve a dual-stack endpoint for
- // services.
- //
- // AWS_USE_DUALSTACK_ENDPOINT=true
- UseDualStackEndpoint endpoints.DualStackEndpointState
-
- // Specifies that SDK clients must resolve a FIPS endpoint for
- // services.
- //
- // AWS_USE_FIPS_ENDPOINT=true
- UseFIPSEndpoint endpoints.FIPSEndpointState
-}
-
-var (
- csmEnabledEnvKey = []string{
- "AWS_CSM_ENABLED",
- }
- csmHostEnvKey = []string{
- "AWS_CSM_HOST",
- }
- csmPortEnvKey = []string{
- "AWS_CSM_PORT",
- }
- csmClientIDEnvKey = []string{
- "AWS_CSM_CLIENT_ID",
- }
- credAccessEnvKey = []string{
- "AWS_ACCESS_KEY_ID",
- "AWS_ACCESS_KEY",
- }
- credSecretEnvKey = []string{
- "AWS_SECRET_ACCESS_KEY",
- "AWS_SECRET_KEY",
- }
- credSessionEnvKey = []string{
- "AWS_SESSION_TOKEN",
- }
-
- enableEndpointDiscoveryEnvKey = []string{
- "AWS_ENABLE_ENDPOINT_DISCOVERY",
- }
-
- regionEnvKeys = []string{
- "AWS_REGION",
- "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set
- }
- profileEnvKeys = []string{
- "AWS_PROFILE",
- "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
- }
- sharedCredsFileEnvKey = []string{
- "AWS_SHARED_CREDENTIALS_FILE",
- }
- sharedConfigFileEnvKey = []string{
- "AWS_CONFIG_FILE",
- }
- webIdentityTokenFilePathEnvKey = []string{
- "AWS_WEB_IDENTITY_TOKEN_FILE",
- }
- roleARNEnvKey = []string{
- "AWS_ROLE_ARN",
- }
- roleSessionNameEnvKey = []string{
- "AWS_ROLE_SESSION_NAME",
- }
- stsRegionalEndpointKey = []string{
- "AWS_STS_REGIONAL_ENDPOINTS",
- }
- s3UsEast1RegionalEndpoint = []string{
- "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT",
- }
- s3UseARNRegionEnvKey = []string{
- "AWS_S3_USE_ARN_REGION",
- }
- ec2IMDSEndpointEnvKey = []string{
- "AWS_EC2_METADATA_SERVICE_ENDPOINT",
- }
- ec2IMDSEndpointModeEnvKey = []string{
- "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE",
- }
- ec2MetadataV1DisabledEnvKey = []string{
- "AWS_EC2_METADATA_V1_DISABLED",
- }
- useCABundleKey = []string{
- "AWS_CA_BUNDLE",
- }
- useClientTLSCert = []string{
- "AWS_SDK_GO_CLIENT_TLS_CERT",
- }
- useClientTLSKey = []string{
- "AWS_SDK_GO_CLIENT_TLS_KEY",
- }
- awsUseDualStackEndpoint = []string{
- "AWS_USE_DUALSTACK_ENDPOINT",
- }
- awsUseFIPSEndpoint = []string{
- "AWS_USE_FIPS_ENDPOINT",
- }
-)
-
-// loadEnvConfig retrieves the SDK's environment configuration.
-// See `envConfig` for the values that will be retrieved.
-//
-// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value
-// the shared SDK config will be loaded in addition to the SDK's specific
-// configuration values.
-func loadEnvConfig() (envConfig, error) {
- enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG"))
- return envConfigLoad(enableSharedConfig)
-}
-
-// loadEnvSharedConfig retrieves the SDK's environment configuration, and the
-// SDK shared config. See `envConfig` for the values that will be retrieved.
-//
-// Loads the shared configuration in addition to the SDK's specific configuration.
-// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG`
-// environment variable is set.
-func loadSharedEnvConfig() (envConfig, error) {
- return envConfigLoad(true)
-}
-
-func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
- cfg := envConfig{}
-
- cfg.EnableSharedConfig = enableSharedConfig
-
- // Static environment credentials
- var creds credentials.Value
- setFromEnvVal(&creds.AccessKeyID, credAccessEnvKey)
- setFromEnvVal(&creds.SecretAccessKey, credSecretEnvKey)
- setFromEnvVal(&creds.SessionToken, credSessionEnvKey)
- if creds.HasKeys() {
- // Require logical grouping of credentials
- creds.ProviderName = EnvProviderName
- cfg.Creds = creds
- }
-
- // Role Metadata
- setFromEnvVal(&cfg.RoleARN, roleARNEnvKey)
- setFromEnvVal(&cfg.RoleSessionName, roleSessionNameEnvKey)
-
- // Web identity environment variables
- setFromEnvVal(&cfg.WebIdentityTokenFilePath, webIdentityTokenFilePathEnvKey)
-
- // CSM environment variables
- setFromEnvVal(&cfg.csmEnabled, csmEnabledEnvKey)
- setFromEnvVal(&cfg.CSMHost, csmHostEnvKey)
- setFromEnvVal(&cfg.CSMPort, csmPortEnvKey)
- setFromEnvVal(&cfg.CSMClientID, csmClientIDEnvKey)
-
- if len(cfg.csmEnabled) != 0 {
- v, _ := strconv.ParseBool(cfg.csmEnabled)
- cfg.CSMEnabled = &v
- }
-
- regionKeys := regionEnvKeys
- profileKeys := profileEnvKeys
- if !cfg.EnableSharedConfig {
- regionKeys = regionKeys[:1]
- profileKeys = profileKeys[:1]
- }
-
- setFromEnvVal(&cfg.Region, regionKeys)
- setFromEnvVal(&cfg.Profile, profileKeys)
-
- // endpoint discovery is in reference to it being enabled.
- setFromEnvVal(&cfg.enableEndpointDiscovery, enableEndpointDiscoveryEnvKey)
- if len(cfg.enableEndpointDiscovery) > 0 {
- cfg.EnableEndpointDiscovery = aws.Bool(cfg.enableEndpointDiscovery != "false")
- }
-
- setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
- setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
-
- if len(cfg.SharedCredentialsFile) == 0 {
- cfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
- }
- if len(cfg.SharedConfigFile) == 0 {
- cfg.SharedConfigFile = defaults.SharedConfigFilename()
- }
-
- setFromEnvVal(&cfg.CustomCABundle, useCABundleKey)
- setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert)
- setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey)
-
- var err error
- // STS Regional Endpoint variable
- for _, k := range stsRegionalEndpointKey {
- if v := os.Getenv(k); len(v) != 0 {
- cfg.STSRegionalEndpoint, err = endpoints.GetSTSRegionalEndpoint(v)
- if err != nil {
- return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
- }
- }
- }
-
- // S3 Regional Endpoint variable
- for _, k := range s3UsEast1RegionalEndpoint {
- if v := os.Getenv(k); len(v) != 0 {
- cfg.S3UsEast1RegionalEndpoint, err = endpoints.GetS3UsEast1RegionalEndpoint(v)
- if err != nil {
- return cfg, fmt.Errorf("failed to load, %v from env config, %v", k, err)
- }
- }
- }
-
- var s3UseARNRegion string
- setFromEnvVal(&s3UseARNRegion, s3UseARNRegionEnvKey)
- if len(s3UseARNRegion) != 0 {
- switch {
- case strings.EqualFold(s3UseARNRegion, "false"):
- cfg.S3UseARNRegion = false
- case strings.EqualFold(s3UseARNRegion, "true"):
- cfg.S3UseARNRegion = true
- default:
- return envConfig{}, fmt.Errorf(
- "invalid value for environment variable, %s=%s, need true or false",
- s3UseARNRegionEnvKey[0], s3UseARNRegion)
- }
- }
-
- setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey)
- if err := setEC2IMDSEndpointMode(&cfg.EC2IMDSEndpointMode, ec2IMDSEndpointModeEnvKey); err != nil {
- return envConfig{}, err
- }
- setBoolPtrFromEnvVal(&cfg.EC2IMDSv1Disabled, ec2MetadataV1DisabledEnvKey)
-
- if err := setUseDualStackEndpointFromEnvVal(&cfg.UseDualStackEndpoint, awsUseDualStackEndpoint); err != nil {
- return cfg, err
- }
-
- if err := setUseFIPSEndpointFromEnvVal(&cfg.UseFIPSEndpoint, awsUseFIPSEndpoint); err != nil {
- return cfg, err
- }
-
- return cfg, nil
-}
-
-func setFromEnvVal(dst *string, keys []string) {
- for _, k := range keys {
- if v := os.Getenv(k); len(v) != 0 {
- *dst = v
- break
- }
- }
-}
-
-func setBoolPtrFromEnvVal(dst **bool, keys []string) {
- for _, k := range keys {
- value := os.Getenv(k)
- if len(value) == 0 {
- continue
- }
-
- switch {
- case strings.EqualFold(value, "false"):
- *dst = new(bool)
- **dst = false
- case strings.EqualFold(value, "true"):
- *dst = new(bool)
- **dst = true
- }
- }
-}
-
-func setEC2IMDSEndpointMode(mode *endpoints.EC2IMDSEndpointModeState, keys []string) error {
- for _, k := range keys {
- value := os.Getenv(k)
- if len(value) == 0 {
- continue
- }
- if err := mode.SetFromString(value); err != nil {
- return fmt.Errorf("invalid value for environment variable, %s=%s, %v", k, value, err)
- }
- return nil
- }
- return nil
-}
-
-func setUseDualStackEndpointFromEnvVal(dst *endpoints.DualStackEndpointState, keys []string) error {
- for _, k := range keys {
- value := os.Getenv(k)
- if len(value) == 0 {
- continue // skip if empty
- }
-
- switch {
- case strings.EqualFold(value, "true"):
- *dst = endpoints.DualStackEndpointStateEnabled
- case strings.EqualFold(value, "false"):
- *dst = endpoints.DualStackEndpointStateDisabled
- default:
- return fmt.Errorf(
- "invalid value for environment variable, %s=%s, need true, false",
- k, value)
- }
- }
- return nil
-}
-
-func setUseFIPSEndpointFromEnvVal(dst *endpoints.FIPSEndpointState, keys []string) error {
- for _, k := range keys {
- value := os.Getenv(k)
- if len(value) == 0 {
- continue // skip if empty
- }
-
- switch {
- case strings.EqualFold(value, "true"):
- *dst = endpoints.FIPSEndpointStateEnabled
- case strings.EqualFold(value, "false"):
- *dst = endpoints.FIPSEndpointStateDisabled
- default:
- return fmt.Errorf(
- "invalid value for environment variable, %s=%s, need true, false",
- k, value)
- }
- }
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
deleted file mode 100644
index 3c88dee52..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ /dev/null
@@ -1,1005 +0,0 @@
-package session
-
-import (
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "os"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/corehandlers"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/csm"
- "github.com/aws/aws-sdk-go/aws/defaults"
- "github.com/aws/aws-sdk-go/aws/endpoints"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-const (
- // ErrCodeSharedConfig represents an error that occurs in the shared
- // configuration logic
- ErrCodeSharedConfig = "SharedConfigErr"
-
- // ErrCodeLoadCustomCABundle error code for unable to load custom CA bundle.
- ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
-
- // ErrCodeLoadClientTLSCert error code for unable to load client TLS
- // certificate or key
- ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
-)
-
-// ErrSharedConfigSourceCollision will be returned if a section contains both
-// source_profile and credential_source
-var ErrSharedConfigSourceCollision = awserr.New(ErrCodeSharedConfig, "only one credential type may be specified per profile: source profile, credential source, credential process, web identity token", nil)
-
-// ErrSharedConfigECSContainerEnvVarEmpty will be returned if the environment
-// variables are empty and Environment was set as the credential source
-var ErrSharedConfigECSContainerEnvVarEmpty = awserr.New(ErrCodeSharedConfig, "EcsContainer was specified as the credential_source, but 'AWS_CONTAINER_CREDENTIALS_RELATIVE_URI' was not set", nil)
-
-// ErrSharedConfigInvalidCredSource will be returned if an invalid credential source was provided
-var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credential source values must be EcsContainer, Ec2InstanceMetadata, or Environment", nil)
-
-// A Session provides a central location to create service clients from and
-// store configurations and request handlers for those services.
-//
-// Sessions are safe to create service clients concurrently, but it is not safe
-// to mutate the Session concurrently.
-//
-// The Session satisfies the service client's client.ConfigProvider.
-type Session struct {
- Config *aws.Config
- Handlers request.Handlers
-
- options Options
-}
-
-// New creates a new instance of the handlers merging in the provided configs
-// on top of the SDK's default configurations. Once the Session is created it
-// can be mutated to modify the Config or Handlers. The Session is safe to be
-// read concurrently, but it should not be written to concurrently.
-//
-// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New
-// method could now encounter an error when loading the configuration. When
-// The environment variable is set, and an error occurs, New will return a
-// session that will fail all requests reporting the error that occurred while
-// loading the session. Use NewSession to get the error when creating the
-// session.
-//
-// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
-// the shared config file (~/.aws/config) will also be loaded, in addition to
-// the shared credentials file (~/.aws/credentials). Values set in both the
-// shared config, and shared credentials will be taken from the shared
-// credentials file.
-//
-// Deprecated: Use NewSession functions to create sessions instead. NewSession
-// has the same functionality as New except an error can be returned when the
-// func is called instead of waiting to receive an error until a request is made.
-func New(cfgs ...*aws.Config) *Session {
- // load initial config from environment
- envCfg, envErr := loadEnvConfig()
-
- if envCfg.EnableSharedConfig {
- var cfg aws.Config
- cfg.MergeIn(cfgs...)
- s, err := NewSessionWithOptions(Options{
- Config: cfg,
- SharedConfigState: SharedConfigEnable,
- })
- if err != nil {
- // Old session.New expected all errors to be discovered when
- // a request is made, and would report the errors then. This
- // needs to be replicated if an error occurs while creating
- // the session.
- msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " +
- "Use session.NewSession to handle errors occurring during session creation."
-
- // Session creation failed, need to report the error and prevent
- // any requests from succeeding.
- s = &Session{Config: defaults.Config()}
- s.logDeprecatedNewSessionError(msg, err, cfgs)
- }
-
- return s
- }
-
- s := deprecatedNewSession(envCfg, cfgs...)
- if envErr != nil {
- msg := "failed to load env config"
- s.logDeprecatedNewSessionError(msg, envErr, cfgs)
- }
-
- if csmCfg, err := loadCSMConfig(envCfg, []string{}); err != nil {
- if l := s.Config.Logger; l != nil {
- l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
- }
- } else if csmCfg.Enabled {
- err := enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
- if err != nil {
- msg := "failed to enable CSM"
- s.logDeprecatedNewSessionError(msg, err, cfgs)
- }
- }
-
- return s
-}
-
-// NewSession returns a new Session created from SDK defaults, config files,
-// environment, and user provided config files. Once the Session is created
-// it can be mutated to modify the Config or Handlers. The Session is safe to
-// be read concurrently, but it should not be written to concurrently.
-//
-// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
-// the shared config file (~/.aws/config) will also be loaded in addition to
-// the shared credentials file (~/.aws/credentials). Values set in both the
-// shared config, and shared credentials will be taken from the shared
-// credentials file. Enabling the Shared Config will also allow the Session
-// to be built with retrieving credentials with AssumeRole set in the config.
-//
-// See the NewSessionWithOptions func for information on how to override or
-// control through code how the Session will be created, such as specifying the
-// config profile, and controlling if shared config is enabled or not.
-func NewSession(cfgs ...*aws.Config) (*Session, error) {
- opts := Options{}
- opts.Config.MergeIn(cfgs...)
-
- return NewSessionWithOptions(opts)
-}
-
-// SharedConfigState provides the ability to optionally override the state
-// of the session's creation based on the shared config being enabled or
-// disabled.
-type SharedConfigState int
-
-const (
- // SharedConfigStateFromEnv does not override any state of the
- // AWS_SDK_LOAD_CONFIG env var. It is the default value of the
- // SharedConfigState type.
- SharedConfigStateFromEnv SharedConfigState = iota
-
- // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value
- // and disables the shared config functionality.
- SharedConfigDisable
-
- // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value
- // and enables the shared config functionality.
- SharedConfigEnable
-)
-
-// Options provides the means to control how a Session is created and what
-// configuration values will be loaded.
-type Options struct {
- // Provides config values for the SDK to use when creating service clients
- // and making API requests to services. Any value set in with this field
- // will override the associated value provided by the SDK defaults,
- // environment or config files where relevant.
- //
- // If not set, configuration values from from SDK defaults, environment,
- // config will be used.
- Config aws.Config
-
- // Overrides the config profile the Session should be created from. If not
- // set the value of the environment variable will be loaded (AWS_PROFILE,
- // or AWS_DEFAULT_PROFILE if the Shared Config is enabled).
- //
- // If not set and environment variables are not set the "default"
- // (DefaultSharedConfigProfile) will be used as the profile to load the
- // session config from.
- Profile string
-
- // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG
- // environment variable. By default a Session will be created using the
- // value provided by the AWS_SDK_LOAD_CONFIG environment variable.
- //
- // Setting this value to SharedConfigEnable or SharedConfigDisable
- // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
- // and enable or disable the shared config functionality.
- SharedConfigState SharedConfigState
-
- // Ordered list of files the session will load configuration from.
- // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE.
- SharedConfigFiles []string
-
- // When the SDK's shared config is configured to assume a role with MFA
- // this option is required in order to provide the mechanism that will
- // retrieve the MFA token. There is no default value for this field. If
- // it is not set an error will be returned when creating the session.
- //
- // This token provider will be called when ever the assumed role's
- // credentials need to be refreshed. Within the context of service clients
- // all sharing the same session the SDK will ensure calls to the token
- // provider are atomic. When sharing a token provider across multiple
- // sessions additional synchronization logic is needed to ensure the
- // token providers do not introduce race conditions. It is recommend to
- // share the session where possible.
- //
- // stscreds.StdinTokenProvider is a basic implementation that will prompt
- // from stdin for the MFA token code.
- //
- // This field is only used if the shared configuration is enabled, and
- // the config enables assume role with MFA via the mfa_serial field.
- AssumeRoleTokenProvider func() (string, error)
-
- // When the SDK's shared config is configured to assume a role this option
- // may be provided to set the expiry duration of the STS credentials.
- // Defaults to 15 minutes if not set as documented in the
- // stscreds.AssumeRoleProvider.
- AssumeRoleDuration time.Duration
-
- // Reader for a custom Credentials Authority (CA) bundle in PEM format that
- // the SDK will use instead of the default system's root CA bundle. Use this
- // only if you want to replace the CA bundle the SDK uses for TLS requests.
- //
- // HTTP Client's Transport concrete implementation must be a http.Transport
- // or creating the session will fail.
- //
- // If the Transport's TLS config is set this option will cause the SDK
- // to overwrite the Transport's TLS config's RootCAs value. If the CA
- // bundle reader contains multiple certificates all of them will be loaded.
- //
- // Can also be specified via the environment variable:
- //
- // AWS_CA_BUNDLE=$HOME/ca_bundle
- //
- // Can also be specified via the shared config field:
- //
- // ca_bundle = $HOME/ca_bundle
- CustomCABundle io.Reader
-
- // Reader for the TLC client certificate that should be used by the SDK's
- // HTTP transport when making requests. The certificate must be paired with
- // a TLS client key file. Will be ignored if both are not provided.
- //
- // HTTP Client's Transport concrete implementation must be a http.Transport
- // or creating the session will fail.
- //
- // Can also be specified via the environment variable:
- //
- // AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
- ClientTLSCert io.Reader
-
- // Reader for the TLC client key that should be used by the SDK's HTTP
- // transport when making requests. The key must be paired with a TLS client
- // certificate file. Will be ignored if both are not provided.
- //
- // HTTP Client's Transport concrete implementation must be a http.Transport
- // or creating the session will fail.
- //
- // Can also be specified via the environment variable:
- //
- // AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
- ClientTLSKey io.Reader
-
- // The handlers that the session and all API clients will be created with.
- // This must be a complete set of handlers. Use the defaults.Handlers()
- // function to initialize this value before changing the handlers to be
- // used by the SDK.
- Handlers request.Handlers
-
- // Allows specifying a custom endpoint to be used by the EC2 IMDS client
- // when making requests to the EC2 IMDS API. The endpoint value should
- // include the URI scheme. If the scheme is not present it will be defaulted to http.
- //
- // If unset, will the EC2 IMDS client will use its default endpoint.
- //
- // Can also be specified via the environment variable,
- // AWS_EC2_METADATA_SERVICE_ENDPOINT.
- //
- // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
- //
- // If using an URL with an IPv6 address literal, the IPv6 address
- // component must be enclosed in square brackets.
- //
- // AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
- EC2IMDSEndpoint string
-
- // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
- //
- // AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE=IPv6
- EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
-
- // Specifies options for creating credential providers.
- // These are only used if the aws.Config does not already
- // include credentials.
- CredentialsProviderOptions *CredentialsProviderOptions
-}
-
-// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
-// environment, and user provided config files. This func uses the Options
-// values to configure how the Session is created.
-//
-// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
-// the shared config file (~/.aws/config) will also be loaded in addition to
-// the shared credentials file (~/.aws/credentials). Values set in both the
-// shared config, and shared credentials will be taken from the shared
-// credentials file. Enabling the Shared Config will also allow the Session
-// to be built with retrieving credentials with AssumeRole set in the config.
-//
-// // Equivalent to session.New
-// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
-//
-// // Specify profile to load for the session's config
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// Profile: "profile_name",
-// }))
-//
-// // Specify profile for config and region for requests
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// Config: aws.Config{Region: aws.String("us-east-1")},
-// Profile: "profile_name",
-// }))
-//
-// // Force enable Shared Config support
-// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// SharedConfigState: session.SharedConfigEnable,
-// }))
-func NewSessionWithOptions(opts Options) (*Session, error) {
- var envCfg envConfig
- var err error
- if opts.SharedConfigState == SharedConfigEnable {
- envCfg, err = loadSharedEnvConfig()
- if err != nil {
- return nil, fmt.Errorf("failed to load shared config, %v", err)
- }
- } else {
- envCfg, err = loadEnvConfig()
- if err != nil {
- return nil, fmt.Errorf("failed to load environment config, %v", err)
- }
- }
-
- if len(opts.Profile) != 0 {
- envCfg.Profile = opts.Profile
- }
-
- switch opts.SharedConfigState {
- case SharedConfigDisable:
- envCfg.EnableSharedConfig = false
- case SharedConfigEnable:
- envCfg.EnableSharedConfig = true
- }
-
- return newSession(opts, envCfg, &opts.Config)
-}
-
-// Must is a helper function to ensure the Session is valid and there was no
-// error when calling a NewSession function.
-//
-// This helper is intended to be used in variable initialization to load the
-// Session and configuration at startup. Such as:
-//
-// var sess = session.Must(session.NewSession())
-func Must(sess *Session, err error) *Session {
- if err != nil {
- panic(err)
- }
-
- return sess
-}
-
-// Wraps the endpoint resolver with a resolver that will return a custom
-// endpoint for EC2 IMDS.
-func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string, mode endpoints.EC2IMDSEndpointModeState) endpoints.Resolver {
- return endpoints.ResolverFunc(
- func(service, region string, opts ...func(*endpoints.Options)) (
- endpoints.ResolvedEndpoint, error,
- ) {
- if service == ec2MetadataServiceID && len(endpoint) > 0 {
- return endpoints.ResolvedEndpoint{
- URL: endpoint,
- SigningName: ec2MetadataServiceID,
- SigningRegion: region,
- }, nil
- } else if service == ec2MetadataServiceID {
- opts = append(opts, func(o *endpoints.Options) {
- o.EC2MetadataEndpointMode = mode
- })
- }
- return resolver.EndpointFor(service, region, opts...)
- })
-}
-
-func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session {
- cfg := defaults.Config()
- handlers := defaults.Handlers()
-
- // Apply the passed in configs so the configuration can be applied to the
- // default credential chain
- cfg.MergeIn(cfgs...)
- if cfg.EndpointResolver == nil {
- // An endpoint resolver is required for a session to be able to provide
- // endpoints for service client configurations.
- cfg.EndpointResolver = endpoints.DefaultResolver()
- }
-
- if !(len(envCfg.EC2IMDSEndpoint) == 0 && envCfg.EC2IMDSEndpointMode == endpoints.EC2IMDSEndpointModeStateUnset) {
- cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint, envCfg.EC2IMDSEndpointMode)
- }
-
- cfg.Credentials = defaults.CredChain(cfg, handlers)
-
- // Reapply any passed in configs to override credentials if set
- cfg.MergeIn(cfgs...)
-
- s := &Session{
- Config: cfg,
- Handlers: handlers,
- options: Options{
- EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
- },
- }
-
- initHandlers(s)
- return s
-}
-
-func enableCSM(handlers *request.Handlers, cfg csmConfig, logger aws.Logger) error {
- if logger != nil {
- logger.Log("Enabling CSM")
- }
-
- r, err := csm.Start(cfg.ClientID, csm.AddressWithDefaults(cfg.Host, cfg.Port))
- if err != nil {
- return err
- }
- r.InjectHandlers(handlers)
-
- return nil
-}
-
-func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
- cfg := defaults.Config()
-
- handlers := opts.Handlers
- if handlers.IsEmpty() {
- handlers = defaults.Handlers()
- }
-
- // Get a merged version of the user provided config to determine if
- // credentials were.
- userCfg := &aws.Config{}
- userCfg.MergeIn(cfgs...)
- cfg.MergeIn(userCfg)
-
- // Ordered config files will be loaded in with later files overwriting
- // previous config file values.
- var cfgFiles []string
- if opts.SharedConfigFiles != nil {
- cfgFiles = opts.SharedConfigFiles
- } else {
- cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
- if !envCfg.EnableSharedConfig {
- // The shared config file (~/.aws/config) is only loaded if instructed
- // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
- cfgFiles = cfgFiles[1:]
- }
- }
-
- // Load additional config from file(s)
- sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles, envCfg.EnableSharedConfig)
- if err != nil {
- if len(envCfg.Profile) == 0 && !envCfg.EnableSharedConfig && (envCfg.Creds.HasKeys() || userCfg.Credentials != nil) {
- // Special case where the user has not explicitly specified an AWS_PROFILE,
- // or session.Options.profile, shared config is not enabled, and the
- // environment has credentials, allow the shared config file to fail to
- // load since the user has already provided credentials, and nothing else
- // is required to be read file. Github(aws/aws-sdk-go#2455)
- } else if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
- return nil, err
- }
- }
-
- if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
- return nil, err
- }
-
- if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
- return nil, err
- }
-
- s := &Session{
- Config: cfg,
- Handlers: handlers,
- options: opts,
- }
-
- initHandlers(s)
-
- if csmCfg, err := loadCSMConfig(envCfg, cfgFiles); err != nil {
- if l := s.Config.Logger; l != nil {
- l.Log(fmt.Sprintf("ERROR: failed to load CSM configuration, %v", err))
- }
- } else if csmCfg.Enabled {
- err = enableCSM(&s.Handlers, csmCfg, s.Config.Logger)
- if err != nil {
- return nil, err
- }
- }
-
- return s, nil
-}
-
-type csmConfig struct {
- Enabled bool
- Host string
- Port string
- ClientID string
-}
-
-var csmProfileName = "aws_csm"
-
-func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
- if envCfg.CSMEnabled != nil {
- if *envCfg.CSMEnabled {
- return csmConfig{
- Enabled: true,
- ClientID: envCfg.CSMClientID,
- Host: envCfg.CSMHost,
- Port: envCfg.CSMPort,
- }, nil
- }
- return csmConfig{}, nil
- }
-
- sharedCfg, err := loadSharedConfig(csmProfileName, cfgFiles, false)
- if err != nil {
- if _, ok := err.(SharedConfigProfileNotExistsError); !ok {
- return csmConfig{}, err
- }
- }
- if sharedCfg.CSMEnabled != nil && *sharedCfg.CSMEnabled == true {
- return csmConfig{
- Enabled: true,
- ClientID: sharedCfg.CSMClientID,
- Host: sharedCfg.CSMHost,
- Port: sharedCfg.CSMPort,
- }, nil
- }
-
- return csmConfig{}, nil
-}
-
-func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
- // CA Bundle can be specified in both environment variable shared config file.
- var caBundleFilename = envCfg.CustomCABundle
- if len(caBundleFilename) == 0 {
- caBundleFilename = sharedCfg.CustomCABundle
- }
-
- // Only use environment value if session option is not provided.
- customTLSOptions := map[string]struct {
- filename string
- field *io.Reader
- errCode string
- }{
- "custom CA bundle PEM": {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
- "custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
- "custom client TLS key": {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
- }
- for name, v := range customTLSOptions {
- if len(v.filename) != 0 && *v.field == nil {
- f, err := os.Open(v.filename)
- if err != nil {
- return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
- }
- defer f.Close()
- *v.field = f
- }
- }
-
- // Setup HTTP client with custom cert bundle if enabled
- if opts.CustomCABundle != nil {
- if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
- return err
- }
- }
-
- // Setup HTTP client TLS certificate and key for client TLS authentication.
- if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
- if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
- return err
- }
- } else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
- // Do nothing if neither values are available.
-
- } else {
- return awserr.New(ErrCodeLoadClientTLSCert,
- fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
- opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
- }
-
- return nil
-}
-
-func getHTTPTransport(client *http.Client) (*http.Transport, error) {
- var t *http.Transport
- switch v := client.Transport.(type) {
- case *http.Transport:
- t = v
- default:
- if client.Transport != nil {
- return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
- }
- }
- if t == nil {
- // Nil transport implies `http.DefaultTransport` should be used. Since
- // the SDK cannot modify, nor copy the `DefaultTransport` specifying
- // the values the next closest behavior.
- t = getCustomTransport()
- }
-
- return t, nil
-}
-
-func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
- t, err := getHTTPTransport(client)
- if err != nil {
- return awserr.New(ErrCodeLoadCustomCABundle,
- "unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
- }
-
- p, err := loadCertPool(bundle)
- if err != nil {
- return err
- }
- if t.TLSClientConfig == nil {
- t.TLSClientConfig = &tls.Config{}
- }
- t.TLSClientConfig.RootCAs = p
-
- client.Transport = t
-
- return nil
-}
-
-func loadCertPool(r io.Reader) (*x509.CertPool, error) {
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, awserr.New(ErrCodeLoadCustomCABundle,
- "failed to read custom CA bundle PEM file", err)
- }
-
- p := x509.NewCertPool()
- if !p.AppendCertsFromPEM(b) {
- return nil, awserr.New(ErrCodeLoadCustomCABundle,
- "failed to load custom CA bundle PEM file", err)
- }
-
- return p, nil
-}
-
-func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
- t, err := getHTTPTransport(client)
- if err != nil {
- return awserr.New(ErrCodeLoadClientTLSCert,
- "unable to get usable HTTP transport from client", err)
- }
-
- cert, err := ioutil.ReadAll(certFile)
- if err != nil {
- return awserr.New(ErrCodeLoadClientTLSCert,
- "unable to get read client TLS cert file", err)
- }
-
- key, err := ioutil.ReadAll(keyFile)
- if err != nil {
- return awserr.New(ErrCodeLoadClientTLSCert,
- "unable to get read client TLS key file", err)
- }
-
- clientCert, err := tls.X509KeyPair(cert, key)
- if err != nil {
- return awserr.New(ErrCodeLoadClientTLSCert,
- "unable to load x509 key pair from client cert", err)
- }
-
- tlsCfg := t.TLSClientConfig
- if tlsCfg == nil {
- tlsCfg = &tls.Config{}
- }
-
- tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
-
- t.TLSClientConfig = tlsCfg
- client.Transport = t
-
- return nil
-}
-
-func mergeConfigSrcs(cfg, userCfg *aws.Config,
- envCfg envConfig, sharedCfg sharedConfig,
- handlers request.Handlers,
- sessOpts Options,
-) error {
-
- // Region if not already set by user
- if len(aws.StringValue(cfg.Region)) == 0 {
- if len(envCfg.Region) > 0 {
- cfg.WithRegion(envCfg.Region)
- } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 {
- cfg.WithRegion(sharedCfg.Region)
- }
- }
-
- if cfg.EnableEndpointDiscovery == nil {
- if envCfg.EnableEndpointDiscovery != nil {
- cfg.WithEndpointDiscovery(*envCfg.EnableEndpointDiscovery)
- } else if envCfg.EnableSharedConfig && sharedCfg.EnableEndpointDiscovery != nil {
- cfg.WithEndpointDiscovery(*sharedCfg.EnableEndpointDiscovery)
- }
- }
-
- // Regional Endpoint flag for STS endpoint resolving
- mergeSTSRegionalEndpointConfig(cfg, []endpoints.STSRegionalEndpoint{
- userCfg.STSRegionalEndpoint,
- envCfg.STSRegionalEndpoint,
- sharedCfg.STSRegionalEndpoint,
- endpoints.LegacySTSEndpoint,
- })
-
- // Regional Endpoint flag for S3 endpoint resolving
- mergeS3UsEast1RegionalEndpointConfig(cfg, []endpoints.S3UsEast1RegionalEndpoint{
- userCfg.S3UsEast1RegionalEndpoint,
- envCfg.S3UsEast1RegionalEndpoint,
- sharedCfg.S3UsEast1RegionalEndpoint,
- endpoints.LegacyS3UsEast1Endpoint,
- })
-
- var ec2IMDSEndpoint string
- for _, v := range []string{
- sessOpts.EC2IMDSEndpoint,
- envCfg.EC2IMDSEndpoint,
- sharedCfg.EC2IMDSEndpoint,
- } {
- if len(v) != 0 {
- ec2IMDSEndpoint = v
- break
- }
- }
-
- var endpointMode endpoints.EC2IMDSEndpointModeState
- for _, v := range []endpoints.EC2IMDSEndpointModeState{
- sessOpts.EC2IMDSEndpointMode,
- envCfg.EC2IMDSEndpointMode,
- sharedCfg.EC2IMDSEndpointMode,
- } {
- if v != endpoints.EC2IMDSEndpointModeStateUnset {
- endpointMode = v
- break
- }
- }
-
- if len(ec2IMDSEndpoint) != 0 || endpointMode != endpoints.EC2IMDSEndpointModeStateUnset {
- cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint, endpointMode)
- }
-
- cfg.EC2MetadataEnableFallback = userCfg.EC2MetadataEnableFallback
- if cfg.EC2MetadataEnableFallback == nil && envCfg.EC2IMDSv1Disabled != nil {
- cfg.EC2MetadataEnableFallback = aws.Bool(!*envCfg.EC2IMDSv1Disabled)
- }
- if cfg.EC2MetadataEnableFallback == nil && sharedCfg.EC2IMDSv1Disabled != nil {
- cfg.EC2MetadataEnableFallback = aws.Bool(!*sharedCfg.EC2IMDSv1Disabled)
- }
-
- cfg.S3UseARNRegion = userCfg.S3UseARNRegion
- if cfg.S3UseARNRegion == nil {
- cfg.S3UseARNRegion = &envCfg.S3UseARNRegion
- }
- if cfg.S3UseARNRegion == nil {
- cfg.S3UseARNRegion = &sharedCfg.S3UseARNRegion
- }
-
- for _, v := range []endpoints.DualStackEndpointState{userCfg.UseDualStackEndpoint, envCfg.UseDualStackEndpoint, sharedCfg.UseDualStackEndpoint} {
- if v != endpoints.DualStackEndpointStateUnset {
- cfg.UseDualStackEndpoint = v
- break
- }
- }
-
- for _, v := range []endpoints.FIPSEndpointState{userCfg.UseFIPSEndpoint, envCfg.UseFIPSEndpoint, sharedCfg.UseFIPSEndpoint} {
- if v != endpoints.FIPSEndpointStateUnset {
- cfg.UseFIPSEndpoint = v
- break
- }
- }
-
- // Configure credentials if not already set by the user when creating the Session.
- // Credentials are resolved last such that all _resolved_ config values are propagated to credential providers.
- // ticket: P83606045
- if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
- creds, err := resolveCredentials(cfg, envCfg, sharedCfg, handlers, sessOpts)
- if err != nil {
- return err
- }
- cfg.Credentials = creds
- }
-
- return nil
-}
-
-func mergeSTSRegionalEndpointConfig(cfg *aws.Config, values []endpoints.STSRegionalEndpoint) {
- for _, v := range values {
- if v != endpoints.UnsetSTSEndpoint {
- cfg.STSRegionalEndpoint = v
- break
- }
- }
-}
-
-func mergeS3UsEast1RegionalEndpointConfig(cfg *aws.Config, values []endpoints.S3UsEast1RegionalEndpoint) {
- for _, v := range values {
- if v != endpoints.UnsetS3UsEast1Endpoint {
- cfg.S3UsEast1RegionalEndpoint = v
- break
- }
- }
-}
-
-func initHandlers(s *Session) {
- // Add the Validate parameter handler if it is not disabled.
- s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
- if !aws.BoolValue(s.Config.DisableParamValidation) {
- s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
- }
-}
-
-// Copy creates and returns a copy of the current Session, copying the config
-// and handlers. If any additional configs are provided they will be merged
-// on top of the Session's copied config.
-//
-// // Create a copy of the current Session, configured for the us-west-2 region.
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
-func (s *Session) Copy(cfgs ...*aws.Config) *Session {
- newSession := &Session{
- Config: s.Config.Copy(cfgs...),
- Handlers: s.Handlers.Copy(),
- options: s.options,
- }
-
- initHandlers(newSession)
-
- return newSession
-}
-
-// ClientConfig satisfies the client.ConfigProvider interface and is used to
-// configure the service client instances. Passing the Session to the service
-// client's constructor (New) will use this method to configure the client.
-func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Config {
- s = s.Copy(cfgs...)
-
- resolvedRegion := normalizeRegion(s.Config)
-
- region := aws.StringValue(s.Config.Region)
- resolved, err := s.resolveEndpoint(service, region, resolvedRegion, s.Config)
- if err != nil {
- s.Handlers.Validate.PushBack(func(r *request.Request) {
- if len(r.ClientInfo.Endpoint) != 0 {
- // Error occurred while resolving endpoint, but the request
- // being invoked has had an endpoint specified after the client
- // was created.
- return
- }
- r.Error = err
- })
- }
-
- return client.Config{
- Config: s.Config,
- Handlers: s.Handlers,
- PartitionID: resolved.PartitionID,
- Endpoint: resolved.URL,
- SigningRegion: resolved.SigningRegion,
- SigningNameDerived: resolved.SigningNameDerived,
- SigningName: resolved.SigningName,
- ResolvedRegion: resolvedRegion,
- }
-}
-
-const ec2MetadataServiceID = "ec2metadata"
-
-func (s *Session) resolveEndpoint(service, region, resolvedRegion string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
-
- if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {
- return endpoints.ResolvedEndpoint{
- URL: endpoints.AddScheme(ep, aws.BoolValue(cfg.DisableSSL)),
- SigningRegion: region,
- }, nil
- }
-
- resolved, err := cfg.EndpointResolver.EndpointFor(service, region,
- func(opt *endpoints.Options) {
- opt.DisableSSL = aws.BoolValue(cfg.DisableSSL)
-
- opt.UseDualStack = aws.BoolValue(cfg.UseDualStack)
- opt.UseDualStackEndpoint = cfg.UseDualStackEndpoint
-
- opt.UseFIPSEndpoint = cfg.UseFIPSEndpoint
-
- // Support for STSRegionalEndpoint where the STSRegionalEndpoint is
- // provided in envConfig or sharedConfig with envConfig getting
- // precedence.
- opt.STSRegionalEndpoint = cfg.STSRegionalEndpoint
-
- // Support for S3UsEast1RegionalEndpoint where the S3UsEast1RegionalEndpoint is
- // provided in envConfig or sharedConfig with envConfig getting
- // precedence.
- opt.S3UsEast1RegionalEndpoint = cfg.S3UsEast1RegionalEndpoint
-
- // Support the condition where the service is modeled but its
- // endpoint metadata is not available.
- opt.ResolveUnknownService = true
-
- opt.ResolvedRegion = resolvedRegion
-
- opt.Logger = cfg.Logger
- opt.LogDeprecated = cfg.LogLevel.Matches(aws.LogDebugWithDeprecated)
- },
- )
- if err != nil {
- return endpoints.ResolvedEndpoint{}, err
- }
-
- return resolved, nil
-}
-
-// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception
-// that the EndpointResolver will not be used to resolve the endpoint. The only
-// endpoint set must come from the aws.Config.Endpoint field.
-func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config {
- s = s.Copy(cfgs...)
-
- resolvedRegion := normalizeRegion(s.Config)
-
- var resolved endpoints.ResolvedEndpoint
- if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 {
- resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL))
- resolved.SigningRegion = aws.StringValue(s.Config.Region)
- }
-
- return client.Config{
- Config: s.Config,
- Handlers: s.Handlers,
- Endpoint: resolved.URL,
- SigningRegion: resolved.SigningRegion,
- SigningNameDerived: resolved.SigningNameDerived,
- SigningName: resolved.SigningName,
- ResolvedRegion: resolvedRegion,
- }
-}
-
-// logDeprecatedNewSessionError function enables error handling for session
-func (s *Session) logDeprecatedNewSessionError(msg string, err error, cfgs []*aws.Config) {
- // Session creation failed, need to report the error and prevent
- // any requests from succeeding.
- s.Config.MergeIn(cfgs...)
- s.Config.Logger.Log("ERROR:", msg, "Error:", err)
- s.Handlers.Validate.PushBack(func(r *request.Request) {
- r.Error = err
- })
-}
-
-// normalizeRegion resolves / normalizes the configured region (converts pseudo fips regions), and modifies the provided
-// config to have the equivalent options for resolution and returns the resolved region name.
-func normalizeRegion(cfg *aws.Config) (resolved string) {
- const fipsInfix = "-fips-"
- const fipsPrefix = "-fips"
- const fipsSuffix = "fips-"
-
- region := aws.StringValue(cfg.Region)
-
- if strings.Contains(region, fipsInfix) ||
- strings.Contains(region, fipsPrefix) ||
- strings.Contains(region, fipsSuffix) {
- resolved = strings.Replace(strings.Replace(strings.Replace(
- region, fipsInfix, "-", -1), fipsPrefix, "", -1), fipsSuffix, "", -1)
- cfg.UseFIPSEndpoint = endpoints.FIPSEndpointStateEnabled
- }
-
- return resolved
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
deleted file mode 100644
index f3ce8183d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ /dev/null
@@ -1,856 +0,0 @@
-package session
-
-import (
- "fmt"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/endpoints"
- "github.com/aws/aws-sdk-go/internal/ini"
-)
-
-const (
- // Static Credentials group
- accessKeyIDKey = `aws_access_key_id` // group required
- secretAccessKey = `aws_secret_access_key` // group required
- sessionTokenKey = `aws_session_token` // optional
-
- // Assume Role Credentials group
- roleArnKey = `role_arn` // group required
- sourceProfileKey = `source_profile` // group required (or credential_source)
- credentialSourceKey = `credential_source` // group required (or source_profile)
- externalIDKey = `external_id` // optional
- mfaSerialKey = `mfa_serial` // optional
- roleSessionNameKey = `role_session_name` // optional
- roleDurationSecondsKey = "duration_seconds" // optional
-
- // Prefix to be used for SSO sections. These are supposed to only exist in
- // the shared config file, not the credentials file.
- ssoSectionPrefix = `sso-session `
-
- // AWS Single Sign-On (AWS SSO) group
- ssoSessionNameKey = "sso_session"
-
- // AWS Single Sign-On (AWS SSO) group
- ssoAccountIDKey = "sso_account_id"
- ssoRegionKey = "sso_region"
- ssoRoleNameKey = "sso_role_name"
- ssoStartURL = "sso_start_url"
-
- // CSM options
- csmEnabledKey = `csm_enabled`
- csmHostKey = `csm_host`
- csmPortKey = `csm_port`
- csmClientIDKey = `csm_client_id`
-
- // Additional Config fields
- regionKey = `region`
-
- // custom CA Bundle filename
- customCABundleKey = `ca_bundle`
-
- // endpoint discovery group
- enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional
-
- // External Credential Process
- credentialProcessKey = `credential_process` // optional
-
- // Web Identity Token File
- webIdentityTokenFileKey = `web_identity_token_file` // optional
-
- // Additional config fields for regional or legacy endpoints
- stsRegionalEndpointSharedKey = `sts_regional_endpoints`
-
- // Additional config fields for regional or legacy endpoints
- s3UsEast1RegionalSharedKey = `s3_us_east_1_regional_endpoint`
-
- // DefaultSharedConfigProfile is the default profile to be used when
- // loading configuration from the config files if another profile name
- // is not provided.
- DefaultSharedConfigProfile = `default`
-
- // S3 ARN Region Usage
- s3UseARNRegionKey = "s3_use_arn_region"
-
- // EC2 IMDS Endpoint Mode
- ec2MetadataServiceEndpointModeKey = "ec2_metadata_service_endpoint_mode"
-
- // EC2 IMDS Endpoint
- ec2MetadataServiceEndpointKey = "ec2_metadata_service_endpoint"
-
- // ECS IMDSv1 disable fallback
- ec2MetadataV1DisabledKey = "ec2_metadata_v1_disabled"
-
- // Use DualStack Endpoint Resolution
- useDualStackEndpoint = "use_dualstack_endpoint"
-
- // Use FIPS Endpoint Resolution
- useFIPSEndpointKey = "use_fips_endpoint"
-)
-
-// sharedConfig represents the configuration fields of the SDK config files.
-type sharedConfig struct {
- Profile string
-
- // Credentials values from the config file. Both aws_access_key_id and
- // aws_secret_access_key must be provided together in the same file to be
- // considered valid. The values will be ignored if not a complete group.
- // aws_session_token is an optional field that can be provided if both of
- // the other two fields are also provided.
- //
- // aws_access_key_id
- // aws_secret_access_key
- // aws_session_token
- Creds credentials.Value
-
- CredentialSource string
- CredentialProcess string
- WebIdentityTokenFile string
-
- // SSO session options
- SSOSessionName string
- SSOSession *ssoSession
-
- SSOAccountID string
- SSORegion string
- SSORoleName string
- SSOStartURL string
-
- RoleARN string
- RoleSessionName string
- ExternalID string
- MFASerial string
- AssumeRoleDuration *time.Duration
-
- SourceProfileName string
- SourceProfile *sharedConfig
-
- // Region is the region the SDK should use for looking up AWS service
- // endpoints and signing requests.
- //
- // region
- Region string
-
- // CustomCABundle is the file path to a PEM file the SDK will read and
- // use to configure the HTTP transport with additional CA certs that are
- // not present in the platforms default CA store.
- //
- // This value will be ignored if the file does not exist.
- //
- // ca_bundle
- CustomCABundle string
-
- // EnableEndpointDiscovery can be enabled in the shared config by setting
- // endpoint_discovery_enabled to true
- //
- // endpoint_discovery_enabled = true
- EnableEndpointDiscovery *bool
-
- // CSM Options
- CSMEnabled *bool
- CSMHost string
- CSMPort string
- CSMClientID string
-
- // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
- //
- // sts_regional_endpoints = regional
- // This can take value as `LegacySTSEndpoint` or `RegionalSTSEndpoint`
- STSRegionalEndpoint endpoints.STSRegionalEndpoint
-
- // Specifies the Regional Endpoint flag for the SDK to resolve the endpoint for a service
- //
- // s3_us_east_1_regional_endpoint = regional
- // This can take value as `LegacyS3UsEast1Endpoint` or `RegionalS3UsEast1Endpoint`
- S3UsEast1RegionalEndpoint endpoints.S3UsEast1RegionalEndpoint
-
- // Specifies if the S3 service should allow ARNs to direct the region
- // the client's requests are sent to.
- //
- // s3_use_arn_region=true
- S3UseARNRegion bool
-
- // Specifies the EC2 Instance Metadata Service default endpoint selection mode (IPv4 or IPv6)
- //
- // ec2_metadata_service_endpoint_mode=IPv6
- EC2IMDSEndpointMode endpoints.EC2IMDSEndpointModeState
-
- // Specifies the EC2 Instance Metadata Service endpoint to use. If specified it overrides EC2IMDSEndpointMode.
- //
- // ec2_metadata_service_endpoint=http://fd00:ec2::254
- EC2IMDSEndpoint string
-
- // Specifies that IMDS clients should not fallback to IMDSv1 if token
- // requests fail.
- //
- // ec2_metadata_v1_disabled=true
- EC2IMDSv1Disabled *bool
-
- // Specifies that SDK clients must resolve a dual-stack endpoint for
- // services.
- //
- // use_dualstack_endpoint=true
- UseDualStackEndpoint endpoints.DualStackEndpointState
-
- // Specifies that SDK clients must resolve a FIPS endpoint for
- // services.
- //
- // use_fips_endpoint=true
- UseFIPSEndpoint endpoints.FIPSEndpointState
-}
-
-type sharedConfigFile struct {
- Filename string
- IniData ini.Sections
-}
-
-// SSOSession provides the shared configuration parameters of the sso-session
-// section.
-type ssoSession struct {
- Name string
- SSORegion string
- SSOStartURL string
-}
-
-func (s *ssoSession) setFromIniSection(section ini.Section) {
- updateString(&s.Name, section, ssoSessionNameKey)
- updateString(&s.SSORegion, section, ssoRegionKey)
- updateString(&s.SSOStartURL, section, ssoStartURL)
-}
-
-// loadSharedConfig retrieves the configuration from the list of files using
-// the profile provided. The order the files are listed will determine
-// precedence. Values in subsequent files will overwrite values defined in
-// earlier files.
-//
-// For example, given two files A and B. Both define credentials. If the order
-// of the files are A then B, B's credential values will be used instead of
-// A's.
-//
-// See sharedConfig.setFromFile for information how the config files
-// will be loaded.
-func loadSharedConfig(profile string, filenames []string, exOpts bool) (sharedConfig, error) {
- if len(profile) == 0 {
- profile = DefaultSharedConfigProfile
- }
-
- files, err := loadSharedConfigIniFiles(filenames)
- if err != nil {
- return sharedConfig{}, err
- }
-
- cfg := sharedConfig{}
- profiles := map[string]struct{}{}
- if err = cfg.setFromIniFiles(profiles, profile, files, exOpts); err != nil {
- return sharedConfig{}, err
- }
-
- return cfg, nil
-}
-
-func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
- files := make([]sharedConfigFile, 0, len(filenames))
-
- for _, filename := range filenames {
- sections, err := ini.OpenFile(filename)
- if aerr, ok := err.(awserr.Error); ok && aerr.Code() == ini.ErrCodeUnableToReadFile {
- // Skip files which can't be opened and read for whatever reason
- continue
- } else if err != nil {
- return nil, SharedConfigLoadError{Filename: filename, Err: err}
- }
-
- files = append(files, sharedConfigFile{
- Filename: filename, IniData: sections,
- })
- }
-
- return files, nil
-}
-
-func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile string, files []sharedConfigFile, exOpts bool) error {
- cfg.Profile = profile
-
- // Trim files from the list that don't exist.
- var skippedFiles int
- var profileNotFoundErr error
- for _, f := range files {
- if err := cfg.setFromIniFile(profile, f, exOpts); err != nil {
- if _, ok := err.(SharedConfigProfileNotExistsError); ok {
- // Ignore profiles not defined in individual files.
- profileNotFoundErr = err
- skippedFiles++
- continue
- }
- return err
- }
- }
- if skippedFiles == len(files) {
- // If all files were skipped because the profile is not found, return
- // the original profile not found error.
- return profileNotFoundErr
- }
-
- if _, ok := profiles[profile]; ok {
- // if this is the second instance of the profile the Assume Role
- // options must be cleared because they are only valid for the
- // first reference of a profile. The self linked instance of the
- // profile only have credential provider options.
- cfg.clearAssumeRoleOptions()
- } else {
- // First time a profile has been seen. Assert if the credential type
- // requires a role ARN, the ARN is also set
- if err := cfg.validateCredentialsConfig(profile); err != nil {
- return err
- }
- }
-
- profiles[profile] = struct{}{}
-
- if err := cfg.validateCredentialType(); err != nil {
- return err
- }
-
- // Link source profiles for assume roles
- if len(cfg.SourceProfileName) != 0 {
- // Linked profile via source_profile ignore credential provider
- // options, the source profile must provide the credentials.
- cfg.clearCredentialOptions()
-
- srcCfg := &sharedConfig{}
- err := srcCfg.setFromIniFiles(profiles, cfg.SourceProfileName, files, exOpts)
- if err != nil {
- // SourceProfile that doesn't exist is an error in configuration.
- if _, ok := err.(SharedConfigProfileNotExistsError); ok {
- err = SharedConfigAssumeRoleError{
- RoleARN: cfg.RoleARN,
- SourceProfile: cfg.SourceProfileName,
- }
- }
- return err
- }
-
- if !srcCfg.hasCredentials() {
- return SharedConfigAssumeRoleError{
- RoleARN: cfg.RoleARN,
- SourceProfile: cfg.SourceProfileName,
- }
- }
-
- cfg.SourceProfile = srcCfg
- }
-
- // If the profile contains an SSO session parameter, the session MUST exist
- // as a section in the config file. Load the SSO session using the name
- // provided. If the session section is not found or incomplete an error
- // will be returned.
- if cfg.hasSSOTokenProviderConfiguration() {
- skippedFiles = 0
- for _, f := range files {
- section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)))
- if ok {
- var ssoSession ssoSession
- ssoSession.setFromIniSection(section)
- ssoSession.Name = cfg.SSOSessionName
- cfg.SSOSession = &ssoSession
- break
- }
- skippedFiles++
- }
- if skippedFiles == len(files) {
- // If all files were skipped because the sso session section is not found, return
- // the sso section not found error.
- return fmt.Errorf("failed to find SSO session section, %v", cfg.SSOSessionName)
- }
- }
-
- return nil
-}
-
-// setFromFile loads the configuration from the file using the profile
-// provided. A sharedConfig pointer type value is used so that multiple config
-// file loadings can be chained.
-//
-// Only loads complete logically grouped values, and will not set fields in cfg
-// for incomplete grouped values in the config. Such as credentials. For
-// example if a config file only includes aws_access_key_id but no
-// aws_secret_access_key the aws_access_key_id will be ignored.
-func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, exOpts bool) error {
- section, ok := file.IniData.GetSection(profile)
- if !ok {
- // Fallback to to alternate profile name: profile
- section, ok = file.IniData.GetSection(fmt.Sprintf("profile %s", profile))
- if !ok {
- return SharedConfigProfileNotExistsError{Profile: profile, Err: nil}
- }
- }
-
- if exOpts {
- // Assume Role Parameters
- updateString(&cfg.RoleARN, section, roleArnKey)
- updateString(&cfg.ExternalID, section, externalIDKey)
- updateString(&cfg.MFASerial, section, mfaSerialKey)
- updateString(&cfg.RoleSessionName, section, roleSessionNameKey)
- updateString(&cfg.SourceProfileName, section, sourceProfileKey)
- updateString(&cfg.CredentialSource, section, credentialSourceKey)
- updateString(&cfg.Region, section, regionKey)
- updateString(&cfg.CustomCABundle, section, customCABundleKey)
-
- // we're retaining a behavioral quirk with this field that existed before
- // the removal of literal parsing for (aws-sdk-go-v2/#2276):
- // - if the key is missing, the config field will not be set
- // - if the key is set to a non-numeric, the config field will be set to 0
- if section.Has(roleDurationSecondsKey) {
- var d time.Duration
- if v, ok := section.Int(roleDurationSecondsKey); ok {
- d = time.Duration(v) * time.Second
- }
- cfg.AssumeRoleDuration = &d
- }
-
- if v := section.String(stsRegionalEndpointSharedKey); len(v) != 0 {
- sre, err := endpoints.GetSTSRegionalEndpoint(v)
- if err != nil {
- return fmt.Errorf("failed to load %s from shared config, %s, %v",
- stsRegionalEndpointSharedKey, file.Filename, err)
- }
- cfg.STSRegionalEndpoint = sre
- }
-
- if v := section.String(s3UsEast1RegionalSharedKey); len(v) != 0 {
- sre, err := endpoints.GetS3UsEast1RegionalEndpoint(v)
- if err != nil {
- return fmt.Errorf("failed to load %s from shared config, %s, %v",
- s3UsEast1RegionalSharedKey, file.Filename, err)
- }
- cfg.S3UsEast1RegionalEndpoint = sre
- }
-
- // AWS Single Sign-On (AWS SSO)
- // SSO session options
- updateString(&cfg.SSOSessionName, section, ssoSessionNameKey)
-
- // AWS Single Sign-On (AWS SSO)
- updateString(&cfg.SSOAccountID, section, ssoAccountIDKey)
- updateString(&cfg.SSORegion, section, ssoRegionKey)
- updateString(&cfg.SSORoleName, section, ssoRoleNameKey)
- updateString(&cfg.SSOStartURL, section, ssoStartURL)
-
- if err := updateEC2MetadataServiceEndpointMode(&cfg.EC2IMDSEndpointMode, section, ec2MetadataServiceEndpointModeKey); err != nil {
- return fmt.Errorf("failed to load %s from shared config, %s, %v",
- ec2MetadataServiceEndpointModeKey, file.Filename, err)
- }
- updateString(&cfg.EC2IMDSEndpoint, section, ec2MetadataServiceEndpointKey)
- updateBoolPtr(&cfg.EC2IMDSv1Disabled, section, ec2MetadataV1DisabledKey)
-
- updateUseDualStackEndpoint(&cfg.UseDualStackEndpoint, section, useDualStackEndpoint)
-
- updateUseFIPSEndpoint(&cfg.UseFIPSEndpoint, section, useFIPSEndpointKey)
- }
-
- updateString(&cfg.CredentialProcess, section, credentialProcessKey)
- updateString(&cfg.WebIdentityTokenFile, section, webIdentityTokenFileKey)
-
- // Shared Credentials
- creds := credentials.Value{
- AccessKeyID: section.String(accessKeyIDKey),
- SecretAccessKey: section.String(secretAccessKey),
- SessionToken: section.String(sessionTokenKey),
- ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename),
- }
- if creds.HasKeys() {
- cfg.Creds = creds
- }
-
- // Endpoint discovery
- updateBoolPtr(&cfg.EnableEndpointDiscovery, section, enableEndpointDiscoveryKey)
-
- // CSM options
- updateBoolPtr(&cfg.CSMEnabled, section, csmEnabledKey)
- updateString(&cfg.CSMHost, section, csmHostKey)
- updateString(&cfg.CSMPort, section, csmPortKey)
- updateString(&cfg.CSMClientID, section, csmClientIDKey)
-
- updateBool(&cfg.S3UseARNRegion, section, s3UseARNRegionKey)
-
- return nil
-}
-
-func updateEC2MetadataServiceEndpointMode(endpointMode *endpoints.EC2IMDSEndpointModeState, section ini.Section, key string) error {
- if !section.Has(key) {
- return nil
- }
- value := section.String(key)
- return endpointMode.SetFromString(value)
-}
-
-func (cfg *sharedConfig) validateCredentialsConfig(profile string) error {
- if err := cfg.validateCredentialsRequireARN(profile); err != nil {
- return err
- }
-
- return nil
-}
-
-func (cfg *sharedConfig) validateCredentialsRequireARN(profile string) error {
- var credSource string
-
- switch {
- case len(cfg.SourceProfileName) != 0:
- credSource = sourceProfileKey
- case len(cfg.CredentialSource) != 0:
- credSource = credentialSourceKey
- case len(cfg.WebIdentityTokenFile) != 0:
- credSource = webIdentityTokenFileKey
- }
-
- if len(credSource) != 0 && len(cfg.RoleARN) == 0 {
- return CredentialRequiresARNError{
- Type: credSource,
- Profile: profile,
- }
- }
-
- return nil
-}
-
-func (cfg *sharedConfig) validateCredentialType() error {
- // Only one or no credential type can be defined.
- if !oneOrNone(
- len(cfg.SourceProfileName) != 0,
- len(cfg.CredentialSource) != 0,
- len(cfg.CredentialProcess) != 0,
- len(cfg.WebIdentityTokenFile) != 0,
- ) {
- return ErrSharedConfigSourceCollision
- }
-
- return nil
-}
-
-func (cfg *sharedConfig) validateSSOConfiguration() error {
- if cfg.hasSSOTokenProviderConfiguration() {
- err := cfg.validateSSOTokenProviderConfiguration()
- if err != nil {
- return err
- }
- return nil
- }
-
- if cfg.hasLegacySSOConfiguration() {
- err := cfg.validateLegacySSOConfiguration()
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (cfg *sharedConfig) hasCredentials() bool {
- switch {
- case len(cfg.SourceProfileName) != 0:
- case len(cfg.CredentialSource) != 0:
- case len(cfg.CredentialProcess) != 0:
- case len(cfg.WebIdentityTokenFile) != 0:
- case cfg.hasSSOConfiguration():
- case cfg.Creds.HasKeys():
- default:
- return false
- }
-
- return true
-}
-
-func (cfg *sharedConfig) clearCredentialOptions() {
- cfg.CredentialSource = ""
- cfg.CredentialProcess = ""
- cfg.WebIdentityTokenFile = ""
- cfg.Creds = credentials.Value{}
- cfg.SSOAccountID = ""
- cfg.SSORegion = ""
- cfg.SSORoleName = ""
- cfg.SSOStartURL = ""
-}
-
-func (cfg *sharedConfig) clearAssumeRoleOptions() {
- cfg.RoleARN = ""
- cfg.ExternalID = ""
- cfg.MFASerial = ""
- cfg.RoleSessionName = ""
- cfg.SourceProfileName = ""
-}
-
-func (cfg *sharedConfig) hasSSOConfiguration() bool {
- return cfg.hasSSOTokenProviderConfiguration() || cfg.hasLegacySSOConfiguration()
-}
-
-func (c *sharedConfig) hasSSOTokenProviderConfiguration() bool {
- return len(c.SSOSessionName) > 0
-}
-
-func (c *sharedConfig) hasLegacySSOConfiguration() bool {
- return len(c.SSORegion) > 0 || len(c.SSOAccountID) > 0 || len(c.SSOStartURL) > 0 || len(c.SSORoleName) > 0
-}
-
-func (c *sharedConfig) validateSSOTokenProviderConfiguration() error {
- var missing []string
-
- if len(c.SSOSessionName) == 0 {
- missing = append(missing, ssoSessionNameKey)
- }
-
- if c.SSOSession == nil {
- missing = append(missing, ssoSectionPrefix)
- } else {
- if len(c.SSOSession.SSORegion) == 0 {
- missing = append(missing, ssoRegionKey)
- }
-
- if len(c.SSOSession.SSOStartURL) == 0 {
- missing = append(missing, ssoStartURL)
- }
- }
-
- if len(missing) > 0 {
- return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
- c.Profile, strings.Join(missing, ", "))
- }
-
- if len(c.SSORegion) > 0 && c.SSORegion != c.SSOSession.SSORegion {
- return fmt.Errorf("%s in profile %q must match %s in %s", ssoRegionKey, c.Profile, ssoRegionKey, ssoSectionPrefix)
- }
-
- if len(c.SSOStartURL) > 0 && c.SSOStartURL != c.SSOSession.SSOStartURL {
- return fmt.Errorf("%s in profile %q must match %s in %s", ssoStartURL, c.Profile, ssoStartURL, ssoSectionPrefix)
- }
-
- return nil
-}
-
-func (c *sharedConfig) validateLegacySSOConfiguration() error {
- var missing []string
-
- if len(c.SSORegion) == 0 {
- missing = append(missing, ssoRegionKey)
- }
-
- if len(c.SSOStartURL) == 0 {
- missing = append(missing, ssoStartURL)
- }
-
- if len(c.SSOAccountID) == 0 {
- missing = append(missing, ssoAccountIDKey)
- }
-
- if len(c.SSORoleName) == 0 {
- missing = append(missing, ssoRoleNameKey)
- }
-
- if len(missing) > 0 {
- return fmt.Errorf("profile %q is configured to use SSO but is missing required configuration: %s",
- c.Profile, strings.Join(missing, ", "))
- }
- return nil
-}
-
-func oneOrNone(bs ...bool) bool {
- var count int
-
- for _, b := range bs {
- if b {
- count++
- if count > 1 {
- return false
- }
- }
- }
-
- return true
-}
-
-// updateString will only update the dst with the value in the section key, key
-// is present in the section.
-func updateString(dst *string, section ini.Section, key string) {
- if !section.Has(key) {
- return
- }
- *dst = section.String(key)
-}
-
-// updateBool will only update the dst with the value in the section key, key
-// is present in the section.
-func updateBool(dst *bool, section ini.Section, key string) {
- if !section.Has(key) {
- return
- }
-
- // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
- v, _ := section.Bool(key)
- *dst = v
-}
-
-// updateBoolPtr will only update the dst with the value in the section key,
-// key is present in the section.
-func updateBoolPtr(dst **bool, section ini.Section, key string) {
- if !section.Has(key) {
- return
- }
-
- // retains pre-(aws-sdk-go-v2#2276) behavior where non-bool value would resolve to false
- v, _ := section.Bool(key)
- *dst = new(bool)
- **dst = v
-}
-
-// SharedConfigLoadError is an error for the shared config file failed to load.
-type SharedConfigLoadError struct {
- Filename string
- Err error
-}
-
-// Code is the short id of the error.
-func (e SharedConfigLoadError) Code() string {
- return "SharedConfigLoadError"
-}
-
-// Message is the description of the error
-func (e SharedConfigLoadError) Message() string {
- return fmt.Sprintf("failed to load config file, %s", e.Filename)
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigLoadError) OrigErr() error {
- return e.Err
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigLoadError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
-}
-
-// SharedConfigProfileNotExistsError is an error for the shared config when
-// the profile was not find in the config file.
-type SharedConfigProfileNotExistsError struct {
- Profile string
- Err error
-}
-
-// Code is the short id of the error.
-func (e SharedConfigProfileNotExistsError) Code() string {
- return "SharedConfigProfileNotExistsError"
-}
-
-// Message is the description of the error
-func (e SharedConfigProfileNotExistsError) Message() string {
- return fmt.Sprintf("failed to get profile, %s", e.Profile)
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigProfileNotExistsError) OrigErr() error {
- return e.Err
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigProfileNotExistsError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", e.Err)
-}
-
-// SharedConfigAssumeRoleError is an error for the shared config when the
-// profile contains assume role information, but that information is invalid
-// or not complete.
-type SharedConfigAssumeRoleError struct {
- RoleARN string
- SourceProfile string
-}
-
-// Code is the short id of the error.
-func (e SharedConfigAssumeRoleError) Code() string {
- return "SharedConfigAssumeRoleError"
-}
-
-// Message is the description of the error
-func (e SharedConfigAssumeRoleError) Message() string {
- return fmt.Sprintf(
- "failed to load assume role for %s, source profile %s has no shared credentials",
- e.RoleARN, e.SourceProfile,
- )
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e SharedConfigAssumeRoleError) OrigErr() error {
- return nil
-}
-
-// Error satisfies the error interface.
-func (e SharedConfigAssumeRoleError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", nil)
-}
-
-// CredentialRequiresARNError provides the error for shared config credentials
-// that are incorrectly configured in the shared config or credentials file.
-type CredentialRequiresARNError struct {
- // type of credentials that were configured.
- Type string
-
- // Profile name the credentials were in.
- Profile string
-}
-
-// Code is the short id of the error.
-func (e CredentialRequiresARNError) Code() string {
- return "CredentialRequiresARNError"
-}
-
-// Message is the description of the error
-func (e CredentialRequiresARNError) Message() string {
- return fmt.Sprintf(
- "credential type %s requires role_arn, profile %s",
- e.Type, e.Profile,
- )
-}
-
-// OrigErr is the underlying error that caused the failure.
-func (e CredentialRequiresARNError) OrigErr() error {
- return nil
-}
-
-// Error satisfies the error interface.
-func (e CredentialRequiresARNError) Error() string {
- return awserr.SprintError(e.Code(), e.Message(), "", nil)
-}
-
-// updateEndpointDiscoveryType will only update the dst with the value in the section, if
-// a valid key and corresponding EndpointDiscoveryType is found.
-func updateUseDualStackEndpoint(dst *endpoints.DualStackEndpointState, section ini.Section, key string) {
- if !section.Has(key) {
- return
- }
-
- // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
- if v, _ := section.Bool(key); v {
- *dst = endpoints.DualStackEndpointStateEnabled
- } else {
- *dst = endpoints.DualStackEndpointStateDisabled
- }
-
- return
-}
-
-// updateEndpointDiscoveryType will only update the dst with the value in the section, if
-// a valid key and corresponding EndpointDiscoveryType is found.
-func updateUseFIPSEndpoint(dst *endpoints.FIPSEndpointState, section ini.Section, key string) {
- if !section.Has(key) {
- return
- }
-
- // retains pre-(aws-sdk-go-v2/#2276) behavior where non-bool value would resolve to false
- if v, _ := section.Bool(key); v {
- *dst = endpoints.FIPSEndpointStateEnabled
- } else {
- *dst = endpoints.FIPSEndpointStateDisabled
- }
-
- return
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
deleted file mode 100644
index 993753831..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package v4
-
-import (
- "github.com/aws/aws-sdk-go/internal/strings"
-)
-
-// validator houses a set of rule needed for validation of a
-// string value
-type rules []rule
-
-// rule interface allows for more flexible rules and just simply
-// checks whether or not a value adheres to that rule
-type rule interface {
- IsValid(value string) bool
-}
-
-// IsValid will iterate through all rules and see if any rules
-// apply to the value and supports nested rules
-func (r rules) IsValid(value string) bool {
- for _, rule := range r {
- if rule.IsValid(value) {
- return true
- }
- }
- return false
-}
-
-// mapRule generic rule for maps
-type mapRule map[string]struct{}
-
-// IsValid for the map rule satisfies whether it exists in the map
-func (m mapRule) IsValid(value string) bool {
- _, ok := m[value]
- return ok
-}
-
-// allowList is a generic rule for allow listing
-type allowList struct {
- rule
-}
-
-// IsValid for allow list checks if the value is within the allow list
-func (w allowList) IsValid(value string) bool {
- return w.rule.IsValid(value)
-}
-
-// excludeList is a generic rule for exclude listing
-type excludeList struct {
- rule
-}
-
-// IsValid for exclude list checks if the value is within the exclude list
-func (b excludeList) IsValid(value string) bool {
- return !b.rule.IsValid(value)
-}
-
-type patterns []string
-
-// IsValid for patterns checks each pattern and returns if a match has
-// been found
-func (p patterns) IsValid(value string) bool {
- for _, pattern := range p {
- if strings.HasPrefixFold(value, pattern) {
- return true
- }
- }
- return false
-}
-
-// inclusiveRules rules allow for rules to depend on one another
-type inclusiveRules []rule
-
-// IsValid will return true if all rules are true
-func (r inclusiveRules) IsValid(value string) bool {
- for _, rule := range r {
- if !rule.IsValid(value) {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
deleted file mode 100644
index 6aa2ed241..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package v4
-
-// WithUnsignedPayload will enable and set the UnsignedPayload field to
-// true of the signer.
-func WithUnsignedPayload(v4 *Signer) {
- v4.UnsignedPayload = true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
deleted file mode 100644
index cf672b6ac..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.5.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build !go1.7
-// +build !go1.7
-
-package v4
-
-import (
- "net/http"
-
- "github.com/aws/aws-sdk-go/aws"
-)
-
-func requestContext(r *http.Request) aws.Context {
- return aws.BackgroundContext()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
deleted file mode 100644
index 21fe74e6f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/request_context_go1.7.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//go:build go1.7
-// +build go1.7
-
-package v4
-
-import (
- "net/http"
-
- "github.com/aws/aws-sdk-go/aws"
-)
-
-func requestContext(r *http.Request) aws.Context {
- return r.Context()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go
deleted file mode 100644
index 02cbd97e2..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/stream.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package v4
-
-import (
- "encoding/hex"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/credentials"
-)
-
-type credentialValueProvider interface {
- Get() (credentials.Value, error)
-}
-
-// StreamSigner implements signing of event stream encoded payloads
-type StreamSigner struct {
- region string
- service string
-
- credentials credentialValueProvider
-
- prevSig []byte
-}
-
-// NewStreamSigner creates a SigV4 signer used to sign Event Stream encoded messages
-func NewStreamSigner(region, service string, seedSignature []byte, credentials *credentials.Credentials) *StreamSigner {
- return &StreamSigner{
- region: region,
- service: service,
- credentials: credentials,
- prevSig: seedSignature,
- }
-}
-
-// GetSignature takes an event stream encoded headers and payload and returns a signature
-func (s *StreamSigner) GetSignature(headers, payload []byte, date time.Time) ([]byte, error) {
- credValue, err := s.credentials.Get()
- if err != nil {
- return nil, err
- }
-
- sigKey := deriveSigningKey(s.region, s.service, credValue.SecretAccessKey, date)
-
- keyPath := buildSigningScope(s.region, s.service, date)
-
- stringToSign := buildEventStreamStringToSign(headers, payload, s.prevSig, keyPath, date)
-
- signature := hmacSHA256(sigKey, []byte(stringToSign))
- s.prevSig = signature
-
- return signature, nil
-}
-
-func buildEventStreamStringToSign(headers, payload, prevSig []byte, scope string, date time.Time) string {
- return strings.Join([]string{
- "AWS4-HMAC-SHA256-PAYLOAD",
- formatTime(date),
- scope,
- hex.EncodeToString(prevSig),
- hex.EncodeToString(hashSHA256(headers)),
- hex.EncodeToString(hashSHA256(payload)),
- }, "\n")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
deleted file mode 100644
index 7711ec737..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go
+++ /dev/null
@@ -1,25 +0,0 @@
-//go:build go1.5
-// +build go1.5
-
-package v4
-
-import (
- "net/url"
- "strings"
-)
-
-func getURIPath(u *url.URL) string {
- var uri string
-
- if len(u.Opaque) > 0 {
- uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
- } else {
- uri = u.EscapedPath()
- }
-
- if len(uri) == 0 {
- uri = "/"
- }
-
- return uri
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
deleted file mode 100644
index b542df931..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ /dev/null
@@ -1,857 +0,0 @@
-// Package v4 implements signing for AWS V4 signer
-//
-// Provides request signing for request that need to be signed with
-// AWS V4 Signatures.
-//
-// # Standalone Signer
-//
-// Generally using the signer outside of the SDK should not require any additional
-// logic when using Go v1.5 or higher. The signer does this by taking advantage
-// of the URL.EscapedPath method. If your request URI requires additional escaping
-// you may need to use the URL.Opaque to define what the raw URI should be sent
-// to the service as.
-//
-// The signer will first check the URL.Opaque field, and use its value if set.
-// The signer does require the URL.Opaque field to be set in the form of:
-//
-// "///"
-//
-// // e.g.
-// "//example.com/some/path"
-//
-// The leading "//" and hostname are required or the URL.Opaque escaping will
-// not work correctly.
-//
-// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath()
-// method and using the returned value. If you're using Go v1.4 you must set
-// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
-// Go v1.5 the signer will fallback to URL.Path.
-//
-// AWS v4 signature validation requires that the canonical string's URI path
-// element must be the URI escaped form of the HTTP request's path.
-// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
-//
-// The Go HTTP client will perform escaping automatically on the request. Some
-// of these escaping may cause signature validation errors because the HTTP
-// request differs from the URI path or query that the signature was generated.
-// https://golang.org/pkg/net/url/#URL.EscapedPath
-//
-// Because of this, it is recommended that when using the signer outside of the
-// SDK that explicitly escaping the request prior to being signed is preferable,
-// and will help prevent signature validation errors. This can be done by setting
-// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
-// call URL.EscapedPath() if Opaque is not set.
-//
-// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
-// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
-// request URL. https://github.com/golang/go/issues/16847 points to a bug in
-// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
-// message. URL.Opaque generally will force Go to make requests with absolute URL.
-// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
-// or url.EscapedPath will ignore the RawPath escaping.
-//
-// Test `TestStandaloneSign` provides a complete example of using the signer
-// outside of the SDK and pre-escaping the URI path.
-package v4
-
-import (
- "crypto/hmac"
- "crypto/sha256"
- "encoding/hex"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/internal/sdkio"
- "github.com/aws/aws-sdk-go/private/protocol/rest"
-)
-
-const (
- authorizationHeader = "Authorization"
- authHeaderSignatureElem = "Signature="
- signatureQueryKey = "X-Amz-Signature"
-
- authHeaderPrefix = "AWS4-HMAC-SHA256"
- timeFormat = "20060102T150405Z"
- shortTimeFormat = "20060102"
- awsV4Request = "aws4_request"
-
- // emptyStringSHA256 is a SHA256 of an empty string
- emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855`
-)
-
-var ignoredHeaders = rules{
- excludeList{
- mapRule{
- authorizationHeader: struct{}{},
- "User-Agent": struct{}{},
- "X-Amzn-Trace-Id": struct{}{},
- },
- },
-}
-
-// requiredSignedHeaders is a allow list for build canonical headers.
-var requiredSignedHeaders = rules{
- allowList{
- mapRule{
- "Cache-Control": struct{}{},
- "Content-Disposition": struct{}{},
- "Content-Encoding": struct{}{},
- "Content-Language": struct{}{},
- "Content-Md5": struct{}{},
- "Content-Type": struct{}{},
- "Expires": struct{}{},
- "If-Match": struct{}{},
- "If-Modified-Since": struct{}{},
- "If-None-Match": struct{}{},
- "If-Unmodified-Since": struct{}{},
- "Range": struct{}{},
- "X-Amz-Acl": struct{}{},
- "X-Amz-Copy-Source": struct{}{},
- "X-Amz-Copy-Source-If-Match": struct{}{},
- "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
- "X-Amz-Copy-Source-If-None-Match": struct{}{},
- "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
- "X-Amz-Copy-Source-Range": struct{}{},
- "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
- "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
- "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
- "X-Amz-Expected-Bucket-Owner": struct{}{},
- "X-Amz-Grant-Full-control": struct{}{},
- "X-Amz-Grant-Read": struct{}{},
- "X-Amz-Grant-Read-Acp": struct{}{},
- "X-Amz-Grant-Write": struct{}{},
- "X-Amz-Grant-Write-Acp": struct{}{},
- "X-Amz-Metadata-Directive": struct{}{},
- "X-Amz-Mfa": struct{}{},
- "X-Amz-Request-Payer": struct{}{},
- "X-Amz-Server-Side-Encryption": struct{}{},
- "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
- "X-Amz-Server-Side-Encryption-Context": struct{}{},
- "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
- "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
- "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
- "X-Amz-Storage-Class": struct{}{},
- "X-Amz-Tagging": struct{}{},
- "X-Amz-Website-Redirect-Location": struct{}{},
- "X-Amz-Content-Sha256": struct{}{},
- },
- },
- patterns{"X-Amz-Meta-"},
- patterns{"X-Amz-Object-Lock-"},
-}
-
-// allowedHoisting is a allow list for build query headers. The boolean value
-// represents whether or not it is a pattern.
-var allowedQueryHoisting = inclusiveRules{
- excludeList{requiredSignedHeaders},
- patterns{"X-Amz-"},
-}
-
-// Signer applies AWS v4 signing to given request. Use this to sign requests
-// that need to be signed with AWS V4 Signatures.
-type Signer struct {
- // The authentication credentials the request will be signed against.
- // This value must be set to sign requests.
- Credentials *credentials.Credentials
-
- // Sets the log level the signer should use when reporting information to
- // the logger. If the logger is nil nothing will be logged. See
- // aws.LogLevelType for more information on available logging levels
- //
- // By default nothing will be logged.
- Debug aws.LogLevelType
-
- // The logger loging information will be written to. If there the logger
- // is nil, nothing will be logged.
- Logger aws.Logger
-
- // Disables the Signer's moving HTTP header key/value pairs from the HTTP
- // request header to the request's query string. This is most commonly used
- // with pre-signed requests preventing headers from being added to the
- // request's query string.
- DisableHeaderHoisting bool
-
- // Disables the automatic escaping of the URI path of the request for the
- // siganture's canonical string's path. For services that do not need additional
- // escaping then use this to disable the signer escaping the path.
- //
- // S3 is an example of a service that does not need additional escaping.
- //
- // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
- DisableURIPathEscaping bool
-
- // Disables the automatical setting of the HTTP request's Body field with the
- // io.ReadSeeker passed in to the signer. This is useful if you're using a
- // custom wrapper around the body for the io.ReadSeeker and want to preserve
- // the Body value on the Request.Body.
- //
- // This does run the risk of signing a request with a body that will not be
- // sent in the request. Need to ensure that the underlying data of the Body
- // values are the same.
- DisableRequestBodyOverwrite bool
-
- // currentTimeFn returns the time value which represents the current time.
- // This value should only be used for testing. If it is nil the default
- // time.Now will be used.
- currentTimeFn func() time.Time
-
- // UnsignedPayload will prevent signing of the payload. This will only
- // work for services that have support for this.
- UnsignedPayload bool
-}
-
-// NewSigner returns a Signer pointer configured with the credentials and optional
-// option values provided. If not options are provided the Signer will use its
-// default configuration.
-func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer {
- v4 := &Signer{
- Credentials: credentials,
- }
-
- for _, option := range options {
- option(v4)
- }
-
- return v4
-}
-
-type signingCtx struct {
- ServiceName string
- Region string
- Request *http.Request
- Body io.ReadSeeker
- Query url.Values
- Time time.Time
- ExpireTime time.Duration
- SignedHeaderVals http.Header
-
- DisableURIPathEscaping bool
-
- credValues credentials.Value
- isPresign bool
- unsignedPayload bool
-
- bodyDigest string
- signedHeaders string
- canonicalHeaders string
- canonicalString string
- credentialString string
- stringToSign string
- signature string
- authorization string
-}
-
-// Sign signs AWS v4 requests with the provided body, service name, region the
-// request is made to, and time the request is signed at. The signTime allows
-// you to specify that a request is signed for the future, and cannot be
-// used until then.
-//
-// Returns a list of HTTP headers that were included in the signature or an
-// error if signing the request failed. Generally for signed requests this value
-// is not needed as the full request context will be captured by the http.Request
-// value. It is included for reference though.
-//
-// Sign will set the request's Body to be the `body` parameter passed in. If
-// the body is not already an io.ReadCloser, it will be wrapped within one. If
-// a `nil` body parameter passed to Sign, the request's Body field will be
-// also set to nil. Its important to note that this functionality will not
-// change the request's ContentLength of the request.
-//
-// Sign differs from Presign in that it will sign the request using HTTP
-// header values. This type of signing is intended for http.Request values that
-// will not be shared, or are shared in a way the header values on the request
-// will not be lost.
-//
-// The requests body is an io.ReadSeeker so the SHA256 of the body can be
-// generated. To bypass the signer computing the hash you can set the
-// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
-// only compute the hash if the request header value is empty.
-func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) {
- return v4.signWithBody(r, body, service, region, 0, false, signTime)
-}
-
-// Presign signs AWS v4 requests with the provided body, service name, region
-// the request is made to, and time the request is signed at. The signTime
-// allows you to specify that a request is signed for the future, and cannot
-// be used until then.
-//
-// Returns a list of HTTP headers that were included in the signature or an
-// error if signing the request failed. For presigned requests these headers
-// and their values must be included on the HTTP request when it is made. This
-// is helpful to know what header values need to be shared with the party the
-// presigned request will be distributed to.
-//
-// Presign differs from Sign in that it will sign the request using query string
-// instead of header values. This allows you to share the Presigned Request's
-// URL with third parties, or distribute it throughout your system with minimal
-// dependencies.
-//
-// Presign also takes an exp value which is the duration the
-// signed request will be valid after the signing time. This is allows you to
-// set when the request will expire.
-//
-// The requests body is an io.ReadSeeker so the SHA256 of the body can be
-// generated. To bypass the signer computing the hash you can set the
-// "X-Amz-Content-Sha256" header with a precomputed value. The signer will
-// only compute the hash if the request header value is empty.
-//
-// Presigning a S3 request will not compute the body's SHA256 hash by default.
-// This is done due to the general use case for S3 presigned URLs is to share
-// PUT/GET capabilities. If you would like to include the body's SHA256 in the
-// presigned request's signature you can set the "X-Amz-Content-Sha256"
-// HTTP header and that will be included in the request's signature.
-func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) {
- return v4.signWithBody(r, body, service, region, exp, true, signTime)
-}
-
-func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, isPresign bool, signTime time.Time) (http.Header, error) {
- currentTimeFn := v4.currentTimeFn
- if currentTimeFn == nil {
- currentTimeFn = time.Now
- }
-
- ctx := &signingCtx{
- Request: r,
- Body: body,
- Query: r.URL.Query(),
- Time: signTime,
- ExpireTime: exp,
- isPresign: isPresign,
- ServiceName: service,
- Region: region,
- DisableURIPathEscaping: v4.DisableURIPathEscaping,
- unsignedPayload: v4.UnsignedPayload,
- }
-
- for key := range ctx.Query {
- sort.Strings(ctx.Query[key])
- }
-
- if ctx.isRequestSigned() {
- ctx.Time = currentTimeFn()
- ctx.handlePresignRemoval()
- }
-
- var err error
- ctx.credValues, err = v4.Credentials.GetWithContext(requestContext(r))
- if err != nil {
- return http.Header{}, err
- }
-
- ctx.sanitizeHostForHeader()
- ctx.assignAmzQueryValues()
- if err := ctx.build(v4.DisableHeaderHoisting); err != nil {
- return nil, err
- }
-
- // If the request is not presigned the body should be attached to it. This
- // prevents the confusion of wanting to send a signed request without
- // the body the request was signed for attached.
- if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) {
- var reader io.ReadCloser
- if body != nil {
- var ok bool
- if reader, ok = body.(io.ReadCloser); !ok {
- reader = ioutil.NopCloser(body)
- }
- }
- r.Body = reader
- }
-
- if v4.Debug.Matches(aws.LogDebugWithSigning) {
- v4.logSigningInfo(ctx)
- }
-
- return ctx.SignedHeaderVals, nil
-}
-
-func (ctx *signingCtx) sanitizeHostForHeader() {
- request.SanitizeHostForHeader(ctx.Request)
-}
-
-func (ctx *signingCtx) handlePresignRemoval() {
- if !ctx.isPresign {
- return
- }
-
- // The credentials have expired for this request. The current signing
- // is invalid, and needs to be request because the request will fail.
- ctx.removePresign()
-
- // Update the request's query string to ensure the values stays in
- // sync in the case retrieving the new credentials fails.
- ctx.Request.URL.RawQuery = ctx.Query.Encode()
-}
-
-func (ctx *signingCtx) assignAmzQueryValues() {
- if ctx.isPresign {
- ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
- if ctx.credValues.SessionToken != "" {
- ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
- } else {
- ctx.Query.Del("X-Amz-Security-Token")
- }
-
- return
- }
-
- if ctx.credValues.SessionToken != "" {
- ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken)
- }
-}
-
-// SignRequestHandler is a named request handler the SDK will use to sign
-// service client request with using the V4 signature.
-var SignRequestHandler = request.NamedHandler{
- Name: "v4.SignRequestHandler", Fn: SignSDKRequest,
-}
-
-// SignSDKRequest signs an AWS request with the V4 signature. This
-// request handler should only be used with the SDK's built in service client's
-// API operation requests.
-//
-// This function should not be used on its own, but in conjunction with
-// an AWS service client's API operation call. To sign a standalone request
-// not created by a service client's API operation method use the "Sign" or
-// "Presign" functions of the "Signer" type.
-//
-// If the credentials of the request's config are set to
-// credentials.AnonymousCredentials the request will not be signed.
-func SignSDKRequest(req *request.Request) {
- SignSDKRequestWithCurrentTime(req, time.Now)
-}
-
-// BuildNamedHandler will build a generic handler for signing.
-func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
- return request.NamedHandler{
- Name: name,
- Fn: func(req *request.Request) {
- SignSDKRequestWithCurrentTime(req, time.Now, opts...)
- },
- }
-}
-
-// SignSDKRequestWithCurrentTime will sign the SDK's request using the time
-// function passed in. Behaves the same as SignSDKRequest with the exception
-// the request is signed with the value returned by the current time function.
-func SignSDKRequestWithCurrentTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
- // If the request does not need to be signed ignore the signing of the
- // request if the AnonymousCredentials object is used.
- if req.Config.Credentials == credentials.AnonymousCredentials {
- return
- }
-
- region := req.ClientInfo.SigningRegion
- if region == "" {
- region = aws.StringValue(req.Config.Region)
- }
-
- name := req.ClientInfo.SigningName
- if name == "" {
- name = req.ClientInfo.ServiceName
- }
-
- v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) {
- v4.Debug = req.Config.LogLevel.Value()
- v4.Logger = req.Config.Logger
- v4.DisableHeaderHoisting = req.NotHoist
- v4.currentTimeFn = curTimeFn
- if name == "s3" {
- // S3 service should not have any escaping applied
- v4.DisableURIPathEscaping = true
- }
- // Prevents setting the HTTPRequest's Body. Since the Body could be
- // wrapped in a custom io.Closer that we do not want to be stompped
- // on top of by the signer.
- v4.DisableRequestBodyOverwrite = true
- })
-
- for _, opt := range opts {
- opt(v4)
- }
-
- curTime := curTimeFn()
- signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
- name, region, req.ExpireTime, req.ExpireTime > 0, curTime,
- )
- if err != nil {
- req.Error = err
- req.SignedHeaderVals = nil
- return
- }
-
- req.SignedHeaderVals = signedHeaders
- req.LastSignedAt = curTime
-}
-
-const logSignInfoMsg = `DEBUG: Request Signature:
----[ CANONICAL STRING ]-----------------------------
-%s
----[ STRING TO SIGN ]--------------------------------
-%s%s
------------------------------------------------------`
-const logSignedURLMsg = `
----[ SIGNED URL ]------------------------------------
-%s`
-
-func (v4 *Signer) logSigningInfo(ctx *signingCtx) {
- signedURLMsg := ""
- if ctx.isPresign {
- signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String())
- }
- msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg)
- v4.Logger.Log(msg)
-}
-
-func (ctx *signingCtx) build(disableHeaderHoisting bool) error {
- ctx.buildTime() // no depends
- ctx.buildCredentialString() // no depends
-
- if err := ctx.buildBodyDigest(); err != nil {
- return err
- }
-
- unsignedHeaders := ctx.Request.Header
- if ctx.isPresign {
- if !disableHeaderHoisting {
- urlValues := url.Values{}
- urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
- for k := range urlValues {
- ctx.Query[k] = urlValues[k]
- }
- }
- }
-
- ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
- ctx.buildCanonicalString() // depends on canon headers / signed headers
- ctx.buildStringToSign() // depends on canon string
- ctx.buildSignature() // depends on string to sign
-
- if ctx.isPresign {
- ctx.Request.URL.RawQuery += "&" + signatureQueryKey + "=" + ctx.signature
- } else {
- parts := []string{
- authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString,
- "SignedHeaders=" + ctx.signedHeaders,
- authHeaderSignatureElem + ctx.signature,
- }
- ctx.Request.Header.Set(authorizationHeader, strings.Join(parts, ", "))
- }
-
- return nil
-}
-
-// GetSignedRequestSignature attempts to extract the signature of the request.
-// Returning an error if the request is unsigned, or unable to extract the
-// signature.
-func GetSignedRequestSignature(r *http.Request) ([]byte, error) {
-
- if auth := r.Header.Get(authorizationHeader); len(auth) != 0 {
- ps := strings.Split(auth, ", ")
- for _, p := range ps {
- if idx := strings.Index(p, authHeaderSignatureElem); idx >= 0 {
- sig := p[len(authHeaderSignatureElem):]
- if len(sig) == 0 {
- return nil, fmt.Errorf("invalid request signature authorization header")
- }
- return hex.DecodeString(sig)
- }
- }
- }
-
- if sig := r.URL.Query().Get("X-Amz-Signature"); len(sig) != 0 {
- return hex.DecodeString(sig)
- }
-
- return nil, fmt.Errorf("request not signed")
-}
-
-func (ctx *signingCtx) buildTime() {
- if ctx.isPresign {
- duration := int64(ctx.ExpireTime / time.Second)
- ctx.Query.Set("X-Amz-Date", formatTime(ctx.Time))
- ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
- } else {
- ctx.Request.Header.Set("X-Amz-Date", formatTime(ctx.Time))
- }
-}
-
-func (ctx *signingCtx) buildCredentialString() {
- ctx.credentialString = buildSigningScope(ctx.Region, ctx.ServiceName, ctx.Time)
-
- if ctx.isPresign {
- ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString)
- }
-}
-
-func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
- query := url.Values{}
- unsignedHeaders := http.Header{}
- for k, h := range header {
- if r.IsValid(k) {
- query[k] = h
- } else {
- unsignedHeaders[k] = h
- }
- }
-
- return query, unsignedHeaders
-}
-func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
- var headers []string
- headers = append(headers, "host")
- for k, v := range header {
- if !r.IsValid(k) {
- continue // ignored header
- }
- if ctx.SignedHeaderVals == nil {
- ctx.SignedHeaderVals = make(http.Header)
- }
-
- lowerCaseKey := strings.ToLower(k)
- if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok {
- // include additional values
- ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...)
- continue
- }
-
- headers = append(headers, lowerCaseKey)
- ctx.SignedHeaderVals[lowerCaseKey] = v
- }
- sort.Strings(headers)
-
- ctx.signedHeaders = strings.Join(headers, ";")
-
- if ctx.isPresign {
- ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders)
- }
-
- headerItems := make([]string, len(headers))
- for i, k := range headers {
- if k == "host" {
- if ctx.Request.Host != "" {
- headerItems[i] = "host:" + ctx.Request.Host
- } else {
- headerItems[i] = "host:" + ctx.Request.URL.Host
- }
- } else {
- headerValues := make([]string, len(ctx.SignedHeaderVals[k]))
- for i, v := range ctx.SignedHeaderVals[k] {
- headerValues[i] = strings.TrimSpace(v)
- }
- headerItems[i] = k + ":" +
- strings.Join(headerValues, ",")
- }
- }
- stripExcessSpaces(headerItems)
- ctx.canonicalHeaders = strings.Join(headerItems, "\n")
-}
-
-func (ctx *signingCtx) buildCanonicalString() {
- ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
-
- uri := getURIPath(ctx.Request.URL)
-
- if !ctx.DisableURIPathEscaping {
- uri = rest.EscapePath(uri, false)
- }
-
- ctx.canonicalString = strings.Join([]string{
- ctx.Request.Method,
- uri,
- ctx.Request.URL.RawQuery,
- ctx.canonicalHeaders + "\n",
- ctx.signedHeaders,
- ctx.bodyDigest,
- }, "\n")
-}
-
-func (ctx *signingCtx) buildStringToSign() {
- ctx.stringToSign = strings.Join([]string{
- authHeaderPrefix,
- formatTime(ctx.Time),
- ctx.credentialString,
- hex.EncodeToString(hashSHA256([]byte(ctx.canonicalString))),
- }, "\n")
-}
-
-func (ctx *signingCtx) buildSignature() {
- creds := deriveSigningKey(ctx.Region, ctx.ServiceName, ctx.credValues.SecretAccessKey, ctx.Time)
- signature := hmacSHA256(creds, []byte(ctx.stringToSign))
- ctx.signature = hex.EncodeToString(signature)
-}
-
-func (ctx *signingCtx) buildBodyDigest() error {
- hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
- if hash == "" {
- includeSHA256Header := ctx.unsignedPayload ||
- ctx.ServiceName == "s3" ||
- ctx.ServiceName == "s3-object-lambda" ||
- ctx.ServiceName == "glacier" ||
- ctx.ServiceName == "s3-outposts"
-
- s3Presign := ctx.isPresign &&
- (ctx.ServiceName == "s3" ||
- ctx.ServiceName == "s3-object-lambda")
-
- if ctx.unsignedPayload || s3Presign {
- hash = "UNSIGNED-PAYLOAD"
- includeSHA256Header = !s3Presign
- } else if ctx.Body == nil {
- hash = emptyStringSHA256
- } else {
- if !aws.IsReaderSeekable(ctx.Body) {
- return fmt.Errorf("cannot use unseekable request body %T, for signed request with body", ctx.Body)
- }
- hashBytes, err := makeSha256Reader(ctx.Body)
- if err != nil {
- return err
- }
- hash = hex.EncodeToString(hashBytes)
- }
-
- if includeSHA256Header {
- ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
- }
- }
- ctx.bodyDigest = hash
-
- return nil
-}
-
-// isRequestSigned returns if the request is currently signed or presigned
-func (ctx *signingCtx) isRequestSigned() bool {
- if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" {
- return true
- }
- if ctx.Request.Header.Get("Authorization") != "" {
- return true
- }
-
- return false
-}
-
-// unsign removes signing flags for both signed and presigned requests.
-func (ctx *signingCtx) removePresign() {
- ctx.Query.Del("X-Amz-Algorithm")
- ctx.Query.Del("X-Amz-Signature")
- ctx.Query.Del("X-Amz-Security-Token")
- ctx.Query.Del("X-Amz-Date")
- ctx.Query.Del("X-Amz-Expires")
- ctx.Query.Del("X-Amz-Credential")
- ctx.Query.Del("X-Amz-SignedHeaders")
-}
-
-func hmacSHA256(key []byte, data []byte) []byte {
- hash := hmac.New(sha256.New, key)
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-func hashSHA256(data []byte) []byte {
- hash := sha256.New()
- hash.Write(data)
- return hash.Sum(nil)
-}
-
-func makeSha256Reader(reader io.ReadSeeker) (hashBytes []byte, err error) {
- hash := sha256.New()
- start, err := reader.Seek(0, sdkio.SeekCurrent)
- if err != nil {
- return nil, err
- }
- defer func() {
- // ensure error is return if unable to seek back to start of payload.
- _, err = reader.Seek(start, sdkio.SeekStart)
- }()
-
- // Use CopyN to avoid allocating the 32KB buffer in io.Copy for bodies
- // smaller than 32KB. Fall back to io.Copy if we fail to determine the size.
- size, err := aws.SeekerLen(reader)
- if err != nil {
- io.Copy(hash, reader)
- } else {
- io.CopyN(hash, reader, size)
- }
-
- return hash.Sum(nil), nil
-}
-
-const doubleSpace = " "
-
-// stripExcessSpaces will rewrite the passed in slice's string values to not
-// contain multiple side-by-side spaces.
-func stripExcessSpaces(vals []string) {
- var j, k, l, m, spaces int
- for i, str := range vals {
- // Trim trailing spaces
- for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
- }
-
- // Trim leading spaces
- for k = 0; k < j && str[k] == ' '; k++ {
- }
- str = str[k : j+1]
-
- // Strip multiple spaces.
- j = strings.Index(str, doubleSpace)
- if j < 0 {
- vals[i] = str
- continue
- }
-
- buf := []byte(str)
- for k, m, l = j, j, len(buf); k < l; k++ {
- if buf[k] == ' ' {
- if spaces == 0 {
- // First space.
- buf[m] = buf[k]
- m++
- }
- spaces++
- } else {
- // End of multiple spaces.
- spaces = 0
- buf[m] = buf[k]
- m++
- }
- }
-
- vals[i] = string(buf[:m])
- }
-}
-
-func buildSigningScope(region, service string, dt time.Time) string {
- return strings.Join([]string{
- formatShortTime(dt),
- region,
- service,
- awsV4Request,
- }, "/")
-}
-
-func deriveSigningKey(region, service, secretKey string, dt time.Time) []byte {
- kDate := hmacSHA256([]byte("AWS4"+secretKey), []byte(formatShortTime(dt)))
- kRegion := hmacSHA256(kDate, []byte(region))
- kService := hmacSHA256(kRegion, []byte(service))
- signingKey := hmacSHA256(kService, []byte(awsV4Request))
- return signingKey
-}
-
-func formatShortTime(dt time.Time) string {
- return dt.UTC().Format(shortTimeFormat)
-}
-
-func formatTime(dt time.Time) string {
- return dt.UTC().Format(timeFormat)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
deleted file mode 100644
index 98751ee84..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/types.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package aws
-
-import (
- "io"
- "strings"
- "sync"
-
- "github.com/aws/aws-sdk-go/internal/sdkio"
-)
-
-// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Allows the
-// SDK to accept an io.Reader that is not also an io.Seeker for unsigned
-// streaming payload API operations.
-//
-// A ReadSeekCloser wrapping an nonseekable io.Reader used in an API
-// operation's input will prevent that operation being retried in the case of
-// network errors, and cause operation requests to fail if the operation
-// requires payload signing.
-//
-// Note: If using With S3 PutObject to stream an object upload The SDK's S3
-// Upload manager (s3manager.Uploader) provides support for streaming with the
-// ability to retry network errors.
-func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
- return ReaderSeekerCloser{r}
-}
-
-// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
-// io.Closer interfaces to the underlying object if they are available.
-type ReaderSeekerCloser struct {
- r io.Reader
-}
-
-// IsReaderSeekable returns if the underlying reader type can be seeked. A
-// io.Reader might not actually be seekable if it is the ReaderSeekerCloser
-// type.
-func IsReaderSeekable(r io.Reader) bool {
- switch v := r.(type) {
- case ReaderSeekerCloser:
- return v.IsSeeker()
- case *ReaderSeekerCloser:
- return v.IsSeeker()
- case io.ReadSeeker:
- return true
- default:
- return false
- }
-}
-
-// Read reads from the reader up to size of p. The number of bytes read, and
-// error if it occurred will be returned.
-//
-// If the reader is not an io.Reader zero bytes read, and nil error will be
-// returned.
-//
-// Performs the same functionality as io.Reader Read
-func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
- switch t := r.r.(type) {
- case io.Reader:
- return t.Read(p)
- }
- return 0, nil
-}
-
-// Seek sets the offset for the next Read to offset, interpreted according to
-// whence: 0 means relative to the origin of the file, 1 means relative to the
-// current offset, and 2 means relative to the end. Seek returns the new offset
-// and an error, if any.
-//
-// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
-func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
- switch t := r.r.(type) {
- case io.Seeker:
- return t.Seek(offset, whence)
- }
- return int64(0), nil
-}
-
-// IsSeeker returns if the underlying reader is also a seeker.
-func (r ReaderSeekerCloser) IsSeeker() bool {
- _, ok := r.r.(io.Seeker)
- return ok
-}
-
-// HasLen returns the length of the underlying reader if the value implements
-// the Len() int method.
-func (r ReaderSeekerCloser) HasLen() (int, bool) {
- type lenner interface {
- Len() int
- }
-
- if lr, ok := r.r.(lenner); ok {
- return lr.Len(), true
- }
-
- return 0, false
-}
-
-// GetLen returns the length of the bytes remaining in the underlying reader.
-// Checks first for Len(), then io.Seeker to determine the size of the
-// underlying reader.
-//
-// Will return -1 if the length cannot be determined.
-func (r ReaderSeekerCloser) GetLen() (int64, error) {
- if l, ok := r.HasLen(); ok {
- return int64(l), nil
- }
-
- if s, ok := r.r.(io.Seeker); ok {
- return seekerLen(s)
- }
-
- return -1, nil
-}
-
-// SeekerLen attempts to get the number of bytes remaining at the seeker's
-// current position. Returns the number of bytes remaining or error.
-func SeekerLen(s io.Seeker) (int64, error) {
- // Determine if the seeker is actually seekable. ReaderSeekerCloser
- // hides the fact that a io.Readers might not actually be seekable.
- switch v := s.(type) {
- case ReaderSeekerCloser:
- return v.GetLen()
- case *ReaderSeekerCloser:
- return v.GetLen()
- }
-
- return seekerLen(s)
-}
-
-func seekerLen(s io.Seeker) (int64, error) {
- curOffset, err := s.Seek(0, sdkio.SeekCurrent)
- if err != nil {
- return 0, err
- }
-
- endOffset, err := s.Seek(0, sdkio.SeekEnd)
- if err != nil {
- return 0, err
- }
-
- _, err = s.Seek(curOffset, sdkio.SeekStart)
- if err != nil {
- return 0, err
- }
-
- return endOffset - curOffset, nil
-}
-
-// Close closes the ReaderSeekerCloser.
-//
-// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
-func (r ReaderSeekerCloser) Close() error {
- switch t := r.r.(type) {
- case io.Closer:
- return t.Close()
- }
- return nil
-}
-
-// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface
-// Can be used with the s3manager.Downloader to download content to a buffer
-// in memory. Safe to use concurrently.
-type WriteAtBuffer struct {
- buf []byte
- m sync.Mutex
-
- // GrowthCoeff defines the growth rate of the internal buffer. By
- // default, the growth rate is 1, where expanding the internal
- // buffer will allocate only enough capacity to fit the new expected
- // length.
- GrowthCoeff float64
-}
-
-// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
-// provided by buf.
-func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
- return &WriteAtBuffer{buf: buf}
-}
-
-// WriteAt writes a slice of bytes to a buffer starting at the position provided
-// The number of bytes written will be returned, or error. Can overwrite previous
-// written slices if the write ats overlap.
-func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
- pLen := len(p)
- expLen := pos + int64(pLen)
- b.m.Lock()
- defer b.m.Unlock()
- if int64(len(b.buf)) < expLen {
- if int64(cap(b.buf)) < expLen {
- if b.GrowthCoeff < 1 {
- b.GrowthCoeff = 1
- }
- newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
- copy(newBuf, b.buf)
- b.buf = newBuf
- }
- b.buf = b.buf[:expLen]
- }
- copy(b.buf[pos:], p)
- return pLen, nil
-}
-
-// Bytes returns a slice of bytes written to the buffer.
-func (b *WriteAtBuffer) Bytes() []byte {
- b.m.Lock()
- defer b.m.Unlock()
- return b.buf
-}
-
-// MultiCloser is a utility to close multiple io.Closers within a single
-// statement.
-type MultiCloser []io.Closer
-
-// Close closes all of the io.Closers making up the MultiClosers. Any
-// errors that occur while closing will be returned in the order they
-// occur.
-func (m MultiCloser) Close() error {
- var errs errors
- for _, c := range m {
- err := c.Close()
- if err != nil {
- errs = append(errs, err)
- }
- }
- if len(errs) != 0 {
- return errs
- }
-
- return nil
-}
-
-type errors []error
-
-func (es errors) Error() string {
- var parts []string
- for _, e := range es {
- parts = append(parts, e.Error())
- }
-
- return strings.Join(parts, "\n")
-}
-
-// CopySeekableBody copies the seekable body to an io.Writer
-func CopySeekableBody(dst io.Writer, src io.ReadSeeker) (int64, error) {
- curPos, err := src.Seek(0, sdkio.SeekCurrent)
- if err != nil {
- return 0, err
- }
-
- // copy errors may be assumed to be from the body.
- n, err := io.Copy(dst, src)
- if err != nil {
- return n, err
- }
-
- // seek back to the first position after reading to reset
- // the body for transmission.
- _, err = src.Seek(curPos, sdkio.SeekStart)
- if err != nil {
- return n, err
- }
-
- return n, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
deleted file mode 100644
index fed561bd5..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/url.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build go1.8
-// +build go1.8
-
-package aws
-
-import "net/url"
-
-// URLHostname will extract the Hostname without port from the URL value.
-//
-// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
-func URLHostname(url *url.URL) string {
- return url.Hostname()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
deleted file mode 100644
index 95282db03..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
+++ /dev/null
@@ -1,30 +0,0 @@
-//go:build !go1.8
-// +build !go1.8
-
-package aws
-
-import (
- "net/url"
- "strings"
-)
-
-// URLHostname will extract the Hostname without port from the URL value.
-//
-// Copy of Go 1.8's net/url#URL.Hostname functionality.
-func URLHostname(url *url.URL) string {
- return stripPort(url.Host)
-
-}
-
-// stripPort is copy of Go 1.8 url#URL.Hostname functionality.
-// https://golang.org/src/net/url/url.go
-func stripPort(hostport string) string {
- colon := strings.IndexByte(hostport, ':')
- if colon == -1 {
- return hostport
- }
- if i := strings.IndexByte(hostport, ']'); i != -1 {
- return strings.TrimPrefix(hostport[:i], "[")
- }
- return hostport[:colon]
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
deleted file mode 100644
index d15e3c84c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package aws provides core functionality for making requests to AWS services.
-package aws
-
-// SDKName is the name of this AWS SDK
-const SDKName = "aws-sdk-go"
-
-// SDKVersion is the version of this SDK
-const SDKVersion = "1.55.5"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go b/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
deleted file mode 100644
index 365345353..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/context/background_go1.5.go
+++ /dev/null
@@ -1,41 +0,0 @@
-//go:build !go1.7
-// +build !go1.7
-
-package context
-
-import "time"
-
-// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
-// provide a 1.6 and 1.5 safe version of context that is compatible with Go
-// 1.7's Context.
-//
-// An emptyCtx is never canceled, has no values, and has no deadline. It is not
-// struct{}, since vars of this type must have distinct addresses.
-type emptyCtx int
-
-func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
- return
-}
-
-func (*emptyCtx) Done() <-chan struct{} {
- return nil
-}
-
-func (*emptyCtx) Err() error {
- return nil
-}
-
-func (*emptyCtx) Value(key interface{}) interface{} {
- return nil
-}
-
-func (e *emptyCtx) String() string {
- switch e {
- case BackgroundCtx:
- return "aws.BackgroundContext"
- }
- return "unknown empty Context"
-}
-
-// BackgroundCtx is the common base context.
-var BackgroundCtx = new(emptyCtx)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
deleted file mode 100644
index e83a99886..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ast.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package ini
-
-// ASTKind represents different states in the parse table
-// and the type of AST that is being constructed
-type ASTKind int
-
-// ASTKind* is used in the parse table to transition between
-// the different states
-const (
- ASTKindNone = ASTKind(iota)
- ASTKindStart
- ASTKindExpr
- ASTKindEqualExpr
- ASTKindStatement
- ASTKindSkipStatement
- ASTKindExprStatement
- ASTKindSectionStatement
- ASTKindNestedSectionStatement
- ASTKindCompletedNestedSectionStatement
- ASTKindCommentStatement
- ASTKindCompletedSectionStatement
-)
-
-func (k ASTKind) String() string {
- switch k {
- case ASTKindNone:
- return "none"
- case ASTKindStart:
- return "start"
- case ASTKindExpr:
- return "expr"
- case ASTKindStatement:
- return "stmt"
- case ASTKindSectionStatement:
- return "section_stmt"
- case ASTKindExprStatement:
- return "expr_stmt"
- case ASTKindCommentStatement:
- return "comment"
- case ASTKindNestedSectionStatement:
- return "nested_section_stmt"
- case ASTKindCompletedSectionStatement:
- return "completed_stmt"
- case ASTKindSkipStatement:
- return "skip"
- default:
- return ""
- }
-}
-
-// AST interface allows us to determine what kind of node we
-// are on and casting may not need to be necessary.
-//
-// The root is always the first node in Children
-type AST struct {
- Kind ASTKind
- Root Token
- RootToken bool
- Children []AST
-}
-
-func newAST(kind ASTKind, root AST, children ...AST) AST {
- return AST{
- Kind: kind,
- Children: append([]AST{root}, children...),
- }
-}
-
-func newASTWithRootToken(kind ASTKind, root Token, children ...AST) AST {
- return AST{
- Kind: kind,
- Root: root,
- RootToken: true,
- Children: children,
- }
-}
-
-// AppendChild will append to the list of children an AST has.
-func (a *AST) AppendChild(child AST) {
- a.Children = append(a.Children, child)
-}
-
-// GetRoot will return the root AST which can be the first entry
-// in the children list or a token.
-func (a *AST) GetRoot() AST {
- if a.RootToken {
- return *a
- }
-
- if len(a.Children) == 0 {
- return AST{}
- }
-
- return a.Children[0]
-}
-
-// GetChildren will return the current AST's list of children
-func (a *AST) GetChildren() []AST {
- if len(a.Children) == 0 {
- return []AST{}
- }
-
- if a.RootToken {
- return a.Children
- }
-
- return a.Children[1:]
-}
-
-// SetChildren will set and override all children of the AST.
-func (a *AST) SetChildren(children []AST) {
- if a.RootToken {
- a.Children = children
- } else {
- a.Children = append(a.Children[:1], children...)
- }
-}
-
-// Start is used to indicate the starting state of the parse table.
-var Start = newAST(ASTKindStart, AST{})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
deleted file mode 100644
index 0895d53cb..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comma_token.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package ini
-
-var commaRunes = []rune(",")
-
-func isComma(b rune) bool {
- return b == ','
-}
-
-func newCommaToken() Token {
- return newToken(TokenComma, commaRunes, NoneType)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
deleted file mode 100644
index 0b76999ba..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/comment_token.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package ini
-
-// isComment will return whether or not the next byte(s) is a
-// comment.
-func isComment(b []rune) bool {
- if len(b) == 0 {
- return false
- }
-
- switch b[0] {
- case ';':
- return true
- case '#':
- return true
- }
-
- return false
-}
-
-// newCommentToken will create a comment token and
-// return how many bytes were read.
-func newCommentToken(b []rune) (Token, int, error) {
- i := 0
- for ; i < len(b); i++ {
- if b[i] == '\n' {
- break
- }
-
- if len(b)-i > 2 && b[i] == '\r' && b[i+1] == '\n' {
- break
- }
- }
-
- return newToken(TokenComment, b[:i], NoneType), i, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
deleted file mode 100644
index 1e55bbd07..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/doc.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Package ini is an LL(1) parser for configuration files.
-//
-// Example:
-// sections, err := ini.OpenFile("/path/to/file")
-// if err != nil {
-// panic(err)
-// }
-//
-// profile := "foo"
-// section, ok := sections.GetSection(profile)
-// if !ok {
-// fmt.Printf("section %q could not be found", profile)
-// }
-//
-// Below is the BNF that describes this parser
-// Grammar:
-// stmt -> section | stmt'
-// stmt' -> epsilon | expr
-// expr -> value (stmt)* | equal_expr (stmt)*
-// equal_expr -> value ( ':' | '=' ) equal_expr'
-// equal_expr' -> number | string | quoted_string
-// quoted_string -> " quoted_string'
-// quoted_string' -> string quoted_string_end
-// quoted_string_end -> "
-//
-// section -> [ section'
-// section' -> section_value section_close
-// section_value -> number | string_subset | boolean | quoted_string_subset
-// quoted_string_subset -> " quoted_string_subset'
-// quoted_string_subset' -> string_subset quoted_string_end
-// quoted_string_subset -> "
-// section_close -> ]
-//
-// value -> number | string_subset | boolean
-// string -> ? UTF-8 Code-Points except '\n' (U+000A) and '\r\n' (U+000D U+000A) ?
-// string_subset -> ? Code-points excepted by grammar except ':' (U+003A), '=' (U+003D), '[' (U+005B), and ']' (U+005D) ?
-//
-// SkipState will skip (NL WS)+
-//
-// comment -> # comment' | ; comment'
-// comment' -> epsilon | value
-package ini
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
deleted file mode 100644
index 04345a54c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/empty_token.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package ini
-
-// emptyToken is used to satisfy the Token interface
-var emptyToken = newToken(TokenNone, []rune{}, NoneType)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
deleted file mode 100644
index 91ba2a59d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/expression.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package ini
-
-// newExpression will return an expression AST.
-// Expr represents an expression
-//
-// grammar:
-// expr -> string | number
-func newExpression(tok Token) AST {
- return newASTWithRootToken(ASTKindExpr, tok)
-}
-
-func newEqualExpr(left AST, tok Token) AST {
- return newASTWithRootToken(ASTKindEqualExpr, tok, left)
-}
-
-// EqualExprKey will return a LHS value in the equal expr
-func EqualExprKey(ast AST) string {
- children := ast.GetChildren()
- if len(children) == 0 || ast.Kind != ASTKindEqualExpr {
- return ""
- }
-
- return string(children[0].Root.Raw())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
deleted file mode 100644
index 6e545b63b..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/fuzz.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build gofuzz
-// +build gofuzz
-
-package ini
-
-import (
- "bytes"
-)
-
-func Fuzz(data []byte) int {
- b := bytes.NewReader(data)
-
- if _, err := Parse(b); err != nil {
- return 0
- }
-
- return 1
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
deleted file mode 100644
index 3b0ca7afe..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package ini
-
-import (
- "io"
- "os"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// OpenFile takes a path to a given file, and will open and parse
-// that file.
-func OpenFile(path string) (Sections, error) {
- f, err := os.Open(path)
- if err != nil {
- return Sections{}, awserr.New(ErrCodeUnableToReadFile, "unable to open file", err)
- }
- defer f.Close()
-
- return Parse(f)
-}
-
-// Parse will parse the given file using the shared config
-// visitor.
-func Parse(f io.Reader) (Sections, error) {
- tree, err := ParseAST(f)
- if err != nil {
- return Sections{}, err
- }
-
- v := NewDefaultVisitor()
- if err = Walk(tree, v); err != nil {
- return Sections{}, err
- }
-
- return v.Sections, nil
-}
-
-// ParseBytes will parse the given bytes and return the parsed sections.
-func ParseBytes(b []byte) (Sections, error) {
- tree, err := ParseASTBytes(b)
- if err != nil {
- return Sections{}, err
- }
-
- v := NewDefaultVisitor()
- if err = Walk(tree, v); err != nil {
- return Sections{}, err
- }
-
- return v.Sections, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
deleted file mode 100644
index 582c024ad..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_lexer.go
+++ /dev/null
@@ -1,165 +0,0 @@
-package ini
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-const (
- // ErrCodeUnableToReadFile is used when a file is failed to be
- // opened or read from.
- ErrCodeUnableToReadFile = "FailedRead"
-)
-
-// TokenType represents the various different tokens types
-type TokenType int
-
-func (t TokenType) String() string {
- switch t {
- case TokenNone:
- return "none"
- case TokenLit:
- return "literal"
- case TokenSep:
- return "sep"
- case TokenOp:
- return "op"
- case TokenWS:
- return "ws"
- case TokenNL:
- return "newline"
- case TokenComment:
- return "comment"
- case TokenComma:
- return "comma"
- default:
- return ""
- }
-}
-
-// TokenType enums
-const (
- TokenNone = TokenType(iota)
- TokenLit
- TokenSep
- TokenComma
- TokenOp
- TokenWS
- TokenNL
- TokenComment
-)
-
-type iniLexer struct{}
-
-// Tokenize will return a list of tokens during lexical analysis of the
-// io.Reader.
-func (l *iniLexer) Tokenize(r io.Reader) ([]Token, error) {
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, awserr.New(ErrCodeUnableToReadFile, "unable to read file", err)
- }
-
- return l.tokenize(b)
-}
-
-func (l *iniLexer) tokenize(b []byte) ([]Token, error) {
- runes := bytes.Runes(b)
- var err error
- n := 0
- tokenAmount := countTokens(runes)
- tokens := make([]Token, tokenAmount)
- count := 0
-
- for len(runes) > 0 && count < tokenAmount {
- switch {
- case isWhitespace(runes[0]):
- tokens[count], n, err = newWSToken(runes)
- case isComma(runes[0]):
- tokens[count], n = newCommaToken(), 1
- case isComment(runes):
- tokens[count], n, err = newCommentToken(runes)
- case isNewline(runes):
- tokens[count], n, err = newNewlineToken(runes)
- case isSep(runes):
- tokens[count], n, err = newSepToken(runes)
- case isOp(runes):
- tokens[count], n, err = newOpToken(runes)
- default:
- tokens[count], n, err = newLitToken(runes)
- }
-
- if err != nil {
- return nil, err
- }
-
- count++
-
- runes = runes[n:]
- }
-
- return tokens[:count], nil
-}
-
-func countTokens(runes []rune) int {
- count, n := 0, 0
- var err error
-
- for len(runes) > 0 {
- switch {
- case isWhitespace(runes[0]):
- _, n, err = newWSToken(runes)
- case isComma(runes[0]):
- _, n = newCommaToken(), 1
- case isComment(runes):
- _, n, err = newCommentToken(runes)
- case isNewline(runes):
- _, n, err = newNewlineToken(runes)
- case isSep(runes):
- _, n, err = newSepToken(runes)
- case isOp(runes):
- _, n, err = newOpToken(runes)
- default:
- _, n, err = newLitToken(runes)
- }
-
- if err != nil {
- return 0
- }
-
- count++
- runes = runes[n:]
- }
-
- return count + 1
-}
-
-// Token indicates a metadata about a given value.
-type Token struct {
- t TokenType
- ValueType ValueType
- base int
- raw []rune
-}
-
-var emptyValue = Value{}
-
-func newToken(t TokenType, raw []rune, v ValueType) Token {
- return Token{
- t: t,
- raw: raw,
- ValueType: v,
- }
-}
-
-// Raw return the raw runes that were consumed
-func (tok Token) Raw() []rune {
- return tok.raw
-}
-
-// Type returns the token type
-func (tok Token) Type() TokenType {
- return tok.t
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
deleted file mode 100644
index 0ba319491..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ini_parser.go
+++ /dev/null
@@ -1,350 +0,0 @@
-package ini
-
-import (
- "fmt"
- "io"
-)
-
-// ParseState represents the current state of the parser.
-type ParseState uint
-
-// State enums for the parse table
-const (
- InvalidState ParseState = iota
- // stmt -> value stmt'
- StatementState
- // stmt' -> MarkComplete | op stmt
- StatementPrimeState
- // value -> number | string | boolean | quoted_string
- ValueState
- // section -> [ section'
- OpenScopeState
- // section' -> value section_close
- SectionState
- // section_close -> ]
- CloseScopeState
- // SkipState will skip (NL WS)+
- SkipState
- // SkipTokenState will skip any token and push the previous
- // state onto the stack.
- SkipTokenState
- // comment -> # comment' | ; comment'
- // comment' -> MarkComplete | value
- CommentState
- // MarkComplete state will complete statements and move that
- // to the completed AST list
- MarkCompleteState
- // TerminalState signifies that the tokens have been fully parsed
- TerminalState
-)
-
-// parseTable is a state machine to dictate the grammar above.
-var parseTable = map[ASTKind]map[TokenType]ParseState{
- ASTKindStart: {
- TokenLit: StatementState,
- TokenSep: OpenScopeState,
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenComment: CommentState,
- TokenNone: TerminalState,
- },
- ASTKindCommentStatement: {
- TokenLit: StatementState,
- TokenSep: OpenScopeState,
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenComment: CommentState,
- TokenNone: MarkCompleteState,
- },
- ASTKindExpr: {
- TokenOp: StatementPrimeState,
- TokenLit: ValueState,
- TokenSep: OpenScopeState,
- TokenWS: ValueState,
- TokenNL: SkipState,
- TokenComment: CommentState,
- TokenNone: MarkCompleteState,
- },
- ASTKindEqualExpr: {
- TokenLit: ValueState,
- TokenSep: ValueState,
- TokenOp: ValueState,
- TokenWS: SkipTokenState,
- TokenNL: SkipState,
- TokenNone: SkipState,
- },
- ASTKindStatement: {
- TokenLit: SectionState,
- TokenSep: CloseScopeState,
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenComment: CommentState,
- TokenNone: MarkCompleteState,
- },
- ASTKindExprStatement: {
- TokenLit: ValueState,
- TokenSep: ValueState,
- TokenOp: ValueState,
- TokenWS: ValueState,
- TokenNL: MarkCompleteState,
- TokenComment: CommentState,
- TokenNone: TerminalState,
- TokenComma: SkipState,
- },
- ASTKindSectionStatement: {
- TokenLit: SectionState,
- TokenOp: SectionState,
- TokenSep: CloseScopeState,
- TokenWS: SectionState,
- TokenNL: SkipTokenState,
- },
- ASTKindCompletedSectionStatement: {
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenLit: StatementState,
- TokenSep: OpenScopeState,
- TokenComment: CommentState,
- TokenNone: MarkCompleteState,
- },
- ASTKindSkipStatement: {
- TokenLit: StatementState,
- TokenSep: OpenScopeState,
- TokenWS: SkipTokenState,
- TokenNL: SkipTokenState,
- TokenComment: CommentState,
- TokenNone: TerminalState,
- },
-}
-
-// ParseAST will parse input from an io.Reader using
-// an LL(1) parser.
-func ParseAST(r io.Reader) ([]AST, error) {
- lexer := iniLexer{}
- tokens, err := lexer.Tokenize(r)
- if err != nil {
- return []AST{}, err
- }
-
- return parse(tokens)
-}
-
-// ParseASTBytes will parse input from a byte slice using
-// an LL(1) parser.
-func ParseASTBytes(b []byte) ([]AST, error) {
- lexer := iniLexer{}
- tokens, err := lexer.tokenize(b)
- if err != nil {
- return []AST{}, err
- }
-
- return parse(tokens)
-}
-
-func parse(tokens []Token) ([]AST, error) {
- start := Start
- stack := newParseStack(3, len(tokens))
-
- stack.Push(start)
- s := newSkipper()
-
-loop:
- for stack.Len() > 0 {
- k := stack.Pop()
-
- var tok Token
- if len(tokens) == 0 {
- // this occurs when all the tokens have been processed
- // but reduction of what's left on the stack needs to
- // occur.
- tok = emptyToken
- } else {
- tok = tokens[0]
- }
-
- step := parseTable[k.Kind][tok.Type()]
- if s.ShouldSkip(tok) {
- // being in a skip state with no tokens will break out of
- // the parse loop since there is nothing left to process.
- if len(tokens) == 0 {
- break loop
- }
- // if should skip is true, we skip the tokens until should skip is set to false.
- step = SkipTokenState
- }
-
- switch step {
- case TerminalState:
- // Finished parsing. Push what should be the last
- // statement to the stack. If there is anything left
- // on the stack, an error in parsing has occurred.
- if k.Kind != ASTKindStart {
- stack.MarkComplete(k)
- }
- break loop
- case SkipTokenState:
- // When skipping a token, the previous state was popped off the stack.
- // To maintain the correct state, the previous state will be pushed
- // onto the stack.
- stack.Push(k)
- case StatementState:
- if k.Kind != ASTKindStart {
- stack.MarkComplete(k)
- }
- expr := newExpression(tok)
- stack.Push(expr)
- case StatementPrimeState:
- if tok.Type() != TokenOp {
- stack.MarkComplete(k)
- continue
- }
-
- if k.Kind != ASTKindExpr {
- return nil, NewParseError(
- fmt.Sprintf("invalid expression: expected Expr type, but found %T type", k),
- )
- }
-
- k = trimSpaces(k)
- expr := newEqualExpr(k, tok)
- stack.Push(expr)
- case ValueState:
- // ValueState requires the previous state to either be an equal expression
- // or an expression statement.
- switch k.Kind {
- case ASTKindEqualExpr:
- // assigning a value to some key
- k.AppendChild(newExpression(tok))
- stack.Push(newExprStatement(k))
- case ASTKindExpr:
- k.Root.raw = append(k.Root.raw, tok.Raw()...)
- stack.Push(k)
- case ASTKindExprStatement:
- root := k.GetRoot()
- children := root.GetChildren()
- if len(children) == 0 {
- return nil, NewParseError(
- fmt.Sprintf("invalid expression: AST contains no children %s", k.Kind),
- )
- }
-
- rhs := children[len(children)-1]
-
- if rhs.Root.ValueType != QuotedStringType {
- rhs.Root.ValueType = StringType
- rhs.Root.raw = append(rhs.Root.raw, tok.Raw()...)
-
- }
-
- children[len(children)-1] = rhs
- root.SetChildren(children)
-
- stack.Push(k)
- }
- case OpenScopeState:
- if !runeCompare(tok.Raw(), openBrace) {
- return nil, NewParseError("expected '['")
- }
- // If OpenScopeState is not at the start, we must mark the previous ast as complete
- //
- // for example: if previous ast was a skip statement;
- // we should mark it as complete before we create a new statement
- if k.Kind != ASTKindStart {
- stack.MarkComplete(k)
- }
-
- stmt := newStatement()
- stack.Push(stmt)
- case CloseScopeState:
- if !runeCompare(tok.Raw(), closeBrace) {
- return nil, NewParseError("expected ']'")
- }
-
- k = trimSpaces(k)
- stack.Push(newCompletedSectionStatement(k))
- case SectionState:
- var stmt AST
-
- switch k.Kind {
- case ASTKindStatement:
- // If there are multiple literals inside of a scope declaration,
- // then the current token's raw value will be appended to the Name.
- //
- // This handles cases like [ profile default ]
- //
- // k will represent a SectionStatement with the children representing
- // the label of the section
- stmt = newSectionStatement(tok)
- case ASTKindSectionStatement:
- k.Root.raw = append(k.Root.raw, tok.Raw()...)
- stmt = k
- default:
- return nil, NewParseError(
- fmt.Sprintf("invalid statement: expected statement: %v", k.Kind),
- )
- }
-
- stack.Push(stmt)
- case MarkCompleteState:
- if k.Kind != ASTKindStart {
- stack.MarkComplete(k)
- }
-
- if stack.Len() == 0 {
- stack.Push(start)
- }
- case SkipState:
- stack.Push(newSkipStatement(k))
- s.Skip()
- case CommentState:
- if k.Kind == ASTKindStart {
- stack.Push(k)
- } else {
- stack.MarkComplete(k)
- }
-
- stmt := newCommentStatement(tok)
- stack.Push(stmt)
- default:
- return nil, NewParseError(
- fmt.Sprintf("invalid state with ASTKind %v and TokenType %v",
- k, tok.Type()))
- }
-
- if len(tokens) > 0 {
- tokens = tokens[1:]
- }
- }
-
- // this occurs when a statement has not been completed
- if stack.top > 1 {
- return nil, NewParseError(fmt.Sprintf("incomplete ini expression"))
- }
-
- // returns a sublist which excludes the start symbol
- return stack.List(), nil
-}
-
-// trimSpaces will trim spaces on the left and right hand side of
-// the literal.
-func trimSpaces(k AST) AST {
- // trim left hand side of spaces
- for i := 0; i < len(k.Root.raw); i++ {
- if !isWhitespace(k.Root.raw[i]) {
- break
- }
-
- k.Root.raw = k.Root.raw[1:]
- i--
- }
-
- // trim right hand side of spaces
- for i := len(k.Root.raw) - 1; i >= 0; i-- {
- if !isWhitespace(k.Root.raw[i]) {
- break
- }
-
- k.Root.raw = k.Root.raw[:len(k.Root.raw)-1]
- }
-
- return k
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
deleted file mode 100644
index b1b686086..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/literal_tokens.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package ini
-
-import (
- "fmt"
- "strconv"
- "strings"
- "unicode"
-)
-
-var (
- runesTrue = []rune("true")
- runesFalse = []rune("false")
-)
-
-var literalValues = [][]rune{
- runesTrue,
- runesFalse,
-}
-
-func isBoolValue(b []rune) bool {
- for _, lv := range literalValues {
- if isCaselessLitValue(lv, b) {
- return true
- }
- }
- return false
-}
-
-func isLitValue(want, have []rune) bool {
- if len(have) < len(want) {
- return false
- }
-
- for i := 0; i < len(want); i++ {
- if want[i] != have[i] {
- return false
- }
- }
-
- return true
-}
-
-// isCaselessLitValue is a caseless value comparison, assumes want is already lower-cased for efficiency.
-func isCaselessLitValue(want, have []rune) bool {
- if len(have) < len(want) {
- return false
- }
-
- for i := 0; i < len(want); i++ {
- if want[i] != unicode.ToLower(have[i]) {
- return false
- }
- }
-
- return true
-}
-
-// isNumberValue will return whether not the leading characters in
-// a byte slice is a number. A number is delimited by whitespace or
-// the newline token.
-//
-// A number is defined to be in a binary, octal, decimal (int | float), hex format,
-// or in scientific notation.
-func isNumberValue(b []rune) bool {
- negativeIndex := 0
- helper := numberHelper{}
- needDigit := false
-
- for i := 0; i < len(b); i++ {
- negativeIndex++
-
- switch b[i] {
- case '-':
- if helper.IsNegative() || negativeIndex != 1 {
- return false
- }
- helper.Determine(b[i])
- needDigit = true
- continue
- case 'e', 'E':
- if err := helper.Determine(b[i]); err != nil {
- return false
- }
- negativeIndex = 0
- needDigit = true
- continue
- case 'b':
- if helper.numberFormat == hex {
- break
- }
- fallthrough
- case 'o', 'x':
- needDigit = true
- if i == 0 {
- return false
- }
-
- fallthrough
- case '.':
- if err := helper.Determine(b[i]); err != nil {
- return false
- }
- needDigit = true
- continue
- }
-
- if i > 0 && (isNewline(b[i:]) || isWhitespace(b[i])) {
- return !needDigit
- }
-
- if !helper.CorrectByte(b[i]) {
- return false
- }
- needDigit = false
- }
-
- return !needDigit
-}
-
-func isValid(b []rune) (bool, int, error) {
- if len(b) == 0 {
- // TODO: should probably return an error
- return false, 0, nil
- }
-
- return isValidRune(b[0]), 1, nil
-}
-
-func isValidRune(r rune) bool {
- return r != ':' && r != '=' && r != '[' && r != ']' && r != ' ' && r != '\n'
-}
-
-// ValueType is an enum that will signify what type
-// the Value is
-type ValueType int
-
-func (v ValueType) String() string {
- switch v {
- case NoneType:
- return "NONE"
- case DecimalType:
- return "FLOAT"
- case IntegerType:
- return "INT"
- case StringType:
- return "STRING"
- case BoolType:
- return "BOOL"
- }
-
- return ""
-}
-
-// ValueType enums
-const (
- NoneType = ValueType(iota)
- DecimalType // deprecated
- IntegerType // deprecated
- StringType
- QuotedStringType
- BoolType // deprecated
-)
-
-// Value is a union container
-type Value struct {
- Type ValueType
- raw []rune
-
- integer int64 // deprecated
- decimal float64 // deprecated
- boolean bool // deprecated
- str string
-}
-
-func newValue(t ValueType, base int, raw []rune) (Value, error) {
- v := Value{
- Type: t,
- raw: raw,
- }
- var err error
-
- switch t {
- case DecimalType:
- v.decimal, err = strconv.ParseFloat(string(raw), 64)
- case IntegerType:
- if base != 10 {
- raw = raw[2:]
- }
-
- v.integer, err = strconv.ParseInt(string(raw), base, 64)
- case StringType:
- v.str = string(raw)
- case QuotedStringType:
- v.str = string(raw[1 : len(raw)-1])
- case BoolType:
- v.boolean = isCaselessLitValue(runesTrue, v.raw)
- }
-
- // issue 2253
- //
- // if the value trying to be parsed is too large, then we will use
- // the 'StringType' and raw value instead.
- if nerr, ok := err.(*strconv.NumError); ok && nerr.Err == strconv.ErrRange {
- v.Type = StringType
- v.str = string(raw)
- err = nil
- }
-
- return v, err
-}
-
-// Append will append values and change the type to a string
-// type.
-func (v *Value) Append(tok Token) {
- r := tok.Raw()
- if v.Type != QuotedStringType {
- v.Type = StringType
- r = tok.raw[1 : len(tok.raw)-1]
- }
- if tok.Type() != TokenLit {
- v.raw = append(v.raw, tok.Raw()...)
- } else {
- v.raw = append(v.raw, r...)
- }
-}
-
-func (v Value) String() string {
- switch v.Type {
- case DecimalType:
- return fmt.Sprintf("decimal: %f", v.decimal)
- case IntegerType:
- return fmt.Sprintf("integer: %d", v.integer)
- case StringType:
- return fmt.Sprintf("string: %s", string(v.raw))
- case QuotedStringType:
- return fmt.Sprintf("quoted string: %s", string(v.raw))
- case BoolType:
- return fmt.Sprintf("bool: %t", v.boolean)
- default:
- return "union not set"
- }
-}
-
-func newLitToken(b []rune) (Token, int, error) {
- n := 0
- var err error
-
- token := Token{}
- if b[0] == '"' {
- n, err = getStringValue(b)
- if err != nil {
- return token, n, err
- }
-
- token = newToken(TokenLit, b[:n], QuotedStringType)
- } else {
- n, err = getValue(b)
- token = newToken(TokenLit, b[:n], StringType)
- }
-
- return token, n, err
-}
-
-// IntValue returns an integer value
-func (v Value) IntValue() (int64, bool) {
- i, err := strconv.ParseInt(string(v.raw), 0, 64)
- if err != nil {
- return 0, false
- }
- return i, true
-}
-
-// FloatValue returns a float value
-func (v Value) FloatValue() (float64, bool) {
- f, err := strconv.ParseFloat(string(v.raw), 64)
- if err != nil {
- return 0, false
- }
- return f, true
-}
-
-// BoolValue returns a bool value
-func (v Value) BoolValue() (bool, bool) {
- // we don't use ParseBool as it recognizes more than what we've
- // historically supported
- if isCaselessLitValue(runesTrue, v.raw) {
- return true, true
- } else if isCaselessLitValue(runesFalse, v.raw) {
- return false, true
- }
- return false, false
-}
-
-func isTrimmable(r rune) bool {
- switch r {
- case '\n', ' ':
- return true
- }
- return false
-}
-
-// StringValue returns the string value
-func (v Value) StringValue() string {
- switch v.Type {
- case StringType:
- return strings.TrimFunc(string(v.raw), isTrimmable)
- case QuotedStringType:
- // preserve all characters in the quotes
- return string(removeEscapedCharacters(v.raw[1 : len(v.raw)-1]))
- default:
- return strings.TrimFunc(string(v.raw), isTrimmable)
- }
-}
-
-func contains(runes []rune, c rune) bool {
- for i := 0; i < len(runes); i++ {
- if runes[i] == c {
- return true
- }
- }
-
- return false
-}
-
-func runeCompare(v1 []rune, v2 []rune) bool {
- if len(v1) != len(v2) {
- return false
- }
-
- for i := 0; i < len(v1); i++ {
- if v1[i] != v2[i] {
- return false
- }
- }
-
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
deleted file mode 100644
index e52ac399f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/newline_token.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package ini
-
-func isNewline(b []rune) bool {
- if len(b) == 0 {
- return false
- }
-
- if b[0] == '\n' {
- return true
- }
-
- if len(b) < 2 {
- return false
- }
-
- return b[0] == '\r' && b[1] == '\n'
-}
-
-func newNewlineToken(b []rune) (Token, int, error) {
- i := 1
- if b[0] == '\r' && isNewline(b[1:]) {
- i++
- }
-
- if !isNewline([]rune(b[:i])) {
- return emptyToken, 0, NewParseError("invalid new line token")
- }
-
- return newToken(TokenNL, b[:i], NoneType), i, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
deleted file mode 100644
index a45c0bc56..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/number_helper.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package ini
-
-import (
- "bytes"
- "fmt"
- "strconv"
-)
-
-const (
- none = numberFormat(iota)
- binary
- octal
- decimal
- hex
- exponent
-)
-
-type numberFormat int
-
-// numberHelper is used to dictate what format a number is in
-// and what to do for negative values. Since -1e-4 is a valid
-// number, we cannot just simply check for duplicate negatives.
-type numberHelper struct {
- numberFormat numberFormat
-
- negative bool
- negativeExponent bool
-}
-
-func (b numberHelper) Exists() bool {
- return b.numberFormat != none
-}
-
-func (b numberHelper) IsNegative() bool {
- return b.negative || b.negativeExponent
-}
-
-func (b *numberHelper) Determine(c rune) error {
- if b.Exists() {
- return NewParseError(fmt.Sprintf("multiple number formats: 0%v", string(c)))
- }
-
- switch c {
- case 'b':
- b.numberFormat = binary
- case 'o':
- b.numberFormat = octal
- case 'x':
- b.numberFormat = hex
- case 'e', 'E':
- b.numberFormat = exponent
- case '-':
- if b.numberFormat != exponent {
- b.negative = true
- } else {
- b.negativeExponent = true
- }
- case '.':
- b.numberFormat = decimal
- default:
- return NewParseError(fmt.Sprintf("invalid number character: %v", string(c)))
- }
-
- return nil
-}
-
-func (b numberHelper) CorrectByte(c rune) bool {
- switch {
- case b.numberFormat == binary:
- if !isBinaryByte(c) {
- return false
- }
- case b.numberFormat == octal:
- if !isOctalByte(c) {
- return false
- }
- case b.numberFormat == hex:
- if !isHexByte(c) {
- return false
- }
- case b.numberFormat == decimal:
- if !isDigit(c) {
- return false
- }
- case b.numberFormat == exponent:
- if !isDigit(c) {
- return false
- }
- case b.negativeExponent:
- if !isDigit(c) {
- return false
- }
- case b.negative:
- if !isDigit(c) {
- return false
- }
- default:
- if !isDigit(c) {
- return false
- }
- }
-
- return true
-}
-
-func (b numberHelper) Base() int {
- switch b.numberFormat {
- case binary:
- return 2
- case octal:
- return 8
- case hex:
- return 16
- default:
- return 10
- }
-}
-
-func (b numberHelper) String() string {
- buf := bytes.Buffer{}
- i := 0
-
- switch b.numberFormat {
- case binary:
- i++
- buf.WriteString(strconv.Itoa(i) + ": binary format\n")
- case octal:
- i++
- buf.WriteString(strconv.Itoa(i) + ": octal format\n")
- case hex:
- i++
- buf.WriteString(strconv.Itoa(i) + ": hex format\n")
- case exponent:
- i++
- buf.WriteString(strconv.Itoa(i) + ": exponent format\n")
- default:
- i++
- buf.WriteString(strconv.Itoa(i) + ": integer format\n")
- }
-
- if b.negative {
- i++
- buf.WriteString(strconv.Itoa(i) + ": negative format\n")
- }
-
- if b.negativeExponent {
- i++
- buf.WriteString(strconv.Itoa(i) + ": negative exponent format\n")
- }
-
- return buf.String()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
deleted file mode 100644
index 8a84c7cbe..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/op_tokens.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package ini
-
-import (
- "fmt"
-)
-
-var (
- equalOp = []rune("=")
- equalColonOp = []rune(":")
-)
-
-func isOp(b []rune) bool {
- if len(b) == 0 {
- return false
- }
-
- switch b[0] {
- case '=':
- return true
- case ':':
- return true
- default:
- return false
- }
-}
-
-func newOpToken(b []rune) (Token, int, error) {
- tok := Token{}
-
- switch b[0] {
- case '=':
- tok = newToken(TokenOp, equalOp, NoneType)
- case ':':
- tok = newToken(TokenOp, equalColonOp, NoneType)
- default:
- return tok, 0, NewParseError(fmt.Sprintf("unexpected op type, %v", b[0]))
- }
- return tok, 1, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
deleted file mode 100644
index 457287019..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_error.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package ini
-
-import "fmt"
-
-const (
- // ErrCodeParseError is returned when a parsing error
- // has occurred.
- ErrCodeParseError = "INIParseError"
-)
-
-// ParseError is an error which is returned during any part of
-// the parsing process.
-type ParseError struct {
- msg string
-}
-
-// NewParseError will return a new ParseError where message
-// is the description of the error.
-func NewParseError(message string) *ParseError {
- return &ParseError{
- msg: message,
- }
-}
-
-// Code will return the ErrCodeParseError
-func (err *ParseError) Code() string {
- return ErrCodeParseError
-}
-
-// Message returns the error's message
-func (err *ParseError) Message() string {
- return err.msg
-}
-
-// OrigError return nothing since there will never be any
-// original error.
-func (err *ParseError) OrigError() error {
- return nil
-}
-
-func (err *ParseError) Error() string {
- return fmt.Sprintf("%s: %s", err.Code(), err.Message())
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
deleted file mode 100644
index 7f01cf7c7..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/parse_stack.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package ini
-
-import (
- "bytes"
- "fmt"
-)
-
-// ParseStack is a stack that contains a container, the stack portion,
-// and the list which is the list of ASTs that have been successfully
-// parsed.
-type ParseStack struct {
- top int
- container []AST
- list []AST
- index int
-}
-
-func newParseStack(sizeContainer, sizeList int) ParseStack {
- return ParseStack{
- container: make([]AST, sizeContainer),
- list: make([]AST, sizeList),
- }
-}
-
-// Pop will return and truncate the last container element.
-func (s *ParseStack) Pop() AST {
- s.top--
- return s.container[s.top]
-}
-
-// Push will add the new AST to the container
-func (s *ParseStack) Push(ast AST) {
- s.container[s.top] = ast
- s.top++
-}
-
-// MarkComplete will append the AST to the list of completed statements
-func (s *ParseStack) MarkComplete(ast AST) {
- s.list[s.index] = ast
- s.index++
-}
-
-// List will return the completed statements
-func (s ParseStack) List() []AST {
- return s.list[:s.index]
-}
-
-// Len will return the length of the container
-func (s *ParseStack) Len() int {
- return s.top
-}
-
-func (s ParseStack) String() string {
- buf := bytes.Buffer{}
- for i, node := range s.list {
- buf.WriteString(fmt.Sprintf("%d: %v\n", i+1, node))
- }
-
- return buf.String()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
deleted file mode 100644
index f82095ba2..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/sep_tokens.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package ini
-
-import (
- "fmt"
-)
-
-var (
- emptyRunes = []rune{}
-)
-
-func isSep(b []rune) bool {
- if len(b) == 0 {
- return false
- }
-
- switch b[0] {
- case '[', ']':
- return true
- default:
- return false
- }
-}
-
-var (
- openBrace = []rune("[")
- closeBrace = []rune("]")
-)
-
-func newSepToken(b []rune) (Token, int, error) {
- tok := Token{}
-
- switch b[0] {
- case '[':
- tok = newToken(TokenSep, openBrace, NoneType)
- case ']':
- tok = newToken(TokenSep, closeBrace, NoneType)
- default:
- return tok, 0, NewParseError(fmt.Sprintf("unexpected sep type, %v", b[0]))
- }
- return tok, 1, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
deleted file mode 100644
index da7a4049c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/skipper.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package ini
-
-// skipper is used to skip certain blocks of an ini file.
-// Currently skipper is used to skip nested blocks of ini
-// files. See example below
-//
-// [ foo ]
-// nested = ; this section will be skipped
-// a=b
-// c=d
-// bar=baz ; this will be included
-type skipper struct {
- shouldSkip bool
- TokenSet bool
- prevTok Token
-}
-
-func newSkipper() skipper {
- return skipper{
- prevTok: emptyToken,
- }
-}
-
-func (s *skipper) ShouldSkip(tok Token) bool {
- // should skip state will be modified only if previous token was new line (NL);
- // and the current token is not WhiteSpace (WS).
- if s.shouldSkip &&
- s.prevTok.Type() == TokenNL &&
- tok.Type() != TokenWS {
- s.Continue()
- return false
- }
- s.prevTok = tok
- return s.shouldSkip
-}
-
-func (s *skipper) Skip() {
- s.shouldSkip = true
-}
-
-func (s *skipper) Continue() {
- s.shouldSkip = false
- // empty token is assigned as we return to default state, when should skip is false
- s.prevTok = emptyToken
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
deleted file mode 100644
index 18f3fe893..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/statement.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package ini
-
-// Statement is an empty AST mostly used for transitioning states.
-func newStatement() AST {
- return newAST(ASTKindStatement, AST{})
-}
-
-// SectionStatement represents a section AST
-func newSectionStatement(tok Token) AST {
- return newASTWithRootToken(ASTKindSectionStatement, tok)
-}
-
-// ExprStatement represents a completed expression AST
-func newExprStatement(ast AST) AST {
- return newAST(ASTKindExprStatement, ast)
-}
-
-// CommentStatement represents a comment in the ini definition.
-//
-// grammar:
-// comment -> #comment' | ;comment'
-// comment' -> epsilon | value
-func newCommentStatement(tok Token) AST {
- return newAST(ASTKindCommentStatement, newExpression(tok))
-}
-
-// CompletedSectionStatement represents a completed section
-func newCompletedSectionStatement(ast AST) AST {
- return newAST(ASTKindCompletedSectionStatement, ast)
-}
-
-// SkipStatement is used to skip whole statements
-func newSkipStatement(ast AST) AST {
- return newAST(ASTKindSkipStatement, ast)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
deleted file mode 100644
index b5480fdeb..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/value_util.go
+++ /dev/null
@@ -1,284 +0,0 @@
-package ini
-
-import (
- "fmt"
-)
-
-// getStringValue will return a quoted string and the amount
-// of bytes read
-//
-// an error will be returned if the string is not properly formatted
-func getStringValue(b []rune) (int, error) {
- if b[0] != '"' {
- return 0, NewParseError("strings must start with '\"'")
- }
-
- endQuote := false
- i := 1
-
- for ; i < len(b) && !endQuote; i++ {
- if escaped := isEscaped(b[:i], b[i]); b[i] == '"' && !escaped {
- endQuote = true
- break
- } else if escaped {
- /*c, err := getEscapedByte(b[i])
- if err != nil {
- return 0, err
- }
-
- b[i-1] = c
- b = append(b[:i], b[i+1:]...)
- i--*/
-
- continue
- }
- }
-
- if !endQuote {
- return 0, NewParseError("missing '\"' in string value")
- }
-
- return i + 1, nil
-}
-
-// getBoolValue will return a boolean and the amount
-// of bytes read
-//
-// an error will be returned if the boolean is not of a correct
-// value
-func getBoolValue(b []rune) (int, error) {
- if len(b) < 4 {
- return 0, NewParseError("invalid boolean value")
- }
-
- n := 0
- for _, lv := range literalValues {
- if len(lv) > len(b) {
- continue
- }
-
- if isCaselessLitValue(lv, b) {
- n = len(lv)
- }
- }
-
- if n == 0 {
- return 0, NewParseError("invalid boolean value")
- }
-
- return n, nil
-}
-
-// getNumericalValue will return a numerical string, the amount
-// of bytes read, and the base of the number
-//
-// an error will be returned if the number is not of a correct
-// value
-func getNumericalValue(b []rune) (int, int, error) {
- if !isDigit(b[0]) {
- return 0, 0, NewParseError("invalid digit value")
- }
-
- i := 0
- helper := numberHelper{}
-
-loop:
- for negativeIndex := 0; i < len(b); i++ {
- negativeIndex++
-
- if !isDigit(b[i]) {
- switch b[i] {
- case '-':
- if helper.IsNegative() || negativeIndex != 1 {
- return 0, 0, NewParseError("parse error '-'")
- }
-
- n := getNegativeNumber(b[i:])
- i += (n - 1)
- helper.Determine(b[i])
- continue
- case '.':
- if err := helper.Determine(b[i]); err != nil {
- return 0, 0, err
- }
- case 'e', 'E':
- if err := helper.Determine(b[i]); err != nil {
- return 0, 0, err
- }
-
- negativeIndex = 0
- case 'b':
- if helper.numberFormat == hex {
- break
- }
- fallthrough
- case 'o', 'x':
- if i == 0 && b[i] != '0' {
- return 0, 0, NewParseError("incorrect base format, expected leading '0'")
- }
-
- if i != 1 {
- return 0, 0, NewParseError(fmt.Sprintf("incorrect base format found %s at %d index", string(b[i]), i))
- }
-
- if err := helper.Determine(b[i]); err != nil {
- return 0, 0, err
- }
- default:
- if isWhitespace(b[i]) {
- break loop
- }
-
- if isNewline(b[i:]) {
- break loop
- }
-
- if !(helper.numberFormat == hex && isHexByte(b[i])) {
- if i+2 < len(b) && !isNewline(b[i:i+2]) {
- return 0, 0, NewParseError("invalid numerical character")
- } else if !isNewline([]rune{b[i]}) {
- return 0, 0, NewParseError("invalid numerical character")
- }
-
- break loop
- }
- }
- }
- }
-
- return helper.Base(), i, nil
-}
-
-// isDigit will return whether or not something is an integer
-func isDigit(b rune) bool {
- return b >= '0' && b <= '9'
-}
-
-func hasExponent(v []rune) bool {
- return contains(v, 'e') || contains(v, 'E')
-}
-
-func isBinaryByte(b rune) bool {
- switch b {
- case '0', '1':
- return true
- default:
- return false
- }
-}
-
-func isOctalByte(b rune) bool {
- switch b {
- case '0', '1', '2', '3', '4', '5', '6', '7':
- return true
- default:
- return false
- }
-}
-
-func isHexByte(b rune) bool {
- if isDigit(b) {
- return true
- }
- return (b >= 'A' && b <= 'F') ||
- (b >= 'a' && b <= 'f')
-}
-
-func getValue(b []rune) (int, error) {
- i := 0
-
- for i < len(b) {
- if isNewline(b[i:]) {
- break
- }
-
- if isOp(b[i:]) {
- break
- }
-
- valid, n, err := isValid(b[i:])
- if err != nil {
- return 0, err
- }
-
- if !valid {
- break
- }
-
- i += n
- }
-
- return i, nil
-}
-
-// getNegativeNumber will return a negative number from a
-// byte slice. This will iterate through all characters until
-// a non-digit has been found.
-func getNegativeNumber(b []rune) int {
- if b[0] != '-' {
- return 0
- }
-
- i := 1
- for ; i < len(b); i++ {
- if !isDigit(b[i]) {
- return i
- }
- }
-
- return i
-}
-
-// isEscaped will return whether or not the character is an escaped
-// character.
-func isEscaped(value []rune, b rune) bool {
- if len(value) == 0 {
- return false
- }
-
- switch b {
- case '\'': // single quote
- case '"': // quote
- case 'n': // newline
- case 't': // tab
- case '\\': // backslash
- default:
- return false
- }
-
- return value[len(value)-1] == '\\'
-}
-
-func getEscapedByte(b rune) (rune, error) {
- switch b {
- case '\'': // single quote
- return '\'', nil
- case '"': // quote
- return '"', nil
- case 'n': // newline
- return '\n', nil
- case 't': // table
- return '\t', nil
- case '\\': // backslash
- return '\\', nil
- default:
- return b, NewParseError(fmt.Sprintf("invalid escaped character %c", b))
- }
-}
-
-func removeEscapedCharacters(b []rune) []rune {
- for i := 0; i < len(b); i++ {
- if isEscaped(b[:i], b[i]) {
- c, err := getEscapedByte(b[i])
- if err != nil {
- return b
- }
-
- b[i-1] = c
- b = append(b[:i], b[i+1:]...)
- i--
- }
- }
-
- return b
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
deleted file mode 100644
index 1d08e138a..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/visitor.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package ini
-
-import (
- "fmt"
- "sort"
-)
-
-// Visitor is an interface used by walkers that will
-// traverse an array of ASTs.
-type Visitor interface {
- VisitExpr(AST) error
- VisitStatement(AST) error
-}
-
-// DefaultVisitor is used to visit statements and expressions
-// and ensure that they are both of the correct format.
-// In addition, upon visiting this will build sections and populate
-// the Sections field which can be used to retrieve profile
-// configuration.
-type DefaultVisitor struct {
- scope string
- Sections Sections
-}
-
-// NewDefaultVisitor return a DefaultVisitor
-func NewDefaultVisitor() *DefaultVisitor {
- return &DefaultVisitor{
- Sections: Sections{
- container: map[string]Section{},
- },
- }
-}
-
-// VisitExpr visits expressions...
-func (v *DefaultVisitor) VisitExpr(expr AST) error {
- t := v.Sections.container[v.scope]
- if t.values == nil {
- t.values = values{}
- }
-
- switch expr.Kind {
- case ASTKindExprStatement:
- opExpr := expr.GetRoot()
- switch opExpr.Kind {
- case ASTKindEqualExpr:
- children := opExpr.GetChildren()
- if len(children) <= 1 {
- return NewParseError("unexpected token type")
- }
-
- rhs := children[1]
-
- // The right-hand value side the equality expression is allowed to contain '[', ']', ':', '=' in the values.
- // If the token is not either a literal or one of the token types that identifies those four additional
- // tokens then error.
- if !(rhs.Root.Type() == TokenLit || rhs.Root.Type() == TokenOp || rhs.Root.Type() == TokenSep) {
- return NewParseError("unexpected token type")
- }
-
- key := EqualExprKey(opExpr)
- v, err := newValue(rhs.Root.ValueType, rhs.Root.base, rhs.Root.Raw())
- if err != nil {
- return err
- }
-
- t.values[key] = v
- default:
- return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
- }
- default:
- return NewParseError(fmt.Sprintf("unsupported expression %v", expr))
- }
-
- v.Sections.container[v.scope] = t
- return nil
-}
-
-// VisitStatement visits statements...
-func (v *DefaultVisitor) VisitStatement(stmt AST) error {
- switch stmt.Kind {
- case ASTKindCompletedSectionStatement:
- child := stmt.GetRoot()
- if child.Kind != ASTKindSectionStatement {
- return NewParseError(fmt.Sprintf("unsupported child statement: %T", child))
- }
-
- name := string(child.Root.Raw())
- v.Sections.container[name] = Section{}
- v.scope = name
- default:
- return NewParseError(fmt.Sprintf("unsupported statement: %s", stmt.Kind))
- }
-
- return nil
-}
-
-// Sections is a map of Section structures that represent
-// a configuration.
-type Sections struct {
- container map[string]Section
-}
-
-// GetSection will return section p. If section p does not exist,
-// false will be returned in the second parameter.
-func (t Sections) GetSection(p string) (Section, bool) {
- v, ok := t.container[p]
- return v, ok
-}
-
-// values represents a map of union values.
-type values map[string]Value
-
-// List will return a list of all sections that were successfully
-// parsed.
-func (t Sections) List() []string {
- keys := make([]string, len(t.container))
- i := 0
- for k := range t.container {
- keys[i] = k
- i++
- }
-
- sort.Strings(keys)
- return keys
-}
-
-// Section contains a name and values. This represent
-// a sectioned entry in a configuration file.
-type Section struct {
- Name string
- values values
-}
-
-// Has will return whether or not an entry exists in a given section
-func (t Section) Has(k string) bool {
- _, ok := t.values[k]
- return ok
-}
-
-// ValueType will returned what type the union is set to. If
-// k was not found, the NoneType will be returned.
-func (t Section) ValueType(k string) (ValueType, bool) {
- v, ok := t.values[k]
- return v.Type, ok
-}
-
-// Bool returns a bool value at k
-func (t Section) Bool(k string) (bool, bool) {
- return t.values[k].BoolValue()
-}
-
-// Int returns an integer value at k
-func (t Section) Int(k string) (int64, bool) {
- return t.values[k].IntValue()
-}
-
-// Float64 returns a float value at k
-func (t Section) Float64(k string) (float64, bool) {
- return t.values[k].FloatValue()
-}
-
-// String returns the string value at k
-func (t Section) String(k string) string {
- _, ok := t.values[k]
- if !ok {
- return ""
- }
- return t.values[k].StringValue()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
deleted file mode 100644
index 99915f7f7..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/walker.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package ini
-
-// Walk will traverse the AST using the v, the Visitor.
-func Walk(tree []AST, v Visitor) error {
- for _, node := range tree {
- switch node.Kind {
- case ASTKindExpr,
- ASTKindExprStatement:
-
- if err := v.VisitExpr(node); err != nil {
- return err
- }
- case ASTKindStatement,
- ASTKindCompletedSectionStatement,
- ASTKindNestedSectionStatement,
- ASTKindCompletedNestedSectionStatement:
-
- if err := v.VisitStatement(node); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go b/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
deleted file mode 100644
index 7ffb4ae06..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/ini/ws_token.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package ini
-
-import (
- "unicode"
-)
-
-// isWhitespace will return whether or not the character is
-// a whitespace character.
-//
-// Whitespace is defined as a space or tab.
-func isWhitespace(c rune) bool {
- return unicode.IsSpace(c) && c != '\n' && c != '\r'
-}
-
-func newWSToken(b []rune) (Token, int, error) {
- i := 0
- for ; i < len(b); i++ {
- if !isWhitespace(b[i]) {
- break
- }
- }
-
- return newToken(TokenWS, b[:i], NoneType), i, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
deleted file mode 100644
index 6c443988b..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/byte.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package sdkio
-
-const (
- // Byte is 8 bits
- Byte int64 = 1
- // KibiByte (KiB) is 1024 Bytes
- KibiByte = Byte * 1024
- // MebiByte (MiB) is 1024 KiB
- MebiByte = KibiByte * 1024
- // GibiByte (GiB) is 1024 MiB
- GibiByte = MebiByte * 1024
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
deleted file mode 100644
index 037a998c4..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.6.go
+++ /dev/null
@@ -1,11 +0,0 @@
-//go:build !go1.7
-// +build !go1.7
-
-package sdkio
-
-// Copy of Go 1.7 io package's Seeker constants.
-const (
- SeekStart = 0 // seek relative to the origin of the file
- SeekCurrent = 1 // seek relative to the current offset
- SeekEnd = 2 // seek relative to the end
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
deleted file mode 100644
index 65e7c60c4..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkio/io_go1.7.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build go1.7
-// +build go1.7
-
-package sdkio
-
-import "io"
-
-// Alias for Go 1.7 io package Seeker constants
-const (
- SeekStart = io.SeekStart // seek relative to the origin of the file
- SeekCurrent = io.SeekCurrent // seek relative to the current offset
- SeekEnd = io.SeekEnd // seek relative to the end
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
deleted file mode 100644
index a84528783..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build go1.10
-// +build go1.10
-
-package sdkmath
-
-import "math"
-
-// Round returns the nearest integer, rounding half away from zero.
-//
-// Special cases are:
-// Round(±0) = ±0
-// Round(±Inf) = ±Inf
-// Round(NaN) = NaN
-func Round(x float64) float64 {
- return math.Round(x)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
deleted file mode 100644
index a3ae3e5db..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkmath/floor_go1.9.go
+++ /dev/null
@@ -1,57 +0,0 @@
-//go:build !go1.10
-// +build !go1.10
-
-package sdkmath
-
-import "math"
-
-// Copied from the Go standard library's (Go 1.12) math/floor.go for use in
-// Go version prior to Go 1.10.
-const (
- uvone = 0x3FF0000000000000
- mask = 0x7FF
- shift = 64 - 11 - 1
- bias = 1023
- signMask = 1 << 63
- fracMask = 1<= 0.5 {
- // return t + Copysign(1, x)
- // }
- // return t
- // }
- bits := math.Float64bits(x)
- e := uint(bits>>shift) & mask
- if e < bias {
- // Round abs(x) < 1 including denormals.
- bits &= signMask // +-0
- if e == bias-1 {
- bits |= uvone // +-1
- }
- } else if e < bias+shift {
- // Round any abs(x) >= 1 containing a fractional component [0,1).
- //
- // Numbers with larger exponents are returned unchanged since they
- // must be either an integer, infinity, or NaN.
- const half = 1 << (shift - 1)
- e -= bias
- bits += half >> e
- bits &^= fracMask >> e
- }
- return math.Float64frombits(bits)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
deleted file mode 100644
index 0c9802d87..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/locked_source.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package sdkrand
-
-import (
- "math/rand"
- "sync"
- "time"
-)
-
-// lockedSource is a thread-safe implementation of rand.Source
-type lockedSource struct {
- lk sync.Mutex
- src rand.Source
-}
-
-func (r *lockedSource) Int63() (n int64) {
- r.lk.Lock()
- n = r.src.Int63()
- r.lk.Unlock()
- return
-}
-
-func (r *lockedSource) Seed(seed int64) {
- r.lk.Lock()
- r.src.Seed(seed)
- r.lk.Unlock()
-}
-
-// SeededRand is a new RNG using a thread safe implementation of rand.Source
-var SeededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
deleted file mode 100644
index 4bae66cee..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build go1.6
-// +build go1.6
-
-package sdkrand
-
-import "math/rand"
-
-// Read provides the stub for math.Rand.Read method support for go version's
-// 1.6 and greater.
-func Read(r *rand.Rand, p []byte) (int, error) {
- return r.Read(p)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
deleted file mode 100644
index 3a6ab8825..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkrand/read_1_5.go
+++ /dev/null
@@ -1,25 +0,0 @@
-//go:build !go1.6
-// +build !go1.6
-
-package sdkrand
-
-import "math/rand"
-
-// Read backfills Go 1.6's math.Rand.Reader for Go 1.5
-func Read(r *rand.Rand, p []byte) (n int, err error) {
- // Copy of Go standard libraries math package's read function not added to
- // standard library until Go 1.6.
- var pos int8
- var val int64
- for n = 0; n < len(p); n++ {
- if pos == 0 {
- val = r.Int63()
- pos = 7
- }
- p[n] = byte(val)
- val >>= 8
- pos--
- }
-
- return n, err
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go b/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
deleted file mode 100644
index 38ea61afe..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sdkuri/path.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package sdkuri
-
-import (
- "path"
- "strings"
-)
-
-// PathJoin will join the elements of the path delimited by the "/"
-// character. Similar to path.Join with the exception the trailing "/"
-// character is preserved if present.
-func PathJoin(elems ...string) string {
- if len(elems) == 0 {
- return ""
- }
-
- hasTrailing := strings.HasSuffix(elems[len(elems)-1], "/")
- str := path.Join(elems...)
- if hasTrailing && str != "/" {
- str += "/"
- }
-
- return str
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
deleted file mode 100644
index 7da8a49ce..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/ecs_container.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package shareddefaults
-
-const (
- // ECSCredsProviderEnvVar is an environmental variable key used to
- // determine which path needs to be hit.
- ECSCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
-)
-
-// ECSContainerCredentialsURI is the endpoint to retrieve container
-// credentials. This can be overridden to test to ensure the credential process
-// is behaving correctly.
-var ECSContainerCredentialsURI = "http://169.254.170.2"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
deleted file mode 100644
index 34fea49ca..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package shareddefaults
-
-import (
- "os/user"
- "path/filepath"
-)
-
-// SharedCredentialsFilename returns the SDK's default file path
-// for the shared credentials file.
-//
-// Builds the shared config file path based on the OS's platform.
-//
-// - Linux/Unix: $HOME/.aws/credentials
-// - Windows: %USERPROFILE%\.aws\credentials
-func SharedCredentialsFilename() string {
- return filepath.Join(UserHomeDir(), ".aws", "credentials")
-}
-
-// SharedConfigFilename returns the SDK's default file path for
-// the shared config file.
-//
-// Builds the shared config file path based on the OS's platform.
-//
-// - Linux/Unix: $HOME/.aws/config
-// - Windows: %USERPROFILE%\.aws\config
-func SharedConfigFilename() string {
- return filepath.Join(UserHomeDir(), ".aws", "config")
-}
-
-// UserHomeDir returns the home directory for the user the process is
-// running under.
-func UserHomeDir() string {
- var home string
-
- home = userHomeDir()
- if len(home) > 0 {
- return home
- }
-
- currUser, _ := user.Current()
- if currUser != nil {
- home = currUser.HomeDir
- }
-
- return home
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go
deleted file mode 100644
index eb298ae0f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build !go1.12
-// +build !go1.12
-
-package shareddefaults
-
-import (
- "os"
- "runtime"
-)
-
-func userHomeDir() string {
- if runtime.GOOS == "windows" { // Windows
- return os.Getenv("USERPROFILE")
- }
-
- // *nix
- return os.Getenv("HOME")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go
deleted file mode 100644
index 51541b508..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config_resolve_home_go1.12.go
+++ /dev/null
@@ -1,13 +0,0 @@
-//go:build go1.12
-// +build go1.12
-
-package shareddefaults
-
-import (
- "os"
-)
-
-func userHomeDir() string {
- home, _ := os.UserHomeDir()
- return home
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go b/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go
deleted file mode 100644
index d008ae27c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/strings/strings.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package strings
-
-import (
- "strings"
-)
-
-// HasPrefixFold tests whether the string s begins with prefix, interpreted as UTF-8 strings,
-// under Unicode case-folding.
-func HasPrefixFold(s, prefix string) bool {
- return len(s) >= len(prefix) && strings.EqualFold(s[0:len(prefix)], prefix)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE
deleted file mode 100644
index 6a66aea5e..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go b/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go
deleted file mode 100644
index 14ad0c589..000000000
--- a/vendor/github.com/aws/aws-sdk-go/internal/sync/singleflight/singleflight.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package singleflight provides a duplicate function call suppression
-// mechanism.
-package singleflight
-
-import "sync"
-
-// call is an in-flight or completed singleflight.Do call
-type call struct {
- wg sync.WaitGroup
-
- // These fields are written once before the WaitGroup is done
- // and are only read after the WaitGroup is done.
- val interface{}
- err error
-
- // forgotten indicates whether Forget was called with this call's key
- // while the call was still in flight.
- forgotten bool
-
- // These fields are read and written with the singleflight
- // mutex held before the WaitGroup is done, and are read but
- // not written after the WaitGroup is done.
- dups int
- chans []chan<- Result
-}
-
-// Group represents a class of work and forms a namespace in
-// which units of work can be executed with duplicate suppression.
-type Group struct {
- mu sync.Mutex // protects m
- m map[string]*call // lazily initialized
-}
-
-// Result holds the results of Do, so they can be passed
-// on a channel.
-type Result struct {
- Val interface{}
- Err error
- Shared bool
-}
-
-// Do executes and returns the results of the given function, making
-// sure that only one execution is in-flight for a given key at a
-// time. If a duplicate comes in, the duplicate caller waits for the
-// original to complete and receives the same results.
-// The return value shared indicates whether v was given to multiple callers.
-func (g *Group) Do(key string, fn func() (interface{}, error)) (v interface{}, err error, shared bool) {
- g.mu.Lock()
- if g.m == nil {
- g.m = make(map[string]*call)
- }
- if c, ok := g.m[key]; ok {
- c.dups++
- g.mu.Unlock()
- c.wg.Wait()
- return c.val, c.err, true
- }
- c := new(call)
- c.wg.Add(1)
- g.m[key] = c
- g.mu.Unlock()
-
- g.doCall(c, key, fn)
- return c.val, c.err, c.dups > 0
-}
-
-// DoChan is like Do but returns a channel that will receive the
-// results when they are ready.
-func (g *Group) DoChan(key string, fn func() (interface{}, error)) <-chan Result {
- ch := make(chan Result, 1)
- g.mu.Lock()
- if g.m == nil {
- g.m = make(map[string]*call)
- }
- if c, ok := g.m[key]; ok {
- c.dups++
- c.chans = append(c.chans, ch)
- g.mu.Unlock()
- return ch
- }
- c := &call{chans: []chan<- Result{ch}}
- c.wg.Add(1)
- g.m[key] = c
- g.mu.Unlock()
-
- go g.doCall(c, key, fn)
-
- return ch
-}
-
-// doCall handles the single call for a key.
-func (g *Group) doCall(c *call, key string, fn func() (interface{}, error)) {
- c.val, c.err = fn()
- c.wg.Done()
-
- g.mu.Lock()
- if !c.forgotten {
- delete(g.m, key)
- }
- for _, ch := range c.chans {
- ch <- Result{c.val, c.err, c.dups > 0}
- }
- g.mu.Unlock()
-}
-
-// Forget tells the singleflight to forget about a key. Future calls
-// to Do for this key will call the function rather than waiting for
-// an earlier call to complete.
-func (g *Group) Forget(key string) {
- g.mu.Lock()
- if c, ok := g.m[key]; ok {
- c.forgotten = true
- }
- delete(g.m, key)
- g.mu.Unlock()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
deleted file mode 100644
index 1f1d27aea..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package protocol
-
-import (
- "github.com/aws/aws-sdk-go/aws/request"
- "net"
- "strconv"
- "strings"
-)
-
-// ValidateEndpointHostHandler is a request handler that will validate the
-// request endpoint's hosts is a valid RFC 3986 host.
-var ValidateEndpointHostHandler = request.NamedHandler{
- Name: "awssdk.protocol.ValidateEndpointHostHandler",
- Fn: func(r *request.Request) {
- err := ValidateEndpointHost(r.Operation.Name, r.HTTPRequest.URL.Host)
- if err != nil {
- r.Error = err
- }
- },
-}
-
-// ValidateEndpointHost validates that the host string passed in is a valid RFC
-// 3986 host. Returns error if the host is not valid.
-func ValidateEndpointHost(opName, host string) error {
- paramErrs := request.ErrInvalidParams{Context: opName}
-
- var hostname string
- var port string
- var err error
-
- if strings.Contains(host, ":") {
- hostname, port, err = net.SplitHostPort(host)
-
- if err != nil {
- paramErrs.Add(request.NewErrParamFormat("endpoint", err.Error(), host))
- }
-
- if !ValidPortNumber(port) {
- paramErrs.Add(request.NewErrParamFormat("endpoint port number", "[0-65535]", port))
- }
- } else {
- hostname = host
- }
-
- labels := strings.Split(hostname, ".")
- for i, label := range labels {
- if i == len(labels)-1 && len(label) == 0 {
- // Allow trailing dot for FQDN hosts.
- continue
- }
-
- if !ValidHostLabel(label) {
- paramErrs.Add(request.NewErrParamFormat(
- "endpoint host label", "[a-zA-Z0-9-]{1,63}", label))
- }
- }
-
- if len(hostname) == 0 {
- paramErrs.Add(request.NewErrParamMinLen("endpoint host", 1))
- }
-
- if len(hostname) > 255 {
- paramErrs.Add(request.NewErrParamMaxLen(
- "endpoint host", 255, host,
- ))
- }
-
- if paramErrs.Len() > 0 {
- return paramErrs
- }
- return nil
-}
-
-// ValidHostLabel returns if the label is a valid RFC 3986 host label.
-func ValidHostLabel(label string) bool {
- if l := len(label); l == 0 || l > 63 {
- return false
- }
- for _, r := range label {
- switch {
- case r >= '0' && r <= '9':
- case r >= 'A' && r <= 'Z':
- case r >= 'a' && r <= 'z':
- case r == '-':
- default:
- return false
- }
- }
-
- return true
-}
-
-// ValidPortNumber return if the port is valid RFC 3986 port
-func ValidPortNumber(port string) bool {
- i, err := strconv.Atoi(port)
- if err != nil {
- return false
- }
-
- if i < 0 || i > 65535 {
- return false
- }
- return true
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
deleted file mode 100644
index 915b0fcaf..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/host_prefix.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package protocol
-
-import (
- "strings"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// HostPrefixHandlerName is the handler name for the host prefix request
-// handler.
-const HostPrefixHandlerName = "awssdk.endpoint.HostPrefixHandler"
-
-// NewHostPrefixHandler constructs a build handler
-func NewHostPrefixHandler(prefix string, labelsFn func() map[string]string) request.NamedHandler {
- builder := HostPrefixBuilder{
- Prefix: prefix,
- LabelsFn: labelsFn,
- }
-
- return request.NamedHandler{
- Name: HostPrefixHandlerName,
- Fn: builder.Build,
- }
-}
-
-// HostPrefixBuilder provides the request handler to expand and prepend
-// the host prefix into the operation's request endpoint host.
-type HostPrefixBuilder struct {
- Prefix string
- LabelsFn func() map[string]string
-}
-
-// Build updates the passed in Request with the HostPrefix template expanded.
-func (h HostPrefixBuilder) Build(r *request.Request) {
- if aws.BoolValue(r.Config.DisableEndpointHostPrefix) {
- return
- }
-
- var labels map[string]string
- if h.LabelsFn != nil {
- labels = h.LabelsFn()
- }
-
- prefix := h.Prefix
- for name, value := range labels {
- prefix = strings.Replace(prefix, "{"+name+"}", value, -1)
- }
-
- r.HTTPRequest.URL.Host = prefix + r.HTTPRequest.URL.Host
- if len(r.HTTPRequest.Host) > 0 {
- r.HTTPRequest.Host = prefix + r.HTTPRequest.Host
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
deleted file mode 100644
index 53831dff9..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package protocol
-
-import (
- "crypto/rand"
- "fmt"
- "reflect"
-)
-
-// RandReader is the random reader the protocol package will use to read
-// random bytes from. This is exported for testing, and should not be used.
-var RandReader = rand.Reader
-
-const idempotencyTokenFillTag = `idempotencyToken`
-
-// CanSetIdempotencyToken returns true if the struct field should be
-// automatically populated with a Idempotency token.
-//
-// Only *string and string type fields that are tagged with idempotencyToken
-// which are not already set can be auto filled.
-func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
- switch u := v.Interface().(type) {
- // To auto fill an Idempotency token the field must be a string,
- // tagged for auto fill, and have a zero value.
- case *string:
- return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
- case string:
- return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
- }
-
- return false
-}
-
-// GetIdempotencyToken returns a randomly generated idempotency token.
-func GetIdempotencyToken() string {
- b := make([]byte, 16)
- RandReader.Read(b)
-
- return UUIDVersion4(b)
-}
-
-// SetIdempotencyToken will set the value provided with a Idempotency Token.
-// Given that the value can be set. Will panic if value is not setable.
-func SetIdempotencyToken(v reflect.Value) {
- if v.Kind() == reflect.Ptr {
- if v.IsNil() && v.CanSet() {
- v.Set(reflect.New(v.Type().Elem()))
- }
- v = v.Elem()
- }
- v = reflect.Indirect(v)
-
- if !v.CanSet() {
- panic(fmt.Sprintf("unable to set idempotnecy token %v", v))
- }
-
- b := make([]byte, 16)
- _, err := rand.Read(b)
- if err != nil {
- // TODO handle error
- return
- }
-
- v.Set(reflect.ValueOf(UUIDVersion4(b)))
-}
-
-// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
-func UUIDVersion4(u []byte) string {
- // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
- // 13th character is "4"
- u[6] = (u[6] | 0x40) & 0x4F
- // 17th character is "8", "9", "a", or "b"
- u[8] = (u[8] | 0x80) & 0xBF
-
- return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
deleted file mode 100644
index 12e814ddf..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
+++ /dev/null
@@ -1,309 +0,0 @@
-// Package jsonutil provides JSON serialization of AWS requests and responses.
-package jsonutil
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
- "math"
- "reflect"
- "sort"
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
- floatNaN = "NaN"
- floatInf = "Infinity"
- floatNegInf = "-Infinity"
-)
-
-var timeType = reflect.ValueOf(time.Time{}).Type()
-var byteSliceType = reflect.ValueOf([]byte{}).Type()
-
-// BuildJSON builds a JSON string for a given object v.
-func BuildJSON(v interface{}) ([]byte, error) {
- var buf bytes.Buffer
-
- err := buildAny(reflect.ValueOf(v), &buf, "")
- return buf.Bytes(), err
-}
-
-func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
- origVal := value
- value = reflect.Indirect(value)
- if !value.IsValid() {
- return nil
- }
-
- vtype := value.Type()
-
- t := tag.Get("type")
- if t == "" {
- switch vtype.Kind() {
- case reflect.Struct:
- // also it can't be a time object
- if value.Type() != timeType {
- t = "structure"
- }
- case reflect.Slice:
- // also it can't be a byte slice
- if _, ok := value.Interface().([]byte); !ok {
- t = "list"
- }
- case reflect.Map:
- // cannot be a JSONValue map
- if _, ok := value.Interface().(aws.JSONValue); !ok {
- t = "map"
- }
- }
- }
-
- switch t {
- case "structure":
- if field, ok := vtype.FieldByName("_"); ok {
- tag = field.Tag
- }
- return buildStruct(value, buf, tag)
- case "list":
- return buildList(value, buf, tag)
- case "map":
- return buildMap(value, buf, tag)
- default:
- return buildScalar(origVal, buf, tag)
- }
-}
-
-func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
- if !value.IsValid() {
- return nil
- }
-
- // unwrap payloads
- if payload := tag.Get("payload"); payload != "" {
- field, _ := value.Type().FieldByName(payload)
- tag = field.Tag
- value = elemOf(value.FieldByName(payload))
- if !value.IsValid() && tag.Get("type") != "structure" {
- return nil
- }
- }
-
- buf.WriteByte('{')
- defer buf.WriteString("}")
-
- if !value.IsValid() {
- return nil
- }
-
- t := value.Type()
- first := true
- for i := 0; i < t.NumField(); i++ {
- member := value.Field(i)
-
- // This allocates the most memory.
- // Additionally, we cannot skip nil fields due to
- // idempotency auto filling.
- field := t.Field(i)
-
- if field.PkgPath != "" {
- continue // ignore unexported fields
- }
- if field.Tag.Get("json") == "-" {
- continue
- }
- if field.Tag.Get("location") != "" {
- continue // ignore non-body elements
- }
- if field.Tag.Get("ignore") != "" {
- continue
- }
-
- if protocol.CanSetIdempotencyToken(member, field) {
- token := protocol.GetIdempotencyToken()
- member = reflect.ValueOf(&token)
- }
-
- if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && member.IsNil() {
- continue // ignore unset fields
- }
-
- if first {
- first = false
- } else {
- buf.WriteByte(',')
- }
-
- // figure out what this field is called
- name := field.Name
- if locName := field.Tag.Get("locationName"); locName != "" {
- name = locName
- }
-
- writeString(name, buf)
- buf.WriteString(`:`)
-
- err := buildAny(member, buf, field.Tag)
- if err != nil {
- return err
- }
-
- }
-
- return nil
-}
-
-func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
- buf.WriteString("[")
-
- for i := 0; i < value.Len(); i++ {
- buildAny(value.Index(i), buf, "")
-
- if i < value.Len()-1 {
- buf.WriteString(",")
- }
- }
-
- buf.WriteString("]")
-
- return nil
-}
-
-type sortedValues []reflect.Value
-
-func (sv sortedValues) Len() int { return len(sv) }
-func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
-func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() }
-
-func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
- buf.WriteString("{")
-
- sv := sortedValues(value.MapKeys())
- sort.Sort(sv)
-
- for i, k := range sv {
- if i > 0 {
- buf.WriteByte(',')
- }
-
- writeString(k.String(), buf)
- buf.WriteString(`:`)
-
- buildAny(value.MapIndex(k), buf, "")
- }
-
- buf.WriteString("}")
-
- return nil
-}
-
-func buildScalar(v reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error {
- // prevents allocation on the heap.
- scratch := [64]byte{}
- switch value := reflect.Indirect(v); value.Kind() {
- case reflect.String:
- writeString(value.String(), buf)
- case reflect.Bool:
- if value.Bool() {
- buf.WriteString("true")
- } else {
- buf.WriteString("false")
- }
- case reflect.Int64:
- buf.Write(strconv.AppendInt(scratch[:0], value.Int(), 10))
- case reflect.Float64:
- f := value.Float()
- switch {
- case math.IsNaN(f):
- writeString(floatNaN, buf)
- case math.IsInf(f, 1):
- writeString(floatInf, buf)
- case math.IsInf(f, -1):
- writeString(floatNegInf, buf)
- default:
- buf.Write(strconv.AppendFloat(scratch[:0], f, 'f', -1, 64))
- }
- default:
- switch converted := value.Interface().(type) {
- case time.Time:
- format := tag.Get("timestampFormat")
- if len(format) == 0 {
- format = protocol.UnixTimeFormatName
- }
-
- ts := protocol.FormatTime(format, converted)
- if format != protocol.UnixTimeFormatName {
- ts = `"` + ts + `"`
- }
-
- buf.WriteString(ts)
- case []byte:
- if !value.IsNil() {
- buf.WriteByte('"')
- if len(converted) < 1024 {
- // for small buffers, using Encode directly is much faster.
- dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted)))
- base64.StdEncoding.Encode(dst, converted)
- buf.Write(dst)
- } else {
- // for large buffers, avoid unnecessary extra temporary
- // buffer space.
- enc := base64.NewEncoder(base64.StdEncoding, buf)
- enc.Write(converted)
- enc.Close()
- }
- buf.WriteByte('"')
- }
- case aws.JSONValue:
- str, err := protocol.EncodeJSONValue(converted, protocol.QuotedEscape)
- if err != nil {
- return fmt.Errorf("unable to encode JSONValue, %v", err)
- }
- buf.WriteString(str)
- default:
- return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type())
- }
- }
- return nil
-}
-
-var hex = "0123456789abcdef"
-
-func writeString(s string, buf *bytes.Buffer) {
- buf.WriteByte('"')
- for i := 0; i < len(s); i++ {
- if s[i] == '"' {
- buf.WriteString(`\"`)
- } else if s[i] == '\\' {
- buf.WriteString(`\\`)
- } else if s[i] == '\b' {
- buf.WriteString(`\b`)
- } else if s[i] == '\f' {
- buf.WriteString(`\f`)
- } else if s[i] == '\r' {
- buf.WriteString(`\r`)
- } else if s[i] == '\t' {
- buf.WriteString(`\t`)
- } else if s[i] == '\n' {
- buf.WriteString(`\n`)
- } else if s[i] < 32 {
- buf.WriteString("\\u00")
- buf.WriteByte(hex[s[i]>>4])
- buf.WriteByte(hex[s[i]&0xF])
- } else {
- buf.WriteByte(s[i])
- }
- }
- buf.WriteByte('"')
-}
-
-// Returns the reflection element of a value, if it is a pointer.
-func elemOf(value reflect.Value) reflect.Value {
- for value.Kind() == reflect.Ptr {
- value = value.Elem()
- }
- return value
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
deleted file mode 100644
index f9334879b..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
+++ /dev/null
@@ -1,317 +0,0 @@
-package jsonutil
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/json"
- "fmt"
- "io"
- "math"
- "math/big"
- "reflect"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-var millisecondsFloat = new(big.Float).SetInt64(1e3)
-
-// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in
-// type. The value to unmarshal the json document into must be a pointer to the
-// type.
-func UnmarshalJSONError(v interface{}, stream io.Reader) error {
- var errBuf bytes.Buffer
- body := io.TeeReader(stream, &errBuf)
-
- err := json.NewDecoder(body).Decode(v)
- if err != nil {
- msg := "failed decoding error message"
- if err == io.EOF {
- msg = "error message missing"
- err = nil
- }
- return awserr.NewUnmarshalError(err, msg, errBuf.Bytes())
- }
-
- return nil
-}
-
-// UnmarshalJSON reads a stream and unmarshals the results in object v.
-func UnmarshalJSON(v interface{}, stream io.Reader) error {
- var out interface{}
-
- decoder := json.NewDecoder(stream)
- decoder.UseNumber()
- err := decoder.Decode(&out)
- if err == io.EOF {
- return nil
- } else if err != nil {
- return err
- }
-
- return unmarshaler{}.unmarshalAny(reflect.ValueOf(v), out, "")
-}
-
-// UnmarshalJSONCaseInsensitive reads a stream and unmarshals the result into the
-// object v. Ignores casing for structure members.
-func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error {
- var out interface{}
-
- decoder := json.NewDecoder(stream)
- decoder.UseNumber()
- err := decoder.Decode(&out)
- if err == io.EOF {
- return nil
- } else if err != nil {
- return err
- }
-
- return unmarshaler{
- caseInsensitive: true,
- }.unmarshalAny(reflect.ValueOf(v), out, "")
-}
-
-type unmarshaler struct {
- caseInsensitive bool
-}
-
-func (u unmarshaler) unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error {
- vtype := value.Type()
- if vtype.Kind() == reflect.Ptr {
- vtype = vtype.Elem() // check kind of actual element type
- }
-
- t := tag.Get("type")
- if t == "" {
- switch vtype.Kind() {
- case reflect.Struct:
- // also it can't be a time object
- if _, ok := value.Interface().(*time.Time); !ok {
- t = "structure"
- }
- case reflect.Slice:
- // also it can't be a byte slice
- if _, ok := value.Interface().([]byte); !ok {
- t = "list"
- }
- case reflect.Map:
- // cannot be a JSONValue map
- if _, ok := value.Interface().(aws.JSONValue); !ok {
- t = "map"
- }
- }
- }
-
- switch t {
- case "structure":
- if field, ok := vtype.FieldByName("_"); ok {
- tag = field.Tag
- }
- return u.unmarshalStruct(value, data, tag)
- case "list":
- return u.unmarshalList(value, data, tag)
- case "map":
- return u.unmarshalMap(value, data, tag)
- default:
- return u.unmarshalScalar(value, data, tag)
- }
-}
-
-func (u unmarshaler) unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error {
- if data == nil {
- return nil
- }
- mapData, ok := data.(map[string]interface{})
- if !ok {
- return fmt.Errorf("JSON value is not a structure (%#v)", data)
- }
-
- t := value.Type()
- if value.Kind() == reflect.Ptr {
- if value.IsNil() { // create the structure if it's nil
- s := reflect.New(value.Type().Elem())
- value.Set(s)
- value = s
- }
-
- value = value.Elem()
- t = t.Elem()
- }
-
- // unwrap any payloads
- if payload := tag.Get("payload"); payload != "" {
- field, _ := t.FieldByName(payload)
- return u.unmarshalAny(value.FieldByName(payload), data, field.Tag)
- }
-
- for i := 0; i < t.NumField(); i++ {
- field := t.Field(i)
- if field.PkgPath != "" {
- continue // ignore unexported fields
- }
-
- // figure out what this field is called
- name := field.Name
- if locName := field.Tag.Get("locationName"); locName != "" {
- name = locName
- }
- if u.caseInsensitive {
- if _, ok := mapData[name]; !ok {
- // Fallback to uncased name search if the exact name didn't match.
- for kn, v := range mapData {
- if strings.EqualFold(kn, name) {
- mapData[name] = v
- }
- }
- }
- }
-
- member := value.FieldByIndex(field.Index)
- err := u.unmarshalAny(member, mapData[name], field.Tag)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (u unmarshaler) unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error {
- if data == nil {
- return nil
- }
- listData, ok := data.([]interface{})
- if !ok {
- return fmt.Errorf("JSON value is not a list (%#v)", data)
- }
-
- if value.IsNil() {
- l := len(listData)
- value.Set(reflect.MakeSlice(value.Type(), l, l))
- }
-
- for i, c := range listData {
- err := u.unmarshalAny(value.Index(i), c, "")
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (u unmarshaler) unmarshalMap(value reflect.Value, data interface{}, tag reflect.StructTag) error {
- if data == nil {
- return nil
- }
- mapData, ok := data.(map[string]interface{})
- if !ok {
- return fmt.Errorf("JSON value is not a map (%#v)", data)
- }
-
- if value.IsNil() {
- value.Set(reflect.MakeMap(value.Type()))
- }
-
- for k, v := range mapData {
- kvalue := reflect.ValueOf(k)
- vvalue := reflect.New(value.Type().Elem()).Elem()
-
- u.unmarshalAny(vvalue, v, "")
- value.SetMapIndex(kvalue, vvalue)
- }
-
- return nil
-}
-
-func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error {
-
- switch d := data.(type) {
- case nil:
- return nil // nothing to do here
- case string:
- switch value.Interface().(type) {
- case *string:
- value.Set(reflect.ValueOf(&d))
- case []byte:
- b, err := base64.StdEncoding.DecodeString(d)
- if err != nil {
- return err
- }
- value.Set(reflect.ValueOf(b))
- case *time.Time:
- format := tag.Get("timestampFormat")
- if len(format) == 0 {
- format = protocol.ISO8601TimeFormatName
- }
-
- t, err := protocol.ParseTime(format, d)
- if err != nil {
- return err
- }
- value.Set(reflect.ValueOf(&t))
- case aws.JSONValue:
- // No need to use escaping as the value is a non-quoted string.
- v, err := protocol.DecodeJSONValue(d, protocol.NoEscape)
- if err != nil {
- return err
- }
- value.Set(reflect.ValueOf(v))
- case *float64:
- // These are regular strings when parsed by encoding/json's unmarshaler.
- switch {
- case strings.EqualFold(d, floatNaN):
- value.Set(reflect.ValueOf(aws.Float64(math.NaN())))
- case strings.EqualFold(d, floatInf):
- value.Set(reflect.ValueOf(aws.Float64(math.Inf(1))))
- case strings.EqualFold(d, floatNegInf):
- value.Set(reflect.ValueOf(aws.Float64(math.Inf(-1))))
- default:
- return fmt.Errorf("unknown JSON number value: %s", d)
- }
- default:
- return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
- }
- case json.Number:
- switch value.Interface().(type) {
- case *int64:
- // Retain the old behavior where we would just truncate the float64
- // calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt
- f, err := d.Float64()
- if err != nil {
- return err
- }
- di := int64(f)
- value.Set(reflect.ValueOf(&di))
- case *float64:
- f, err := d.Float64()
- if err != nil {
- return err
- }
- value.Set(reflect.ValueOf(&f))
- case *time.Time:
- float, ok := new(big.Float).SetString(d.String())
- if !ok {
- return fmt.Errorf("unsupported float time representation: %v", d.String())
- }
- float = float.Mul(float, millisecondsFloat)
- ms, _ := float.Int64()
- t := time.Unix(0, ms*1e6).UTC()
- value.Set(reflect.ValueOf(&t))
- default:
- return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
- }
- case bool:
- switch value.Interface().(type) {
- case *bool:
- value.Set(reflect.ValueOf(&d))
- default:
- return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
- }
- default:
- return fmt.Errorf("unsupported JSON value (%v)", data)
- }
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
deleted file mode 100644
index d9aa27114..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Package jsonrpc provides JSON RPC utilities for serialization of AWS
-// requests and responses.
-package jsonrpc
-
-//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/json.json build_test.go
-//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/json.json unmarshal_test.go
-
-import (
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
- "github.com/aws/aws-sdk-go/private/protocol/rest"
-)
-
-var emptyJSON = []byte("{}")
-
-// BuildHandler is a named request handler for building jsonrpc protocol
-// requests
-var BuildHandler = request.NamedHandler{
- Name: "awssdk.jsonrpc.Build",
- Fn: Build,
-}
-
-// UnmarshalHandler is a named request handler for unmarshaling jsonrpc
-// protocol requests
-var UnmarshalHandler = request.NamedHandler{
- Name: "awssdk.jsonrpc.Unmarshal",
- Fn: Unmarshal,
-}
-
-// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc
-// protocol request metadata
-var UnmarshalMetaHandler = request.NamedHandler{
- Name: "awssdk.jsonrpc.UnmarshalMeta",
- Fn: UnmarshalMeta,
-}
-
-// Build builds a JSON payload for a JSON RPC request.
-func Build(req *request.Request) {
- var buf []byte
- var err error
- if req.ParamsFilled() {
- buf, err = jsonutil.BuildJSON(req.Params)
- if err != nil {
- req.Error = awserr.New(request.ErrCodeSerialization, "failed encoding JSON RPC request", err)
- return
- }
- } else {
- buf = emptyJSON
- }
-
- // Always serialize the body, don't suppress it.
- req.SetBufferBody(buf)
-
- if req.ClientInfo.TargetPrefix != "" {
- target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name
- req.HTTPRequest.Header.Add("X-Amz-Target", target)
- }
-
- // Only set the content type if one is not already specified and an
- // JSONVersion is specified.
- if ct, v := req.HTTPRequest.Header.Get("Content-Type"), req.ClientInfo.JSONVersion; len(ct) == 0 && len(v) != 0 {
- jsonVersion := req.ClientInfo.JSONVersion
- req.HTTPRequest.Header.Set("Content-Type", "application/x-amz-json-"+jsonVersion)
- }
-}
-
-// Unmarshal unmarshals a response for a JSON RPC service.
-func Unmarshal(req *request.Request) {
- defer req.HTTPResponse.Body.Close()
- if req.DataFilled() {
- err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body)
- if err != nil {
- req.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization, "failed decoding JSON RPC response", err),
- req.HTTPResponse.StatusCode,
- req.RequestID,
- )
- }
- }
- return
-}
-
-// UnmarshalMeta unmarshals headers from a response for a JSON RPC service.
-func UnmarshalMeta(req *request.Request) {
- rest.UnmarshalMeta(req)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
deleted file mode 100644
index 9c1ccde54..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/unmarshal_error.go
+++ /dev/null
@@ -1,160 +0,0 @@
-package jsonrpc
-
-import (
- "bytes"
- "io"
- "io/ioutil"
- "net/http"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
-)
-
-const (
- awsQueryError = "x-amzn-query-error"
- // A valid header example - "x-amzn-query-error": ";"
- awsQueryErrorPartsCount = 2
-)
-
-// UnmarshalTypedError provides unmarshaling errors API response errors
-// for both typed and untyped errors.
-type UnmarshalTypedError struct {
- exceptions map[string]func(protocol.ResponseMetadata) error
- queryExceptions map[string]func(protocol.ResponseMetadata, string) error
-}
-
-// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
-// set of exception names to the error unmarshalers
-func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
- return &UnmarshalTypedError{
- exceptions: exceptions,
- queryExceptions: map[string]func(protocol.ResponseMetadata, string) error{},
- }
-}
-
-// NewUnmarshalTypedErrorWithOptions works similar to NewUnmarshalTypedError applying options to the UnmarshalTypedError
-// before returning it
-func NewUnmarshalTypedErrorWithOptions(exceptions map[string]func(protocol.ResponseMetadata) error, optFns ...func(*UnmarshalTypedError)) *UnmarshalTypedError {
- unmarshaledError := NewUnmarshalTypedError(exceptions)
- for _, fn := range optFns {
- fn(unmarshaledError)
- }
- return unmarshaledError
-}
-
-// WithQueryCompatibility is a helper function to construct a functional option for use with NewUnmarshalTypedErrorWithOptions.
-// The queryExceptions given act as an override for unmarshalling errors when query compatible error codes are found.
-// See also [awsQueryCompatible trait]
-//
-// [awsQueryCompatible trait]: https://smithy.io/2.0/aws/protocols/aws-query-protocol.html#aws-protocols-awsquerycompatible-trait
-func WithQueryCompatibility(queryExceptions map[string]func(protocol.ResponseMetadata, string) error) func(*UnmarshalTypedError) {
- return func(typedError *UnmarshalTypedError) {
- typedError.queryExceptions = queryExceptions
- }
-}
-
-// UnmarshalError attempts to unmarshal the HTTP response error as a known
-// error type. If unable to unmarshal the error type, the generic SDK error
-// type will be used.
-func (u *UnmarshalTypedError) UnmarshalError(
- resp *http.Response,
- respMeta protocol.ResponseMetadata,
-) (error, error) {
-
- var buf bytes.Buffer
- var jsonErr jsonErrorResponse
- teeReader := io.TeeReader(resp.Body, &buf)
- err := jsonutil.UnmarshalJSONError(&jsonErr, teeReader)
- if err != nil {
- return nil, err
- }
- body := ioutil.NopCloser(&buf)
-
- // Code may be separated by hash(#), with the last element being the code
- // used by the SDK.
- codeParts := strings.SplitN(jsonErr.Code, "#", 2)
- code := codeParts[len(codeParts)-1]
- msg := jsonErr.Message
-
- queryCodeParts := queryCodeParts(resp, u)
-
- if fn, ok := u.exceptions[code]; ok {
- // If query-compatible exceptions are found and query-error-header is found,
- // then use associated constructor to get exception with query error code.
- //
- // If exception code is known, use associated constructor to get a value
- // for the exception that the JSON body can be unmarshaled into.
- var v error
- queryErrFn, queryExceptionsFound := u.queryExceptions[code]
- if len(queryCodeParts) == awsQueryErrorPartsCount && queryExceptionsFound {
- v = queryErrFn(respMeta, queryCodeParts[0])
- } else {
- v = fn(respMeta)
- }
- err := jsonutil.UnmarshalJSONCaseInsensitive(v, body)
- if err != nil {
- return nil, err
- }
- return v, nil
- }
-
- if len(queryCodeParts) == awsQueryErrorPartsCount && len(u.queryExceptions) > 0 {
- code = queryCodeParts[0]
- }
-
- // fallback to unmodeled generic exceptions
- return awserr.NewRequestFailure(
- awserr.New(code, msg, nil),
- respMeta.StatusCode,
- respMeta.RequestID,
- ), nil
-}
-
-// A valid header example - "x-amzn-query-error": ";"
-func queryCodeParts(resp *http.Response, u *UnmarshalTypedError) []string {
- queryCodeHeader := resp.Header.Get(awsQueryError)
- var queryCodeParts []string
- if queryCodeHeader != "" && len(u.queryExceptions) > 0 {
- queryCodeParts = strings.Split(queryCodeHeader, ";")
- }
- return queryCodeParts
-}
-
-// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc
-// protocol request errors
-var UnmarshalErrorHandler = request.NamedHandler{
- Name: "awssdk.jsonrpc.UnmarshalError",
- Fn: UnmarshalError,
-}
-
-// UnmarshalError unmarshals an error response for a JSON RPC service.
-func UnmarshalError(req *request.Request) {
- defer req.HTTPResponse.Body.Close()
-
- var jsonErr jsonErrorResponse
- err := jsonutil.UnmarshalJSONError(&jsonErr, req.HTTPResponse.Body)
- if err != nil {
- req.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization,
- "failed to unmarshal error message", err),
- req.HTTPResponse.StatusCode,
- req.RequestID,
- )
- return
- }
-
- codes := strings.SplitN(jsonErr.Code, "#", 2)
- req.Error = awserr.NewRequestFailure(
- awserr.New(codes[len(codes)-1], jsonErr.Message, nil),
- req.HTTPResponse.StatusCode,
- req.RequestID,
- )
-}
-
-type jsonErrorResponse struct {
- Code string `json:"__type"`
- Message string `json:"message"`
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
deleted file mode 100644
index 776d11018..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonvalue.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package protocol
-
-import (
- "encoding/base64"
- "encoding/json"
- "fmt"
- "strconv"
-
- "github.com/aws/aws-sdk-go/aws"
-)
-
-// EscapeMode is the mode that should be use for escaping a value
-type EscapeMode uint
-
-// The modes for escaping a value before it is marshaled, and unmarshaled.
-const (
- NoEscape EscapeMode = iota
- Base64Escape
- QuotedEscape
-)
-
-// EncodeJSONValue marshals the value into a JSON string, and optionally base64
-// encodes the string before returning it.
-//
-// Will panic if the escape mode is unknown.
-func EncodeJSONValue(v aws.JSONValue, escape EscapeMode) (string, error) {
- b, err := json.Marshal(v)
- if err != nil {
- return "", err
- }
-
- switch escape {
- case NoEscape:
- return string(b), nil
- case Base64Escape:
- return base64.StdEncoding.EncodeToString(b), nil
- case QuotedEscape:
- return strconv.Quote(string(b)), nil
- }
-
- panic(fmt.Sprintf("EncodeJSONValue called with unknown EscapeMode, %v", escape))
-}
-
-// DecodeJSONValue will attempt to decode the string input as a JSONValue.
-// Optionally decoding base64 the value first before JSON unmarshaling.
-//
-// Will panic if the escape mode is unknown.
-func DecodeJSONValue(v string, escape EscapeMode) (aws.JSONValue, error) {
- var b []byte
- var err error
-
- switch escape {
- case NoEscape:
- b = []byte(v)
- case Base64Escape:
- b, err = base64.StdEncoding.DecodeString(v)
- case QuotedEscape:
- var u string
- u, err = strconv.Unquote(v)
- b = []byte(u)
- default:
- panic(fmt.Sprintf("DecodeJSONValue called with unknown EscapeMode, %v", escape))
- }
-
- if err != nil {
- return nil, err
- }
-
- m := aws.JSONValue{}
- err = json.Unmarshal(b, &m)
- if err != nil {
- return nil, err
- }
-
- return m, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
deleted file mode 100644
index 0ea0647a5..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/payload.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package protocol
-
-import (
- "io"
- "io/ioutil"
- "net/http"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// PayloadUnmarshaler provides the interface for unmarshaling a payload's
-// reader into a SDK shape.
-type PayloadUnmarshaler interface {
- UnmarshalPayload(io.Reader, interface{}) error
-}
-
-// HandlerPayloadUnmarshal implements the PayloadUnmarshaler from a
-// HandlerList. This provides the support for unmarshaling a payload reader to
-// a shape without needing a SDK request first.
-type HandlerPayloadUnmarshal struct {
- Unmarshalers request.HandlerList
-}
-
-// UnmarshalPayload unmarshals the io.Reader payload into the SDK shape using
-// the Unmarshalers HandlerList provided. Returns an error if unable
-// unmarshaling fails.
-func (h HandlerPayloadUnmarshal) UnmarshalPayload(r io.Reader, v interface{}) error {
- req := &request.Request{
- HTTPRequest: &http.Request{},
- HTTPResponse: &http.Response{
- StatusCode: 200,
- Header: http.Header{},
- Body: ioutil.NopCloser(r),
- },
- Data: v,
- }
-
- h.Unmarshalers.Run(req)
-
- return req.Error
-}
-
-// PayloadMarshaler provides the interface for marshaling a SDK shape into and
-// io.Writer.
-type PayloadMarshaler interface {
- MarshalPayload(io.Writer, interface{}) error
-}
-
-// HandlerPayloadMarshal implements the PayloadMarshaler from a HandlerList.
-// This provides support for marshaling a SDK shape into an io.Writer without
-// needing a SDK request first.
-type HandlerPayloadMarshal struct {
- Marshalers request.HandlerList
-}
-
-// MarshalPayload marshals the SDK shape into the io.Writer using the
-// Marshalers HandlerList provided. Returns an error if unable if marshal
-// fails.
-func (h HandlerPayloadMarshal) MarshalPayload(w io.Writer, v interface{}) error {
- req := request.New(
- aws.Config{},
- metadata.ClientInfo{},
- request.Handlers{},
- nil,
- &request.Operation{HTTPMethod: "PUT"},
- v,
- nil,
- )
-
- h.Marshalers.Run(req)
-
- if req.Error != nil {
- return req.Error
- }
-
- io.Copy(w, req.GetBody())
-
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
deleted file mode 100644
index 9d521dcb9..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/protocol.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package protocol
-
-import (
- "fmt"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// RequireHTTPMinProtocol request handler is used to enforce that
-// the target endpoint supports the given major and minor HTTP protocol version.
-type RequireHTTPMinProtocol struct {
- Major, Minor int
-}
-
-// Handler will mark the request.Request with an error if the
-// target endpoint did not connect with the required HTTP protocol
-// major and minor version.
-func (p RequireHTTPMinProtocol) Handler(r *request.Request) {
- if r.Error != nil || r.HTTPResponse == nil {
- return
- }
-
- if !strings.HasPrefix(r.HTTPResponse.Proto, "HTTP") {
- r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
- }
-
- if r.HTTPResponse.ProtoMajor < p.Major || r.HTTPResponse.ProtoMinor < p.Minor {
- r.Error = newMinHTTPProtoError(p.Major, p.Minor, r)
- }
-}
-
-// ErrCodeMinimumHTTPProtocolError error code is returned when the target endpoint
-// did not match the required HTTP major and minor protocol version.
-const ErrCodeMinimumHTTPProtocolError = "MinimumHTTPProtocolError"
-
-func newMinHTTPProtoError(major, minor int, r *request.Request) error {
- return awserr.NewRequestFailure(
- awserr.New("MinimumHTTPProtocolError",
- fmt.Sprintf(
- "operation requires minimum HTTP protocol of HTTP/%d.%d, but was %s",
- major, minor, r.HTTPResponse.Proto,
- ),
- nil,
- ),
- r.HTTPResponse.StatusCode, r.RequestID,
- )
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
deleted file mode 100644
index d40346a77..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Package query provides serialization of AWS query requests, and responses.
-package query
-
-//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/query.json build_test.go
-
-import (
- "net/url"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
-)
-
-// BuildHandler is a named request handler for building query protocol requests
-var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
-
-// Build builds a request for an AWS Query service.
-func Build(r *request.Request) {
- body := url.Values{
- "Action": {r.Operation.Name},
- "Version": {r.ClientInfo.APIVersion},
- }
- if err := queryutil.Parse(body, r.Params, false); err != nil {
- r.Error = awserr.New(request.ErrCodeSerialization, "failed encoding Query request", err)
- return
- }
-
- if !r.IsPresigned() {
- r.HTTPRequest.Method = "POST"
- r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
- r.SetBufferBody([]byte(body.Encode()))
- } else { // This is a pre-signed request
- r.HTTPRequest.Method = "GET"
- r.HTTPRequest.URL.RawQuery = body.Encode()
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
deleted file mode 100644
index 2ca0b19db..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ /dev/null
@@ -1,276 +0,0 @@
-package queryutil
-
-import (
- "encoding/base64"
- "fmt"
- "math"
- "net/url"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
- floatNaN = "NaN"
- floatInf = "Infinity"
- floatNegInf = "-Infinity"
-)
-
-// Parse parses an object i and fills a url.Values object. The isEC2 flag
-// indicates if this is the EC2 Query sub-protocol.
-func Parse(body url.Values, i interface{}, isEC2 bool) error {
- q := queryParser{isEC2: isEC2}
- return q.parseValue(body, reflect.ValueOf(i), "", "")
-}
-
-func elemOf(value reflect.Value) reflect.Value {
- for value.Kind() == reflect.Ptr {
- value = value.Elem()
- }
- return value
-}
-
-type queryParser struct {
- isEC2 bool
-}
-
-func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
- value = elemOf(value)
-
- // no need to handle zero values
- if !value.IsValid() {
- return nil
- }
-
- t := tag.Get("type")
- if t == "" {
- switch value.Kind() {
- case reflect.Struct:
- t = "structure"
- case reflect.Slice:
- t = "list"
- case reflect.Map:
- t = "map"
- }
- }
-
- switch t {
- case "structure":
- return q.parseStruct(v, value, prefix)
- case "list":
- return q.parseList(v, value, prefix, tag)
- case "map":
- return q.parseMap(v, value, prefix, tag)
- default:
- return q.parseScalar(v, value, prefix, tag)
- }
-}
-
-func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
- if !value.IsValid() {
- return nil
- }
-
- t := value.Type()
- for i := 0; i < value.NumField(); i++ {
- elemValue := elemOf(value.Field(i))
- field := t.Field(i)
-
- if field.PkgPath != "" {
- continue // ignore unexported fields
- }
- if field.Tag.Get("ignore") != "" {
- continue
- }
-
- if protocol.CanSetIdempotencyToken(value.Field(i), field) {
- token := protocol.GetIdempotencyToken()
- elemValue = reflect.ValueOf(token)
- }
-
- var name string
- if q.isEC2 {
- name = field.Tag.Get("queryName")
- }
- if name == "" {
- if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
- name = field.Tag.Get("locationNameList")
- } else if locName := field.Tag.Get("locationName"); locName != "" {
- name = locName
- }
- if name != "" && q.isEC2 {
- name = strings.ToUpper(name[0:1]) + name[1:]
- }
- }
- if name == "" {
- name = field.Name
- }
-
- if prefix != "" {
- name = prefix + "." + name
- }
-
- if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
- // If it's empty, and not ec2, generate an empty value
- if !value.IsNil() && value.Len() == 0 && !q.isEC2 {
- v.Set(prefix, "")
- return nil
- }
-
- if _, ok := value.Interface().([]byte); ok {
- return q.parseScalar(v, value, prefix, tag)
- }
-
- // check for unflattened list member
- if !q.isEC2 && tag.Get("flattened") == "" {
- if listName := tag.Get("locationNameList"); listName == "" {
- prefix += ".member"
- } else {
- prefix += "." + listName
- }
- }
-
- for i := 0; i < value.Len(); i++ {
- slicePrefix := prefix
- if slicePrefix == "" {
- slicePrefix = strconv.Itoa(i + 1)
- } else {
- slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
- }
- if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
- // If it's empty, generate an empty value
- if !value.IsNil() && value.Len() == 0 {
- v.Set(prefix, "")
- return nil
- }
-
- // check for unflattened list member
- if !q.isEC2 && tag.Get("flattened") == "" {
- prefix += ".entry"
- }
-
- // sort keys for improved serialization consistency.
- // this is not strictly necessary for protocol support.
- mapKeyValues := value.MapKeys()
- mapKeys := map[string]reflect.Value{}
- mapKeyNames := make([]string, len(mapKeyValues))
- for i, mapKey := range mapKeyValues {
- name := mapKey.String()
- mapKeys[name] = mapKey
- mapKeyNames[i] = name
- }
- sort.Strings(mapKeyNames)
-
- for i, mapKeyName := range mapKeyNames {
- mapKey := mapKeys[mapKeyName]
- mapValue := value.MapIndex(mapKey)
-
- kname := tag.Get("locationNameKey")
- if kname == "" {
- kname = "key"
- }
- vname := tag.Get("locationNameValue")
- if vname == "" {
- vname = "value"
- }
-
- // serialize key
- var keyName string
- if prefix == "" {
- keyName = strconv.Itoa(i+1) + "." + kname
- } else {
- keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
- }
-
- if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
- return err
- }
-
- // serialize value
- var valueName string
- if prefix == "" {
- valueName = strconv.Itoa(i+1) + "." + vname
- } else {
- valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
- }
-
- if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
- switch value := r.Interface().(type) {
- case string:
- v.Set(name, value)
- case []byte:
- if !r.IsNil() {
- v.Set(name, base64.StdEncoding.EncodeToString(value))
- }
- case bool:
- v.Set(name, strconv.FormatBool(value))
- case int64:
- v.Set(name, strconv.FormatInt(value, 10))
- case int:
- v.Set(name, strconv.Itoa(value))
- case float64:
- var str string
- switch {
- case math.IsNaN(value):
- str = floatNaN
- case math.IsInf(value, 1):
- str = floatInf
- case math.IsInf(value, -1):
- str = floatNegInf
- default:
- str = strconv.FormatFloat(value, 'f', -1, 64)
- }
- v.Set(name, str)
- case float32:
- asFloat64 := float64(value)
- var str string
- switch {
- case math.IsNaN(asFloat64):
- str = floatNaN
- case math.IsInf(asFloat64, 1):
- str = floatInf
- case math.IsInf(asFloat64, -1):
- str = floatNegInf
- default:
- str = strconv.FormatFloat(asFloat64, 'f', -1, 32)
- }
- v.Set(name, str)
- case time.Time:
- const ISO8601UTC = "2006-01-02T15:04:05Z"
- format := tag.Get("timestampFormat")
- if len(format) == 0 {
- format = protocol.ISO8601TimeFormatName
- }
-
- v.Set(name, protocol.FormatTime(format, value))
- default:
- return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
- }
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
deleted file mode 100644
index 9231e95d1..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package query
-
-//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/query.json unmarshal_test.go
-
-import (
- "encoding/xml"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
-)
-
-// UnmarshalHandler is a named request handler for unmarshaling query protocol requests
-var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
-
-// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata
-var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
-
-// Unmarshal unmarshals a response for an AWS Query service.
-func Unmarshal(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
- if r.DataFilled() {
- decoder := xml.NewDecoder(r.HTTPResponse.Body)
- err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
- if err != nil {
- r.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization, "failed decoding Query response", err),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- )
- return
- }
- }
-}
-
-// UnmarshalMeta unmarshals header response values for an AWS Query service.
-func UnmarshalMeta(r *request.Request) {
- r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
deleted file mode 100644
index 2c0cbba90..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package query
-
-import (
- "encoding/xml"
- "fmt"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
-)
-
-// UnmarshalErrorHandler is a name request handler to unmarshal request errors
-var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
-
-type xmlErrorResponse struct {
- Code string `xml:"Error>Code"`
- Message string `xml:"Error>Message"`
- RequestID string `xml:"RequestId"`
-}
-
-type xmlResponseError struct {
- xmlErrorResponse
-}
-
-func (e *xmlResponseError) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
- const svcUnavailableTagName = "ServiceUnavailableException"
- const errorResponseTagName = "ErrorResponse"
-
- switch start.Name.Local {
- case svcUnavailableTagName:
- e.Code = svcUnavailableTagName
- e.Message = "service is unavailable"
- return d.Skip()
-
- case errorResponseTagName:
- return d.DecodeElement(&e.xmlErrorResponse, &start)
-
- default:
- return fmt.Errorf("unknown error response tag, %v", start)
- }
-}
-
-// UnmarshalError unmarshals an error response for an AWS Query service.
-func UnmarshalError(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
-
- var respErr xmlResponseError
- err := xmlutil.UnmarshalXMLError(&respErr, r.HTTPResponse.Body)
- if err != nil {
- r.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization,
- "failed to unmarshal error message", err),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- )
- return
- }
-
- reqID := respErr.RequestID
- if len(reqID) == 0 {
- reqID = r.RequestID
- }
-
- r.Error = awserr.NewRequestFailure(
- awserr.New(strings.TrimSpace(respErr.Code), strings.TrimSpace(respErr.Message), nil),
- r.HTTPResponse.StatusCode,
- reqID,
- )
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
deleted file mode 100644
index ecc521f88..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ /dev/null
@@ -1,353 +0,0 @@
-// Package rest provides RESTful serialization of AWS requests and responses.
-package rest
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
- "io"
- "math"
- "net/http"
- "net/url"
- "path"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
- floatNaN = "NaN"
- floatInf = "Infinity"
- floatNegInf = "-Infinity"
-)
-
-// Whether the byte value can be sent without escaping in AWS URLs
-var noEscape [256]bool
-
-var errValueNotSet = fmt.Errorf("value not set")
-
-var byteSliceType = reflect.TypeOf([]byte{})
-
-func init() {
- for i := 0; i < len(noEscape); i++ {
- // AWS expects every character except these to be escaped
- noEscape[i] = (i >= 'A' && i <= 'Z') ||
- (i >= 'a' && i <= 'z') ||
- (i >= '0' && i <= '9') ||
- i == '-' ||
- i == '.' ||
- i == '_' ||
- i == '~'
- }
-}
-
-// BuildHandler is a named request handler for building rest protocol requests
-var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
-
-// Build builds the REST component of a service request.
-func Build(r *request.Request) {
- if r.ParamsFilled() {
- v := reflect.ValueOf(r.Params).Elem()
- buildLocationElements(r, v, false)
- buildBody(r, v)
- }
-}
-
-// BuildAsGET builds the REST component of a service request with the ability to hoist
-// data from the body.
-func BuildAsGET(r *request.Request) {
- if r.ParamsFilled() {
- v := reflect.ValueOf(r.Params).Elem()
- buildLocationElements(r, v, true)
- buildBody(r, v)
- }
-}
-
-func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) {
- query := r.HTTPRequest.URL.Query()
-
- // Setup the raw path to match the base path pattern. This is needed
- // so that when the path is mutated a custom escaped version can be
- // stored in RawPath that will be used by the Go client.
- r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path
-
- for i := 0; i < v.NumField(); i++ {
- m := v.Field(i)
- if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
- continue
- }
-
- if m.IsValid() {
- field := v.Type().Field(i)
- name := field.Tag.Get("locationName")
- if name == "" {
- name = field.Name
- }
- if kind := m.Kind(); kind == reflect.Ptr {
- m = m.Elem()
- } else if kind == reflect.Interface {
- if !m.Elem().IsValid() {
- continue
- }
- }
- if !m.IsValid() {
- continue
- }
- if field.Tag.Get("ignore") != "" {
- continue
- }
-
- // Support the ability to customize values to be marshaled as a
- // blob even though they were modeled as a string. Required for S3
- // API operations like SSECustomerKey is modeled as string but
- // required to be base64 encoded in request.
- if field.Tag.Get("marshal-as") == "blob" {
- m = m.Convert(byteSliceType)
- }
-
- var err error
- switch field.Tag.Get("location") {
- case "headers": // header maps
- err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
- case "header":
- err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
- case "uri":
- err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
- case "querystring":
- err = buildQueryString(query, m, name, field.Tag)
- default:
- if buildGETQuery {
- err = buildQueryString(query, m, name, field.Tag)
- }
- }
- r.Error = err
- }
- if r.Error != nil {
- return
- }
- }
-
- r.HTTPRequest.URL.RawQuery = query.Encode()
- if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) {
- cleanPath(r.HTTPRequest.URL)
- }
-}
-
-func buildBody(r *request.Request, v reflect.Value) {
- if field, ok := v.Type().FieldByName("_"); ok {
- if payloadName := field.Tag.Get("payload"); payloadName != "" {
- pfield, _ := v.Type().FieldByName(payloadName)
- if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
- payload := reflect.Indirect(v.FieldByName(payloadName))
- if payload.IsValid() && payload.Interface() != nil {
- switch reader := payload.Interface().(type) {
- case io.ReadSeeker:
- r.SetReaderBody(reader)
- case []byte:
- r.SetBufferBody(reader)
- case string:
- r.SetStringBody(reader)
- default:
- r.Error = awserr.New(request.ErrCodeSerialization,
- "failed to encode REST request",
- fmt.Errorf("unknown payload type %s", payload.Type()))
- }
- }
- }
- }
- }
-}
-
-func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
- str, err := convertType(v, tag)
- if err == errValueNotSet {
- return nil
- } else if err != nil {
- return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
- }
-
- name = strings.TrimSpace(name)
- str = strings.TrimSpace(str)
-
- header.Add(name, str)
-
- return nil
-}
-
-func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
- prefix := tag.Get("locationName")
- for _, key := range v.MapKeys() {
- str, err := convertType(v.MapIndex(key), tag)
- if err == errValueNotSet {
- continue
- } else if err != nil {
- return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
-
- }
- keyStr := strings.TrimSpace(key.String())
- str = strings.TrimSpace(str)
-
- header.Add(prefix+keyStr, str)
- }
- return nil
-}
-
-func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
- value, err := convertType(v, tag)
- if err == errValueNotSet {
- return nil
- } else if err != nil {
- return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
- }
-
- u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1)
- u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1)
-
- u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1)
- u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1)
-
- return nil
-}
-
-func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
- switch value := v.Interface().(type) {
- case []*string:
- for _, item := range value {
- query.Add(name, *item)
- }
- case map[string]*string:
- for key, item := range value {
- query.Add(key, *item)
- }
- case map[string][]*string:
- for key, items := range value {
- for _, item := range items {
- query.Add(key, *item)
- }
- }
- default:
- str, err := convertType(v, tag)
- if err == errValueNotSet {
- return nil
- } else if err != nil {
- return awserr.New(request.ErrCodeSerialization, "failed to encode REST request", err)
- }
- query.Set(name, str)
- }
-
- return nil
-}
-
-func cleanPath(u *url.URL) {
- hasSlash := strings.HasSuffix(u.Path, "/")
-
- // clean up path, removing duplicate `/`
- u.Path = path.Clean(u.Path)
- u.RawPath = path.Clean(u.RawPath)
-
- if hasSlash && !strings.HasSuffix(u.Path, "/") {
- u.Path += "/"
- u.RawPath += "/"
- }
-}
-
-// EscapePath escapes part of a URL path in Amazon style
-func EscapePath(path string, encodeSep bool) string {
- var buf bytes.Buffer
- for i := 0; i < len(path); i++ {
- c := path[i]
- if noEscape[c] || (c == '/' && !encodeSep) {
- buf.WriteByte(c)
- } else {
- fmt.Fprintf(&buf, "%%%02X", c)
- }
- }
- return buf.String()
-}
-
-func convertType(v reflect.Value, tag reflect.StructTag) (str string, err error) {
- v = reflect.Indirect(v)
- if !v.IsValid() {
- return "", errValueNotSet
- }
-
- switch value := v.Interface().(type) {
- case string:
- if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" {
- value = base64.StdEncoding.EncodeToString([]byte(value))
- }
- str = value
- case []*string:
- if tag.Get("location") != "header" || tag.Get("enum") == "" {
- return "", fmt.Errorf("%T is only supported with location header and enum shapes", value)
- }
- if len(value) == 0 {
- return "", errValueNotSet
- }
-
- buff := &bytes.Buffer{}
- for i, sv := range value {
- if sv == nil || len(*sv) == 0 {
- continue
- }
- if i != 0 {
- buff.WriteRune(',')
- }
- item := *sv
- if strings.Index(item, `,`) != -1 || strings.Index(item, `"`) != -1 {
- item = strconv.Quote(item)
- }
- buff.WriteString(item)
- }
- str = string(buff.Bytes())
- case []byte:
- str = base64.StdEncoding.EncodeToString(value)
- case bool:
- str = strconv.FormatBool(value)
- case int64:
- str = strconv.FormatInt(value, 10)
- case float64:
- switch {
- case math.IsNaN(value):
- str = floatNaN
- case math.IsInf(value, 1):
- str = floatInf
- case math.IsInf(value, -1):
- str = floatNegInf
- default:
- str = strconv.FormatFloat(value, 'f', -1, 64)
- }
- case time.Time:
- format := tag.Get("timestampFormat")
- if len(format) == 0 {
- format = protocol.RFC822TimeFormatName
- if tag.Get("location") == "querystring" {
- format = protocol.ISO8601TimeFormatName
- }
- }
- str = protocol.FormatTime(format, value)
- case aws.JSONValue:
- if len(value) == 0 {
- return "", errValueNotSet
- }
- escaping := protocol.NoEscape
- if tag.Get("location") == "header" {
- escaping = protocol.Base64Escape
- }
- str, err = protocol.EncodeJSONValue(value, escaping)
- if err != nil {
- return "", fmt.Errorf("unable to encode JSONValue, %v", err)
- }
- default:
- err := fmt.Errorf("unsupported value for param %v (%s)", v.Interface(), v.Type())
- return "", err
- }
-
- return str, nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
deleted file mode 100644
index b54c99eda..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package rest
-
-import "reflect"
-
-// PayloadMember returns the payload field member of i if there is one, or nil.
-func PayloadMember(i interface{}) interface{} {
- if i == nil {
- return nil
- }
-
- v := reflect.ValueOf(i).Elem()
- if !v.IsValid() {
- return nil
- }
- if field, ok := v.Type().FieldByName("_"); ok {
- if payloadName := field.Tag.Get("payload"); payloadName != "" {
- field, _ := v.Type().FieldByName(payloadName)
- if field.Tag.Get("type") != "structure" {
- return nil
- }
-
- payload := v.FieldByName(payloadName)
- if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
- return payload.Interface()
- }
- }
- }
- return nil
-}
-
-const nopayloadPayloadType = "nopayload"
-
-// PayloadType returns the type of a payload field member of i if there is one,
-// or "".
-func PayloadType(i interface{}) string {
- v := reflect.Indirect(reflect.ValueOf(i))
- if !v.IsValid() {
- return ""
- }
-
- if field, ok := v.Type().FieldByName("_"); ok {
- if noPayload := field.Tag.Get(nopayloadPayloadType); noPayload != "" {
- return nopayloadPayloadType
- }
-
- if payloadName := field.Tag.Get("payload"); payloadName != "" {
- if member, ok := v.Type().FieldByName(payloadName); ok {
- return member.Tag.Get("type")
- }
- }
- }
-
- return ""
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
deleted file mode 100644
index 79fcf1699..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ /dev/null
@@ -1,276 +0,0 @@
-package rest
-
-import (
- "bytes"
- "encoding/base64"
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "net/http"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- awsStrings "github.com/aws/aws-sdk-go/internal/strings"
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests
-var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
-
-// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata
-var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
-
-// Unmarshal unmarshals the REST component of a response in a REST service.
-func Unmarshal(r *request.Request) {
- if r.DataFilled() {
- v := reflect.Indirect(reflect.ValueOf(r.Data))
- if err := unmarshalBody(r, v); err != nil {
- r.Error = err
- }
- }
-}
-
-// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
-func UnmarshalMeta(r *request.Request) {
- r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
- if r.RequestID == "" {
- // Alternative version of request id in the header
- r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
- }
- if r.DataFilled() {
- if err := UnmarshalResponse(r.HTTPResponse, r.Data, aws.BoolValue(r.Config.LowerCaseHeaderMaps)); err != nil {
- r.Error = err
- }
- }
-}
-
-// UnmarshalResponse attempts to unmarshal the REST response headers to
-// the data type passed in. The type must be a pointer. An error is returned
-// with any error unmarshaling the response into the target datatype.
-func UnmarshalResponse(resp *http.Response, data interface{}, lowerCaseHeaderMaps bool) error {
- v := reflect.Indirect(reflect.ValueOf(data))
- return unmarshalLocationElements(resp, v, lowerCaseHeaderMaps)
-}
-
-func unmarshalBody(r *request.Request, v reflect.Value) error {
- if field, ok := v.Type().FieldByName("_"); ok {
- if payloadName := field.Tag.Get("payload"); payloadName != "" {
- pfield, _ := v.Type().FieldByName(payloadName)
- if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
- payload := v.FieldByName(payloadName)
- if payload.IsValid() {
- switch payload.Interface().(type) {
- case []byte:
- defer r.HTTPResponse.Body.Close()
- b, err := ioutil.ReadAll(r.HTTPResponse.Body)
- if err != nil {
- return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
- }
-
- payload.Set(reflect.ValueOf(b))
-
- case *string:
- defer r.HTTPResponse.Body.Close()
- b, err := ioutil.ReadAll(r.HTTPResponse.Body)
- if err != nil {
- return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
- }
-
- str := string(b)
- payload.Set(reflect.ValueOf(&str))
-
- default:
- switch payload.Type().String() {
- case "io.ReadCloser":
- payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
-
- case "io.ReadSeeker":
- b, err := ioutil.ReadAll(r.HTTPResponse.Body)
- if err != nil {
- return awserr.New(request.ErrCodeSerialization,
- "failed to read response body", err)
- }
- payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b))))
-
- default:
- io.Copy(ioutil.Discard, r.HTTPResponse.Body)
- r.HTTPResponse.Body.Close()
- return awserr.New(request.ErrCodeSerialization,
- "failed to decode REST response",
- fmt.Errorf("unknown payload type %s", payload.Type()))
- }
- }
- }
- }
- }
- }
-
- return nil
-}
-
-func unmarshalLocationElements(resp *http.Response, v reflect.Value, lowerCaseHeaderMaps bool) error {
- for i := 0; i < v.NumField(); i++ {
- m, field := v.Field(i), v.Type().Field(i)
- if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
- continue
- }
-
- if m.IsValid() {
- name := field.Tag.Get("locationName")
- if name == "" {
- name = field.Name
- }
-
- switch field.Tag.Get("location") {
- case "statusCode":
- unmarshalStatusCode(m, resp.StatusCode)
-
- case "header":
- err := unmarshalHeader(m, resp.Header.Get(name), field.Tag)
- if err != nil {
- return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
- }
-
- case "headers":
- prefix := field.Tag.Get("locationName")
- err := unmarshalHeaderMap(m, resp.Header, prefix, lowerCaseHeaderMaps)
- if err != nil {
- return awserr.New(request.ErrCodeSerialization, "failed to decode REST response", err)
- }
- }
- }
- }
-
- return nil
-}
-
-func unmarshalStatusCode(v reflect.Value, statusCode int) {
- if !v.IsValid() {
- return
- }
-
- switch v.Interface().(type) {
- case *int64:
- s := int64(statusCode)
- v.Set(reflect.ValueOf(&s))
- }
-}
-
-func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string, normalize bool) error {
- if len(headers) == 0 {
- return nil
- }
- switch r.Interface().(type) {
- case map[string]*string: // we only support string map value types
- out := map[string]*string{}
- for k, v := range headers {
- if awsStrings.HasPrefixFold(k, prefix) {
- if normalize == true {
- k = strings.ToLower(k)
- } else {
- k = http.CanonicalHeaderKey(k)
- }
- out[k[len(prefix):]] = &v[0]
- }
- }
- if len(out) != 0 {
- r.Set(reflect.ValueOf(out))
- }
-
- }
- return nil
-}
-
-func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
- switch tag.Get("type") {
- case "jsonvalue":
- if len(header) == 0 {
- return nil
- }
- case "blob":
- if len(header) == 0 {
- return nil
- }
- default:
- if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
- return nil
- }
- }
-
- switch v.Interface().(type) {
- case *string:
- if tag.Get("suppressedJSONValue") == "true" && tag.Get("location") == "header" {
- b, err := base64.StdEncoding.DecodeString(header)
- if err != nil {
- return fmt.Errorf("failed to decode JSONValue, %v", err)
- }
- header = string(b)
- }
- v.Set(reflect.ValueOf(&header))
- case []byte:
- b, err := base64.StdEncoding.DecodeString(header)
- if err != nil {
- return err
- }
- v.Set(reflect.ValueOf(b))
- case *bool:
- b, err := strconv.ParseBool(header)
- if err != nil {
- return err
- }
- v.Set(reflect.ValueOf(&b))
- case *int64:
- i, err := strconv.ParseInt(header, 10, 64)
- if err != nil {
- return err
- }
- v.Set(reflect.ValueOf(&i))
- case *float64:
- var f float64
- switch {
- case strings.EqualFold(header, floatNaN):
- f = math.NaN()
- case strings.EqualFold(header, floatInf):
- f = math.Inf(1)
- case strings.EqualFold(header, floatNegInf):
- f = math.Inf(-1)
- default:
- var err error
- f, err = strconv.ParseFloat(header, 64)
- if err != nil {
- return err
- }
- }
- v.Set(reflect.ValueOf(&f))
- case *time.Time:
- format := tag.Get("timestampFormat")
- if len(format) == 0 {
- format = protocol.RFC822TimeFormatName
- }
- t, err := protocol.ParseTime(format, header)
- if err != nil {
- return err
- }
- v.Set(reflect.ValueOf(&t))
- case aws.JSONValue:
- escaping := protocol.NoEscape
- if tag.Get("location") == "header" {
- escaping = protocol.Base64Escape
- }
- m, err := protocol.DecodeJSONValue(header, escaping)
- if err != nil {
- return err
- }
- v.Set(reflect.ValueOf(m))
- default:
- err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
deleted file mode 100644
index 2e0e205af..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Package restjson provides RESTful JSON serialization of AWS
-// requests and responses.
-package restjson
-
-//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/input/rest-json.json build_test.go
-//go:generate go run -tags codegen ../../../private/model/cli/gen-protocol-tests ../../../models/protocol_tests/output/rest-json.json unmarshal_test.go
-
-import (
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
- "github.com/aws/aws-sdk-go/private/protocol/rest"
-)
-
-// BuildHandler is a named request handler for building restjson protocol
-// requests
-var BuildHandler = request.NamedHandler{
- Name: "awssdk.restjson.Build",
- Fn: Build,
-}
-
-// UnmarshalHandler is a named request handler for unmarshaling restjson
-// protocol requests
-var UnmarshalHandler = request.NamedHandler{
- Name: "awssdk.restjson.Unmarshal",
- Fn: Unmarshal,
-}
-
-// UnmarshalMetaHandler is a named request handler for unmarshaling restjson
-// protocol request metadata
-var UnmarshalMetaHandler = request.NamedHandler{
- Name: "awssdk.restjson.UnmarshalMeta",
- Fn: UnmarshalMeta,
-}
-
-// Build builds a request for the REST JSON protocol.
-func Build(r *request.Request) {
- rest.Build(r)
-
- if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
- if v := r.HTTPRequest.Header.Get("Content-Type"); len(v) == 0 {
- r.HTTPRequest.Header.Set("Content-Type", "application/json")
- }
- jsonrpc.Build(r)
- }
-}
-
-// Unmarshal unmarshals a response body for the REST JSON protocol.
-func Unmarshal(r *request.Request) {
- if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
- jsonrpc.Unmarshal(r)
- } else {
- rest.Unmarshal(r)
- }
-}
-
-// UnmarshalMeta unmarshals response headers for the REST JSON protocol.
-func UnmarshalMeta(r *request.Request) {
- rest.UnmarshalMeta(r)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
deleted file mode 100644
index 5366a646d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/unmarshal_error.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package restjson
-
-import (
- "bytes"
- "encoding/json"
- "io"
- "io/ioutil"
- "net/http"
- "strings"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
- "github.com/aws/aws-sdk-go/private/protocol/rest"
-)
-
-const (
- errorTypeHeader = "X-Amzn-Errortype"
- errorMessageHeader = "X-Amzn-Errormessage"
-)
-
-// UnmarshalTypedError provides unmarshaling errors API response errors
-// for both typed and untyped errors.
-type UnmarshalTypedError struct {
- exceptions map[string]func(protocol.ResponseMetadata) error
-}
-
-// NewUnmarshalTypedError returns an UnmarshalTypedError initialized for the
-// set of exception names to the error unmarshalers
-func NewUnmarshalTypedError(exceptions map[string]func(protocol.ResponseMetadata) error) *UnmarshalTypedError {
- return &UnmarshalTypedError{
- exceptions: exceptions,
- }
-}
-
-// UnmarshalError attempts to unmarshal the HTTP response error as a known
-// error type. If unable to unmarshal the error type, the generic SDK error
-// type will be used.
-func (u *UnmarshalTypedError) UnmarshalError(
- resp *http.Response,
- respMeta protocol.ResponseMetadata,
-) (error, error) {
- code, msg, err := unmarshalErrorInfo(resp)
- if err != nil {
- return nil, err
- }
-
- fn, ok := u.exceptions[code]
- if !ok {
- return awserr.NewRequestFailure(
- awserr.New(code, msg, nil),
- respMeta.StatusCode,
- respMeta.RequestID,
- ), nil
- }
-
- v := fn(respMeta)
- if err := jsonutil.UnmarshalJSONCaseInsensitive(v, resp.Body); err != nil {
- return nil, err
- }
-
- if err := rest.UnmarshalResponse(resp, v, true); err != nil {
- return nil, err
- }
-
- return v, nil
-}
-
-// UnmarshalErrorHandler is a named request handler for unmarshaling restjson
-// protocol request errors
-var UnmarshalErrorHandler = request.NamedHandler{
- Name: "awssdk.restjson.UnmarshalError",
- Fn: UnmarshalError,
-}
-
-// UnmarshalError unmarshals a response error for the REST JSON protocol.
-func UnmarshalError(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
-
- code, msg, err := unmarshalErrorInfo(r.HTTPResponse)
- if err != nil {
- r.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization, "failed to unmarshal response error", err),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- )
- return
- }
-
- r.Error = awserr.NewRequestFailure(
- awserr.New(code, msg, nil),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- )
-}
-
-type jsonErrorResponse struct {
- Type string `json:"__type"`
- Code string `json:"code"`
- Message string `json:"message"`
-}
-
-func (j *jsonErrorResponse) SanitizedCode() string {
- code := j.Code
- if len(j.Type) > 0 {
- code = j.Type
- }
- return sanitizeCode(code)
-}
-
-// Remove superfluous components from a restJson error code.
-// - If a : character is present, then take only the contents before the
-// first : character in the value.
-// - If a # character is present, then take only the contents after the first
-// # character in the value.
-//
-// All of the following error values resolve to FooError:
-// - FooError
-// - FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/
-// - aws.protocoltests.restjson#FooError
-// - aws.protocoltests.restjson#FooError:http://internal.amazon.com/coral/com.amazon.coral.validate/
-func sanitizeCode(code string) string {
- noColon := strings.SplitN(code, ":", 2)[0]
- hashSplit := strings.SplitN(noColon, "#", 2)
- return hashSplit[len(hashSplit)-1]
-}
-
-// attempt to garner error details from the response, preferring header values
-// when present
-func unmarshalErrorInfo(resp *http.Response) (code string, msg string, err error) {
- code = sanitizeCode(resp.Header.Get(errorTypeHeader))
- msg = resp.Header.Get(errorMessageHeader)
- if len(code) > 0 && len(msg) > 0 {
- return
- }
-
- // a modeled error will have to be re-deserialized later, so the body must
- // be preserved
- var buf bytes.Buffer
- tee := io.TeeReader(resp.Body, &buf)
- defer func() { resp.Body = ioutil.NopCloser(&buf) }()
-
- var jsonErr jsonErrorResponse
- if decodeErr := json.NewDecoder(tee).Decode(&jsonErr); decodeErr != nil && decodeErr != io.EOF {
- err = awserr.NewUnmarshalError(decodeErr, "failed to decode response body", buf.Bytes())
- return
- }
-
- if len(code) == 0 {
- code = jsonErr.SanitizedCode()
- }
- if len(msg) == 0 {
- msg = jsonErr.Message
- }
- return
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
deleted file mode 100644
index d9a4e7649..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/timestamp.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package protocol
-
-import (
- "bytes"
- "fmt"
- "math"
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go/internal/sdkmath"
-)
-
-// Names of time formats supported by the SDK
-const (
- RFC822TimeFormatName = "rfc822"
- ISO8601TimeFormatName = "iso8601"
- UnixTimeFormatName = "unixTimestamp"
-)
-
-// Time formats supported by the SDK
-// Output time is intended to not contain decimals
-const (
- // RFC 7231#section-7.1.1.1 timetamp format. e.g Tue, 29 Apr 2014 18:30:38 GMT
- RFC822TimeFormat = "Mon, 2 Jan 2006 15:04:05 GMT"
- rfc822TimeFormatSingleDigitDay = "Mon, _2 Jan 2006 15:04:05 GMT"
- rfc822TimeFormatSingleDigitDayTwoDigitYear = "Mon, _2 Jan 06 15:04:05 GMT"
-
- // This format is used for output time without seconds precision
- RFC822OutputTimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT"
-
- // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
- ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
- iso8601TimeFormatNoZ = "2006-01-02T15:04:05.999999999"
-
- // This format is used for output time with fractional second precision up to milliseconds
- ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
-)
-
-// IsKnownTimestampFormat returns if the timestamp format name
-// is know to the SDK's protocols.
-func IsKnownTimestampFormat(name string) bool {
- switch name {
- case RFC822TimeFormatName:
- fallthrough
- case ISO8601TimeFormatName:
- fallthrough
- case UnixTimeFormatName:
- return true
- default:
- return false
- }
-}
-
-// FormatTime returns a string value of the time.
-func FormatTime(name string, t time.Time) string {
- t = t.UTC().Truncate(time.Millisecond)
-
- switch name {
- case RFC822TimeFormatName:
- return t.Format(RFC822OutputTimeFormat)
- case ISO8601TimeFormatName:
- return t.Format(ISO8601OutputTimeFormat)
- case UnixTimeFormatName:
- ms := t.UnixNano() / int64(time.Millisecond)
- return strconv.FormatFloat(float64(ms)/1e3, 'f', -1, 64)
- default:
- panic("unknown timestamp format name, " + name)
- }
-}
-
-// ParseTime attempts to parse the time given the format. Returns
-// the time if it was able to be parsed, and fails otherwise.
-func ParseTime(formatName, value string) (time.Time, error) {
- switch formatName {
- case RFC822TimeFormatName: // Smithy HTTPDate format
- return tryParse(value,
- RFC822TimeFormat,
- rfc822TimeFormatSingleDigitDay,
- rfc822TimeFormatSingleDigitDayTwoDigitYear,
- time.RFC850,
- time.ANSIC,
- )
- case ISO8601TimeFormatName: // Smithy DateTime format
- return tryParse(value,
- ISO8601TimeFormat,
- iso8601TimeFormatNoZ,
- time.RFC3339Nano,
- time.RFC3339,
- )
- case UnixTimeFormatName:
- v, err := strconv.ParseFloat(value, 64)
- _, dec := math.Modf(v)
- dec = sdkmath.Round(dec*1e3) / 1e3 //Rounds 0.1229999 to 0.123
- if err != nil {
- return time.Time{}, err
- }
- return time.Unix(int64(v), int64(dec*(1e9))), nil
- default:
- panic("unknown timestamp format name, " + formatName)
- }
-}
-
-func tryParse(v string, formats ...string) (time.Time, error) {
- var errs parseErrors
- for _, f := range formats {
- t, err := time.Parse(f, v)
- if err != nil {
- errs = append(errs, parseError{
- Format: f,
- Err: err,
- })
- continue
- }
- return t, nil
- }
-
- return time.Time{}, fmt.Errorf("unable to parse time string, %v", errs)
-}
-
-type parseErrors []parseError
-
-func (es parseErrors) Error() string {
- var s bytes.Buffer
- for _, e := range es {
- fmt.Fprintf(&s, "\n * %q: %v", e.Format, e.Err)
- }
-
- return "parse errors:" + s.String()
-}
-
-type parseError struct {
- Format string
- Err error
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
deleted file mode 100644
index f614ef898..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package protocol
-
-import (
- "io"
- "io/ioutil"
-
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
-var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
-
-// UnmarshalDiscardBody is a request handler to empty a response's body and closing it.
-func UnmarshalDiscardBody(r *request.Request) {
- if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
- return
- }
-
- io.Copy(ioutil.Discard, r.HTTPResponse.Body)
- r.HTTPResponse.Body.Close()
-}
-
-// ResponseMetadata provides the SDK response metadata attributes.
-type ResponseMetadata struct {
- StatusCode int
- RequestID string
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
deleted file mode 100644
index cc857f136..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal_error.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package protocol
-
-import (
- "net/http"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// UnmarshalErrorHandler provides unmarshaling errors API response errors for
-// both typed and untyped errors.
-type UnmarshalErrorHandler struct {
- unmarshaler ErrorUnmarshaler
-}
-
-// ErrorUnmarshaler is an abstract interface for concrete implementations to
-// unmarshal protocol specific response errors.
-type ErrorUnmarshaler interface {
- UnmarshalError(*http.Response, ResponseMetadata) (error, error)
-}
-
-// NewUnmarshalErrorHandler returns an UnmarshalErrorHandler
-// initialized for the set of exception names to the error unmarshalers
-func NewUnmarshalErrorHandler(unmarshaler ErrorUnmarshaler) *UnmarshalErrorHandler {
- return &UnmarshalErrorHandler{
- unmarshaler: unmarshaler,
- }
-}
-
-// UnmarshalErrorHandlerName is the name of the named handler.
-const UnmarshalErrorHandlerName = "awssdk.protocol.UnmarshalError"
-
-// NamedHandler returns a NamedHandler for the unmarshaler using the set of
-// errors the unmarshaler was initialized for.
-func (u *UnmarshalErrorHandler) NamedHandler() request.NamedHandler {
- return request.NamedHandler{
- Name: UnmarshalErrorHandlerName,
- Fn: u.UnmarshalError,
- }
-}
-
-// UnmarshalError will attempt to unmarshal the API response's error message
-// into either a generic SDK error type, or a typed error corresponding to the
-// errors exception name.
-func (u *UnmarshalErrorHandler) UnmarshalError(r *request.Request) {
- defer r.HTTPResponse.Body.Close()
-
- respMeta := ResponseMetadata{
- StatusCode: r.HTTPResponse.StatusCode,
- RequestID: r.RequestID,
- }
-
- v, err := u.unmarshaler.UnmarshalError(r.HTTPResponse, respMeta)
- if err != nil {
- r.Error = awserr.NewRequestFailure(
- awserr.New(request.ErrCodeSerialization,
- "failed to unmarshal response error", err),
- respMeta.StatusCode,
- respMeta.RequestID,
- )
- return
- }
-
- r.Error = v
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
deleted file mode 100644
index 58c12bd8c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ /dev/null
@@ -1,345 +0,0 @@
-// Package xmlutil provides XML serialization of AWS requests and responses.
-package xmlutil
-
-import (
- "encoding/base64"
- "encoding/xml"
- "fmt"
- "math"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
- floatNaN = "NaN"
- floatInf = "Infinity"
- floatNegInf = "-Infinity"
-)
-
-// BuildXML will serialize params into an xml.Encoder. Error will be returned
-// if the serialization of any of the params or nested values fails.
-func BuildXML(params interface{}, e *xml.Encoder) error {
- return buildXML(params, e, false)
-}
-
-func buildXML(params interface{}, e *xml.Encoder, sorted bool) error {
- b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
- root := NewXMLElement(xml.Name{})
- if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
- return err
- }
- for _, c := range root.Children {
- for _, v := range c {
- return StructToXML(e, v, sorted)
- }
- }
- return nil
-}
-
-// Returns the reflection element of a value, if it is a pointer.
-func elemOf(value reflect.Value) reflect.Value {
- for value.Kind() == reflect.Ptr {
- value = value.Elem()
- }
- return value
-}
-
-// A xmlBuilder serializes values from Go code to XML
-type xmlBuilder struct {
- encoder *xml.Encoder
- namespaces map[string]string
-}
-
-// buildValue generic XMLNode builder for any type. Will build value for their specific type
-// struct, list, map, scalar.
-//
-// Also takes a "type" tag value to set what type a value should be converted to XMLNode as. If
-// type is not provided reflect will be used to determine the value's type.
-func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
- value = elemOf(value)
- if !value.IsValid() { // no need to handle zero values
- return nil
- } else if tag.Get("location") != "" { // don't handle non-body location values
- return nil
- }
-
- xml := tag.Get("xml")
- if len(xml) != 0 {
- name := strings.SplitAfterN(xml, ",", 2)[0]
- if name == "-" {
- return nil
- }
- }
-
- t := tag.Get("type")
- if t == "" {
- switch value.Kind() {
- case reflect.Struct:
- t = "structure"
- case reflect.Slice:
- t = "list"
- case reflect.Map:
- t = "map"
- }
- }
-
- switch t {
- case "structure":
- if field, ok := value.Type().FieldByName("_"); ok {
- tag = tag + reflect.StructTag(" ") + field.Tag
- }
- return b.buildStruct(value, current, tag)
- case "list":
- return b.buildList(value, current, tag)
- case "map":
- return b.buildMap(value, current, tag)
- default:
- return b.buildScalar(value, current, tag)
- }
-}
-
-// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
-// types are converted to XMLNodes also.
-func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
- if !value.IsValid() {
- return nil
- }
-
- // unwrap payloads
- if payload := tag.Get("payload"); payload != "" {
- field, _ := value.Type().FieldByName(payload)
- tag = field.Tag
- value = elemOf(value.FieldByName(payload))
-
- if !value.IsValid() {
- return nil
- }
- }
-
- child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
-
- // there is an xmlNamespace associated with this struct
- if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
- ns := xml.Attr{
- Name: xml.Name{Local: "xmlns"},
- Value: uri,
- }
- if prefix != "" {
- b.namespaces[prefix] = uri // register the namespace
- ns.Name.Local = "xmlns:" + prefix
- }
-
- child.Attr = append(child.Attr, ns)
- }
-
- var payloadFields, nonPayloadFields int
-
- t := value.Type()
- for i := 0; i < value.NumField(); i++ {
- member := elemOf(value.Field(i))
- field := t.Field(i)
-
- if field.PkgPath != "" {
- continue // ignore unexported fields
- }
- if field.Tag.Get("ignore") != "" {
- continue
- }
-
- mTag := field.Tag
- if mTag.Get("location") != "" { // skip non-body members
- nonPayloadFields++
- continue
- }
- payloadFields++
-
- if protocol.CanSetIdempotencyToken(value.Field(i), field) {
- token := protocol.GetIdempotencyToken()
- member = reflect.ValueOf(token)
- }
-
- memberName := mTag.Get("locationName")
- if memberName == "" {
- memberName = field.Name
- mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
- }
- if err := b.buildValue(member, child, mTag); err != nil {
- return err
- }
- }
-
- // Only case where the child shape is not added is if the shape only contains
- // non-payload fields, e.g headers/query.
- if !(payloadFields == 0 && nonPayloadFields > 0) {
- current.AddChild(child)
- }
-
- return nil
-}
-
-// buildList adds the value's list items to the current XMLNode as children nodes. All
-// nested values in the list are converted to XMLNodes also.
-func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
- if value.IsNil() { // don't build omitted lists
- return nil
- }
-
- // check for unflattened list member
- flattened := tag.Get("flattened") != ""
-
- xname := xml.Name{Local: tag.Get("locationName")}
- if flattened {
- for i := 0; i < value.Len(); i++ {
- child := NewXMLElement(xname)
- current.AddChild(child)
- if err := b.buildValue(value.Index(i), child, ""); err != nil {
- return err
- }
- }
- } else {
- list := NewXMLElement(xname)
- current.AddChild(list)
-
- for i := 0; i < value.Len(); i++ {
- iname := tag.Get("locationNameList")
- if iname == "" {
- iname = "member"
- }
-
- child := NewXMLElement(xml.Name{Local: iname})
- list.AddChild(child)
- if err := b.buildValue(value.Index(i), child, ""); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// buildMap adds the value's key/value pairs to the current XMLNode as children nodes. All
-// nested values in the map are converted to XMLNodes also.
-//
-// Error will be returned if it is unable to build the map's values into XMLNodes
-func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
- if value.IsNil() { // don't build omitted maps
- return nil
- }
-
- maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
- current.AddChild(maproot)
- current = maproot
-
- kname, vname := "key", "value"
- if n := tag.Get("locationNameKey"); n != "" {
- kname = n
- }
- if n := tag.Get("locationNameValue"); n != "" {
- vname = n
- }
-
- // sorting is not required for compliance, but it makes testing easier
- keys := make([]string, value.Len())
- for i, k := range value.MapKeys() {
- keys[i] = k.String()
- }
- sort.Strings(keys)
-
- for _, k := range keys {
- v := value.MapIndex(reflect.ValueOf(k))
-
- mapcur := current
- if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
- child := NewXMLElement(xml.Name{Local: "entry"})
- mapcur.AddChild(child)
- mapcur = child
- }
-
- kchild := NewXMLElement(xml.Name{Local: kname})
- kchild.Text = k
- vchild := NewXMLElement(xml.Name{Local: vname})
- mapcur.AddChild(kchild)
- mapcur.AddChild(vchild)
-
- if err := b.buildValue(v, vchild, ""); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// buildScalar will convert the value into a string and append it as a attribute or child
-// of the current XMLNode.
-//
-// The value will be added as an attribute if tag contains a "xmlAttribute" attribute value.
-//
-// Error will be returned if the value type is unsupported.
-func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
- var str string
-
- switch converted := value.Interface().(type) {
- case string:
- str = converted
- case []byte:
- if !value.IsNil() {
- str = base64.StdEncoding.EncodeToString(converted)
- }
- case bool:
- str = strconv.FormatBool(converted)
- case int64:
- str = strconv.FormatInt(converted, 10)
- case int:
- str = strconv.Itoa(converted)
- case float64:
- switch {
- case math.IsNaN(converted):
- str = floatNaN
- case math.IsInf(converted, 1):
- str = floatInf
- case math.IsInf(converted, -1):
- str = floatNegInf
- default:
- str = strconv.FormatFloat(converted, 'f', -1, 64)
- }
- case float32:
- // The SDK doesn't render float32 values in types, only float64. This case would never be hit currently.
- asFloat64 := float64(converted)
- switch {
- case math.IsNaN(asFloat64):
- str = floatNaN
- case math.IsInf(asFloat64, 1):
- str = floatInf
- case math.IsInf(asFloat64, -1):
- str = floatNegInf
- default:
- str = strconv.FormatFloat(asFloat64, 'f', -1, 32)
- }
- case time.Time:
- format := tag.Get("timestampFormat")
- if len(format) == 0 {
- format = protocol.ISO8601TimeFormatName
- }
-
- str = protocol.FormatTime(format, converted)
- default:
- return fmt.Errorf("unsupported value for param %s: %v (%s)",
- tag.Get("locationName"), value.Interface(), value.Type().Name())
- }
-
- xname := xml.Name{Local: tag.Get("locationName")}
- if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
- attr := xml.Attr{Name: xname, Value: str}
- current.Attr = append(current.Attr, attr)
- } else if len(xname.Local) == 0 {
- current.Text = str
- } else { // regular text node
- current.AddChild(&XMLNode{Name: xname, Text: str})
- }
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
deleted file mode 100644
index c1a511851..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/sort.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package xmlutil
-
-import (
- "encoding/xml"
- "strings"
-)
-
-type xmlAttrSlice []xml.Attr
-
-func (x xmlAttrSlice) Len() int {
- return len(x)
-}
-
-func (x xmlAttrSlice) Less(i, j int) bool {
- spaceI, spaceJ := x[i].Name.Space, x[j].Name.Space
- localI, localJ := x[i].Name.Local, x[j].Name.Local
- valueI, valueJ := x[i].Value, x[j].Value
-
- spaceCmp := strings.Compare(spaceI, spaceJ)
- localCmp := strings.Compare(localI, localJ)
- valueCmp := strings.Compare(valueI, valueJ)
-
- if spaceCmp == -1 || (spaceCmp == 0 && (localCmp == -1 || (localCmp == 0 && valueCmp == -1))) {
- return true
- }
-
- return false
-}
-
-func (x xmlAttrSlice) Swap(i, j int) {
- x[i], x[j] = x[j], x[i]
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
deleted file mode 100644
index 44a580a94..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
+++ /dev/null
@@ -1,311 +0,0 @@
-package xmlutil
-
-import (
- "bytes"
- "encoding/base64"
- "encoding/xml"
- "fmt"
- "io"
- "math"
- "reflect"
- "strconv"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-// UnmarshalXMLError unmarshals the XML error from the stream into the value
-// type specified. The value must be a pointer. If the message fails to
-// unmarshal, the message content will be included in the returned error as a
-// awserr.UnmarshalError.
-func UnmarshalXMLError(v interface{}, stream io.Reader) error {
- var errBuf bytes.Buffer
- body := io.TeeReader(stream, &errBuf)
-
- err := xml.NewDecoder(body).Decode(v)
- if err != nil && err != io.EOF {
- return awserr.NewUnmarshalError(err,
- "failed to unmarshal error message", errBuf.Bytes())
- }
-
- return nil
-}
-
-// UnmarshalXML deserializes an xml.Decoder into the container v. V
-// needs to match the shape of the XML expected to be decoded.
-// If the shape doesn't match unmarshaling will fail.
-func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
- n, err := XMLToStruct(d, nil)
- if err != nil {
- return err
- }
- if n.Children != nil {
- for _, root := range n.Children {
- for _, c := range root {
- if wrappedChild, ok := c.Children[wrapper]; ok {
- c = wrappedChild[0] // pull out wrapped element
- }
-
- err = parse(reflect.ValueOf(v), c, "")
- if err != nil {
- if err == io.EOF {
- return nil
- }
- return err
- }
- }
- }
- return nil
- }
- return nil
-}
-
-// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or reflect
-// will be used to determine the type from r.
-func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
- xml := tag.Get("xml")
- if len(xml) != 0 {
- name := strings.SplitAfterN(xml, ",", 2)[0]
- if name == "-" {
- return nil
- }
- }
-
- rtype := r.Type()
- if rtype.Kind() == reflect.Ptr {
- rtype = rtype.Elem() // check kind of actual element type
- }
-
- t := tag.Get("type")
- if t == "" {
- switch rtype.Kind() {
- case reflect.Struct:
- // also it can't be a time object
- if _, ok := r.Interface().(*time.Time); !ok {
- t = "structure"
- }
- case reflect.Slice:
- // also it can't be a byte slice
- if _, ok := r.Interface().([]byte); !ok {
- t = "list"
- }
- case reflect.Map:
- t = "map"
- }
- }
-
- switch t {
- case "structure":
- if field, ok := rtype.FieldByName("_"); ok {
- tag = field.Tag
- }
- return parseStruct(r, node, tag)
- case "list":
- return parseList(r, node, tag)
- case "map":
- return parseMap(r, node, tag)
- default:
- return parseScalar(r, node, tag)
- }
-}
-
-// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
-// types in the structure will also be deserialized.
-func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
- t := r.Type()
- if r.Kind() == reflect.Ptr {
- if r.IsNil() { // create the structure if it's nil
- s := reflect.New(r.Type().Elem())
- r.Set(s)
- r = s
- }
-
- r = r.Elem()
- t = t.Elem()
- }
-
- // unwrap any payloads
- if payload := tag.Get("payload"); payload != "" {
- field, _ := t.FieldByName(payload)
- return parseStruct(r.FieldByName(payload), node, field.Tag)
- }
-
- for i := 0; i < t.NumField(); i++ {
- field := t.Field(i)
- if c := field.Name[0:1]; strings.ToLower(c) == c {
- continue // ignore unexported fields
- }
-
- // figure out what this field is called
- name := field.Name
- if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
- name = field.Tag.Get("locationNameList")
- } else if locName := field.Tag.Get("locationName"); locName != "" {
- name = locName
- }
-
- // try to find the field by name in elements
- elems := node.Children[name]
-
- if elems == nil { // try to find the field in attributes
- if val, ok := node.findElem(name); ok {
- elems = []*XMLNode{{Text: val}}
- }
- }
-
- member := r.FieldByName(field.Name)
- for _, elem := range elems {
- err := parse(member, elem, field.Tag)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// parseList deserializes a list of values from an XML node. Each list entry
-// will also be deserialized.
-func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
- t := r.Type()
-
- if tag.Get("flattened") == "" { // look at all item entries
- mname := "member"
- if name := tag.Get("locationNameList"); name != "" {
- mname = name
- }
-
- if Children, ok := node.Children[mname]; ok {
- if r.IsNil() {
- r.Set(reflect.MakeSlice(t, len(Children), len(Children)))
- }
-
- for i, c := range Children {
- err := parse(r.Index(i), c, "")
- if err != nil {
- return err
- }
- }
- }
- } else { // flattened list means this is a single element
- if r.IsNil() {
- r.Set(reflect.MakeSlice(t, 0, 0))
- }
-
- childR := reflect.Zero(t.Elem())
- r.Set(reflect.Append(r, childR))
- err := parse(r.Index(r.Len()-1), node, "")
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
-// will also be deserialized as map entries.
-func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
- if r.IsNil() {
- r.Set(reflect.MakeMap(r.Type()))
- }
-
- if tag.Get("flattened") == "" { // look at all child entries
- for _, entry := range node.Children["entry"] {
- parseMapEntry(r, entry, tag)
- }
- } else { // this element is itself an entry
- parseMapEntry(r, node, tag)
- }
-
- return nil
-}
-
-// parseMapEntry deserializes a map entry from a XML node.
-func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
- kname, vname := "key", "value"
- if n := tag.Get("locationNameKey"); n != "" {
- kname = n
- }
- if n := tag.Get("locationNameValue"); n != "" {
- vname = n
- }
-
- keys, ok := node.Children[kname]
- values := node.Children[vname]
- if ok {
- for i, key := range keys {
- keyR := reflect.ValueOf(key.Text)
- value := values[i]
- valueR := reflect.New(r.Type().Elem()).Elem()
-
- parse(valueR, value, "")
- r.SetMapIndex(keyR, valueR)
- }
- }
- return nil
-}
-
-// parseScaller deserializes an XMLNode value into a concrete type based on the
-// interface type of r.
-//
-// Error is returned if the deserialization fails due to invalid type conversion,
-// or unsupported interface type.
-func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
- switch r.Interface().(type) {
- case *string:
- r.Set(reflect.ValueOf(&node.Text))
- return nil
- case []byte:
- b, err := base64.StdEncoding.DecodeString(node.Text)
- if err != nil {
- return err
- }
- r.Set(reflect.ValueOf(b))
- case *bool:
- v, err := strconv.ParseBool(node.Text)
- if err != nil {
- return err
- }
- r.Set(reflect.ValueOf(&v))
- case *int64:
- v, err := strconv.ParseInt(node.Text, 10, 64)
- if err != nil {
- return err
- }
- r.Set(reflect.ValueOf(&v))
- case *float64:
- var v float64
- switch {
- case strings.EqualFold(node.Text, floatNaN):
- v = math.NaN()
- case strings.EqualFold(node.Text, floatInf):
- v = math.Inf(1)
- case strings.EqualFold(node.Text, floatNegInf):
- v = math.Inf(-1)
- default:
- var err error
- v, err = strconv.ParseFloat(node.Text, 64)
- if err != nil {
- return err
- }
- }
- r.Set(reflect.ValueOf(&v))
- case *time.Time:
- format := tag.Get("timestampFormat")
- if len(format) == 0 {
- format = protocol.ISO8601TimeFormatName
- }
-
- t, err := protocol.ParseTime(format, node.Text)
- if err != nil {
- return err
- }
- r.Set(reflect.ValueOf(&t))
- default:
- return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
- }
- return nil
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
deleted file mode 100644
index c85b79fdd..000000000
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package xmlutil
-
-import (
- "encoding/xml"
- "fmt"
- "io"
- "sort"
-)
-
-// A XMLNode contains the values to be encoded or decoded.
-type XMLNode struct {
- Name xml.Name `json:",omitempty"`
- Children map[string][]*XMLNode `json:",omitempty"`
- Text string `json:",omitempty"`
- Attr []xml.Attr `json:",omitempty"`
-
- namespaces map[string]string
- parent *XMLNode
-}
-
-// textEncoder is a string type alias that implemnts the TextMarshaler interface.
-// This alias type is used to ensure that the line feed (\n) (U+000A) is escaped.
-type textEncoder string
-
-func (t textEncoder) MarshalText() ([]byte, error) {
- return []byte(t), nil
-}
-
-// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
-func NewXMLElement(name xml.Name) *XMLNode {
- return &XMLNode{
- Name: name,
- Children: map[string][]*XMLNode{},
- Attr: []xml.Attr{},
- }
-}
-
-// AddChild adds child to the XMLNode.
-func (n *XMLNode) AddChild(child *XMLNode) {
- child.parent = n
- if _, ok := n.Children[child.Name.Local]; !ok {
- n.Children[child.Name.Local] = []*XMLNode{}
- }
- n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
-}
-
-// XMLToStruct converts a xml.Decoder stream to XMLNode with nested values.
-func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
- out := &XMLNode{}
- for {
- tok, err := d.Token()
- if err != nil {
- if err == io.EOF {
- break
- } else {
- return out, err
- }
- }
-
- if tok == nil {
- break
- }
-
- switch typed := tok.(type) {
- case xml.CharData:
- out.Text = string(typed.Copy())
- case xml.StartElement:
- el := typed.Copy()
- out.Attr = el.Attr
- if out.Children == nil {
- out.Children = map[string][]*XMLNode{}
- }
-
- name := typed.Name.Local
- slice := out.Children[name]
- if slice == nil {
- slice = []*XMLNode{}
- }
- node, e := XMLToStruct(d, &el)
- out.findNamespaces()
- if e != nil {
- return out, e
- }
- node.Name = typed.Name
- node.findNamespaces()
- tempOut := *out
- // Save into a temp variable, simply because out gets squashed during
- // loop iterations
- node.parent = &tempOut
- slice = append(slice, node)
- out.Children[name] = slice
- case xml.EndElement:
- if s != nil && s.Name.Local == typed.Name.Local { // matching end token
- return out, nil
- }
- out = &XMLNode{}
- }
- }
- return out, nil
-}
-
-func (n *XMLNode) findNamespaces() {
- ns := map[string]string{}
- for _, a := range n.Attr {
- if a.Name.Space == "xmlns" {
- ns[a.Value] = a.Name.Local
- }
- }
-
- n.namespaces = ns
-}
-
-func (n *XMLNode) findElem(name string) (string, bool) {
- for node := n; node != nil; node = node.parent {
- for _, a := range node.Attr {
- namespace := a.Name.Space
- if v, ok := node.namespaces[namespace]; ok {
- namespace = v
- }
- if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) {
- return a.Value, true
- }
- }
- }
- return "", false
-}
-
-// StructToXML writes an XMLNode to a xml.Encoder as tokens.
-func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
- // Sort Attributes
- attrs := node.Attr
- if sorted {
- sortedAttrs := make([]xml.Attr, len(attrs))
- for _, k := range node.Attr {
- sortedAttrs = append(sortedAttrs, k)
- }
- sort.Sort(xmlAttrSlice(sortedAttrs))
- attrs = sortedAttrs
- }
-
- startElement := xml.StartElement{Name: node.Name, Attr: attrs}
-
- if node.Text != "" {
- e.EncodeElement(textEncoder(node.Text), startElement)
- return e.Flush()
- }
-
- e.EncodeToken(startElement)
-
- if sorted {
- sortedNames := []string{}
- for k := range node.Children {
- sortedNames = append(sortedNames, k)
- }
- sort.Strings(sortedNames)
-
- for _, k := range sortedNames {
- for _, v := range node.Children[k] {
- StructToXML(e, v, sorted)
- }
- }
- } else {
- for _, c := range node.Children {
- for _, v := range c {
- StructToXML(e, v, sorted)
- }
- }
- }
-
- e.EncodeToken(startElement.End())
-
- return e.Flush()
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go
deleted file mode 100644
index 5cae6505d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go
+++ /dev/null
@@ -1,29214 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package dynamodb
-
-import (
- "fmt"
- "net/url"
- "strings"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/crr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
-)
-
-const opBatchExecuteStatement = "BatchExecuteStatement"
-
-// BatchExecuteStatementRequest generates a "aws/request.Request" representing the
-// client's request for the BatchExecuteStatement operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See BatchExecuteStatement for more information on using the BatchExecuteStatement
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the BatchExecuteStatementRequest method.
-// req, resp := client.BatchExecuteStatementRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement
-func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInput) (req *request.Request, output *BatchExecuteStatementOutput) {
- op := &request.Operation{
- Name: opBatchExecuteStatement,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &BatchExecuteStatementInput{}
- }
-
- output = &BatchExecuteStatementOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// BatchExecuteStatement API operation for Amazon DynamoDB.
-//
-// This operation allows you to perform batch reads or writes on data stored
-// in DynamoDB, using PartiQL. Each read statement in a BatchExecuteStatement
-// must specify an equality condition on all key attributes. This enforces that
-// each SELECT statement in a batch returns at most a single item. For more
-// information, see Running batch operations with PartiQL for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ql-reference.multiplestatements.batching.html).
-//
-// The entire batch must consist of either read statements or write statements,
-// you cannot mix both in one batch.
-//
-// A HTTP 200 response does not mean that all statements in the BatchExecuteStatement
-// succeeded. Error details for individual statements can be found under the
-// Error (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchStatementResponse.html#DDB-Type-BatchStatementResponse-Error)
-// field of the BatchStatementResponse for each statement.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation BatchExecuteStatement for usage and error information.
-//
-// Returned Error Types:
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement
-func (c *DynamoDB) BatchExecuteStatement(input *BatchExecuteStatementInput) (*BatchExecuteStatementOutput, error) {
- req, out := c.BatchExecuteStatementRequest(input)
- return out, req.Send()
-}
-
-// BatchExecuteStatementWithContext is the same as BatchExecuteStatement with the addition of
-// the ability to pass a context and additional request options.
-//
-// See BatchExecuteStatement for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) BatchExecuteStatementWithContext(ctx aws.Context, input *BatchExecuteStatementInput, opts ...request.Option) (*BatchExecuteStatementOutput, error) {
- req, out := c.BatchExecuteStatementRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opBatchGetItem = "BatchGetItem"
-
-// BatchGetItemRequest generates a "aws/request.Request" representing the
-// client's request for the BatchGetItem operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See BatchGetItem for more information on using the BatchGetItem
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the BatchGetItemRequest method.
-// req, resp := client.BatchGetItemRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem
-func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) {
- op := &request.Operation{
- Name: opBatchGetItem,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"RequestItems"},
- OutputTokens: []string{"UnprocessedKeys"},
- LimitToken: "",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &BatchGetItemInput{}
- }
-
- output = &BatchGetItemOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// BatchGetItem API operation for Amazon DynamoDB.
-//
-// The BatchGetItem operation returns the attributes of one or more items from
-// one or more tables. You identify requested items by primary key.
-//
-// A single operation can retrieve up to 16 MB of data, which can contain as
-// many as 100 items. BatchGetItem returns a partial result if the response
-// size limit is exceeded, the table's provisioned throughput is exceeded, more
-// than 1MB per partition is requested, or an internal processing failure occurs.
-// If a partial result is returned, the operation returns a value for UnprocessedKeys.
-// You can use this value to retry the operation starting with the next item
-// to get.
-//
-// If you request more than 100 items, BatchGetItem returns a ValidationException
-// with the message "Too many items requested for the BatchGetItem call."
-//
-// For example, if you ask to retrieve 100 items, but each individual item is
-// 300 KB in size, the system returns 52 items (so as not to exceed the 16 MB
-// limit). It also returns an appropriate UnprocessedKeys value so you can get
-// the next page of results. If desired, your application can include its own
-// logic to assemble the pages of results into one dataset.
-//
-// If none of the items can be processed due to insufficient provisioned throughput
-// on all of the tables in the request, then BatchGetItem returns a ProvisionedThroughputExceededException.
-// If at least one of the items is successfully processed, then BatchGetItem
-// completes successfully, while returning the keys of the unread items in UnprocessedKeys.
-//
-// If DynamoDB returns any unprocessed items, you should retry the batch operation
-// on those items. However, we strongly recommend that you use an exponential
-// backoff algorithm. If you retry the batch operation immediately, the underlying
-// read or write requests can still fail due to throttling on the individual
-// tables. If you delay the batch operation using exponential backoff, the individual
-// requests in the batch are much more likely to succeed.
-//
-// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#BatchOperations)
-// in the Amazon DynamoDB Developer Guide.
-//
-// By default, BatchGetItem performs eventually consistent reads on every table
-// in the request. If you want strongly consistent reads instead, you can set
-// ConsistentRead to true for any or all tables.
-//
-// In order to minimize response latency, BatchGetItem may retrieve items in
-// parallel.
-//
-// When designing your application, keep in mind that DynamoDB does not return
-// items in any particular order. To help parse the response by item, include
-// the primary key values for the items in your request in the ProjectionExpression
-// parameter.
-//
-// If a requested item does not exist, it is not returned in the result. Requests
-// for nonexistent items consume the minimum read capacity units according to
-// the type of read. For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#CapacityUnitCalculations)
-// in the Amazon DynamoDB Developer Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation BatchGetItem for usage and error information.
-//
-// Returned Error Types:
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchGetItem
-func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, error) {
- req, out := c.BatchGetItemRequest(input)
- return out, req.Send()
-}
-
-// BatchGetItemWithContext is the same as BatchGetItem with the addition of
-// the ability to pass a context and additional request options.
-//
-// See BatchGetItem for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) BatchGetItemWithContext(ctx aws.Context, input *BatchGetItemInput, opts ...request.Option) (*BatchGetItemOutput, error) {
- req, out := c.BatchGetItemRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// BatchGetItemPages iterates over the pages of a BatchGetItem operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See BatchGetItem method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a BatchGetItem operation.
-// pageNum := 0
-// err := client.BatchGetItemPages(params,
-// func(page *dynamodb.BatchGetItemOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool) error {
- return c.BatchGetItemPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// BatchGetItemPagesWithContext same as BatchGetItemPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) BatchGetItemPagesWithContext(ctx aws.Context, input *BatchGetItemInput, fn func(*BatchGetItemOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *BatchGetItemInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.BatchGetItemRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*BatchGetItemOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opBatchWriteItem = "BatchWriteItem"
-
-// BatchWriteItemRequest generates a "aws/request.Request" representing the
-// client's request for the BatchWriteItem operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See BatchWriteItem for more information on using the BatchWriteItem
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the BatchWriteItemRequest method.
-// req, resp := client.BatchWriteItemRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem
-func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) {
- op := &request.Operation{
- Name: opBatchWriteItem,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &BatchWriteItemInput{}
- }
-
- output = &BatchWriteItemOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// BatchWriteItem API operation for Amazon DynamoDB.
-//
-// The BatchWriteItem operation puts or deletes multiple items in one or more
-// tables. A single call to BatchWriteItem can transmit up to 16MB of data over
-// the network, consisting of up to 25 item put or delete operations. While
-// individual items can be up to 400 KB once stored, it's important to note
-// that an item's representation might be greater than 400KB while being sent
-// in DynamoDB's JSON format for the API call. For more details on this distinction,
-// see Naming Rules and Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html).
-//
-// BatchWriteItem cannot update items. If you perform a BatchWriteItem operation
-// on an existing item, that item's values will be overwritten by the operation
-// and it will appear like it was updated. To update items, we recommend you
-// use the UpdateItem action.
-//
-// The individual PutItem and DeleteItem operations specified in BatchWriteItem
-// are atomic; however BatchWriteItem as a whole is not. If any requested operations
-// fail because the table's provisioned throughput is exceeded or an internal
-// processing failure occurs, the failed operations are returned in the UnprocessedItems
-// response parameter. You can investigate and optionally resend the requests.
-// Typically, you would call BatchWriteItem in a loop. Each iteration would
-// check for unprocessed items and submit a new BatchWriteItem request with
-// those unprocessed items until all items have been processed.
-//
-// For tables and indexes with provisioned capacity, if none of the items can
-// be processed due to insufficient provisioned throughput on all of the tables
-// in the request, then BatchWriteItem returns a ProvisionedThroughputExceededException.
-// For all tables and indexes, if none of the items can be processed due to
-// other throttling scenarios (such as exceeding partition level limits), then
-// BatchWriteItem returns a ThrottlingException.
-//
-// If DynamoDB returns any unprocessed items, you should retry the batch operation
-// on those items. However, we strongly recommend that you use an exponential
-// backoff algorithm. If you retry the batch operation immediately, the underlying
-// read or write requests can still fail due to throttling on the individual
-// tables. If you delay the batch operation using exponential backoff, the individual
-// requests in the batch are much more likely to succeed.
-//
-// For more information, see Batch Operations and Error Handling (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#Programming.Errors.BatchOperations)
-// in the Amazon DynamoDB Developer Guide.
-//
-// With BatchWriteItem, you can efficiently write or delete large amounts of
-// data, such as from Amazon EMR, or copy data from another database into DynamoDB.
-// In order to improve performance with these large-scale operations, BatchWriteItem
-// does not behave in the same way as individual PutItem and DeleteItem calls
-// would. For example, you cannot specify conditions on individual put and delete
-// requests, and BatchWriteItem does not return deleted items in the response.
-//
-// If you use a programming language that supports concurrency, you can use
-// threads to write items in parallel. Your application must include the necessary
-// logic to manage the threads. With languages that don't support threading,
-// you must update or delete the specified items one at a time. In both situations,
-// BatchWriteItem performs the specified put and delete operations in parallel,
-// giving you the power of the thread pool approach without having to introduce
-// complexity into your application.
-//
-// Parallel processing reduces latency, but each specified put and delete request
-// consumes the same number of write capacity units whether it is processed
-// in parallel or not. Delete operations on nonexistent items consume one write
-// capacity unit.
-//
-// If one or more of the following is true, DynamoDB rejects the entire batch
-// write operation:
-//
-// - One or more tables specified in the BatchWriteItem request does not
-// exist.
-//
-// - Primary key attributes specified on an item in the request do not match
-// those in the corresponding table's primary key schema.
-//
-// - You try to perform multiple operations on the same item in the same
-// BatchWriteItem request. For example, you cannot put and delete the same
-// item in the same BatchWriteItem request.
-//
-// - Your request contains at least two items with identical hash and range
-// keys (which essentially is two put operations).
-//
-// - There are more than 25 requests in the batch.
-//
-// - Any individual item in a batch exceeds 400 KB.
-//
-// - The total request size exceeds 16 MB.
-//
-// - Any individual items with keys exceeding the key length limits. For
-// a partition key, the limit is 2048 bytes and for a sort key, the limit
-// is 1024 bytes.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation BatchWriteItem for usage and error information.
-//
-// Returned Error Types:
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - ItemCollectionSizeLimitExceededException
-// An item collection is too large. This exception is only returned for tables
-// that have one or more local secondary indexes.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchWriteItem
-func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) {
- req, out := c.BatchWriteItemRequest(input)
- return out, req.Send()
-}
-
-// BatchWriteItemWithContext is the same as BatchWriteItem with the addition of
-// the ability to pass a context and additional request options.
-//
-// See BatchWriteItem for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) BatchWriteItemWithContext(ctx aws.Context, input *BatchWriteItemInput, opts ...request.Option) (*BatchWriteItemOutput, error) {
- req, out := c.BatchWriteItemRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opCreateBackup = "CreateBackup"
-
-// CreateBackupRequest generates a "aws/request.Request" representing the
-// client's request for the CreateBackup operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See CreateBackup for more information on using the CreateBackup
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the CreateBackupRequest method.
-// req, resp := client.CreateBackupRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup
-func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.Request, output *CreateBackupOutput) {
- op := &request.Operation{
- Name: opCreateBackup,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &CreateBackupInput{}
- }
-
- output = &CreateBackupOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// CreateBackup API operation for Amazon DynamoDB.
-//
-// Creates a backup for an existing table.
-//
-// Each time you create an on-demand backup, the entire table data is backed
-// up. There is no limit to the number of on-demand backups that can be taken.
-//
-// When you create an on-demand backup, a time marker of the request is cataloged,
-// and the backup is created asynchronously, by applying all changes until the
-// time of the request to the last full table snapshot. Backup requests are
-// processed instantaneously and become available for restore within minutes.
-//
-// You can call CreateBackup at a maximum rate of 50 times per second.
-//
-// All backups in DynamoDB work without consuming any provisioned throughput
-// on the table.
-//
-// If you submit a backup request on 2018-12-14 at 14:25:00, the backup is guaranteed
-// to contain all data committed to the table up to 14:24:00, and data committed
-// after 14:26:00 will not be. The backup might contain data modifications made
-// between 14:24:00 and 14:26:00. On-demand backup does not support causal consistency.
-//
-// Along with data, the following are also included on the backups:
-//
-// - Global secondary indexes (GSIs)
-//
-// - Local secondary indexes (LSIs)
-//
-// - Streams
-//
-// - Provisioned read and write capacity
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation CreateBackup for usage and error information.
-//
-// Returned Error Types:
-//
-// - TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account or the subscriber is operating in the wrong Amazon Web
-// Services Region.
-//
-// - TableInUseException
-// A target table with the specified name is either being created or deleted.
-//
-// - ContinuousBackupsUnavailableException
-// Backups have not yet been enabled for this table.
-//
-// - BackupInUseException
-// There is another ongoing conflicting backup control plane operation on the
-// table. The backup is either being created, deleted or restored to a table.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateBackup
-func (c *DynamoDB) CreateBackup(input *CreateBackupInput) (*CreateBackupOutput, error) {
- req, out := c.CreateBackupRequest(input)
- return out, req.Send()
-}
-
-// CreateBackupWithContext is the same as CreateBackup with the addition of
-// the ability to pass a context and additional request options.
-//
-// See CreateBackup for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) CreateBackupWithContext(ctx aws.Context, input *CreateBackupInput, opts ...request.Option) (*CreateBackupOutput, error) {
- req, out := c.CreateBackupRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opCreateGlobalTable = "CreateGlobalTable"
-
-// CreateGlobalTableRequest generates a "aws/request.Request" representing the
-// client's request for the CreateGlobalTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See CreateGlobalTable for more information on using the CreateGlobalTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the CreateGlobalTableRequest method.
-// req, resp := client.CreateGlobalTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable
-func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req *request.Request, output *CreateGlobalTableOutput) {
- op := &request.Operation{
- Name: opCreateGlobalTable,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &CreateGlobalTableInput{}
- }
-
- output = &CreateGlobalTableOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// CreateGlobalTable API operation for Amazon DynamoDB.
-//
-// Creates a global table from an existing table. A global table creates a replication
-// relationship between two or more DynamoDB tables with the same table name
-// in the provided Regions.
-//
-// This documentation is for version 2017.11.29 (Legacy) of global tables, which
-// should be avoided for new global tables. Customers should use Global Tables
-// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
-// when possible, because it provides greater flexibility, higher efficiency,
-// and consumes less write capacity than 2017.11.29 (Legacy).
-//
-// To determine which version you're using, see Determining the global table
-// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html).
-// To update existing global tables from version 2017.11.29 (Legacy) to version
-// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html).
-//
-// If you want to add a new replica table to a global table, each of the following
-// conditions must be true:
-//
-// - The table must have the same primary key as all of the other replicas.
-//
-// - The table must have the same name as all of the other replicas.
-//
-// - The table must have DynamoDB Streams enabled, with the stream containing
-// both the new and the old images of the item.
-//
-// - None of the replica tables in the global table can contain any data.
-//
-// If global secondary indexes are specified, then the following conditions
-// must also be met:
-//
-// - The global secondary indexes must have the same name.
-//
-// - The global secondary indexes must have the same hash key and sort key
-// (if present).
-//
-// If local secondary indexes are specified, then the following conditions must
-// also be met:
-//
-// - The local secondary indexes must have the same name.
-//
-// - The local secondary indexes must have the same hash key and sort key
-// (if present).
-//
-// Write capacity settings should be set consistently across your replica tables
-// and secondary indexes. DynamoDB strongly recommends enabling auto scaling
-// to manage the write capacity settings for all of your global tables replicas
-// and indexes.
-//
-// If you prefer to manage write capacity settings manually, you should provision
-// equal replicated write capacity units to your replica tables. You should
-// also provision equal replicated write capacity units to matching secondary
-// indexes across your global table.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation CreateGlobalTable for usage and error information.
-//
-// Returned Error Types:
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - GlobalTableAlreadyExistsException
-// The specified global table already exists.
-//
-// - TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account or the subscriber is operating in the wrong Amazon Web
-// Services Region.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateGlobalTable
-func (c *DynamoDB) CreateGlobalTable(input *CreateGlobalTableInput) (*CreateGlobalTableOutput, error) {
- req, out := c.CreateGlobalTableRequest(input)
- return out, req.Send()
-}
-
-// CreateGlobalTableWithContext is the same as CreateGlobalTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See CreateGlobalTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) CreateGlobalTableWithContext(ctx aws.Context, input *CreateGlobalTableInput, opts ...request.Option) (*CreateGlobalTableOutput, error) {
- req, out := c.CreateGlobalTableRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opCreateTable = "CreateTable"
-
-// CreateTableRequest generates a "aws/request.Request" representing the
-// client's request for the CreateTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See CreateTable for more information on using the CreateTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the CreateTableRequest method.
-// req, resp := client.CreateTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable
-func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) {
- op := &request.Operation{
- Name: opCreateTable,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &CreateTableInput{}
- }
-
- output = &CreateTableOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// CreateTable API operation for Amazon DynamoDB.
-//
-// The CreateTable operation adds a new table to your account. In an Amazon
-// Web Services account, table names must be unique within each Region. That
-// is, you can have two tables with same name if you create the tables in different
-// Regions.
-//
-// CreateTable is an asynchronous operation. Upon receiving a CreateTable request,
-// DynamoDB immediately returns a response with a TableStatus of CREATING. After
-// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform
-// read and write operations only on an ACTIVE table.
-//
-// You can optionally define secondary indexes on the new table, as part of
-// the CreateTable operation. If you want to create multiple tables with secondary
-// indexes on them, you must create the tables sequentially. Only one table
-// with secondary indexes can be in the CREATING state at any given time.
-//
-// You can use the DescribeTable action to check the table status.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation CreateTable for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/CreateTable
-func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, error) {
- req, out := c.CreateTableRequest(input)
- return out, req.Send()
-}
-
-// CreateTableWithContext is the same as CreateTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See CreateTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) CreateTableWithContext(ctx aws.Context, input *CreateTableInput, opts ...request.Option) (*CreateTableOutput, error) {
- req, out := c.CreateTableRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteBackup = "DeleteBackup"
-
-// DeleteBackupRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteBackup operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteBackup for more information on using the DeleteBackup
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DeleteBackupRequest method.
-// req, resp := client.DeleteBackupRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup
-func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.Request, output *DeleteBackupOutput) {
- op := &request.Operation{
- Name: opDeleteBackup,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeleteBackupInput{}
- }
-
- output = &DeleteBackupOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DeleteBackup API operation for Amazon DynamoDB.
-//
-// Deletes an existing backup of a table.
-//
-// You can call DeleteBackup at a maximum rate of 10 times per second.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DeleteBackup for usage and error information.
-//
-// Returned Error Types:
-//
-// - BackupNotFoundException
-// Backup not found for the given BackupARN.
-//
-// - BackupInUseException
-// There is another ongoing conflicting backup control plane operation on the
-// table. The backup is either being created, deleted or restored to a table.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteBackup
-func (c *DynamoDB) DeleteBackup(input *DeleteBackupInput) (*DeleteBackupOutput, error) {
- req, out := c.DeleteBackupRequest(input)
- return out, req.Send()
-}
-
-// DeleteBackupWithContext is the same as DeleteBackup with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteBackup for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DeleteBackupWithContext(ctx aws.Context, input *DeleteBackupInput, opts ...request.Option) (*DeleteBackupOutput, error) {
- req, out := c.DeleteBackupRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteItem = "DeleteItem"
-
-// DeleteItemRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteItem operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteItem for more information on using the DeleteItem
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DeleteItemRequest method.
-// req, resp := client.DeleteItemRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem
-func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) {
- op := &request.Operation{
- Name: opDeleteItem,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeleteItemInput{}
- }
-
- output = &DeleteItemOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DeleteItem API operation for Amazon DynamoDB.
-//
-// Deletes a single item in a table by primary key. You can perform a conditional
-// delete operation that deletes the item if it exists, or if it has an expected
-// attribute value.
-//
-// In addition to deleting an item, you can also return the item's attribute
-// values in the same operation, using the ReturnValues parameter.
-//
-// Unless you specify conditions, the DeleteItem is an idempotent operation;
-// running it multiple times on the same item or attribute does not result in
-// an error response.
-//
-// Conditional deletes are useful for deleting items only if specific conditions
-// are met. If those conditions are met, DynamoDB performs the delete. Otherwise,
-// the item is not deleted.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DeleteItem for usage and error information.
-//
-// Returned Error Types:
-//
-// - ConditionalCheckFailedException
-// A condition specified in the operation could not be evaluated.
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - ItemCollectionSizeLimitExceededException
-// An item collection is too large. This exception is only returned for tables
-// that have one or more local secondary indexes.
-//
-// - TransactionConflictException
-// Operation was rejected because there is an ongoing transaction for the item.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteItem
-func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) {
- req, out := c.DeleteItemRequest(input)
- return out, req.Send()
-}
-
-// DeleteItemWithContext is the same as DeleteItem with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteItem for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DeleteItemWithContext(ctx aws.Context, input *DeleteItemInput, opts ...request.Option) (*DeleteItemOutput, error) {
- req, out := c.DeleteItemRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteResourcePolicy = "DeleteResourcePolicy"
-
-// DeleteResourcePolicyRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteResourcePolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteResourcePolicy for more information on using the DeleteResourcePolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DeleteResourcePolicyRequest method.
-// req, resp := client.DeleteResourcePolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteResourcePolicy
-func (c *DynamoDB) DeleteResourcePolicyRequest(input *DeleteResourcePolicyInput) (req *request.Request, output *DeleteResourcePolicyOutput) {
- op := &request.Operation{
- Name: opDeleteResourcePolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeleteResourcePolicyInput{}
- }
-
- output = &DeleteResourcePolicyOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DeleteResourcePolicy API operation for Amazon DynamoDB.
-//
-// Deletes the resource-based policy attached to the resource, which can be
-// a table or stream.
-//
-// DeleteResourcePolicy is an idempotent operation; running it multiple times
-// on the same resource doesn't result in an error response, unless you specify
-// an ExpectedRevisionId, which will then return a PolicyNotFoundException.
-//
-// To make sure that you don't inadvertently lock yourself out of your own resources,
-// the root principal in your Amazon Web Services account can perform DeleteResourcePolicy
-// requests, even if your resource-based policy explicitly denies the root principal's
-// access.
-//
-// DeleteResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy
-// request immediately after running the DeleteResourcePolicy request, DynamoDB
-// might still return the deleted policy. This is because the policy for your
-// resource might not have been deleted yet. Wait for a few seconds, and then
-// try the GetResourcePolicy request again.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DeleteResourcePolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - PolicyNotFoundException
-// The operation tried to access a nonexistent resource-based policy.
-//
-// If you specified an ExpectedRevisionId, it's possible that a policy is present
-// for the resource but its revision ID didn't match the expected value.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteResourcePolicy
-func (c *DynamoDB) DeleteResourcePolicy(input *DeleteResourcePolicyInput) (*DeleteResourcePolicyOutput, error) {
- req, out := c.DeleteResourcePolicyRequest(input)
- return out, req.Send()
-}
-
-// DeleteResourcePolicyWithContext is the same as DeleteResourcePolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteResourcePolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DeleteResourcePolicyWithContext(ctx aws.Context, input *DeleteResourcePolicyInput, opts ...request.Option) (*DeleteResourcePolicyOutput, error) {
- req, out := c.DeleteResourcePolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDeleteTable = "DeleteTable"
-
-// DeleteTableRequest generates a "aws/request.Request" representing the
-// client's request for the DeleteTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DeleteTable for more information on using the DeleteTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DeleteTableRequest method.
-// req, resp := client.DeleteTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable
-func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) {
- op := &request.Operation{
- Name: opDeleteTable,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DeleteTableInput{}
- }
-
- output = &DeleteTableOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DeleteTable API operation for Amazon DynamoDB.
-//
-// The DeleteTable operation deletes a table and all of its items. After a DeleteTable
-// request, the specified table is in the DELETING state until DynamoDB completes
-// the deletion. If the table is in the ACTIVE state, you can delete it. If
-// a table is in CREATING or UPDATING states, then DynamoDB returns a ResourceInUseException.
-// If the specified table does not exist, DynamoDB returns a ResourceNotFoundException.
-// If table is already in the DELETING state, no error is returned.
-//
-// For global tables, this operation only applies to global tables using Version
-// 2019.11.21 (Current version).
-//
-// DynamoDB might continue to accept data read and write operations, such as
-// GetItem and PutItem, on a table in the DELETING state until the table deletion
-// is complete. For the full list of table states, see TableStatus (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TableDescription.html#DDB-Type-TableDescription-TableStatus).
-//
-// When you delete a table, any indexes on that table are also deleted.
-//
-// If you have DynamoDB Streams enabled on the table, then the corresponding
-// stream on that table goes into the DISABLED state, and the stream is automatically
-// deleted after 24 hours.
-//
-// Use the DescribeTable action to check the status of the table.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DeleteTable for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DeleteTable
-func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, error) {
- req, out := c.DeleteTableRequest(input)
- return out, req.Send()
-}
-
-// DeleteTableWithContext is the same as DeleteTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DeleteTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DeleteTableWithContext(ctx aws.Context, input *DeleteTableInput, opts ...request.Option) (*DeleteTableOutput, error) {
- req, out := c.DeleteTableRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeBackup = "DescribeBackup"
-
-// DescribeBackupRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeBackup operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeBackup for more information on using the DescribeBackup
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeBackupRequest method.
-// req, resp := client.DescribeBackupRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup
-func (c *DynamoDB) DescribeBackupRequest(input *DescribeBackupInput) (req *request.Request, output *DescribeBackupOutput) {
- op := &request.Operation{
- Name: opDescribeBackup,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeBackupInput{}
- }
-
- output = &DescribeBackupOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DescribeBackup API operation for Amazon DynamoDB.
-//
-// Describes an existing backup of a table.
-//
-// You can call DescribeBackup at a maximum rate of 10 times per second.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeBackup for usage and error information.
-//
-// Returned Error Types:
-//
-// - BackupNotFoundException
-// Backup not found for the given BackupARN.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeBackup
-func (c *DynamoDB) DescribeBackup(input *DescribeBackupInput) (*DescribeBackupOutput, error) {
- req, out := c.DescribeBackupRequest(input)
- return out, req.Send()
-}
-
-// DescribeBackupWithContext is the same as DescribeBackup with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeBackup for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeBackupWithContext(ctx aws.Context, input *DescribeBackupInput, opts ...request.Option) (*DescribeBackupOutput, error) {
- req, out := c.DescribeBackupRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeContinuousBackups = "DescribeContinuousBackups"
-
-// DescribeContinuousBackupsRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeContinuousBackups operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeContinuousBackups for more information on using the DescribeContinuousBackups
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeContinuousBackupsRequest method.
-// req, resp := client.DescribeContinuousBackupsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups
-func (c *DynamoDB) DescribeContinuousBackupsRequest(input *DescribeContinuousBackupsInput) (req *request.Request, output *DescribeContinuousBackupsOutput) {
- op := &request.Operation{
- Name: opDescribeContinuousBackups,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeContinuousBackupsInput{}
- }
-
- output = &DescribeContinuousBackupsOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DescribeContinuousBackups API operation for Amazon DynamoDB.
-//
-// Checks the status of continuous backups and point in time recovery on the
-// specified table. Continuous backups are ENABLED on all tables at table creation.
-// If point in time recovery is enabled, PointInTimeRecoveryStatus will be set
-// to ENABLED.
-//
-// After continuous backups and point in time recovery are enabled, you can
-// restore to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
-//
-// LatestRestorableDateTime is typically 5 minutes before the current time.
-// You can restore your table to any point in time during the last 35 days.
-//
-// You can call DescribeContinuousBackups at a maximum rate of 10 times per
-// second.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeContinuousBackups for usage and error information.
-//
-// Returned Error Types:
-//
-// - TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account or the subscriber is operating in the wrong Amazon Web
-// Services Region.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContinuousBackups
-func (c *DynamoDB) DescribeContinuousBackups(input *DescribeContinuousBackupsInput) (*DescribeContinuousBackupsOutput, error) {
- req, out := c.DescribeContinuousBackupsRequest(input)
- return out, req.Send()
-}
-
-// DescribeContinuousBackupsWithContext is the same as DescribeContinuousBackups with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeContinuousBackups for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeContinuousBackupsWithContext(ctx aws.Context, input *DescribeContinuousBackupsInput, opts ...request.Option) (*DescribeContinuousBackupsOutput, error) {
- req, out := c.DescribeContinuousBackupsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeContributorInsights = "DescribeContributorInsights"
-
-// DescribeContributorInsightsRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeContributorInsights operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeContributorInsights for more information on using the DescribeContributorInsights
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeContributorInsightsRequest method.
-// req, resp := client.DescribeContributorInsightsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights
-func (c *DynamoDB) DescribeContributorInsightsRequest(input *DescribeContributorInsightsInput) (req *request.Request, output *DescribeContributorInsightsOutput) {
- op := &request.Operation{
- Name: opDescribeContributorInsights,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeContributorInsightsInput{}
- }
-
- output = &DescribeContributorInsightsOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeContributorInsights API operation for Amazon DynamoDB.
-//
-// Returns information about contributor insights for a given table or global
-// secondary index.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeContributorInsights for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeContributorInsights
-func (c *DynamoDB) DescribeContributorInsights(input *DescribeContributorInsightsInput) (*DescribeContributorInsightsOutput, error) {
- req, out := c.DescribeContributorInsightsRequest(input)
- return out, req.Send()
-}
-
-// DescribeContributorInsightsWithContext is the same as DescribeContributorInsights with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeContributorInsights for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeContributorInsightsWithContext(ctx aws.Context, input *DescribeContributorInsightsInput, opts ...request.Option) (*DescribeContributorInsightsOutput, error) {
- req, out := c.DescribeContributorInsightsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeEndpoints = "DescribeEndpoints"
-
-// DescribeEndpointsRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeEndpoints operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeEndpoints for more information on using the DescribeEndpoints
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeEndpointsRequest method.
-// req, resp := client.DescribeEndpointsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints
-func (c *DynamoDB) DescribeEndpointsRequest(input *DescribeEndpointsInput) (req *request.Request, output *DescribeEndpointsOutput) {
- op := &request.Operation{
- Name: opDescribeEndpoints,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeEndpointsInput{}
- }
-
- output = &DescribeEndpointsOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeEndpoints API operation for Amazon DynamoDB.
-//
-// Returns the regional endpoint information. For more information on policy
-// permissions, please see Internetwork traffic privacy (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/inter-network-traffic-privacy.html#inter-network-traffic-DescribeEndpoints).
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeEndpoints for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeEndpoints
-func (c *DynamoDB) DescribeEndpoints(input *DescribeEndpointsInput) (*DescribeEndpointsOutput, error) {
- req, out := c.DescribeEndpointsRequest(input)
- return out, req.Send()
-}
-
-// DescribeEndpointsWithContext is the same as DescribeEndpoints with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeEndpoints for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeEndpointsWithContext(ctx aws.Context, input *DescribeEndpointsInput, opts ...request.Option) (*DescribeEndpointsOutput, error) {
- req, out := c.DescribeEndpointsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-type discovererDescribeEndpoints struct {
- Client *DynamoDB
- Required bool
- EndpointCache *crr.EndpointCache
- Params map[string]*string
- Key string
- req *request.Request
-}
-
-func (d *discovererDescribeEndpoints) Discover() (crr.Endpoint, error) {
- input := &DescribeEndpointsInput{}
-
- resp, err := d.Client.DescribeEndpoints(input)
- if err != nil {
- return crr.Endpoint{}, err
- }
-
- endpoint := crr.Endpoint{
- Key: d.Key,
- }
-
- for _, e := range resp.Endpoints {
- if e.Address == nil {
- continue
- }
-
- address := *e.Address
-
- var scheme string
- if idx := strings.Index(address, "://"); idx != -1 {
- scheme = address[:idx]
- }
-
- if len(scheme) == 0 {
- address = fmt.Sprintf("%s://%s", d.req.HTTPRequest.URL.Scheme, address)
- }
-
- cachedInMinutes := aws.Int64Value(e.CachePeriodInMinutes)
- u, err := url.Parse(address)
- if err != nil {
- continue
- }
-
- addr := crr.WeightedAddress{
- URL: u,
- Expired: time.Now().Add(time.Duration(cachedInMinutes) * time.Minute),
- }
-
- endpoint.Add(addr)
- }
-
- d.EndpointCache.Add(endpoint)
-
- return endpoint, nil
-}
-
-func (d *discovererDescribeEndpoints) Handler(r *request.Request) {
- endpointKey := crr.BuildEndpointKey(d.Params)
- d.Key = endpointKey
- d.req = r
-
- endpoint, err := d.EndpointCache.Get(d, endpointKey, d.Required)
- if err != nil {
- r.Error = err
- return
- }
-
- if endpoint.URL != nil && len(endpoint.URL.String()) > 0 {
- r.HTTPRequest.URL = endpoint.URL
- }
-}
-
-const opDescribeExport = "DescribeExport"
-
-// DescribeExportRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeExport operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeExport for more information on using the DescribeExport
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeExportRequest method.
-// req, resp := client.DescribeExportRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport
-func (c *DynamoDB) DescribeExportRequest(input *DescribeExportInput) (req *request.Request, output *DescribeExportOutput) {
- op := &request.Operation{
- Name: opDescribeExport,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeExportInput{}
- }
-
- output = &DescribeExportOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeExport API operation for Amazon DynamoDB.
-//
-// Describes an existing table export.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeExport for usage and error information.
-//
-// Returned Error Types:
-//
-// - ExportNotFoundException
-// The specified export was not found.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeExport
-func (c *DynamoDB) DescribeExport(input *DescribeExportInput) (*DescribeExportOutput, error) {
- req, out := c.DescribeExportRequest(input)
- return out, req.Send()
-}
-
-// DescribeExportWithContext is the same as DescribeExport with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeExport for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeExportWithContext(ctx aws.Context, input *DescribeExportInput, opts ...request.Option) (*DescribeExportOutput, error) {
- req, out := c.DescribeExportRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeGlobalTable = "DescribeGlobalTable"
-
-// DescribeGlobalTableRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeGlobalTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeGlobalTable for more information on using the DescribeGlobalTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeGlobalTableRequest method.
-// req, resp := client.DescribeGlobalTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable
-func (c *DynamoDB) DescribeGlobalTableRequest(input *DescribeGlobalTableInput) (req *request.Request, output *DescribeGlobalTableOutput) {
- op := &request.Operation{
- Name: opDescribeGlobalTable,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeGlobalTableInput{}
- }
-
- output = &DescribeGlobalTableOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DescribeGlobalTable API operation for Amazon DynamoDB.
-//
-// Returns information about the specified global table.
-//
-// This documentation is for version 2017.11.29 (Legacy) of global tables, which
-// should be avoided for new global tables. Customers should use Global Tables
-// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
-// when possible, because it provides greater flexibility, higher efficiency,
-// and consumes less write capacity than 2017.11.29 (Legacy).
-//
-// To determine which version you're using, see Determining the global table
-// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html).
-// To update existing global tables from version 2017.11.29 (Legacy) to version
-// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html).
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeGlobalTable for usage and error information.
-//
-// Returned Error Types:
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - GlobalTableNotFoundException
-// The specified global table does not exist.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTable
-func (c *DynamoDB) DescribeGlobalTable(input *DescribeGlobalTableInput) (*DescribeGlobalTableOutput, error) {
- req, out := c.DescribeGlobalTableRequest(input)
- return out, req.Send()
-}
-
-// DescribeGlobalTableWithContext is the same as DescribeGlobalTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeGlobalTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeGlobalTableWithContext(ctx aws.Context, input *DescribeGlobalTableInput, opts ...request.Option) (*DescribeGlobalTableOutput, error) {
- req, out := c.DescribeGlobalTableRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeGlobalTableSettings = "DescribeGlobalTableSettings"
-
-// DescribeGlobalTableSettingsRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeGlobalTableSettings operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeGlobalTableSettings for more information on using the DescribeGlobalTableSettings
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeGlobalTableSettingsRequest method.
-// req, resp := client.DescribeGlobalTableSettingsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings
-func (c *DynamoDB) DescribeGlobalTableSettingsRequest(input *DescribeGlobalTableSettingsInput) (req *request.Request, output *DescribeGlobalTableSettingsOutput) {
- op := &request.Operation{
- Name: opDescribeGlobalTableSettings,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeGlobalTableSettingsInput{}
- }
-
- output = &DescribeGlobalTableSettingsOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DescribeGlobalTableSettings API operation for Amazon DynamoDB.
-//
-// Describes Region-specific settings for a global table.
-//
-// This documentation is for version 2017.11.29 (Legacy) of global tables, which
-// should be avoided for new global tables. Customers should use Global Tables
-// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
-// when possible, because it provides greater flexibility, higher efficiency,
-// and consumes less write capacity than 2017.11.29 (Legacy).
-//
-// To determine which version you're using, see Determining the global table
-// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html).
-// To update existing global tables from version 2017.11.29 (Legacy) to version
-// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html).
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeGlobalTableSettings for usage and error information.
-//
-// Returned Error Types:
-//
-// - GlobalTableNotFoundException
-// The specified global table does not exist.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeGlobalTableSettings
-func (c *DynamoDB) DescribeGlobalTableSettings(input *DescribeGlobalTableSettingsInput) (*DescribeGlobalTableSettingsOutput, error) {
- req, out := c.DescribeGlobalTableSettingsRequest(input)
- return out, req.Send()
-}
-
-// DescribeGlobalTableSettingsWithContext is the same as DescribeGlobalTableSettings with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeGlobalTableSettings for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeGlobalTableSettingsWithContext(ctx aws.Context, input *DescribeGlobalTableSettingsInput, opts ...request.Option) (*DescribeGlobalTableSettingsOutput, error) {
- req, out := c.DescribeGlobalTableSettingsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeImport = "DescribeImport"
-
-// DescribeImportRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeImport operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeImport for more information on using the DescribeImport
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeImportRequest method.
-// req, resp := client.DescribeImportRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport
-func (c *DynamoDB) DescribeImportRequest(input *DescribeImportInput) (req *request.Request, output *DescribeImportOutput) {
- op := &request.Operation{
- Name: opDescribeImport,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeImportInput{}
- }
-
- output = &DescribeImportOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeImport API operation for Amazon DynamoDB.
-//
-// Represents the properties of the import.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeImport for usage and error information.
-//
-// Returned Error Types:
-// - ImportNotFoundException
-// The specified import was not found.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeImport
-func (c *DynamoDB) DescribeImport(input *DescribeImportInput) (*DescribeImportOutput, error) {
- req, out := c.DescribeImportRequest(input)
- return out, req.Send()
-}
-
-// DescribeImportWithContext is the same as DescribeImport with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeImport for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeImportWithContext(ctx aws.Context, input *DescribeImportInput, opts ...request.Option) (*DescribeImportOutput, error) {
- req, out := c.DescribeImportRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeKinesisStreamingDestination = "DescribeKinesisStreamingDestination"
-
-// DescribeKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeKinesisStreamingDestination operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeKinesisStreamingDestination for more information on using the DescribeKinesisStreamingDestination
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeKinesisStreamingDestinationRequest method.
-// req, resp := client.DescribeKinesisStreamingDestinationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination
-func (c *DynamoDB) DescribeKinesisStreamingDestinationRequest(input *DescribeKinesisStreamingDestinationInput) (req *request.Request, output *DescribeKinesisStreamingDestinationOutput) {
- op := &request.Operation{
- Name: opDescribeKinesisStreamingDestination,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeKinesisStreamingDestinationInput{}
- }
-
- output = &DescribeKinesisStreamingDestinationOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DescribeKinesisStreamingDestination API operation for Amazon DynamoDB.
-//
-// Returns information about the status of Kinesis streaming.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeKinesisStreamingDestination for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination
-func (c *DynamoDB) DescribeKinesisStreamingDestination(input *DescribeKinesisStreamingDestinationInput) (*DescribeKinesisStreamingDestinationOutput, error) {
- req, out := c.DescribeKinesisStreamingDestinationRequest(input)
- return out, req.Send()
-}
-
-// DescribeKinesisStreamingDestinationWithContext is the same as DescribeKinesisStreamingDestination with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeKinesisStreamingDestination for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeKinesisStreamingDestinationWithContext(ctx aws.Context, input *DescribeKinesisStreamingDestinationInput, opts ...request.Option) (*DescribeKinesisStreamingDestinationOutput, error) {
- req, out := c.DescribeKinesisStreamingDestinationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeLimits = "DescribeLimits"
-
-// DescribeLimitsRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeLimits operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeLimits for more information on using the DescribeLimits
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeLimitsRequest method.
-// req, resp := client.DescribeLimitsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits
-func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) {
- op := &request.Operation{
- Name: opDescribeLimits,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeLimitsInput{}
- }
-
- output = &DescribeLimitsOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DescribeLimits API operation for Amazon DynamoDB.
-//
-// Returns the current provisioned-capacity quotas for your Amazon Web Services
-// account in a Region, both for the Region as a whole and for any one DynamoDB
-// table that you create there.
-//
-// When you establish an Amazon Web Services account, the account has initial
-// quotas on the maximum read capacity units and write capacity units that you
-// can provision across all of your DynamoDB tables in a given Region. Also,
-// there are per-table quotas that apply when you create a table there. For
-// more information, see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-// page in the Amazon DynamoDB Developer Guide.
-//
-// Although you can increase these quotas by filing a case at Amazon Web Services
-// Support Center (https://console.aws.amazon.com/support/home#/), obtaining
-// the increase is not instantaneous. The DescribeLimits action lets you write
-// code to compare the capacity you are currently using to those quotas imposed
-// by your account so that you have enough time to apply for an increase before
-// you hit a quota.
-//
-// For example, you could use one of the Amazon Web Services SDKs to do the
-// following:
-//
-// Call DescribeLimits for a particular Region to obtain your current account
-// quotas on provisioned capacity there.
-//
-// Create a variable to hold the aggregate read capacity units provisioned for
-// all your tables in that Region, and one to hold the aggregate write capacity
-// units. Zero them both.
-//
-// Call ListTables to obtain a list of all your DynamoDB tables.
-//
-// For each table name listed by ListTables, do the following:
-//
-// - Call DescribeTable with the table name.
-//
-// - Use the data returned by DescribeTable to add the read capacity units
-// and write capacity units provisioned for the table itself to your variables.
-//
-// - If the table has one or more global secondary indexes (GSIs), loop over
-// these GSIs and add their provisioned capacity values to your variables
-// as well.
-//
-// Report the account quotas for that Region returned by DescribeLimits, along
-// with the total current provisioned capacity levels you have calculated.
-//
-// This will let you see whether you are getting close to your account-level
-// quotas.
-//
-// The per-table quotas apply only when you are creating a new table. They restrict
-// the sum of the provisioned capacity of the new table itself and all its global
-// secondary indexes.
-//
-// For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned
-// capacity extremely rapidly, but the only quota that applies is that the aggregate
-// provisioned capacity over all your tables and GSIs cannot exceed either of
-// the per-account quotas.
-//
-// DescribeLimits should only be called periodically. You can expect throttling
-// errors if you call it more than once in a minute.
-//
-// The DescribeLimits Request element has no content.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeLimits for usage and error information.
-//
-// Returned Error Types:
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeLimits
-func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOutput, error) {
- req, out := c.DescribeLimitsRequest(input)
- return out, req.Send()
-}
-
-// DescribeLimitsWithContext is the same as DescribeLimits with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeLimits for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeLimitsWithContext(ctx aws.Context, input *DescribeLimitsInput, opts ...request.Option) (*DescribeLimitsOutput, error) {
- req, out := c.DescribeLimitsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeTable = "DescribeTable"
-
-// DescribeTableRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeTable for more information on using the DescribeTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeTableRequest method.
-// req, resp := client.DescribeTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable
-func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) {
- op := &request.Operation{
- Name: opDescribeTable,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeTableInput{}
- }
-
- output = &DescribeTableOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DescribeTable API operation for Amazon DynamoDB.
-//
-// Returns information about the table, including the current status of the
-// table, when it was created, the primary key schema, and any indexes on the
-// table.
-//
-// For global tables, this operation only applies to global tables using Version
-// 2019.11.21 (Current version).
-//
-// If you issue a DescribeTable request immediately after a CreateTable request,
-// DynamoDB might return a ResourceNotFoundException. This is because DescribeTable
-// uses an eventually consistent query, and the metadata for your table might
-// not be available at that moment. Wait for a few seconds, and then try the
-// DescribeTable request again.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeTable for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTable
-func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutput, error) {
- req, out := c.DescribeTableRequest(input)
- return out, req.Send()
-}
-
-// DescribeTableWithContext is the same as DescribeTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeTableWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.Option) (*DescribeTableOutput, error) {
- req, out := c.DescribeTableRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeTableReplicaAutoScaling = "DescribeTableReplicaAutoScaling"
-
-// DescribeTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeTableReplicaAutoScaling operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeTableReplicaAutoScaling for more information on using the DescribeTableReplicaAutoScaling
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeTableReplicaAutoScalingRequest method.
-// req, resp := client.DescribeTableReplicaAutoScalingRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling
-func (c *DynamoDB) DescribeTableReplicaAutoScalingRequest(input *DescribeTableReplicaAutoScalingInput) (req *request.Request, output *DescribeTableReplicaAutoScalingOutput) {
- op := &request.Operation{
- Name: opDescribeTableReplicaAutoScaling,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeTableReplicaAutoScalingInput{}
- }
-
- output = &DescribeTableReplicaAutoScalingOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DescribeTableReplicaAutoScaling API operation for Amazon DynamoDB.
-//
-// Describes auto scaling settings across replicas of the global table at once.
-//
-// For global tables, this operation only applies to global tables using Version
-// 2019.11.21 (Current version).
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeTableReplicaAutoScaling for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTableReplicaAutoScaling
-func (c *DynamoDB) DescribeTableReplicaAutoScaling(input *DescribeTableReplicaAutoScalingInput) (*DescribeTableReplicaAutoScalingOutput, error) {
- req, out := c.DescribeTableReplicaAutoScalingRequest(input)
- return out, req.Send()
-}
-
-// DescribeTableReplicaAutoScalingWithContext is the same as DescribeTableReplicaAutoScaling with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeTableReplicaAutoScaling for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeTableReplicaAutoScalingWithContext(ctx aws.Context, input *DescribeTableReplicaAutoScalingInput, opts ...request.Option) (*DescribeTableReplicaAutoScalingOutput, error) {
- req, out := c.DescribeTableReplicaAutoScalingRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDescribeTimeToLive = "DescribeTimeToLive"
-
-// DescribeTimeToLiveRequest generates a "aws/request.Request" representing the
-// client's request for the DescribeTimeToLive operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DescribeTimeToLive for more information on using the DescribeTimeToLive
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DescribeTimeToLiveRequest method.
-// req, resp := client.DescribeTimeToLiveRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive
-func (c *DynamoDB) DescribeTimeToLiveRequest(input *DescribeTimeToLiveInput) (req *request.Request, output *DescribeTimeToLiveOutput) {
- op := &request.Operation{
- Name: opDescribeTimeToLive,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DescribeTimeToLiveInput{}
- }
-
- output = &DescribeTimeToLiveOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DescribeTimeToLive API operation for Amazon DynamoDB.
-//
-// Gives a description of the Time to Live (TTL) status on the specified table.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DescribeTimeToLive for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeTimeToLive
-func (c *DynamoDB) DescribeTimeToLive(input *DescribeTimeToLiveInput) (*DescribeTimeToLiveOutput, error) {
- req, out := c.DescribeTimeToLiveRequest(input)
- return out, req.Send()
-}
-
-// DescribeTimeToLiveWithContext is the same as DescribeTimeToLive with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DescribeTimeToLive for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *DescribeTimeToLiveInput, opts ...request.Option) (*DescribeTimeToLiveOutput, error) {
- req, out := c.DescribeTimeToLiveRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDisableKinesisStreamingDestination = "DisableKinesisStreamingDestination"
-
-// DisableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the
-// client's request for the DisableKinesisStreamingDestination operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DisableKinesisStreamingDestination for more information on using the DisableKinesisStreamingDestination
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DisableKinesisStreamingDestinationRequest method.
-// req, resp := client.DisableKinesisStreamingDestinationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination
-func (c *DynamoDB) DisableKinesisStreamingDestinationRequest(input *DisableKinesisStreamingDestinationInput) (req *request.Request, output *DisableKinesisStreamingDestinationOutput) {
- op := &request.Operation{
- Name: opDisableKinesisStreamingDestination,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DisableKinesisStreamingDestinationInput{}
- }
-
- output = &DisableKinesisStreamingDestinationOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// DisableKinesisStreamingDestination API operation for Amazon DynamoDB.
-//
-// Stops replication from the DynamoDB table to the Kinesis data stream. This
-// is done without deleting either of the resources.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation DisableKinesisStreamingDestination for usage and error information.
-//
-// Returned Error Types:
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination
-func (c *DynamoDB) DisableKinesisStreamingDestination(input *DisableKinesisStreamingDestinationInput) (*DisableKinesisStreamingDestinationOutput, error) {
- req, out := c.DisableKinesisStreamingDestinationRequest(input)
- return out, req.Send()
-}
-
-// DisableKinesisStreamingDestinationWithContext is the same as DisableKinesisStreamingDestination with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DisableKinesisStreamingDestination for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) DisableKinesisStreamingDestinationWithContext(ctx aws.Context, input *DisableKinesisStreamingDestinationInput, opts ...request.Option) (*DisableKinesisStreamingDestinationOutput, error) {
- req, out := c.DisableKinesisStreamingDestinationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opEnableKinesisStreamingDestination = "EnableKinesisStreamingDestination"
-
-// EnableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the
-// client's request for the EnableKinesisStreamingDestination operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See EnableKinesisStreamingDestination for more information on using the EnableKinesisStreamingDestination
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the EnableKinesisStreamingDestinationRequest method.
-// req, resp := client.EnableKinesisStreamingDestinationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination
-func (c *DynamoDB) EnableKinesisStreamingDestinationRequest(input *EnableKinesisStreamingDestinationInput) (req *request.Request, output *EnableKinesisStreamingDestinationOutput) {
- op := &request.Operation{
- Name: opEnableKinesisStreamingDestination,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &EnableKinesisStreamingDestinationInput{}
- }
-
- output = &EnableKinesisStreamingDestinationOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// EnableKinesisStreamingDestination API operation for Amazon DynamoDB.
-//
-// Starts table data replication to the specified Kinesis data stream at a timestamp
-// chosen during the enable workflow. If this operation doesn't return results
-// immediately, use DescribeKinesisStreamingDestination to check if streaming
-// to the Kinesis data stream is ACTIVE.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation EnableKinesisStreamingDestination for usage and error information.
-//
-// Returned Error Types:
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination
-func (c *DynamoDB) EnableKinesisStreamingDestination(input *EnableKinesisStreamingDestinationInput) (*EnableKinesisStreamingDestinationOutput, error) {
- req, out := c.EnableKinesisStreamingDestinationRequest(input)
- return out, req.Send()
-}
-
-// EnableKinesisStreamingDestinationWithContext is the same as EnableKinesisStreamingDestination with the addition of
-// the ability to pass a context and additional request options.
-//
-// See EnableKinesisStreamingDestination for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) EnableKinesisStreamingDestinationWithContext(ctx aws.Context, input *EnableKinesisStreamingDestinationInput, opts ...request.Option) (*EnableKinesisStreamingDestinationOutput, error) {
- req, out := c.EnableKinesisStreamingDestinationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opExecuteStatement = "ExecuteStatement"
-
-// ExecuteStatementRequest generates a "aws/request.Request" representing the
-// client's request for the ExecuteStatement operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ExecuteStatement for more information on using the ExecuteStatement
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ExecuteStatementRequest method.
-// req, resp := client.ExecuteStatementRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement
-func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *request.Request, output *ExecuteStatementOutput) {
- op := &request.Operation{
- Name: opExecuteStatement,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &ExecuteStatementInput{}
- }
-
- output = &ExecuteStatementOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ExecuteStatement API operation for Amazon DynamoDB.
-//
-// This operation allows you to perform reads and singleton writes on data stored
-// in DynamoDB, using PartiQL.
-//
-// For PartiQL reads (SELECT statement), if the total number of processed items
-// exceeds the maximum dataset size limit of 1 MB, the read stops and results
-// are returned to the user as a LastEvaluatedKey value to continue the read
-// in a subsequent operation. If the filter criteria in WHERE clause does not
-// match any data, the read will return an empty result set.
-//
-// A single SELECT statement response can return up to the maximum number of
-// items (if using the Limit parameter) or a maximum of 1 MB of data (and then
-// apply any filtering to the results using WHERE clause). If LastEvaluatedKey
-// is present in the response, you need to paginate the result set. If NextToken
-// is present, you need to paginate the result set and include NextToken.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ExecuteStatement for usage and error information.
-//
-// Returned Error Types:
-//
-// - ConditionalCheckFailedException
-// A condition specified in the operation could not be evaluated.
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - ItemCollectionSizeLimitExceededException
-// An item collection is too large. This exception is only returned for tables
-// that have one or more local secondary indexes.
-//
-// - TransactionConflictException
-// Operation was rejected because there is an ongoing transaction for the item.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - DuplicateItemException
-// There was an attempt to insert an item with the same primary key as an item
-// that already exists in the DynamoDB table.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement
-func (c *DynamoDB) ExecuteStatement(input *ExecuteStatementInput) (*ExecuteStatementOutput, error) {
- req, out := c.ExecuteStatementRequest(input)
- return out, req.Send()
-}
-
-// ExecuteStatementWithContext is the same as ExecuteStatement with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ExecuteStatement for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ExecuteStatementWithContext(ctx aws.Context, input *ExecuteStatementInput, opts ...request.Option) (*ExecuteStatementOutput, error) {
- req, out := c.ExecuteStatementRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opExecuteTransaction = "ExecuteTransaction"
-
-// ExecuteTransactionRequest generates a "aws/request.Request" representing the
-// client's request for the ExecuteTransaction operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ExecuteTransaction for more information on using the ExecuteTransaction
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ExecuteTransactionRequest method.
-// req, resp := client.ExecuteTransactionRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction
-func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (req *request.Request, output *ExecuteTransactionOutput) {
- op := &request.Operation{
- Name: opExecuteTransaction,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &ExecuteTransactionInput{}
- }
-
- output = &ExecuteTransactionOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ExecuteTransaction API operation for Amazon DynamoDB.
-//
-// This operation allows you to perform transactional reads or writes on data
-// stored in DynamoDB, using PartiQL.
-//
-// The entire transaction must consist of either read statements or write statements,
-// you cannot mix both in one transaction. The EXISTS function is an exception
-// and can be used to check the condition of specific attributes of the item
-// in a similar manner to ConditionCheck in the TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/transaction-apis.html#transaction-apis-txwriteitems)
-// API.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ExecuteTransaction for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - TransactionCanceledException
-// The entire transaction request was canceled.
-//
-// DynamoDB cancels a TransactWriteItems request under the following circumstances:
-//
-// - A condition in one of the condition expressions is not met.
-//
-// - A table in the TransactWriteItems request is in a different account
-// or region.
-//
-// - More than one action in the TransactWriteItems operation targets the
-// same item.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - An item size becomes too large (larger than 400 KB), or a local secondary
-// index (LSI) becomes too large, or a similar validation error occurs because
-// of changes made by the transaction.
-//
-// - There is a user error, such as an invalid data format.
-//
-// - There is an ongoing TransactWriteItems operation that conflicts with
-// a concurrent TransactWriteItems request. In this case the TransactWriteItems
-// operation fails with a TransactionCanceledException.
-//
-// DynamoDB cancels a TransactGetItems request under the following circumstances:
-//
-// - There is an ongoing TransactGetItems operation that conflicts with a
-// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
-// In this case the TransactGetItems operation fails with a TransactionCanceledException.
-//
-// - A table in the TransactGetItems request is in a different account or
-// region.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - There is a user error, such as an invalid data format.
-//
-// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
-// property. This property is not set for other languages. Transaction cancellation
-// reasons are ordered in the order of requested items, if an item has no error
-// it will have None code and Null message.
-//
-// Cancellation reason codes and possible error messages:
-//
-// - No Errors: Code: None Message: null
-//
-// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
-// conditional request failed.
-//
-// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
-// Message: Collection size exceeded.
-//
-// - Transaction Conflict: Code: TransactionConflict Message: Transaction
-// is ongoing for the item.
-//
-// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
-// Messages: The level of configured provisioned throughput for the table
-// was exceeded. Consider increasing your provisioning level with the UpdateTable
-// API. This Message is received when provisioned throughput is exceeded
-// is on a provisioned DynamoDB table. The level of configured provisioned
-// throughput for one or more global secondary indexes of the table was exceeded.
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded is on a provisioned GSI.
-//
-// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index. DynamoDB is automatically
-// scaling your table or index so please try again shortly. If exceptions
-// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
-// This message is returned when writes get throttled on an On-Demand table
-// as DynamoDB is automatically scaling the table. Throughput exceeds the
-// current capacity for one or more global secondary indexes. DynamoDB is
-// automatically scaling your index so please try again shortly. This message
-// is returned when writes get throttled on an On-Demand GSI as DynamoDB
-// is automatically scaling the GSI.
-//
-// - Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
-//
-// - TransactionInProgressException
-// The transaction with the given request token is already in progress.
-//
-// Recommended Settings
-//
-// This is a general recommendation for handling the TransactionInProgressException.
-// These settings help ensure that the client retries will trigger completion
-// of the ongoing TransactWriteItems request.
-//
-// - Set clientExecutionTimeout to a value that allows at least one retry
-// to be processed after 5 seconds have elapsed since the first attempt for
-// the TransactWriteItems operation.
-//
-// - Set socketTimeout to a value a little lower than the requestTimeout
-// setting.
-//
-// - requestTimeout should be set based on the time taken for the individual
-// retries of a single HTTP request for your use case, but setting it to
-// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException
-// errors.
-//
-// - Use exponential backoff when retrying and tune backoff if needed.
-//
-// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97),
-// example timeout settings based on the guidelines above are as follows:
-//
-// Example timeline:
-//
-// - 0-1000 first attempt
-//
-// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base
-// delay for 4xx errors)
-//
-// - 1500-2500 second attempt
-//
-// - 2500-3500 second sleep/delay (500 * 2, exponential backoff)
-//
-// - 3500-4500 third attempt
-//
-// - 4500-6500 third sleep/delay (500 * 2^2)
-//
-// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds
-// have elapsed since the first attempt reached TC)
-//
-// - IdempotentParameterMismatchException
-// DynamoDB rejected the request because you retried a request with a different
-// payload but with an idempotent token that was already used.
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction
-func (c *DynamoDB) ExecuteTransaction(input *ExecuteTransactionInput) (*ExecuteTransactionOutput, error) {
- req, out := c.ExecuteTransactionRequest(input)
- return out, req.Send()
-}
-
-// ExecuteTransactionWithContext is the same as ExecuteTransaction with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ExecuteTransaction for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ExecuteTransactionWithContext(ctx aws.Context, input *ExecuteTransactionInput, opts ...request.Option) (*ExecuteTransactionOutput, error) {
- req, out := c.ExecuteTransactionRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opExportTableToPointInTime = "ExportTableToPointInTime"
-
-// ExportTableToPointInTimeRequest generates a "aws/request.Request" representing the
-// client's request for the ExportTableToPointInTime operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ExportTableToPointInTime for more information on using the ExportTableToPointInTime
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ExportTableToPointInTimeRequest method.
-// req, resp := client.ExportTableToPointInTimeRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime
-func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) {
- op := &request.Operation{
- Name: opExportTableToPointInTime,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &ExportTableToPointInTimeInput{}
- }
-
- output = &ExportTableToPointInTimeOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ExportTableToPointInTime API operation for Amazon DynamoDB.
-//
-// Exports table data to an S3 bucket. The table must have point in time recovery
-// enabled, and you can export data from any time within the point in time recovery
-// window.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ExportTableToPointInTime for usage and error information.
-//
-// Returned Error Types:
-//
-// - TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account or the subscriber is operating in the wrong Amazon Web
-// Services Region.
-//
-// - PointInTimeRecoveryUnavailableException
-// Point in time recovery has not yet been enabled for this source table.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InvalidExportTimeException
-// The specified ExportTime is outside of the point in time recovery window.
-//
-// - ExportConflictException
-// There was a conflict when writing to the specified S3 bucket.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime
-func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) {
- req, out := c.ExportTableToPointInTimeRequest(input)
- return out, req.Send()
-}
-
-// ExportTableToPointInTimeWithContext is the same as ExportTableToPointInTime with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ExportTableToPointInTime for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ExportTableToPointInTimeWithContext(ctx aws.Context, input *ExportTableToPointInTimeInput, opts ...request.Option) (*ExportTableToPointInTimeOutput, error) {
- req, out := c.ExportTableToPointInTimeRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetItem = "GetItem"
-
-// GetItemRequest generates a "aws/request.Request" representing the
-// client's request for the GetItem operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetItem for more information on using the GetItem
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetItemRequest method.
-// req, resp := client.GetItemRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem
-func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) {
- op := &request.Operation{
- Name: opGetItem,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetItemInput{}
- }
-
- output = &GetItemOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// GetItem API operation for Amazon DynamoDB.
-//
-// The GetItem operation returns a set of attributes for the item with the given
-// primary key. If there is no matching item, GetItem does not return any data
-// and there will be no Item element in the response.
-//
-// GetItem provides an eventually consistent read by default. If your application
-// requires a strongly consistent read, set ConsistentRead to true. Although
-// a strongly consistent read might take more time than an eventually consistent
-// read, it always returns the last updated value.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation GetItem for usage and error information.
-//
-// Returned Error Types:
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem
-func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) {
- req, out := c.GetItemRequest(input)
- return out, req.Send()
-}
-
-// GetItemWithContext is the same as GetItem with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetItem for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts ...request.Option) (*GetItemOutput, error) {
- req, out := c.GetItemRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetResourcePolicy = "GetResourcePolicy"
-
-// GetResourcePolicyRequest generates a "aws/request.Request" representing the
-// client's request for the GetResourcePolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetResourcePolicy for more information on using the GetResourcePolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetResourcePolicyRequest method.
-// req, resp := client.GetResourcePolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetResourcePolicy
-func (c *DynamoDB) GetResourcePolicyRequest(input *GetResourcePolicyInput) (req *request.Request, output *GetResourcePolicyOutput) {
- op := &request.Operation{
- Name: opGetResourcePolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetResourcePolicyInput{}
- }
-
- output = &GetResourcePolicyOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// GetResourcePolicy API operation for Amazon DynamoDB.
-//
-// Returns the resource-based policy document attached to the resource, which
-// can be a table or stream, in JSON format.
-//
-// GetResourcePolicy follows an eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html)
-// model. The following list describes the outcomes when you issue the GetResourcePolicy
-// request immediately after issuing another request:
-//
-// - If you issue a GetResourcePolicy request immediately after a PutResourcePolicy
-// request, DynamoDB might return a PolicyNotFoundException.
-//
-// - If you issue a GetResourcePolicyrequest immediately after a DeleteResourcePolicy
-// request, DynamoDB might return the policy that was present before the
-// deletion request.
-//
-// - If you issue a GetResourcePolicy request immediately after a CreateTable
-// request, which includes a resource-based policy, DynamoDB might return
-// a ResourceNotFoundException or a PolicyNotFoundException.
-//
-// Because GetResourcePolicy uses an eventually consistent query, the metadata
-// for your policy or table might not be available at that moment. Wait for
-// a few seconds, and then retry the GetResourcePolicy request.
-//
-// After a GetResourcePolicy request returns a policy created using the PutResourcePolicy
-// request, the policy will be applied in the authorization of requests to the
-// resource. Because this process is eventually consistent, it will take some
-// time to apply the policy to all requests to a resource. Policies that you
-// attach while creating a table using the CreateTable request will always be
-// applied to all requests for that table.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation GetResourcePolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - PolicyNotFoundException
-// The operation tried to access a nonexistent resource-based policy.
-//
-// If you specified an ExpectedRevisionId, it's possible that a policy is present
-// for the resource but its revision ID didn't match the expected value.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetResourcePolicy
-func (c *DynamoDB) GetResourcePolicy(input *GetResourcePolicyInput) (*GetResourcePolicyOutput, error) {
- req, out := c.GetResourcePolicyRequest(input)
- return out, req.Send()
-}
-
-// GetResourcePolicyWithContext is the same as GetResourcePolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetResourcePolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) GetResourcePolicyWithContext(ctx aws.Context, input *GetResourcePolicyInput, opts ...request.Option) (*GetResourcePolicyOutput, error) {
- req, out := c.GetResourcePolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opImportTable = "ImportTable"
-
-// ImportTableRequest generates a "aws/request.Request" representing the
-// client's request for the ImportTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ImportTable for more information on using the ImportTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ImportTableRequest method.
-// req, resp := client.ImportTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable
-func (c *DynamoDB) ImportTableRequest(input *ImportTableInput) (req *request.Request, output *ImportTableOutput) {
- op := &request.Operation{
- Name: opImportTable,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &ImportTableInput{}
- }
-
- output = &ImportTableOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ImportTable API operation for Amazon DynamoDB.
-//
-// Imports table data from an S3 bucket.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ImportTable for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - ImportConflictException
-// There was a conflict when importing from the specified S3 source. This can
-// occur when the current import conflicts with a previous import request that
-// had the same client token.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ImportTable
-func (c *DynamoDB) ImportTable(input *ImportTableInput) (*ImportTableOutput, error) {
- req, out := c.ImportTableRequest(input)
- return out, req.Send()
-}
-
-// ImportTableWithContext is the same as ImportTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ImportTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ImportTableWithContext(ctx aws.Context, input *ImportTableInput, opts ...request.Option) (*ImportTableOutput, error) {
- req, out := c.ImportTableRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opListBackups = "ListBackups"
-
-// ListBackupsRequest generates a "aws/request.Request" representing the
-// client's request for the ListBackups operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListBackups for more information on using the ListBackups
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListBackupsRequest method.
-// req, resp := client.ListBackupsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups
-func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) {
- op := &request.Operation{
- Name: opListBackups,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &ListBackupsInput{}
- }
-
- output = &ListBackupsOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// ListBackups API operation for Amazon DynamoDB.
-//
-// List DynamoDB backups that are associated with an Amazon Web Services account
-// and weren't made with Amazon Web Services Backup. To list these backups for
-// a given table, specify TableName. ListBackups returns a paginated list of
-// results with at most 1 MB worth of items in a page. You can also specify
-// a maximum number of entries to be returned in a page.
-//
-// In the request, start time is inclusive, but end time is exclusive. Note
-// that these boundaries are for the time at which the original backup was requested.
-//
-// You can call ListBackups a maximum of five times per second.
-//
-// If you want to retrieve the complete list of backups made with Amazon Web
-// Services Backup, use the Amazon Web Services Backup list API. (https://docs.aws.amazon.com/aws-backup/latest/devguide/API_ListBackupJobs.html)
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ListBackups for usage and error information.
-//
-// Returned Error Types:
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups
-func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) {
- req, out := c.ListBackupsRequest(input)
- return out, req.Send()
-}
-
-// ListBackupsWithContext is the same as ListBackups with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListBackups for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListBackupsWithContext(ctx aws.Context, input *ListBackupsInput, opts ...request.Option) (*ListBackupsOutput, error) {
- req, out := c.ListBackupsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opListContributorInsights = "ListContributorInsights"
-
-// ListContributorInsightsRequest generates a "aws/request.Request" representing the
-// client's request for the ListContributorInsights operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListContributorInsights for more information on using the ListContributorInsights
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListContributorInsightsRequest method.
-// req, resp := client.ListContributorInsightsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights
-func (c *DynamoDB) ListContributorInsightsRequest(input *ListContributorInsightsInput) (req *request.Request, output *ListContributorInsightsOutput) {
- op := &request.Operation{
- Name: opListContributorInsights,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"NextToken"},
- OutputTokens: []string{"NextToken"},
- LimitToken: "MaxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &ListContributorInsightsInput{}
- }
-
- output = &ListContributorInsightsOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ListContributorInsights API operation for Amazon DynamoDB.
-//
-// Returns a list of ContributorInsightsSummary for a table and all its global
-// secondary indexes.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ListContributorInsights for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights
-func (c *DynamoDB) ListContributorInsights(input *ListContributorInsightsInput) (*ListContributorInsightsOutput, error) {
- req, out := c.ListContributorInsightsRequest(input)
- return out, req.Send()
-}
-
-// ListContributorInsightsWithContext is the same as ListContributorInsights with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListContributorInsights for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListContributorInsightsWithContext(ctx aws.Context, input *ListContributorInsightsInput, opts ...request.Option) (*ListContributorInsightsOutput, error) {
- req, out := c.ListContributorInsightsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ListContributorInsightsPages iterates over the pages of a ListContributorInsights operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListContributorInsights method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a ListContributorInsights operation.
-// pageNum := 0
-// err := client.ListContributorInsightsPages(params,
-// func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *DynamoDB) ListContributorInsightsPages(input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool) error {
- return c.ListContributorInsightsPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListContributorInsightsPagesWithContext same as ListContributorInsightsPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListContributorInsightsPagesWithContext(ctx aws.Context, input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *ListContributorInsightsInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.ListContributorInsightsRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*ListContributorInsightsOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opListExports = "ListExports"
-
-// ListExportsRequest generates a "aws/request.Request" representing the
-// client's request for the ListExports operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListExports for more information on using the ListExports
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListExportsRequest method.
-// req, resp := client.ListExportsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports
-func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) {
- op := &request.Operation{
- Name: opListExports,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"NextToken"},
- OutputTokens: []string{"NextToken"},
- LimitToken: "MaxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &ListExportsInput{}
- }
-
- output = &ListExportsOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ListExports API operation for Amazon DynamoDB.
-//
-// Lists completed exports within the past 90 days.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ListExports for usage and error information.
-//
-// Returned Error Types:
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports
-func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) {
- req, out := c.ListExportsRequest(input)
- return out, req.Send()
-}
-
-// ListExportsWithContext is the same as ListExports with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListExports for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInput, opts ...request.Option) (*ListExportsOutput, error) {
- req, out := c.ListExportsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ListExportsPages iterates over the pages of a ListExports operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListExports method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a ListExports operation.
-// pageNum := 0
-// err := client.ListExportsPages(params,
-// func(page *dynamodb.ListExportsOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error {
- return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListExportsPagesWithContext same as ListExportsPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListExportsPagesWithContext(ctx aws.Context, input *ListExportsInput, fn func(*ListExportsOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *ListExportsInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.ListExportsRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opListGlobalTables = "ListGlobalTables"
-
-// ListGlobalTablesRequest generates a "aws/request.Request" representing the
-// client's request for the ListGlobalTables operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListGlobalTables for more information on using the ListGlobalTables
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListGlobalTablesRequest method.
-// req, resp := client.ListGlobalTablesRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables
-func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) {
- op := &request.Operation{
- Name: opListGlobalTables,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &ListGlobalTablesInput{}
- }
-
- output = &ListGlobalTablesOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// ListGlobalTables API operation for Amazon DynamoDB.
-//
-// Lists all global tables that have a replica in the specified Region.
-//
-// This documentation is for version 2017.11.29 (Legacy) of global tables, which
-// should be avoided for new global tables. Customers should use Global Tables
-// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
-// when possible, because it provides greater flexibility, higher efficiency,
-// and consumes less write capacity than 2017.11.29 (Legacy).
-//
-// To determine which version you're using, see Determining the global table
-// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html).
-// To update existing global tables from version 2017.11.29 (Legacy) to version
-// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html).
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ListGlobalTables for usage and error information.
-//
-// Returned Error Types:
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables
-func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) {
- req, out := c.ListGlobalTablesRequest(input)
- return out, req.Send()
-}
-
-// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListGlobalTables for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) {
- req, out := c.ListGlobalTablesRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opListImports = "ListImports"
-
-// ListImportsRequest generates a "aws/request.Request" representing the
-// client's request for the ListImports operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListImports for more information on using the ListImports
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListImportsRequest method.
-// req, resp := client.ListImportsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports
-func (c *DynamoDB) ListImportsRequest(input *ListImportsInput) (req *request.Request, output *ListImportsOutput) {
- op := &request.Operation{
- Name: opListImports,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"NextToken"},
- OutputTokens: []string{"NextToken"},
- LimitToken: "PageSize",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &ListImportsInput{}
- }
-
- output = &ListImportsOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// ListImports API operation for Amazon DynamoDB.
-//
-// Lists completed imports within the past 90 days.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ListImports for usage and error information.
-//
-// Returned Error Types:
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListImports
-func (c *DynamoDB) ListImports(input *ListImportsInput) (*ListImportsOutput, error) {
- req, out := c.ListImportsRequest(input)
- return out, req.Send()
-}
-
-// ListImportsWithContext is the same as ListImports with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListImports for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListImportsWithContext(ctx aws.Context, input *ListImportsInput, opts ...request.Option) (*ListImportsOutput, error) {
- req, out := c.ListImportsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ListImportsPages iterates over the pages of a ListImports operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListImports method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a ListImports operation.
-// pageNum := 0
-// err := client.ListImportsPages(params,
-// func(page *dynamodb.ListImportsOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *DynamoDB) ListImportsPages(input *ListImportsInput, fn func(*ListImportsOutput, bool) bool) error {
- return c.ListImportsPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListImportsPagesWithContext same as ListImportsPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListImportsPagesWithContext(ctx aws.Context, input *ListImportsInput, fn func(*ListImportsOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *ListImportsInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.ListImportsRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*ListImportsOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opListTables = "ListTables"
-
-// ListTablesRequest generates a "aws/request.Request" representing the
-// client's request for the ListTables operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListTables for more information on using the ListTables
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListTablesRequest method.
-// req, resp := client.ListTablesRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables
-func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) {
- op := &request.Operation{
- Name: opListTables,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"ExclusiveStartTableName"},
- OutputTokens: []string{"LastEvaluatedTableName"},
- LimitToken: "Limit",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &ListTablesInput{}
- }
-
- output = &ListTablesOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// ListTables API operation for Amazon DynamoDB.
-//
-// Returns an array of table names associated with the current account and endpoint.
-// The output from ListTables is paginated, with each page returning a maximum
-// of 100 table names.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ListTables for usage and error information.
-//
-// Returned Error Types:
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables
-func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) {
- req, out := c.ListTablesRequest(input)
- return out, req.Send()
-}
-
-// ListTablesWithContext is the same as ListTables with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListTables for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) {
- req, out := c.ListTablesRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ListTablesPages iterates over the pages of a ListTables operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListTables method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a ListTables operation.
-// pageNum := 0
-// err := client.ListTablesPages(params,
-// func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error {
- return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListTablesPagesWithContext same as ListTablesPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *ListTablesInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.ListTablesRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opListTagsOfResource = "ListTagsOfResource"
-
-// ListTagsOfResourceRequest generates a "aws/request.Request" representing the
-// client's request for the ListTagsOfResource operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListTagsOfResource for more information on using the ListTagsOfResource
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListTagsOfResourceRequest method.
-// req, resp := client.ListTagsOfResourceRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource
-func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) {
- op := &request.Operation{
- Name: opListTagsOfResource,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &ListTagsOfResourceInput{}
- }
-
- output = &ListTagsOfResourceOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// ListTagsOfResource API operation for Amazon DynamoDB.
-//
-// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource
-// up to 10 times per second, per account.
-//
-// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
-// in the Amazon DynamoDB Developer Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ListTagsOfResource for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource
-func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) {
- req, out := c.ListTagsOfResourceRequest(input)
- return out, req.Send()
-}
-
-// ListTagsOfResourceWithContext is the same as ListTagsOfResource with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListTagsOfResource for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ListTagsOfResourceWithContext(ctx aws.Context, input *ListTagsOfResourceInput, opts ...request.Option) (*ListTagsOfResourceOutput, error) {
- req, out := c.ListTagsOfResourceRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opPutItem = "PutItem"
-
-// PutItemRequest generates a "aws/request.Request" representing the
-// client's request for the PutItem operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutItem for more information on using the PutItem
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutItemRequest method.
-// req, resp := client.PutItemRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem
-func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) {
- op := &request.Operation{
- Name: opPutItem,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutItemInput{}
- }
-
- output = &PutItemOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// PutItem API operation for Amazon DynamoDB.
-//
-// Creates a new item, or replaces an old item with a new item. If an item that
-// has the same primary key as the new item already exists in the specified
-// table, the new item completely replaces the existing item. You can perform
-// a conditional put operation (add a new item if one with the specified primary
-// key doesn't exist), or replace an existing item if it has certain attribute
-// values. You can return the item's attribute values in the same operation,
-// using the ReturnValues parameter.
-//
-// When you add an item, the primary key attributes are the only required attributes.
-//
-// Empty String and Binary attribute values are allowed. Attribute values of
-// type String and Binary must have a length greater than zero if the attribute
-// is used as a key attribute for a table or index. Set type attributes cannot
-// be empty.
-//
-// Invalid Requests with empty values will be rejected with a ValidationException
-// exception.
-//
-// To prevent a new item from replacing an existing item, use a conditional
-// expression that contains the attribute_not_exists function with the name
-// of the attribute being used as the partition key for the table. Since every
-// record must contain that attribute, the attribute_not_exists function will
-// only succeed if no matching item exists.
-//
-// For more information about PutItem, see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
-// in the Amazon DynamoDB Developer Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation PutItem for usage and error information.
-//
-// Returned Error Types:
-//
-// - ConditionalCheckFailedException
-// A condition specified in the operation could not be evaluated.
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - ItemCollectionSizeLimitExceededException
-// An item collection is too large. This exception is only returned for tables
-// that have one or more local secondary indexes.
-//
-// - TransactionConflictException
-// Operation was rejected because there is an ongoing transaction for the item.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem
-func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) {
- req, out := c.PutItemRequest(input)
- return out, req.Send()
-}
-
-// PutItemWithContext is the same as PutItem with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutItem for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts ...request.Option) (*PutItemOutput, error) {
- req, out := c.PutItemRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opPutResourcePolicy = "PutResourcePolicy"
-
-// PutResourcePolicyRequest generates a "aws/request.Request" representing the
-// client's request for the PutResourcePolicy operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See PutResourcePolicy for more information on using the PutResourcePolicy
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the PutResourcePolicyRequest method.
-// req, resp := client.PutResourcePolicyRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutResourcePolicy
-func (c *DynamoDB) PutResourcePolicyRequest(input *PutResourcePolicyInput) (req *request.Request, output *PutResourcePolicyOutput) {
- op := &request.Operation{
- Name: opPutResourcePolicy,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &PutResourcePolicyInput{}
- }
-
- output = &PutResourcePolicyOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// PutResourcePolicy API operation for Amazon DynamoDB.
-//
-// Attaches a resource-based policy document to the resource, which can be a
-// table or stream. When you attach a resource-based policy using this API,
-// the policy application is eventually consistent (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadConsistency.html).
-//
-// PutResourcePolicy is an idempotent operation; running it multiple times on
-// the same resource using the same policy document will return the same revision
-// ID. If you specify an ExpectedRevisionId that doesn't match the current policy's
-// RevisionId, the PolicyNotFoundException will be returned.
-//
-// PutResourcePolicy is an asynchronous operation. If you issue a GetResourcePolicy
-// request immediately after a PutResourcePolicy request, DynamoDB might return
-// your previous policy, if there was one, or return the PolicyNotFoundException.
-// This is because GetResourcePolicy uses an eventually consistent query, and
-// the metadata for your policy or table might not be available at that moment.
-// Wait for a few seconds, and then try the GetResourcePolicy request again.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation PutResourcePolicy for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - PolicyNotFoundException
-// The operation tried to access a nonexistent resource-based policy.
-//
-// If you specified an ExpectedRevisionId, it's possible that a policy is present
-// for the resource but its revision ID didn't match the expected value.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutResourcePolicy
-func (c *DynamoDB) PutResourcePolicy(input *PutResourcePolicyInput) (*PutResourcePolicyOutput, error) {
- req, out := c.PutResourcePolicyRequest(input)
- return out, req.Send()
-}
-
-// PutResourcePolicyWithContext is the same as PutResourcePolicy with the addition of
-// the ability to pass a context and additional request options.
-//
-// See PutResourcePolicy for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) PutResourcePolicyWithContext(ctx aws.Context, input *PutResourcePolicyInput, opts ...request.Option) (*PutResourcePolicyOutput, error) {
- req, out := c.PutResourcePolicyRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opQuery = "Query"
-
-// QueryRequest generates a "aws/request.Request" representing the
-// client's request for the Query operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See Query for more information on using the Query
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the QueryRequest method.
-// req, resp := client.QueryRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query
-func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) {
- op := &request.Operation{
- Name: opQuery,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"ExclusiveStartKey"},
- OutputTokens: []string{"LastEvaluatedKey"},
- LimitToken: "Limit",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &QueryInput{}
- }
-
- output = &QueryOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// Query API operation for Amazon DynamoDB.
-//
-// You must provide the name of the partition key attribute and a single value
-// for that attribute. Query returns all items with that partition key value.
-// Optionally, you can provide a sort key attribute and use a comparison operator
-// to refine the search results.
-//
-// Use the KeyConditionExpression parameter to provide a specific value for
-// the partition key. The Query operation will return all of the items from
-// the table or index with that partition key value. You can optionally narrow
-// the scope of the Query operation by specifying a sort key value and a comparison
-// operator in KeyConditionExpression. To further refine the Query results,
-// you can optionally provide a FilterExpression. A FilterExpression determines
-// which items within the results should be returned to you. All of the other
-// results are discarded.
-//
-// A Query operation always returns a result set. If no matching items are found,
-// the result set will be empty. Queries that do not return results consume
-// the minimum number of read capacity units for that type of read operation.
-//
-// DynamoDB calculates the number of read capacity units consumed based on item
-// size, not on the amount of data that is returned to an application. The number
-// of capacity units consumed will be the same whether you request all of the
-// attributes (the default behavior) or just some of them (using a projection
-// expression). The number will also be the same whether or not you use a FilterExpression.
-//
-// Query results are always sorted by the sort key value. If the data type of
-// the sort key is Number, the results are returned in numeric order; otherwise,
-// the results are returned in order of UTF-8 bytes. By default, the sort order
-// is ascending. To reverse the order, set the ScanIndexForward parameter to
-// false.
-//
-// A single Query operation will read up to the maximum number of items set
-// (if using the Limit parameter) or a maximum of 1 MB of data and then apply
-// any filtering to the results using FilterExpression. If LastEvaluatedKey
-// is present in the response, you will need to paginate the result set. For
-// more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination)
-// in the Amazon DynamoDB Developer Guide.
-//
-// FilterExpression is applied after a Query finishes, but before the results
-// are returned. A FilterExpression cannot contain partition key or sort key
-// attributes. You need to specify those attributes in the KeyConditionExpression.
-//
-// A Query operation can return an empty result set and a LastEvaluatedKey if
-// all the items read for the page of results are filtered out.
-//
-// You can query a table, a local secondary index, or a global secondary index.
-// For a query on a table or on a local secondary index, you can set the ConsistentRead
-// parameter to true and obtain a strongly consistent result. Global secondary
-// indexes support eventually consistent reads only, so do not specify ConsistentRead
-// when querying a global secondary index.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation Query for usage and error information.
-//
-// Returned Error Types:
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query
-func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) {
- req, out := c.QueryRequest(input)
- return out, req.Send()
-}
-
-// QueryWithContext is the same as Query with the addition of
-// the ability to pass a context and additional request options.
-//
-// See Query for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ...request.Option) (*QueryOutput, error) {
- req, out := c.QueryRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// QueryPages iterates over the pages of a Query operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See Query method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a Query operation.
-// pageNum := 0
-// err := client.QueryPages(params,
-// func(page *dynamodb.QueryOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error {
- return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// QueryPagesWithContext same as QueryPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn func(*QueryOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *QueryInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.QueryRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*QueryOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opRestoreTableFromBackup = "RestoreTableFromBackup"
-
-// RestoreTableFromBackupRequest generates a "aws/request.Request" representing the
-// client's request for the RestoreTableFromBackup operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See RestoreTableFromBackup for more information on using the RestoreTableFromBackup
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the RestoreTableFromBackupRequest method.
-// req, resp := client.RestoreTableFromBackupRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup
-func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) {
- op := &request.Operation{
- Name: opRestoreTableFromBackup,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &RestoreTableFromBackupInput{}
- }
-
- output = &RestoreTableFromBackupOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// RestoreTableFromBackup API operation for Amazon DynamoDB.
-//
-// Creates a new table from an existing backup. Any number of users can execute
-// up to 50 concurrent restores (any type of restore) in a given account.
-//
-// You can call RestoreTableFromBackup at a maximum rate of 10 times per second.
-//
-// You must manually set up the following on the restored table:
-//
-// - Auto scaling policies
-//
-// - IAM policies
-//
-// - Amazon CloudWatch metrics and alarms
-//
-// - Tags
-//
-// - Stream settings
-//
-// - Time to Live (TTL) settings
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation RestoreTableFromBackup for usage and error information.
-//
-// Returned Error Types:
-//
-// - TableAlreadyExistsException
-// A target table with the specified name already exists.
-//
-// - TableInUseException
-// A target table with the specified name is either being created or deleted.
-//
-// - BackupNotFoundException
-// Backup not found for the given BackupARN.
-//
-// - BackupInUseException
-// There is another ongoing conflicting backup control plane operation on the
-// table. The backup is either being created, deleted or restored to a table.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup
-func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) {
- req, out := c.RestoreTableFromBackupRequest(input)
- return out, req.Send()
-}
-
-// RestoreTableFromBackupWithContext is the same as RestoreTableFromBackup with the addition of
-// the ability to pass a context and additional request options.
-//
-// See RestoreTableFromBackup for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) RestoreTableFromBackupWithContext(ctx aws.Context, input *RestoreTableFromBackupInput, opts ...request.Option) (*RestoreTableFromBackupOutput, error) {
- req, out := c.RestoreTableFromBackupRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opRestoreTableToPointInTime = "RestoreTableToPointInTime"
-
-// RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the
-// client's request for the RestoreTableToPointInTime operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See RestoreTableToPointInTime for more information on using the RestoreTableToPointInTime
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the RestoreTableToPointInTimeRequest method.
-// req, resp := client.RestoreTableToPointInTimeRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime
-func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) {
- op := &request.Operation{
- Name: opRestoreTableToPointInTime,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &RestoreTableToPointInTimeInput{}
- }
-
- output = &RestoreTableToPointInTimeOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// RestoreTableToPointInTime API operation for Amazon DynamoDB.
-//
-// Restores the specified table to the specified point in time within EarliestRestorableDateTime
-// and LatestRestorableDateTime. You can restore your table to any point in
-// time during the last 35 days. Any number of users can execute up to 50 concurrent
-// restores (any type of restore) in a given account.
-//
-// When you restore using point in time recovery, DynamoDB restores your table
-// data to the state based on the selected date and time (day:hour:minute:second)
-// to a new table.
-//
-// Along with data, the following are also included on the new restored table
-// using point in time recovery:
-//
-// - Global secondary indexes (GSIs)
-//
-// - Local secondary indexes (LSIs)
-//
-// - Provisioned read and write capacity
-//
-// - Encryption settings All these settings come from the current settings
-// of the source table at the time of restore.
-//
-// You must manually set up the following on the restored table:
-//
-// - Auto scaling policies
-//
-// - IAM policies
-//
-// - Amazon CloudWatch metrics and alarms
-//
-// - Tags
-//
-// - Stream settings
-//
-// - Time to Live (TTL) settings
-//
-// - Point in time recovery settings
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation RestoreTableToPointInTime for usage and error information.
-//
-// Returned Error Types:
-//
-// - TableAlreadyExistsException
-// A target table with the specified name already exists.
-//
-// - TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account or the subscriber is operating in the wrong Amazon Web
-// Services Region.
-//
-// - TableInUseException
-// A target table with the specified name is either being created or deleted.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InvalidRestoreTimeException
-// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
-// and LatestRestorableDateTime.
-//
-// - PointInTimeRecoveryUnavailableException
-// Point in time recovery has not yet been enabled for this source table.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime
-func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) {
- req, out := c.RestoreTableToPointInTimeRequest(input)
- return out, req.Send()
-}
-
-// RestoreTableToPointInTimeWithContext is the same as RestoreTableToPointInTime with the addition of
-// the ability to pass a context and additional request options.
-//
-// See RestoreTableToPointInTime for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) RestoreTableToPointInTimeWithContext(ctx aws.Context, input *RestoreTableToPointInTimeInput, opts ...request.Option) (*RestoreTableToPointInTimeOutput, error) {
- req, out := c.RestoreTableToPointInTimeRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opScan = "Scan"
-
-// ScanRequest generates a "aws/request.Request" representing the
-// client's request for the Scan operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See Scan for more information on using the Scan
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ScanRequest method.
-// req, resp := client.ScanRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan
-func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) {
- op := &request.Operation{
- Name: opScan,
- HTTPMethod: "POST",
- HTTPPath: "/",
- Paginator: &request.Paginator{
- InputTokens: []string{"ExclusiveStartKey"},
- OutputTokens: []string{"LastEvaluatedKey"},
- LimitToken: "Limit",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &ScanInput{}
- }
-
- output = &ScanOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// Scan API operation for Amazon DynamoDB.
-//
-// The Scan operation returns one or more items and item attributes by accessing
-// every item in a table or a secondary index. To have DynamoDB return fewer
-// items, you can provide a FilterExpression operation.
-//
-// If the total size of scanned items exceeds the maximum dataset size limit
-// of 1 MB, the scan completes and results are returned to the user. The LastEvaluatedKey
-// value is also returned and the requestor can use the LastEvaluatedKey to
-// continue the scan in a subsequent operation. Each scan response also includes
-// number of items that were scanned (ScannedCount) as part of the request.
-// If using a FilterExpression, a scan result can result in no items meeting
-// the criteria and the Count will result in zero. If you did not use a FilterExpression
-// in the scan request, then Count is the same as ScannedCount.
-//
-// Count and ScannedCount only return the count of items specific to a single
-// scan request and, unless the table is less than 1MB, do not represent the
-// total number of items in the table.
-//
-// A single Scan operation first reads up to the maximum number of items set
-// (if using the Limit parameter) or a maximum of 1 MB of data and then applies
-// any filtering to the results if a FilterExpression is provided. If LastEvaluatedKey
-// is present in the response, pagination is required to complete the full table
-// scan. For more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination)
-// in the Amazon DynamoDB Developer Guide.
-//
-// Scan operations proceed sequentially; however, for faster performance on
-// a large table or secondary index, applications can request a parallel Scan
-// operation by providing the Segment and TotalSegments parameters. For more
-// information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan)
-// in the Amazon DynamoDB Developer Guide.
-//
-// By default, a Scan uses eventually consistent reads when accessing the items
-// in a table. Therefore, the results from an eventually consistent Scan may
-// not include the latest item changes at the time the scan iterates through
-// each item in the table. If you require a strongly consistent read of each
-// item as the scan iterates through the items in the table, you can set the
-// ConsistentRead parameter to true. Strong consistency only relates to the
-// consistency of the read at the item level.
-//
-// DynamoDB does not provide snapshot isolation for a scan operation when the
-// ConsistentRead parameter is set to true. Thus, a DynamoDB scan operation
-// does not guarantee that all reads in a scan see a consistent snapshot of
-// the table when the scan operation was requested.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation Scan for usage and error information.
-//
-// Returned Error Types:
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan
-func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) {
- req, out := c.ScanRequest(input)
- return out, req.Send()
-}
-
-// ScanWithContext is the same as Scan with the addition of
-// the ability to pass a context and additional request options.
-//
-// See Scan for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...request.Option) (*ScanOutput, error) {
- req, out := c.ScanRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ScanPages iterates over the pages of a Scan operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See Scan method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a Scan operation.
-// pageNum := 0
-// err := client.ScanPages(params,
-// func(page *dynamodb.ScanOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error {
- return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ScanPagesWithContext same as ScanPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn func(*ScanOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *ScanInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.ScanRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*ScanOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opTagResource = "TagResource"
-
-// TagResourceRequest generates a "aws/request.Request" representing the
-// client's request for the TagResource operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See TagResource for more information on using the TagResource
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the TagResourceRequest method.
-// req, resp := client.TagResourceRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource
-func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) {
- op := &request.Operation{
- Name: opTagResource,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &TagResourceInput{}
- }
-
- output = &TagResourceOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// TagResource API operation for Amazon DynamoDB.
-//
-// Associate a set of tags with an Amazon DynamoDB resource. You can then activate
-// these user-defined tags so that they appear on the Billing and Cost Management
-// console for cost allocation tracking. You can call TagResource up to five
-// times per second, per account.
-//
-// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
-// in the Amazon DynamoDB Developer Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation TagResource for usage and error information.
-//
-// Returned Error Types:
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource
-func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) {
- req, out := c.TagResourceRequest(input)
- return out, req.Send()
-}
-
-// TagResourceWithContext is the same as TagResource with the addition of
-// the ability to pass a context and additional request options.
-//
-// See TagResource for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) {
- req, out := c.TagResourceRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opTransactGetItems = "TransactGetItems"
-
-// TransactGetItemsRequest generates a "aws/request.Request" representing the
-// client's request for the TransactGetItems operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See TransactGetItems for more information on using the TransactGetItems
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the TransactGetItemsRequest method.
-// req, resp := client.TransactGetItemsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems
-func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) {
- op := &request.Operation{
- Name: opTransactGetItems,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &TransactGetItemsInput{}
- }
-
- output = &TransactGetItemsOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// TransactGetItems API operation for Amazon DynamoDB.
-//
-// TransactGetItems is a synchronous operation that atomically retrieves multiple
-// items from one or more tables (but not from indexes) in a single account
-// and Region. A TransactGetItems call can contain up to 100 TransactGetItem
-// objects, each of which contains a Get structure that specifies an item to
-// retrieve from a table in the account and Region. A call to TransactGetItems
-// cannot retrieve items from tables in more than one Amazon Web Services account
-// or Region. The aggregate size of the items in the transaction cannot exceed
-// 4 MB.
-//
-// DynamoDB rejects the entire TransactGetItems request if any of the following
-// is true:
-//
-// - A conflicting operation is in the process of updating an item to be
-// read.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - There is a user error, such as an invalid data format.
-//
-// - The aggregate size of the items in the transaction exceeded 4 MB.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation TransactGetItems for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - TransactionCanceledException
-// The entire transaction request was canceled.
-//
-// DynamoDB cancels a TransactWriteItems request under the following circumstances:
-//
-// - A condition in one of the condition expressions is not met.
-//
-// - A table in the TransactWriteItems request is in a different account
-// or region.
-//
-// - More than one action in the TransactWriteItems operation targets the
-// same item.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - An item size becomes too large (larger than 400 KB), or a local secondary
-// index (LSI) becomes too large, or a similar validation error occurs because
-// of changes made by the transaction.
-//
-// - There is a user error, such as an invalid data format.
-//
-// - There is an ongoing TransactWriteItems operation that conflicts with
-// a concurrent TransactWriteItems request. In this case the TransactWriteItems
-// operation fails with a TransactionCanceledException.
-//
-// DynamoDB cancels a TransactGetItems request under the following circumstances:
-//
-// - There is an ongoing TransactGetItems operation that conflicts with a
-// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
-// In this case the TransactGetItems operation fails with a TransactionCanceledException.
-//
-// - A table in the TransactGetItems request is in a different account or
-// region.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - There is a user error, such as an invalid data format.
-//
-// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
-// property. This property is not set for other languages. Transaction cancellation
-// reasons are ordered in the order of requested items, if an item has no error
-// it will have None code and Null message.
-//
-// Cancellation reason codes and possible error messages:
-//
-// - No Errors: Code: None Message: null
-//
-// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
-// conditional request failed.
-//
-// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
-// Message: Collection size exceeded.
-//
-// - Transaction Conflict: Code: TransactionConflict Message: Transaction
-// is ongoing for the item.
-//
-// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
-// Messages: The level of configured provisioned throughput for the table
-// was exceeded. Consider increasing your provisioning level with the UpdateTable
-// API. This Message is received when provisioned throughput is exceeded
-// is on a provisioned DynamoDB table. The level of configured provisioned
-// throughput for one or more global secondary indexes of the table was exceeded.
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded is on a provisioned GSI.
-//
-// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index. DynamoDB is automatically
-// scaling your table or index so please try again shortly. If exceptions
-// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
-// This message is returned when writes get throttled on an On-Demand table
-// as DynamoDB is automatically scaling the table. Throughput exceeds the
-// current capacity for one or more global secondary indexes. DynamoDB is
-// automatically scaling your index so please try again shortly. This message
-// is returned when writes get throttled on an On-Demand GSI as DynamoDB
-// is automatically scaling the GSI.
-//
-// - Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems
-func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) {
- req, out := c.TransactGetItemsRequest(input)
- return out, req.Send()
-}
-
-// TransactGetItemsWithContext is the same as TransactGetItems with the addition of
-// the ability to pass a context and additional request options.
-//
-// See TransactGetItems for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) TransactGetItemsWithContext(ctx aws.Context, input *TransactGetItemsInput, opts ...request.Option) (*TransactGetItemsOutput, error) {
- req, out := c.TransactGetItemsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opTransactWriteItems = "TransactWriteItems"
-
-// TransactWriteItemsRequest generates a "aws/request.Request" representing the
-// client's request for the TransactWriteItems operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See TransactWriteItems for more information on using the TransactWriteItems
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the TransactWriteItemsRequest method.
-// req, resp := client.TransactWriteItemsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems
-func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) {
- op := &request.Operation{
- Name: opTransactWriteItems,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &TransactWriteItemsInput{}
- }
-
- output = &TransactWriteItemsOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// TransactWriteItems API operation for Amazon DynamoDB.
-//
-// TransactWriteItems is a synchronous write operation that groups up to 100
-// action requests. These actions can target items in different tables, but
-// not in different Amazon Web Services accounts or Regions, and no two actions
-// can target the same item. For example, you cannot both ConditionCheck and
-// Update the same item. The aggregate size of the items in the transaction
-// cannot exceed 4 MB.
-//
-// The actions are completed atomically so that either all of them succeed,
-// or all of them fail. They are defined by the following objects:
-//
-// - Put — Initiates a PutItem operation to write a new item. This structure
-// specifies the primary key of the item to be written, the name of the table
-// to write it in, an optional condition expression that must be satisfied
-// for the write to succeed, a list of the item's attributes, and a field
-// indicating whether to retrieve the item's attributes if the condition
-// is not met.
-//
-// - Update — Initiates an UpdateItem operation to update an existing item.
-// This structure specifies the primary key of the item to be updated, the
-// name of the table where it resides, an optional condition expression that
-// must be satisfied for the update to succeed, an expression that defines
-// one or more attributes to be updated, and a field indicating whether to
-// retrieve the item's attributes if the condition is not met.
-//
-// - Delete — Initiates a DeleteItem operation to delete an existing item.
-// This structure specifies the primary key of the item to be deleted, the
-// name of the table where it resides, an optional condition expression that
-// must be satisfied for the deletion to succeed, and a field indicating
-// whether to retrieve the item's attributes if the condition is not met.
-//
-// - ConditionCheck — Applies a condition to an item that is not being
-// modified by the transaction. This structure specifies the primary key
-// of the item to be checked, the name of the table where it resides, a condition
-// expression that must be satisfied for the transaction to succeed, and
-// a field indicating whether to retrieve the item's attributes if the condition
-// is not met.
-//
-// DynamoDB rejects the entire TransactWriteItems request if any of the following
-// is true:
-//
-// - A condition in one of the condition expressions is not met.
-//
-// - An ongoing operation is in the process of updating the same item.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - An item size becomes too large (bigger than 400 KB), a local secondary
-// index (LSI) becomes too large, or a similar validation error occurs because
-// of changes made by the transaction.
-//
-// - The aggregate size of the items in the transaction exceeds 4 MB.
-//
-// - There is a user error, such as an invalid data format.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation TransactWriteItems for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - TransactionCanceledException
-// The entire transaction request was canceled.
-//
-// DynamoDB cancels a TransactWriteItems request under the following circumstances:
-//
-// - A condition in one of the condition expressions is not met.
-//
-// - A table in the TransactWriteItems request is in a different account
-// or region.
-//
-// - More than one action in the TransactWriteItems operation targets the
-// same item.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - An item size becomes too large (larger than 400 KB), or a local secondary
-// index (LSI) becomes too large, or a similar validation error occurs because
-// of changes made by the transaction.
-//
-// - There is a user error, such as an invalid data format.
-//
-// - There is an ongoing TransactWriteItems operation that conflicts with
-// a concurrent TransactWriteItems request. In this case the TransactWriteItems
-// operation fails with a TransactionCanceledException.
-//
-// DynamoDB cancels a TransactGetItems request under the following circumstances:
-//
-// - There is an ongoing TransactGetItems operation that conflicts with a
-// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
-// In this case the TransactGetItems operation fails with a TransactionCanceledException.
-//
-// - A table in the TransactGetItems request is in a different account or
-// region.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - There is a user error, such as an invalid data format.
-//
-// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
-// property. This property is not set for other languages. Transaction cancellation
-// reasons are ordered in the order of requested items, if an item has no error
-// it will have None code and Null message.
-//
-// Cancellation reason codes and possible error messages:
-//
-// - No Errors: Code: None Message: null
-//
-// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
-// conditional request failed.
-//
-// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
-// Message: Collection size exceeded.
-//
-// - Transaction Conflict: Code: TransactionConflict Message: Transaction
-// is ongoing for the item.
-//
-// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
-// Messages: The level of configured provisioned throughput for the table
-// was exceeded. Consider increasing your provisioning level with the UpdateTable
-// API. This Message is received when provisioned throughput is exceeded
-// is on a provisioned DynamoDB table. The level of configured provisioned
-// throughput for one or more global secondary indexes of the table was exceeded.
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded is on a provisioned GSI.
-//
-// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index. DynamoDB is automatically
-// scaling your table or index so please try again shortly. If exceptions
-// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
-// This message is returned when writes get throttled on an On-Demand table
-// as DynamoDB is automatically scaling the table. Throughput exceeds the
-// current capacity for one or more global secondary indexes. DynamoDB is
-// automatically scaling your index so please try again shortly. This message
-// is returned when writes get throttled on an On-Demand GSI as DynamoDB
-// is automatically scaling the GSI.
-//
-// - Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
-//
-// - TransactionInProgressException
-// The transaction with the given request token is already in progress.
-//
-// Recommended Settings
-//
-// This is a general recommendation for handling the TransactionInProgressException.
-// These settings help ensure that the client retries will trigger completion
-// of the ongoing TransactWriteItems request.
-//
-// - Set clientExecutionTimeout to a value that allows at least one retry
-// to be processed after 5 seconds have elapsed since the first attempt for
-// the TransactWriteItems operation.
-//
-// - Set socketTimeout to a value a little lower than the requestTimeout
-// setting.
-//
-// - requestTimeout should be set based on the time taken for the individual
-// retries of a single HTTP request for your use case, but setting it to
-// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException
-// errors.
-//
-// - Use exponential backoff when retrying and tune backoff if needed.
-//
-// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97),
-// example timeout settings based on the guidelines above are as follows:
-//
-// Example timeline:
-//
-// - 0-1000 first attempt
-//
-// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base
-// delay for 4xx errors)
-//
-// - 1500-2500 second attempt
-//
-// - 2500-3500 second sleep/delay (500 * 2, exponential backoff)
-//
-// - 3500-4500 third attempt
-//
-// - 4500-6500 third sleep/delay (500 * 2^2)
-//
-// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds
-// have elapsed since the first attempt reached TC)
-//
-// - IdempotentParameterMismatchException
-// DynamoDB rejected the request because you retried a request with a different
-// payload but with an idempotent token that was already used.
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems
-func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) {
- req, out := c.TransactWriteItemsRequest(input)
- return out, req.Send()
-}
-
-// TransactWriteItemsWithContext is the same as TransactWriteItems with the addition of
-// the ability to pass a context and additional request options.
-//
-// See TransactWriteItems for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) TransactWriteItemsWithContext(ctx aws.Context, input *TransactWriteItemsInput, opts ...request.Option) (*TransactWriteItemsOutput, error) {
- req, out := c.TransactWriteItemsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUntagResource = "UntagResource"
-
-// UntagResourceRequest generates a "aws/request.Request" representing the
-// client's request for the UntagResource operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UntagResource for more information on using the UntagResource
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UntagResourceRequest method.
-// req, resp := client.UntagResourceRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource
-func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) {
- op := &request.Operation{
- Name: opUntagResource,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UntagResourceInput{}
- }
-
- output = &UntagResourceOutput{}
- req = c.newRequest(op, input, output)
- req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// UntagResource API operation for Amazon DynamoDB.
-//
-// Removes the association of tags from an Amazon DynamoDB resource. You can
-// call UntagResource up to five times per second, per account.
-//
-// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
-// in the Amazon DynamoDB Developer Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UntagResource for usage and error information.
-//
-// Returned Error Types:
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource
-func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) {
- req, out := c.UntagResourceRequest(input)
- return out, req.Send()
-}
-
-// UntagResourceWithContext is the same as UntagResource with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UntagResource for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) {
- req, out := c.UntagResourceRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateContinuousBackups = "UpdateContinuousBackups"
-
-// UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateContinuousBackups operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateContinuousBackups for more information on using the UpdateContinuousBackups
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateContinuousBackupsRequest method.
-// req, resp := client.UpdateContinuousBackupsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups
-func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) {
- op := &request.Operation{
- Name: opUpdateContinuousBackups,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateContinuousBackupsInput{}
- }
-
- output = &UpdateContinuousBackupsOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// UpdateContinuousBackups API operation for Amazon DynamoDB.
-//
-// UpdateContinuousBackups enables or disables point in time recovery for the
-// specified table. A successful UpdateContinuousBackups call returns the current
-// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables
-// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus
-// will be set to ENABLED.
-//
-// Once continuous backups and point in time recovery are enabled, you can restore
-// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime.
-//
-// LatestRestorableDateTime is typically 5 minutes before the current time.
-// You can restore your table to any point in time during the last 35 days.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateContinuousBackups for usage and error information.
-//
-// Returned Error Types:
-//
-// - TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account or the subscriber is operating in the wrong Amazon Web
-// Services Region.
-//
-// - ContinuousBackupsUnavailableException
-// Backups have not yet been enabled for this table.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups
-func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) {
- req, out := c.UpdateContinuousBackupsRequest(input)
- return out, req.Send()
-}
-
-// UpdateContinuousBackupsWithContext is the same as UpdateContinuousBackups with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateContinuousBackups for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateContinuousBackupsWithContext(ctx aws.Context, input *UpdateContinuousBackupsInput, opts ...request.Option) (*UpdateContinuousBackupsOutput, error) {
- req, out := c.UpdateContinuousBackupsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateContributorInsights = "UpdateContributorInsights"
-
-// UpdateContributorInsightsRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateContributorInsights operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateContributorInsights for more information on using the UpdateContributorInsights
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateContributorInsightsRequest method.
-// req, resp := client.UpdateContributorInsightsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights
-func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsightsInput) (req *request.Request, output *UpdateContributorInsightsOutput) {
- op := &request.Operation{
- Name: opUpdateContributorInsights,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateContributorInsightsInput{}
- }
-
- output = &UpdateContributorInsightsOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// UpdateContributorInsights API operation for Amazon DynamoDB.
-//
-// Updates the status for contributor insights for a specific table or index.
-// CloudWatch Contributor Insights for DynamoDB graphs display the partition
-// key and (if applicable) sort key of frequently accessed items and frequently
-// throttled items in plaintext. If you require the use of Amazon Web Services
-// Key Management Service (KMS) to encrypt this table’s partition key and
-// sort key data with an Amazon Web Services managed key or customer managed
-// key, you should not enable CloudWatch Contributor Insights for DynamoDB for
-// this table.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateContributorInsights for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights
-func (c *DynamoDB) UpdateContributorInsights(input *UpdateContributorInsightsInput) (*UpdateContributorInsightsOutput, error) {
- req, out := c.UpdateContributorInsightsRequest(input)
- return out, req.Send()
-}
-
-// UpdateContributorInsightsWithContext is the same as UpdateContributorInsights with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateContributorInsights for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateContributorInsightsWithContext(ctx aws.Context, input *UpdateContributorInsightsInput, opts ...request.Option) (*UpdateContributorInsightsOutput, error) {
- req, out := c.UpdateContributorInsightsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateGlobalTable = "UpdateGlobalTable"
-
-// UpdateGlobalTableRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateGlobalTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateGlobalTable for more information on using the UpdateGlobalTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateGlobalTableRequest method.
-// req, resp := client.UpdateGlobalTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable
-func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) {
- op := &request.Operation{
- Name: opUpdateGlobalTable,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateGlobalTableInput{}
- }
-
- output = &UpdateGlobalTableOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// UpdateGlobalTable API operation for Amazon DynamoDB.
-//
-// Adds or removes replicas in the specified global table. The global table
-// must already exist to be able to use this operation. Any replica to be added
-// must be empty, have the same name as the global table, have the same key
-// schema, have DynamoDB Streams enabled, and have the same provisioned and
-// maximum write capacity units.
-//
-// This documentation is for version 2017.11.29 (Legacy) of global tables, which
-// should be avoided for new global tables. Customers should use Global Tables
-// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
-// when possible, because it provides greater flexibility, higher efficiency,
-// and consumes less write capacity than 2017.11.29 (Legacy).
-//
-// To determine which version you're using, see Determining the global table
-// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html).
-// To update existing global tables from version 2017.11.29 (Legacy) to version
-// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html).
-//
-// For global tables, this operation only applies to global tables using Version
-// 2019.11.21 (Current version). If you are using global tables Version 2019.11.21
-// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
-// you can use UpdateTable (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_UpdateTable.html)
-// instead.
-//
-// Although you can use UpdateGlobalTable to add replicas and remove replicas
-// in a single request, for simplicity we recommend that you issue separate
-// requests for adding or removing replicas.
-//
-// If global secondary indexes are specified, then the following conditions
-// must also be met:
-//
-// - The global secondary indexes must have the same name.
-//
-// - The global secondary indexes must have the same hash key and sort key
-// (if present).
-//
-// - The global secondary indexes must have the same provisioned and maximum
-// write capacity units.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateGlobalTable for usage and error information.
-//
-// Returned Error Types:
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - GlobalTableNotFoundException
-// The specified global table does not exist.
-//
-// - ReplicaAlreadyExistsException
-// The specified replica is already part of the global table.
-//
-// - ReplicaNotFoundException
-// The specified replica is no longer part of the global table.
-//
-// - TableNotFoundException
-// A source table with the name TableName does not currently exist within the
-// subscriber's account or the subscriber is operating in the wrong Amazon Web
-// Services Region.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable
-func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) {
- req, out := c.UpdateGlobalTableRequest(input)
- return out, req.Send()
-}
-
-// UpdateGlobalTableWithContext is the same as UpdateGlobalTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateGlobalTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateGlobalTableWithContext(ctx aws.Context, input *UpdateGlobalTableInput, opts ...request.Option) (*UpdateGlobalTableOutput, error) {
- req, out := c.UpdateGlobalTableRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings"
-
-// UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateGlobalTableSettings operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateGlobalTableSettings for more information on using the UpdateGlobalTableSettings
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateGlobalTableSettingsRequest method.
-// req, resp := client.UpdateGlobalTableSettingsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings
-func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) {
- op := &request.Operation{
- Name: opUpdateGlobalTableSettings,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateGlobalTableSettingsInput{}
- }
-
- output = &UpdateGlobalTableSettingsOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// UpdateGlobalTableSettings API operation for Amazon DynamoDB.
-//
-// Updates settings for a global table.
-//
-// This documentation is for version 2017.11.29 (Legacy) of global tables, which
-// should be avoided for new global tables. Customers should use Global Tables
-// version 2019.11.21 (Current) (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
-// when possible, because it provides greater flexibility, higher efficiency,
-// and consumes less write capacity than 2017.11.29 (Legacy).
-//
-// To determine which version you're using, see Determining the global table
-// version you are using (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.DetermineVersion.html).
-// To update existing global tables from version 2017.11.29 (Legacy) to version
-// 2019.11.21 (Current), see Upgrading global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/V2globaltables_upgrade.html).
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateGlobalTableSettings for usage and error information.
-//
-// Returned Error Types:
-//
-// - GlobalTableNotFoundException
-// The specified global table does not exist.
-//
-// - ReplicaNotFoundException
-// The specified replica is no longer part of the global table.
-//
-// - IndexNotFoundException
-// The operation tried to access a nonexistent index.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings
-func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) {
- req, out := c.UpdateGlobalTableSettingsRequest(input)
- return out, req.Send()
-}
-
-// UpdateGlobalTableSettingsWithContext is the same as UpdateGlobalTableSettings with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateGlobalTableSettings for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateGlobalTableSettingsWithContext(ctx aws.Context, input *UpdateGlobalTableSettingsInput, opts ...request.Option) (*UpdateGlobalTableSettingsOutput, error) {
- req, out := c.UpdateGlobalTableSettingsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateItem = "UpdateItem"
-
-// UpdateItemRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateItem operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateItem for more information on using the UpdateItem
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateItemRequest method.
-// req, resp := client.UpdateItemRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem
-func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) {
- op := &request.Operation{
- Name: opUpdateItem,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateItemInput{}
- }
-
- output = &UpdateItemOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// UpdateItem API operation for Amazon DynamoDB.
-//
-// Edits an existing item's attributes, or adds a new item to the table if it
-// does not already exist. You can put, delete, or add attribute values. You
-// can also perform a conditional update on an existing item (insert a new attribute
-// name-value pair if it doesn't exist, or replace an existing name-value pair
-// if it has certain expected attribute values).
-//
-// You can also return the item's attribute values in the same UpdateItem operation
-// using the ReturnValues parameter.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateItem for usage and error information.
-//
-// Returned Error Types:
-//
-// - ConditionalCheckFailedException
-// A condition specified in the operation could not be evaluated.
-//
-// - ProvisionedThroughputExceededException
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - ItemCollectionSizeLimitExceededException
-// An item collection is too large. This exception is only returned for tables
-// that have one or more local secondary indexes.
-//
-// - TransactionConflictException
-// Operation was rejected because there is an ongoing transaction for the item.
-//
-// - RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem
-func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) {
- req, out := c.UpdateItemRequest(input)
- return out, req.Send()
-}
-
-// UpdateItemWithContext is the same as UpdateItem with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateItem for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput, opts ...request.Option) (*UpdateItemOutput, error) {
- req, out := c.UpdateItemRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateKinesisStreamingDestination = "UpdateKinesisStreamingDestination"
-
-// UpdateKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateKinesisStreamingDestination operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateKinesisStreamingDestination for more information on using the UpdateKinesisStreamingDestination
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateKinesisStreamingDestinationRequest method.
-// req, resp := client.UpdateKinesisStreamingDestinationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateKinesisStreamingDestination
-func (c *DynamoDB) UpdateKinesisStreamingDestinationRequest(input *UpdateKinesisStreamingDestinationInput) (req *request.Request, output *UpdateKinesisStreamingDestinationOutput) {
- op := &request.Operation{
- Name: opUpdateKinesisStreamingDestination,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateKinesisStreamingDestinationInput{}
- }
-
- output = &UpdateKinesisStreamingDestinationOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// UpdateKinesisStreamingDestination API operation for Amazon DynamoDB.
-//
-// The command to update the Kinesis stream destination.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateKinesisStreamingDestination for usage and error information.
-//
-// Returned Error Types:
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateKinesisStreamingDestination
-func (c *DynamoDB) UpdateKinesisStreamingDestination(input *UpdateKinesisStreamingDestinationInput) (*UpdateKinesisStreamingDestinationOutput, error) {
- req, out := c.UpdateKinesisStreamingDestinationRequest(input)
- return out, req.Send()
-}
-
-// UpdateKinesisStreamingDestinationWithContext is the same as UpdateKinesisStreamingDestination with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateKinesisStreamingDestination for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateKinesisStreamingDestinationWithContext(ctx aws.Context, input *UpdateKinesisStreamingDestinationInput, opts ...request.Option) (*UpdateKinesisStreamingDestinationOutput, error) {
- req, out := c.UpdateKinesisStreamingDestinationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateTable = "UpdateTable"
-
-// UpdateTableRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateTable operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateTable for more information on using the UpdateTable
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateTableRequest method.
-// req, resp := client.UpdateTableRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable
-func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) {
- op := &request.Operation{
- Name: opUpdateTable,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateTableInput{}
- }
-
- output = &UpdateTableOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// UpdateTable API operation for Amazon DynamoDB.
-//
-// Modifies the provisioned throughput settings, global secondary indexes, or
-// DynamoDB Streams settings for a given table.
-//
-// For global tables, this operation only applies to global tables using Version
-// 2019.11.21 (Current version).
-//
-// You can only perform one of the following operations at once:
-//
-// - Modify the provisioned throughput settings of the table.
-//
-// - Remove a global secondary index from the table.
-//
-// - Create a new global secondary index on the table. After the index begins
-// backfilling, you can use UpdateTable to perform other operations.
-//
-// UpdateTable is an asynchronous operation; while it's executing, the table
-// status changes from ACTIVE to UPDATING. While it's UPDATING, you can't issue
-// another UpdateTable request. When the table returns to the ACTIVE state,
-// the UpdateTable operation is complete.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateTable for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable
-func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) {
- req, out := c.UpdateTableRequest(input)
- return out, req.Send()
-}
-
-// UpdateTableWithContext is the same as UpdateTable with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateTable for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) {
- req, out := c.UpdateTableRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateTableReplicaAutoScaling = "UpdateTableReplicaAutoScaling"
-
-// UpdateTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateTableReplicaAutoScaling operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateTableReplicaAutoScaling for more information on using the UpdateTableReplicaAutoScaling
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateTableReplicaAutoScalingRequest method.
-// req, resp := client.UpdateTableReplicaAutoScalingRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling
-func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplicaAutoScalingInput) (req *request.Request, output *UpdateTableReplicaAutoScalingOutput) {
- op := &request.Operation{
- Name: opUpdateTableReplicaAutoScaling,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateTableReplicaAutoScalingInput{}
- }
-
- output = &UpdateTableReplicaAutoScalingOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// UpdateTableReplicaAutoScaling API operation for Amazon DynamoDB.
-//
-// Updates auto scaling settings on your global tables at once.
-//
-// For global tables, this operation only applies to global tables using Version
-// 2019.11.21 (Current version).
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateTableReplicaAutoScaling for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling
-func (c *DynamoDB) UpdateTableReplicaAutoScaling(input *UpdateTableReplicaAutoScalingInput) (*UpdateTableReplicaAutoScalingOutput, error) {
- req, out := c.UpdateTableReplicaAutoScalingRequest(input)
- return out, req.Send()
-}
-
-// UpdateTableReplicaAutoScalingWithContext is the same as UpdateTableReplicaAutoScaling with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateTableReplicaAutoScaling for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateTableReplicaAutoScalingWithContext(ctx aws.Context, input *UpdateTableReplicaAutoScalingInput, opts ...request.Option) (*UpdateTableReplicaAutoScalingOutput, error) {
- req, out := c.UpdateTableReplicaAutoScalingRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opUpdateTimeToLive = "UpdateTimeToLive"
-
-// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the
-// client's request for the UpdateTimeToLive operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See UpdateTimeToLive for more information on using the UpdateTimeToLive
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the UpdateTimeToLiveRequest method.
-// req, resp := client.UpdateTimeToLiveRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive
-func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) {
- op := &request.Operation{
- Name: opUpdateTimeToLive,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &UpdateTimeToLiveInput{}
- }
-
- output = &UpdateTimeToLiveOutput{}
- req = c.newRequest(op, input, output)
- // if custom endpoint for the request is set to a non empty string,
- // we skip the endpoint discovery workflow.
- if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
- if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
- de := discovererDescribeEndpoints{
- Required: false,
- EndpointCache: c.endpointCache,
- Params: map[string]*string{
- "op": aws.String(req.Operation.Name),
- },
- Client: c,
- }
-
- for k, v := range de.Params {
- if v == nil {
- delete(de.Params, k)
- }
- }
-
- req.Handlers.Build.PushFrontNamed(request.NamedHandler{
- Name: "crr.endpointdiscovery",
- Fn: de.Handler,
- })
- }
- }
- return
-}
-
-// UpdateTimeToLive API operation for Amazon DynamoDB.
-//
-// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the
-// specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification.
-// It can take up to one hour for the change to fully process. Any additional
-// UpdateTimeToLive calls for the same table during this one hour duration result
-// in a ValidationException.
-//
-// TTL compares the current time in epoch time format to the time stored in
-// the TTL attribute of an item. If the epoch time value stored in the attribute
-// is less than the current time, the item is marked as expired and subsequently
-// deleted.
-//
-// The epoch time format is the number of seconds elapsed since 12:00:00 AM
-// January 1, 1970 UTC.
-//
-// DynamoDB deletes expired items on a best-effort basis to ensure availability
-// of throughput for other data operations.
-//
-// DynamoDB typically deletes expired items within two days of expiration. The
-// exact duration within which an item gets deleted after expiration is specific
-// to the nature of the workload. Items that have expired and not been deleted
-// will still show up in reads, queries, and scans.
-//
-// As items are deleted, they are removed from any local secondary index and
-// global secondary index immediately in the same eventually consistent way
-// as a standard delete operation.
-//
-// For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html)
-// in the Amazon DynamoDB Developer Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateTimeToLive for usage and error information.
-//
-// Returned Error Types:
-//
-// - ResourceInUseException
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-//
-// - ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-//
-// - LimitExceededException
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-//
-// - InternalServerError
-// An error occurred on the server side.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive
-func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) {
- req, out := c.UpdateTimeToLiveRequest(input)
- return out, req.Send()
-}
-
-// UpdateTimeToLiveWithContext is the same as UpdateTimeToLive with the addition of
-// the ability to pass a context and additional request options.
-//
-// See UpdateTimeToLive for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) UpdateTimeToLiveWithContext(ctx aws.Context, input *UpdateTimeToLiveInput, opts ...request.Option) (*UpdateTimeToLiveOutput, error) {
- req, out := c.UpdateTimeToLiveRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// Contains details of a table archival operation.
-type ArchivalSummary struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) of the backup the table was archived to, when
- // applicable in the archival reason. If you wish to restore this backup to
- // the same table name, you will need to delete the original table.
- ArchivalBackupArn *string `min:"37" type:"string"`
-
- // The date and time when table archival was initiated by DynamoDB, in UNIX
- // epoch time format.
- ArchivalDateTime *time.Time `type:"timestamp"`
-
- // The reason DynamoDB archived the table. Currently, the only possible value
- // is:
- //
- // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to
- // the table's KMS key being inaccessible for more than seven days. An On-Demand
- // backup was created at the archival time.
- ArchivalReason *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ArchivalSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ArchivalSummary) GoString() string {
- return s.String()
-}
-
-// SetArchivalBackupArn sets the ArchivalBackupArn field's value.
-func (s *ArchivalSummary) SetArchivalBackupArn(v string) *ArchivalSummary {
- s.ArchivalBackupArn = &v
- return s
-}
-
-// SetArchivalDateTime sets the ArchivalDateTime field's value.
-func (s *ArchivalSummary) SetArchivalDateTime(v time.Time) *ArchivalSummary {
- s.ArchivalDateTime = &v
- return s
-}
-
-// SetArchivalReason sets the ArchivalReason field's value.
-func (s *ArchivalSummary) SetArchivalReason(v string) *ArchivalSummary {
- s.ArchivalReason = &v
- return s
-}
-
-// Represents an attribute for describing the schema for the table and indexes.
-type AttributeDefinition struct {
- _ struct{} `type:"structure"`
-
- // A name for the attribute.
- //
- // AttributeName is a required field
- AttributeName *string `min:"1" type:"string" required:"true"`
-
- // The data type for the attribute, where:
- //
- // * S - the attribute is of type String
- //
- // * N - the attribute is of type Number
- //
- // * B - the attribute is of type Binary
- //
- // AttributeType is a required field
- AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AttributeDefinition) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AttributeDefinition) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AttributeDefinition) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "AttributeDefinition"}
- if s.AttributeName == nil {
- invalidParams.Add(request.NewErrParamRequired("AttributeName"))
- }
- if s.AttributeName != nil && len(*s.AttributeName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
- }
- if s.AttributeType == nil {
- invalidParams.Add(request.NewErrParamRequired("AttributeType"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributeName sets the AttributeName field's value.
-func (s *AttributeDefinition) SetAttributeName(v string) *AttributeDefinition {
- s.AttributeName = &v
- return s
-}
-
-// SetAttributeType sets the AttributeType field's value.
-func (s *AttributeDefinition) SetAttributeType(v string) *AttributeDefinition {
- s.AttributeType = &v
- return s
-}
-
-// Represents the data for an attribute.
-//
-// Each attribute value is described as a name-value pair. The name is the data
-// type, and the value is the data itself.
-//
-// For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
-// in the Amazon DynamoDB Developer Guide.
-type AttributeValue struct {
- _ struct{} `type:"structure"`
-
- // An attribute of type Binary. For example:
- //
- // "B": "dGhpcyB0ZXh0IGlzIGJhc2U2NC1lbmNvZGVk"
- // B is automatically base64 encoded/decoded by the SDK.
- B []byte `type:"blob"`
-
- // An attribute of type Boolean. For example:
- //
- // "BOOL": true
- BOOL *bool `type:"boolean"`
-
- // An attribute of type Binary Set. For example:
- //
- // "BS": ["U3Vubnk=", "UmFpbnk=", "U25vd3k="]
- BS [][]byte `type:"list"`
-
- // An attribute of type List. For example:
- //
- // "L": [ {"S": "Cookies"} , {"S": "Coffee"}, {"N": "3.14159"}]
- L []*AttributeValue `type:"list"`
-
- // An attribute of type Map. For example:
- //
- // "M": {"Name": {"S": "Joe"}, "Age": {"N": "35"}}
- M map[string]*AttributeValue `type:"map"`
-
- // An attribute of type Number. For example:
- //
- // "N": "123.45"
- //
- // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility
- // across languages and libraries. However, DynamoDB treats them as number type
- // attributes for mathematical operations.
- N *string `type:"string"`
-
- // An attribute of type Number Set. For example:
- //
- // "NS": ["42.2", "-19", "7.5", "3.14"]
- //
- // Numbers are sent across the network to DynamoDB as strings, to maximize compatibility
- // across languages and libraries. However, DynamoDB treats them as number type
- // attributes for mathematical operations.
- NS []*string `type:"list"`
-
- // An attribute of type Null. For example:
- //
- // "NULL": true
- NULL *bool `type:"boolean"`
-
- // An attribute of type String. For example:
- //
- // "S": "Hello"
- S *string `type:"string"`
-
- // An attribute of type String Set. For example:
- //
- // "SS": ["Giraffe", "Hippo" ,"Zebra"]
- SS []*string `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AttributeValue) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AttributeValue) GoString() string {
- return s.String()
-}
-
-// SetB sets the B field's value.
-func (s *AttributeValue) SetB(v []byte) *AttributeValue {
- s.B = v
- return s
-}
-
-// SetBOOL sets the BOOL field's value.
-func (s *AttributeValue) SetBOOL(v bool) *AttributeValue {
- s.BOOL = &v
- return s
-}
-
-// SetBS sets the BS field's value.
-func (s *AttributeValue) SetBS(v [][]byte) *AttributeValue {
- s.BS = v
- return s
-}
-
-// SetL sets the L field's value.
-func (s *AttributeValue) SetL(v []*AttributeValue) *AttributeValue {
- s.L = v
- return s
-}
-
-// SetM sets the M field's value.
-func (s *AttributeValue) SetM(v map[string]*AttributeValue) *AttributeValue {
- s.M = v
- return s
-}
-
-// SetN sets the N field's value.
-func (s *AttributeValue) SetN(v string) *AttributeValue {
- s.N = &v
- return s
-}
-
-// SetNS sets the NS field's value.
-func (s *AttributeValue) SetNS(v []*string) *AttributeValue {
- s.NS = v
- return s
-}
-
-// SetNULL sets the NULL field's value.
-func (s *AttributeValue) SetNULL(v bool) *AttributeValue {
- s.NULL = &v
- return s
-}
-
-// SetS sets the S field's value.
-func (s *AttributeValue) SetS(v string) *AttributeValue {
- s.S = &v
- return s
-}
-
-// SetSS sets the SS field's value.
-func (s *AttributeValue) SetSS(v []*string) *AttributeValue {
- s.SS = v
- return s
-}
-
-// For the UpdateItem operation, represents the attributes to be modified, the
-// action to perform on each, and the new value for each.
-//
-// You cannot use UpdateItem to update any primary key attributes. Instead,
-// you will need to delete the item, and then use PutItem to create a new item
-// with new attributes.
-//
-// Attribute values cannot be null; string and binary type attributes must have
-// lengths greater than zero; and set type attributes must not be empty. Requests
-// with empty values will be rejected with a ValidationException exception.
-type AttributeValueUpdate struct {
- _ struct{} `type:"structure"`
-
- // Specifies how to perform the update. Valid values are PUT (default), DELETE,
- // and ADD. The behavior depends on whether the specified primary key already
- // exists in the table.
- //
- // If an item with the specified Key is found in the table:
- //
- // * PUT - Adds the specified attribute to the item. If the attribute already
- // exists, it is replaced by the new value.
- //
- // * DELETE - If no value is specified, the attribute and its value are removed
- // from the item. The data type of the specified value must match the existing
- // value's data type. If a set of values is specified, then those values
- // are subtracted from the old set. For example, if the attribute value was
- // the set [a,b,c] and the DELETE action specified [a,c], then the final
- // attribute value would be [b]. Specifying an empty set is an error.
- //
- // * ADD - If the attribute does not already exist, then the attribute and
- // its values are added to the item. If the attribute does exist, then the
- // behavior of ADD depends on the data type of the attribute: If the existing
- // attribute is a number, and if Value is also a number, then the Value is
- // mathematically added to the existing attribute. If Value is a negative
- // number, then it is subtracted from the existing attribute. If you use
- // ADD to increment or decrement a number value for an item that doesn't
- // exist before the update, DynamoDB uses 0 as the initial value. In addition,
- // if you use ADD to update an existing item, and intend to increment or
- // decrement an attribute value which does not yet exist, DynamoDB uses 0
- // as the initial value. For example, suppose that the item you want to update
- // does not yet have an attribute named itemcount, but you decide to ADD
- // the number 3 to this attribute anyway, even though it currently does not
- // exist. DynamoDB will create the itemcount attribute, set its initial value
- // to 0, and finally add 3 to it. The result will be a new itemcount attribute
- // in the item, with a value of 3. If the existing data type is a set, and
- // if the Value is also a set, then the Value is added to the existing set.
- // (This is a set operation, not mathematical addition.) For example, if
- // the attribute value was the set [1,2], and the ADD action specified [3],
- // then the final attribute value would be [1,2,3]. An error occurs if an
- // Add action is specified for a set attribute and the attribute type specified
- // does not match the existing set type. Both sets must have the same primitive
- // data type. For example, if the existing data type is a set of strings,
- // the Value must also be a set of strings. The same holds true for number
- // sets and binary sets. This action is only valid for an existing attribute
- // whose data type is number or is a set. Do not use ADD for any other data
- // types.
- //
- // If no item with the specified Key is found:
- //
- // * PUT - DynamoDB creates a new item with the specified primary key, and
- // then adds the attribute.
- //
- // * DELETE - Nothing happens; there is no attribute to delete.
- //
- // * ADD - DynamoDB creates a new item with the supplied primary key and
- // number (or set) for the attribute value. The only data types allowed are
- // number, number set, string set or binary set.
- Action *string `type:"string" enum:"AttributeAction"`
-
- // Represents the data for an attribute.
- //
- // Each attribute value is described as a name-value pair. The name is the data
- // type, and the value is the data itself.
- //
- // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
- // in the Amazon DynamoDB Developer Guide.
- Value *AttributeValue `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AttributeValueUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AttributeValueUpdate) GoString() string {
- return s.String()
-}
-
-// SetAction sets the Action field's value.
-func (s *AttributeValueUpdate) SetAction(v string) *AttributeValueUpdate {
- s.Action = &v
- return s
-}
-
-// SetValue sets the Value field's value.
-func (s *AttributeValueUpdate) SetValue(v *AttributeValue) *AttributeValueUpdate {
- s.Value = v
- return s
-}
-
-// Represents the properties of the scaling policy.
-type AutoScalingPolicyDescription struct {
- _ struct{} `type:"structure"`
-
- // The name of the scaling policy.
- PolicyName *string `min:"1" type:"string"`
-
- // Represents a target tracking scaling policy configuration.
- TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingPolicyDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingPolicyDescription) GoString() string {
- return s.String()
-}
-
-// SetPolicyName sets the PolicyName field's value.
-func (s *AutoScalingPolicyDescription) SetPolicyName(v string) *AutoScalingPolicyDescription {
- s.PolicyName = &v
- return s
-}
-
-// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value.
-func (s *AutoScalingPolicyDescription) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) *AutoScalingPolicyDescription {
- s.TargetTrackingScalingPolicyConfiguration = v
- return s
-}
-
-// Represents the auto scaling policy to be modified.
-type AutoScalingPolicyUpdate struct {
- _ struct{} `type:"structure"`
-
- // The name of the scaling policy.
- PolicyName *string `min:"1" type:"string"`
-
- // Represents a target tracking scaling policy configuration.
- //
- // TargetTrackingScalingPolicyConfiguration is a required field
- TargetTrackingScalingPolicyConfiguration *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingPolicyUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingPolicyUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AutoScalingPolicyUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "AutoScalingPolicyUpdate"}
- if s.PolicyName != nil && len(*s.PolicyName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1))
- }
- if s.TargetTrackingScalingPolicyConfiguration == nil {
- invalidParams.Add(request.NewErrParamRequired("TargetTrackingScalingPolicyConfiguration"))
- }
- if s.TargetTrackingScalingPolicyConfiguration != nil {
- if err := s.TargetTrackingScalingPolicyConfiguration.Validate(); err != nil {
- invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetPolicyName sets the PolicyName field's value.
-func (s *AutoScalingPolicyUpdate) SetPolicyName(v string) *AutoScalingPolicyUpdate {
- s.PolicyName = &v
- return s
-}
-
-// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value.
-func (s *AutoScalingPolicyUpdate) SetTargetTrackingScalingPolicyConfiguration(v *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) *AutoScalingPolicyUpdate {
- s.TargetTrackingScalingPolicyConfiguration = v
- return s
-}
-
-// Represents the auto scaling settings for a global table or global secondary
-// index.
-type AutoScalingSettingsDescription struct {
- _ struct{} `type:"structure"`
-
- // Disabled auto scaling for this global table or global secondary index.
- AutoScalingDisabled *bool `type:"boolean"`
-
- // Role ARN used for configuring the auto scaling policy.
- AutoScalingRoleArn *string `type:"string"`
-
- // The maximum capacity units that a global table or global secondary index
- // should be scaled up to.
- MaximumUnits *int64 `min:"1" type:"long"`
-
- // The minimum capacity units that a global table or global secondary index
- // should be scaled down to.
- MinimumUnits *int64 `min:"1" type:"long"`
-
- // Information about the scaling policies.
- ScalingPolicies []*AutoScalingPolicyDescription `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingSettingsDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingSettingsDescription) GoString() string {
- return s.String()
-}
-
-// SetAutoScalingDisabled sets the AutoScalingDisabled field's value.
-func (s *AutoScalingSettingsDescription) SetAutoScalingDisabled(v bool) *AutoScalingSettingsDescription {
- s.AutoScalingDisabled = &v
- return s
-}
-
-// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value.
-func (s *AutoScalingSettingsDescription) SetAutoScalingRoleArn(v string) *AutoScalingSettingsDescription {
- s.AutoScalingRoleArn = &v
- return s
-}
-
-// SetMaximumUnits sets the MaximumUnits field's value.
-func (s *AutoScalingSettingsDescription) SetMaximumUnits(v int64) *AutoScalingSettingsDescription {
- s.MaximumUnits = &v
- return s
-}
-
-// SetMinimumUnits sets the MinimumUnits field's value.
-func (s *AutoScalingSettingsDescription) SetMinimumUnits(v int64) *AutoScalingSettingsDescription {
- s.MinimumUnits = &v
- return s
-}
-
-// SetScalingPolicies sets the ScalingPolicies field's value.
-func (s *AutoScalingSettingsDescription) SetScalingPolicies(v []*AutoScalingPolicyDescription) *AutoScalingSettingsDescription {
- s.ScalingPolicies = v
- return s
-}
-
-// Represents the auto scaling settings to be modified for a global table or
-// global secondary index.
-type AutoScalingSettingsUpdate struct {
- _ struct{} `type:"structure"`
-
- // Disabled auto scaling for this global table or global secondary index.
- AutoScalingDisabled *bool `type:"boolean"`
-
- // Role ARN used for configuring auto scaling policy.
- AutoScalingRoleArn *string `min:"1" type:"string"`
-
- // The maximum capacity units that a global table or global secondary index
- // should be scaled up to.
- MaximumUnits *int64 `min:"1" type:"long"`
-
- // The minimum capacity units that a global table or global secondary index
- // should be scaled down to.
- MinimumUnits *int64 `min:"1" type:"long"`
-
- // The scaling policy to apply for scaling target global table or global secondary
- // index capacity units.
- ScalingPolicyUpdate *AutoScalingPolicyUpdate `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingSettingsUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingSettingsUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AutoScalingSettingsUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "AutoScalingSettingsUpdate"}
- if s.AutoScalingRoleArn != nil && len(*s.AutoScalingRoleArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AutoScalingRoleArn", 1))
- }
- if s.MaximumUnits != nil && *s.MaximumUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaximumUnits", 1))
- }
- if s.MinimumUnits != nil && *s.MinimumUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MinimumUnits", 1))
- }
- if s.ScalingPolicyUpdate != nil {
- if err := s.ScalingPolicyUpdate.Validate(); err != nil {
- invalidParams.AddNested("ScalingPolicyUpdate", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAutoScalingDisabled sets the AutoScalingDisabled field's value.
-func (s *AutoScalingSettingsUpdate) SetAutoScalingDisabled(v bool) *AutoScalingSettingsUpdate {
- s.AutoScalingDisabled = &v
- return s
-}
-
-// SetAutoScalingRoleArn sets the AutoScalingRoleArn field's value.
-func (s *AutoScalingSettingsUpdate) SetAutoScalingRoleArn(v string) *AutoScalingSettingsUpdate {
- s.AutoScalingRoleArn = &v
- return s
-}
-
-// SetMaximumUnits sets the MaximumUnits field's value.
-func (s *AutoScalingSettingsUpdate) SetMaximumUnits(v int64) *AutoScalingSettingsUpdate {
- s.MaximumUnits = &v
- return s
-}
-
-// SetMinimumUnits sets the MinimumUnits field's value.
-func (s *AutoScalingSettingsUpdate) SetMinimumUnits(v int64) *AutoScalingSettingsUpdate {
- s.MinimumUnits = &v
- return s
-}
-
-// SetScalingPolicyUpdate sets the ScalingPolicyUpdate field's value.
-func (s *AutoScalingSettingsUpdate) SetScalingPolicyUpdate(v *AutoScalingPolicyUpdate) *AutoScalingSettingsUpdate {
- s.ScalingPolicyUpdate = v
- return s
-}
-
-// Represents the properties of a target tracking scaling policy.
-type AutoScalingTargetTrackingScalingPolicyConfigurationDescription struct {
- _ struct{} `type:"structure"`
-
- // Indicates whether scale in by the target tracking policy is disabled. If
- // the value is true, scale in is disabled and the target tracking policy won't
- // remove capacity from the scalable resource. Otherwise, scale in is enabled
- // and the target tracking policy can remove capacity from the scalable resource.
- // The default value is false.
- DisableScaleIn *bool `type:"boolean"`
-
- // The amount of time, in seconds, after a scale in activity completes before
- // another scale in activity can start. The cooldown period is used to block
- // subsequent scale in requests until it has expired. You should scale in conservatively
- // to protect your application's availability. However, if another alarm triggers
- // a scale out policy during the cooldown period after a scale-in, application
- // auto scaling scales out your scalable target immediately.
- ScaleInCooldown *int64 `type:"integer"`
-
- // The amount of time, in seconds, after a scale out activity completes before
- // another scale out activity can start. While the cooldown period is in effect,
- // the capacity that has been added by the previous scale out event that initiated
- // the cooldown is calculated as part of the desired capacity for the next scale
- // out. You should continuously (but not excessively) scale out.
- ScaleOutCooldown *int64 `type:"integer"`
-
- // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108
- // (Base 10) or 2e-360 to 2e360 (Base 2).
- //
- // TargetValue is a required field
- TargetValue *float64 `type:"double" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingTargetTrackingScalingPolicyConfigurationDescription) GoString() string {
- return s.String()
-}
-
-// SetDisableScaleIn sets the DisableScaleIn field's value.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription {
- s.DisableScaleIn = &v
- return s
-}
-
-// SetScaleInCooldown sets the ScaleInCooldown field's value.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription {
- s.ScaleInCooldown = &v
- return s
-}
-
-// SetScaleOutCooldown sets the ScaleOutCooldown field's value.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription {
- s.ScaleOutCooldown = &v
- return s
-}
-
-// SetTargetValue sets the TargetValue field's value.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationDescription) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationDescription {
- s.TargetValue = &v
- return s
-}
-
-// Represents the settings of a target tracking scaling policy that will be
-// modified.
-type AutoScalingTargetTrackingScalingPolicyConfigurationUpdate struct {
- _ struct{} `type:"structure"`
-
- // Indicates whether scale in by the target tracking policy is disabled. If
- // the value is true, scale in is disabled and the target tracking policy won't
- // remove capacity from the scalable resource. Otherwise, scale in is enabled
- // and the target tracking policy can remove capacity from the scalable resource.
- // The default value is false.
- DisableScaleIn *bool `type:"boolean"`
-
- // The amount of time, in seconds, after a scale in activity completes before
- // another scale in activity can start. The cooldown period is used to block
- // subsequent scale in requests until it has expired. You should scale in conservatively
- // to protect your application's availability. However, if another alarm triggers
- // a scale out policy during the cooldown period after a scale-in, application
- // auto scaling scales out your scalable target immediately.
- ScaleInCooldown *int64 `type:"integer"`
-
- // The amount of time, in seconds, after a scale out activity completes before
- // another scale out activity can start. While the cooldown period is in effect,
- // the capacity that has been added by the previous scale out event that initiated
- // the cooldown is calculated as part of the desired capacity for the next scale
- // out. You should continuously (but not excessively) scale out.
- ScaleOutCooldown *int64 `type:"integer"`
-
- // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108
- // (Base 10) or 2e-360 to 2e360 (Base 2).
- //
- // TargetValue is a required field
- TargetValue *float64 `type:"double" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "AutoScalingTargetTrackingScalingPolicyConfigurationUpdate"}
- if s.TargetValue == nil {
- invalidParams.Add(request.NewErrParamRequired("TargetValue"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDisableScaleIn sets the DisableScaleIn field's value.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetDisableScaleIn(v bool) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate {
- s.DisableScaleIn = &v
- return s
-}
-
-// SetScaleInCooldown sets the ScaleInCooldown field's value.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleInCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate {
- s.ScaleInCooldown = &v
- return s
-}
-
-// SetScaleOutCooldown sets the ScaleOutCooldown field's value.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetScaleOutCooldown(v int64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate {
- s.ScaleOutCooldown = &v
- return s
-}
-
-// SetTargetValue sets the TargetValue field's value.
-func (s *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate) SetTargetValue(v float64) *AutoScalingTargetTrackingScalingPolicyConfigurationUpdate {
- s.TargetValue = &v
- return s
-}
-
-// Contains the description of the backup created for the table.
-type BackupDescription struct {
- _ struct{} `type:"structure"`
-
- // Contains the details of the backup created for the table.
- BackupDetails *BackupDetails `type:"structure"`
-
- // Contains the details of the table when the backup was created.
- SourceTableDetails *SourceTableDetails `type:"structure"`
-
- // Contains the details of the features enabled on the table when the backup
- // was created. For example, LSIs, GSIs, streams, TTL.
- SourceTableFeatureDetails *SourceTableFeatureDetails `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupDescription) GoString() string {
- return s.String()
-}
-
-// SetBackupDetails sets the BackupDetails field's value.
-func (s *BackupDescription) SetBackupDetails(v *BackupDetails) *BackupDescription {
- s.BackupDetails = v
- return s
-}
-
-// SetSourceTableDetails sets the SourceTableDetails field's value.
-func (s *BackupDescription) SetSourceTableDetails(v *SourceTableDetails) *BackupDescription {
- s.SourceTableDetails = v
- return s
-}
-
-// SetSourceTableFeatureDetails sets the SourceTableFeatureDetails field's value.
-func (s *BackupDescription) SetSourceTableFeatureDetails(v *SourceTableFeatureDetails) *BackupDescription {
- s.SourceTableFeatureDetails = v
- return s
-}
-
-// Contains the details of the backup created for the table.
-type BackupDetails struct {
- _ struct{} `type:"structure"`
-
- // ARN associated with the backup.
- //
- // BackupArn is a required field
- BackupArn *string `min:"37" type:"string" required:"true"`
-
- // Time at which the backup was created. This is the request time of the backup.
- //
- // BackupCreationDateTime is a required field
- BackupCreationDateTime *time.Time `type:"timestamp" required:"true"`
-
- // Time at which the automatic on-demand backup created by DynamoDB will expire.
- // This SYSTEM on-demand backup expires automatically 35 days after its creation.
- BackupExpiryDateTime *time.Time `type:"timestamp"`
-
- // Name of the requested backup.
- //
- // BackupName is a required field
- BackupName *string `min:"3" type:"string" required:"true"`
-
- // Size of the backup in bytes. DynamoDB updates this value approximately every
- // six hours. Recent changes might not be reflected in this value.
- BackupSizeBytes *int64 `type:"long"`
-
- // Backup can be in one of the following states: CREATING, ACTIVE, DELETED.
- //
- // BackupStatus is a required field
- BackupStatus *string `type:"string" required:"true" enum:"BackupStatus"`
-
- // BackupType:
- //
- // * USER - You create and manage these using the on-demand backup feature.
- //
- // * SYSTEM - If you delete a table with point-in-time recovery enabled,
- // a SYSTEM backup is automatically created and is retained for 35 days (at
- // no additional cost). System backups allow you to restore the deleted table
- // to the state it was in just before the point of deletion.
- //
- // * AWS_BACKUP - On-demand backup created by you from Backup service.
- //
- // BackupType is a required field
- BackupType *string `type:"string" required:"true" enum:"BackupType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupDetails) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupDetails) GoString() string {
- return s.String()
-}
-
-// SetBackupArn sets the BackupArn field's value.
-func (s *BackupDetails) SetBackupArn(v string) *BackupDetails {
- s.BackupArn = &v
- return s
-}
-
-// SetBackupCreationDateTime sets the BackupCreationDateTime field's value.
-func (s *BackupDetails) SetBackupCreationDateTime(v time.Time) *BackupDetails {
- s.BackupCreationDateTime = &v
- return s
-}
-
-// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value.
-func (s *BackupDetails) SetBackupExpiryDateTime(v time.Time) *BackupDetails {
- s.BackupExpiryDateTime = &v
- return s
-}
-
-// SetBackupName sets the BackupName field's value.
-func (s *BackupDetails) SetBackupName(v string) *BackupDetails {
- s.BackupName = &v
- return s
-}
-
-// SetBackupSizeBytes sets the BackupSizeBytes field's value.
-func (s *BackupDetails) SetBackupSizeBytes(v int64) *BackupDetails {
- s.BackupSizeBytes = &v
- return s
-}
-
-// SetBackupStatus sets the BackupStatus field's value.
-func (s *BackupDetails) SetBackupStatus(v string) *BackupDetails {
- s.BackupStatus = &v
- return s
-}
-
-// SetBackupType sets the BackupType field's value.
-func (s *BackupDetails) SetBackupType(v string) *BackupDetails {
- s.BackupType = &v
- return s
-}
-
-// There is another ongoing conflicting backup control plane operation on the
-// table. The backup is either being created, deleted or restored to a table.
-type BackupInUseException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupInUseException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupInUseException) GoString() string {
- return s.String()
-}
-
-func newErrorBackupInUseException(v protocol.ResponseMetadata) error {
- return &BackupInUseException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *BackupInUseException) Code() string {
- return "BackupInUseException"
-}
-
-// Message returns the exception's message.
-func (s *BackupInUseException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *BackupInUseException) OrigErr() error {
- return nil
-}
-
-func (s *BackupInUseException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *BackupInUseException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *BackupInUseException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Backup not found for the given BackupARN.
-type BackupNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorBackupNotFoundException(v protocol.ResponseMetadata) error {
- return &BackupNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *BackupNotFoundException) Code() string {
- return "BackupNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *BackupNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *BackupNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *BackupNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *BackupNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *BackupNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Contains details for the backup.
-type BackupSummary struct {
- _ struct{} `type:"structure"`
-
- // ARN associated with the backup.
- BackupArn *string `min:"37" type:"string"`
-
- // Time at which the backup was created.
- BackupCreationDateTime *time.Time `type:"timestamp"`
-
- // Time at which the automatic on-demand backup created by DynamoDB will expire.
- // This SYSTEM on-demand backup expires automatically 35 days after its creation.
- BackupExpiryDateTime *time.Time `type:"timestamp"`
-
- // Name of the specified backup.
- BackupName *string `min:"3" type:"string"`
-
- // Size of the backup in bytes.
- BackupSizeBytes *int64 `type:"long"`
-
- // Backup can be in one of the following states: CREATING, ACTIVE, DELETED.
- BackupStatus *string `type:"string" enum:"BackupStatus"`
-
- // BackupType:
- //
- // * USER - You create and manage these using the on-demand backup feature.
- //
- // * SYSTEM - If you delete a table with point-in-time recovery enabled,
- // a SYSTEM backup is automatically created and is retained for 35 days (at
- // no additional cost). System backups allow you to restore the deleted table
- // to the state it was in just before the point of deletion.
- //
- // * AWS_BACKUP - On-demand backup created by you from Backup service.
- BackupType *string `type:"string" enum:"BackupType"`
-
- // ARN associated with the table.
- TableArn *string `min:"1" type:"string"`
-
- // Unique identifier for the table.
- TableId *string `type:"string"`
-
- // Name of the table.
- TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BackupSummary) GoString() string {
- return s.String()
-}
-
-// SetBackupArn sets the BackupArn field's value.
-func (s *BackupSummary) SetBackupArn(v string) *BackupSummary {
- s.BackupArn = &v
- return s
-}
-
-// SetBackupCreationDateTime sets the BackupCreationDateTime field's value.
-func (s *BackupSummary) SetBackupCreationDateTime(v time.Time) *BackupSummary {
- s.BackupCreationDateTime = &v
- return s
-}
-
-// SetBackupExpiryDateTime sets the BackupExpiryDateTime field's value.
-func (s *BackupSummary) SetBackupExpiryDateTime(v time.Time) *BackupSummary {
- s.BackupExpiryDateTime = &v
- return s
-}
-
-// SetBackupName sets the BackupName field's value.
-func (s *BackupSummary) SetBackupName(v string) *BackupSummary {
- s.BackupName = &v
- return s
-}
-
-// SetBackupSizeBytes sets the BackupSizeBytes field's value.
-func (s *BackupSummary) SetBackupSizeBytes(v int64) *BackupSummary {
- s.BackupSizeBytes = &v
- return s
-}
-
-// SetBackupStatus sets the BackupStatus field's value.
-func (s *BackupSummary) SetBackupStatus(v string) *BackupSummary {
- s.BackupStatus = &v
- return s
-}
-
-// SetBackupType sets the BackupType field's value.
-func (s *BackupSummary) SetBackupType(v string) *BackupSummary {
- s.BackupType = &v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *BackupSummary) SetTableArn(v string) *BackupSummary {
- s.TableArn = &v
- return s
-}
-
-// SetTableId sets the TableId field's value.
-func (s *BackupSummary) SetTableId(v string) *BackupSummary {
- s.TableId = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *BackupSummary) SetTableName(v string) *BackupSummary {
- s.TableName = &v
- return s
-}
-
-type BatchExecuteStatementInput struct {
- _ struct{} `type:"structure"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // The list of PartiQL statements representing the batch to run.
- //
- // Statements is a required field
- Statements []*BatchStatementRequest `min:"1" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchExecuteStatementInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchExecuteStatementInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchExecuteStatementInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "BatchExecuteStatementInput"}
- if s.Statements == nil {
- invalidParams.Add(request.NewErrParamRequired("Statements"))
- }
- if s.Statements != nil && len(s.Statements) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Statements", 1))
- }
- if s.Statements != nil {
- for i, v := range s.Statements {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Statements", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *BatchExecuteStatementInput) SetReturnConsumedCapacity(v string) *BatchExecuteStatementInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetStatements sets the Statements field's value.
-func (s *BatchExecuteStatementInput) SetStatements(v []*BatchStatementRequest) *BatchExecuteStatementInput {
- s.Statements = v
- return s
-}
-
-type BatchExecuteStatementOutput struct {
- _ struct{} `type:"structure"`
-
- // The capacity units consumed by the entire operation. The values of the list
- // are ordered according to the ordering of the statements.
- ConsumedCapacity []*ConsumedCapacity `type:"list"`
-
- // The response to each PartiQL statement in the batch. The values of the list
- // are ordered according to the ordering of the request statements.
- Responses []*BatchStatementResponse `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchExecuteStatementOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchExecuteStatementOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *BatchExecuteStatementOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchExecuteStatementOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetResponses sets the Responses field's value.
-func (s *BatchExecuteStatementOutput) SetResponses(v []*BatchStatementResponse) *BatchExecuteStatementOutput {
- s.Responses = v
- return s
-}
-
-// Represents the input of a BatchGetItem operation.
-type BatchGetItemInput struct {
- _ struct{} `type:"structure"`
-
- // A map of one or more table names or table ARNs and, for each table, a map
- // that describes one or more items to retrieve from that table. Each table
- // name or ARN can be used only once per BatchGetItem request.
- //
- // Each element in the map of items to retrieve consists of the following:
- //
- // * ConsistentRead - If true, a strongly consistent read is used; if false
- // (the default), an eventually consistent read is used.
- //
- // * ExpressionAttributeNames - One or more substitution tokens for attribute
- // names in the ProjectionExpression parameter. The following are some use
- // cases for using ExpressionAttributeNames: To access an attribute whose
- // name conflicts with a DynamoDB reserved word. To create a placeholder
- // for repeating occurrences of an attribute name in an expression. To prevent
- // special characters in an attribute name from being misinterpreted in an
- // expression. Use the # character in an expression to dereference an attribute
- // name. For example, consider the following attribute name: Percentile The
- // name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide). To work around this, you could
- // specify the following for ExpressionAttributeNames: {"#P":"Percentile"}
- // You could then use this substitution in an expression, as in this example:
- // #P = :val Tokens that begin with the : character are expression attribute
- // values, which are placeholders for the actual value at runtime. For more
- // information about expression attribute names, see Accessing Item Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // * Keys - An array of primary key attribute values that define specific
- // items in the table. For each primary key, you must provide all of the
- // key attributes. For example, with a simple primary key, you only need
- // to provide the partition key value. For a composite key, you must provide
- // both the partition key value and the sort key value.
- //
- // * ProjectionExpression - A string that identifies one or more attributes
- // to retrieve from the table. These attributes can include scalars, sets,
- // or elements of a JSON document. The attributes in the expression must
- // be separated by commas. If no attribute names are specified, then all
- // attributes are returned. If any of the requested attributes are not found,
- // they do not appear in the result. For more information, see Accessing
- // Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // * AttributesToGet - This is a legacy parameter. Use ProjectionExpression
- // instead. For more information, see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // RequestItems is a required field
- RequestItems map[string]*KeysAndAttributes `min:"1" type:"map" required:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetItemInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetItemInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchGetItemInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "BatchGetItemInput"}
- if s.RequestItems == nil {
- invalidParams.Add(request.NewErrParamRequired("RequestItems"))
- }
- if s.RequestItems != nil && len(s.RequestItems) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1))
- }
- if s.RequestItems != nil {
- for i, v := range s.RequestItems {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RequestItems", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRequestItems sets the RequestItems field's value.
-func (s *BatchGetItemInput) SetRequestItems(v map[string]*KeysAndAttributes) *BatchGetItemInput {
- s.RequestItems = v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *BatchGetItemInput) SetReturnConsumedCapacity(v string) *BatchGetItemInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// Represents the output of a BatchGetItem operation.
-type BatchGetItemOutput struct {
- _ struct{} `type:"structure"`
-
- // The read capacity units consumed by the entire BatchGetItem operation.
- //
- // Each element consists of:
- //
- // * TableName - The table that consumed the provisioned throughput.
- //
- // * CapacityUnits - The total number of capacity units consumed.
- ConsumedCapacity []*ConsumedCapacity `type:"list"`
-
- // A map of table name or table ARN to a list of items. Each object in Responses
- // consists of a table name or ARN, along with a map of attribute data consisting
- // of the data type and attribute value.
- Responses map[string][]map[string]*AttributeValue `type:"map"`
-
- // A map of tables and their respective keys that were not processed with the
- // current response. The UnprocessedKeys value is in the same form as RequestItems,
- // so the value can be provided directly to a subsequent BatchGetItem operation.
- // For more information, see RequestItems in the Request Parameters section.
- //
- // Each element consists of:
- //
- // * Keys - An array of primary key attribute values that define specific
- // items in the table.
- //
- // * ProjectionExpression - One or more attributes to be retrieved from the
- // table or index. By default, all attributes are returned. If a requested
- // attribute is not found, it does not appear in the result.
- //
- // * ConsistentRead - The consistency of a read operation. If set to true,
- // then a strongly consistent read is used; otherwise, an eventually consistent
- // read is used.
- //
- // If there are no unprocessed keys remaining, the response contains an empty
- // UnprocessedKeys map.
- UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetItemOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchGetItemOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *BatchGetItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchGetItemOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetResponses sets the Responses field's value.
-func (s *BatchGetItemOutput) SetResponses(v map[string][]map[string]*AttributeValue) *BatchGetItemOutput {
- s.Responses = v
- return s
-}
-
-// SetUnprocessedKeys sets the UnprocessedKeys field's value.
-func (s *BatchGetItemOutput) SetUnprocessedKeys(v map[string]*KeysAndAttributes) *BatchGetItemOutput {
- s.UnprocessedKeys = v
- return s
-}
-
-// An error associated with a statement in a PartiQL batch that was run.
-type BatchStatementError struct {
- _ struct{} `type:"structure"`
-
- // The error code associated with the failed PartiQL batch statement.
- Code *string `type:"string" enum:"BatchStatementErrorCodeEnum"`
-
- // The item which caused the condition check to fail. This will be set if ReturnValuesOnConditionCheckFailure
- // is specified as ALL_OLD.
- Item map[string]*AttributeValue `type:"map"`
-
- // The error message associated with the PartiQL batch response.
- Message *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchStatementError) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchStatementError) GoString() string {
- return s.String()
-}
-
-// SetCode sets the Code field's value.
-func (s *BatchStatementError) SetCode(v string) *BatchStatementError {
- s.Code = &v
- return s
-}
-
-// SetItem sets the Item field's value.
-func (s *BatchStatementError) SetItem(v map[string]*AttributeValue) *BatchStatementError {
- s.Item = v
- return s
-}
-
-// SetMessage sets the Message field's value.
-func (s *BatchStatementError) SetMessage(v string) *BatchStatementError {
- s.Message = &v
- return s
-}
-
-// A PartiQL batch statement request.
-type BatchStatementRequest struct {
- _ struct{} `type:"structure"`
-
- // The read consistency of the PartiQL batch request.
- ConsistentRead *bool `type:"boolean"`
-
- // The parameters associated with a PartiQL statement in the batch request.
- Parameters []*AttributeValue `min:"1" type:"list"`
-
- // An optional parameter that returns the item attributes for a PartiQL batch
- // request operation that failed a condition check.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // A valid PartiQL statement.
- //
- // Statement is a required field
- Statement *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchStatementRequest) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchStatementRequest) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchStatementRequest) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "BatchStatementRequest"}
- if s.Parameters != nil && len(s.Parameters) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Parameters", 1))
- }
- if s.Statement == nil {
- invalidParams.Add(request.NewErrParamRequired("Statement"))
- }
- if s.Statement != nil && len(*s.Statement) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Statement", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConsistentRead sets the ConsistentRead field's value.
-func (s *BatchStatementRequest) SetConsistentRead(v bool) *BatchStatementRequest {
- s.ConsistentRead = &v
- return s
-}
-
-// SetParameters sets the Parameters field's value.
-func (s *BatchStatementRequest) SetParameters(v []*AttributeValue) *BatchStatementRequest {
- s.Parameters = v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *BatchStatementRequest) SetReturnValuesOnConditionCheckFailure(v string) *BatchStatementRequest {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetStatement sets the Statement field's value.
-func (s *BatchStatementRequest) SetStatement(v string) *BatchStatementRequest {
- s.Statement = &v
- return s
-}
-
-// A PartiQL batch statement response..
-type BatchStatementResponse struct {
- _ struct{} `type:"structure"`
-
- // The error associated with a failed PartiQL batch statement.
- Error *BatchStatementError `type:"structure"`
-
- // A DynamoDB item associated with a BatchStatementResponse
- Item map[string]*AttributeValue `type:"map"`
-
- // The table name associated with a failed PartiQL batch statement.
- TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchStatementResponse) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchStatementResponse) GoString() string {
- return s.String()
-}
-
-// SetError sets the Error field's value.
-func (s *BatchStatementResponse) SetError(v *BatchStatementError) *BatchStatementResponse {
- s.Error = v
- return s
-}
-
-// SetItem sets the Item field's value.
-func (s *BatchStatementResponse) SetItem(v map[string]*AttributeValue) *BatchStatementResponse {
- s.Item = v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *BatchStatementResponse) SetTableName(v string) *BatchStatementResponse {
- s.TableName = &v
- return s
-}
-
-// Represents the input of a BatchWriteItem operation.
-type BatchWriteItemInput struct {
- _ struct{} `type:"structure"`
-
- // A map of one or more table names or table ARNs and, for each table, a list
- // of operations to be performed (DeleteRequest or PutRequest). Each element
- // in the map consists of the following:
- //
- // * DeleteRequest - Perform a DeleteItem operation on the specified item.
- // The item to be deleted is identified by a Key subelement: Key - A map
- // of primary key attribute values that uniquely identify the item. Each
- // entry in this map consists of an attribute name and an attribute value.
- // For each primary key, you must provide all of the key attributes. For
- // example, with a simple primary key, you only need to provide a value for
- // the partition key. For a composite primary key, you must provide values
- // for both the partition key and the sort key.
- //
- // * PutRequest - Perform a PutItem operation on the specified item. The
- // item to be put is identified by an Item subelement: Item - A map of attributes
- // and their values. Each entry in this map consists of an attribute name
- // and an attribute value. Attribute values must not be null; string and
- // binary type attributes must have lengths greater than zero; and set type
- // attributes must not be empty. Requests that contain empty values are rejected
- // with a ValidationException exception. If you specify any attributes that
- // are part of an index key, then the data types for those attributes must
- // match those of the schema in the table's attribute definition.
- //
- // RequestItems is a required field
- RequestItems map[string][]*WriteRequest `min:"1" type:"map" required:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // Determines whether item collection metrics are returned. If set to SIZE,
- // the response includes statistics about item collections, if any, that were
- // modified during the operation are returned in the response. If set to NONE
- // (the default), no statistics are returned.
- ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchWriteItemInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchWriteItemInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *BatchWriteItemInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "BatchWriteItemInput"}
- if s.RequestItems == nil {
- invalidParams.Add(request.NewErrParamRequired("RequestItems"))
- }
- if s.RequestItems != nil && len(s.RequestItems) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("RequestItems", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRequestItems sets the RequestItems field's value.
-func (s *BatchWriteItemInput) SetRequestItems(v map[string][]*WriteRequest) *BatchWriteItemInput {
- s.RequestItems = v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *BatchWriteItemInput) SetReturnConsumedCapacity(v string) *BatchWriteItemInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
-func (s *BatchWriteItemInput) SetReturnItemCollectionMetrics(v string) *BatchWriteItemInput {
- s.ReturnItemCollectionMetrics = &v
- return s
-}
-
-// Represents the output of a BatchWriteItem operation.
-type BatchWriteItemOutput struct {
- _ struct{} `type:"structure"`
-
- // The capacity units consumed by the entire BatchWriteItem operation.
- //
- // Each element consists of:
- //
- // * TableName - The table that consumed the provisioned throughput.
- //
- // * CapacityUnits - The total number of capacity units consumed.
- ConsumedCapacity []*ConsumedCapacity `type:"list"`
-
- // A list of tables that were processed by BatchWriteItem and, for each table,
- // information about any item collections that were affected by individual DeleteItem
- // or PutItem operations.
- //
- // Each entry consists of the following subelements:
- //
- // * ItemCollectionKey - The partition key value of the item collection.
- // This is the same as the partition key value of the item.
- //
- // * SizeEstimateRangeGB - An estimate of item collection size, expressed
- // in GB. This is a two-element array containing a lower bound and an upper
- // bound for the estimate. The estimate includes the size of all the items
- // in the table, plus the size of all attributes projected into all of the
- // local secondary indexes on the table. Use this estimate to measure whether
- // a local secondary index is approaching its size limit. The estimate is
- // subject to change over time; therefore, do not rely on the precision or
- // accuracy of the estimate.
- ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"`
-
- // A map of tables and requests against those tables that were not processed.
- // The UnprocessedItems value is in the same form as RequestItems, so you can
- // provide this value directly to a subsequent BatchWriteItem operation. For
- // more information, see RequestItems in the Request Parameters section.
- //
- // Each UnprocessedItems entry consists of a table name or table ARN and, for
- // that table, a list of operations to perform (DeleteRequest or PutRequest).
- //
- // * DeleteRequest - Perform a DeleteItem operation on the specified item.
- // The item to be deleted is identified by a Key subelement: Key - A map
- // of primary key attribute values that uniquely identify the item. Each
- // entry in this map consists of an attribute name and an attribute value.
- //
- // * PutRequest - Perform a PutItem operation on the specified item. The
- // item to be put is identified by an Item subelement: Item - A map of attributes
- // and their values. Each entry in this map consists of an attribute name
- // and an attribute value. Attribute values must not be null; string and
- // binary type attributes must have lengths greater than zero; and set type
- // attributes must not be empty. Requests that contain empty values will
- // be rejected with a ValidationException exception. If you specify any attributes
- // that are part of an index key, then the data types for those attributes
- // must match those of the schema in the table's attribute definition.
- //
- // If there are no unprocessed items remaining, the response contains an empty
- // UnprocessedItems map.
- UnprocessedItems map[string][]*WriteRequest `min:"1" type:"map"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchWriteItemOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BatchWriteItemOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *BatchWriteItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchWriteItemOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
-func (s *BatchWriteItemOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *BatchWriteItemOutput {
- s.ItemCollectionMetrics = v
- return s
-}
-
-// SetUnprocessedItems sets the UnprocessedItems field's value.
-func (s *BatchWriteItemOutput) SetUnprocessedItems(v map[string][]*WriteRequest) *BatchWriteItemOutput {
- s.UnprocessedItems = v
- return s
-}
-
-// Contains the details for the read/write capacity mode. This page talks about
-// PROVISIONED and PAY_PER_REQUEST billing modes. For more information about
-// these modes, see Read/write capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.ReadWriteCapacityMode.html).
-//
-// You may need to switch to on-demand mode at least once in order to return
-// a BillingModeSummary response.
-type BillingModeSummary struct {
- _ struct{} `type:"structure"`
-
- // Controls how you are charged for read and write throughput and how you manage
- // capacity. This setting can be changed later.
- //
- // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend
- // using PROVISIONED for predictable workloads.
- //
- // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST.
- // We recommend using PAY_PER_REQUEST for unpredictable workloads.
- BillingMode *string `type:"string" enum:"BillingMode"`
-
- // Represents the time when PAY_PER_REQUEST was last set as the read/write capacity
- // mode.
- LastUpdateToPayPerRequestDateTime *time.Time `type:"timestamp"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BillingModeSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s BillingModeSummary) GoString() string {
- return s.String()
-}
-
-// SetBillingMode sets the BillingMode field's value.
-func (s *BillingModeSummary) SetBillingMode(v string) *BillingModeSummary {
- s.BillingMode = &v
- return s
-}
-
-// SetLastUpdateToPayPerRequestDateTime sets the LastUpdateToPayPerRequestDateTime field's value.
-func (s *BillingModeSummary) SetLastUpdateToPayPerRequestDateTime(v time.Time) *BillingModeSummary {
- s.LastUpdateToPayPerRequestDateTime = &v
- return s
-}
-
-// An ordered list of errors for each item in the request which caused the transaction
-// to get cancelled. The values of the list are ordered according to the ordering
-// of the TransactWriteItems request parameter. If no error occurred for the
-// associated item an error with a Null code and Null message will be present.
-type CancellationReason struct {
- _ struct{} `type:"structure"`
-
- // Status code for the result of the cancelled transaction.
- Code *string `type:"string"`
-
- // Item in the request which caused the transaction to get cancelled.
- Item map[string]*AttributeValue `type:"map"`
-
- // Cancellation reason message description.
- Message *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CancellationReason) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CancellationReason) GoString() string {
- return s.String()
-}
-
-// SetCode sets the Code field's value.
-func (s *CancellationReason) SetCode(v string) *CancellationReason {
- s.Code = &v
- return s
-}
-
-// SetItem sets the Item field's value.
-func (s *CancellationReason) SetItem(v map[string]*AttributeValue) *CancellationReason {
- s.Item = v
- return s
-}
-
-// SetMessage sets the Message field's value.
-func (s *CancellationReason) SetMessage(v string) *CancellationReason {
- s.Message = &v
- return s
-}
-
-// Represents the amount of provisioned throughput capacity consumed on a table
-// or an index.
-type Capacity struct {
- _ struct{} `type:"structure"`
-
- // The total number of capacity units consumed on a table or an index.
- CapacityUnits *float64 `type:"double"`
-
- // The total number of read capacity units consumed on a table or an index.
- ReadCapacityUnits *float64 `type:"double"`
-
- // The total number of write capacity units consumed on a table or an index.
- WriteCapacityUnits *float64 `type:"double"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Capacity) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Capacity) GoString() string {
- return s.String()
-}
-
-// SetCapacityUnits sets the CapacityUnits field's value.
-func (s *Capacity) SetCapacityUnits(v float64) *Capacity {
- s.CapacityUnits = &v
- return s
-}
-
-// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
-func (s *Capacity) SetReadCapacityUnits(v float64) *Capacity {
- s.ReadCapacityUnits = &v
- return s
-}
-
-// SetWriteCapacityUnits sets the WriteCapacityUnits field's value.
-func (s *Capacity) SetWriteCapacityUnits(v float64) *Capacity {
- s.WriteCapacityUnits = &v
- return s
-}
-
-// Represents the selection criteria for a Query or Scan operation:
-//
-// - For a Query operation, Condition is used for specifying the KeyConditions
-// to use when querying a table or an index. For KeyConditions, only the
-// following comparison operators are supported: EQ | LE | LT | GE | GT |
-// BEGINS_WITH | BETWEEN Condition is also used in a QueryFilter, which evaluates
-// the query results and returns only the desired values.
-//
-// - For a Scan operation, Condition is used in a ScanFilter, which evaluates
-// the scan results and returns only the desired values.
-type Condition struct {
- _ struct{} `type:"structure"`
-
- // One or more values to evaluate against the supplied attribute. The number
- // of values in the list depends on the ComparisonOperator being used.
- //
- // For type Number, value comparisons are numeric.
- //
- // String value comparisons for greater than, equals, or less than are based
- // on ASCII character code values. For example, a is greater than A, and a is
- // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
- // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
- //
- // For Binary, DynamoDB treats each byte of the binary data as unsigned when
- // it compares binary values.
- AttributeValueList []*AttributeValue `type:"list"`
-
- // A comparator for evaluating attributes. For example, equals, greater than,
- // less than, etc.
- //
- // The following comparison operators are available:
- //
- // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
- // BEGINS_WITH | IN | BETWEEN
- //
- // The following are descriptions of each comparison operator.
- //
- // * EQ : Equal. EQ is supported for all data types, including lists and
- // maps. AttributeValueList can contain only one AttributeValue element of
- // type String, Number, Binary, String Set, Number Set, or Binary Set. If
- // an item contains an AttributeValue element of a different type than the
- // one provided in the request, the value does not match. For example, {"S":"6"}
- // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2",
- // "1"]}.
- //
- // * NE : Not equal. NE is supported for all data types, including lists
- // and maps. AttributeValueList can contain only one AttributeValue of type
- // String, Number, Binary, String Set, Number Set, or Binary Set. If an item
- // contains an AttributeValue of a different type than the one provided in
- // the request, the value does not match. For example, {"S":"6"} does not
- // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}.
- //
- // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue
- // element of type String, Number, or Binary (not a set type). If an item
- // contains an AttributeValue element of a different type than the one provided
- // in the request, the value does not match. For example, {"S":"6"} does
- // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
- // "1"]}.
- //
- // * LT : Less than. AttributeValueList can contain only one AttributeValue
- // of type String, Number, or Binary (not a set type). If an item contains
- // an AttributeValue element of a different type than the one provided in
- // the request, the value does not match. For example, {"S":"6"} does not
- // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
- // "1"]}.
- //
- // * GE : Greater than or equal. AttributeValueList can contain only one
- // AttributeValue element of type String, Number, or Binary (not a set type).
- // If an item contains an AttributeValue element of a different type than
- // the one provided in the request, the value does not match. For example,
- // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to
- // {"NS":["6", "2", "1"]}.
- //
- // * GT : Greater than. AttributeValueList can contain only one AttributeValue
- // element of type String, Number, or Binary (not a set type). If an item
- // contains an AttributeValue element of a different type than the one provided
- // in the request, the value does not match. For example, {"S":"6"} does
- // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
- // "1"]}.
- //
- // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data
- // types, including lists and maps. This operator tests for the existence
- // of an attribute, not its data type. If the data type of attribute "a"
- // is null, and you evaluate it using NOT_NULL, the result is a Boolean true.
- // This result is because the attribute "a" exists; its data type is not
- // relevant to the NOT_NULL comparison operator.
- //
- // * NULL : The attribute does not exist. NULL is supported for all data
- // types, including lists and maps. This operator tests for the nonexistence
- // of an attribute, not its data type. If the data type of attribute "a"
- // is null, and you evaluate it using NULL, the result is a Boolean false.
- // This is because the attribute "a" exists; its data type is not relevant
- // to the NULL comparison operator.
- //
- // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList
- // can contain only one AttributeValue element of type String, Number, or
- // Binary (not a set type). If the target attribute of the comparison is
- // of type String, then the operator checks for a substring match. If the
- // target attribute of the comparison is of type Binary, then the operator
- // looks for a subsequence of the target that matches the input. If the target
- // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator
- // evaluates to true if it finds an exact match with any member of the set.
- // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can
- // be a list; however, "b" cannot be a set, a map, or a list.
- //
- // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a
- // value in a set. AttributeValueList can contain only one AttributeValue
- // element of type String, Number, or Binary (not a set type). If the target
- // attribute of the comparison is a String, then the operator checks for
- // the absence of a substring match. If the target attribute of the comparison
- // is Binary, then the operator checks for the absence of a subsequence of
- // the target that matches the input. If the target attribute of the comparison
- // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if
- // it does not find an exact match with any member of the set. NOT_CONTAINS
- // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be
- // a list; however, "b" cannot be a set, a map, or a list.
- //
- // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only
- // one AttributeValue of type String or Binary (not a Number or a set type).
- // The target attribute of the comparison must be of type String or Binary
- // (not a Number or a set type).
- //
- // * IN : Checks for matching elements in a list. AttributeValueList can
- // contain one or more AttributeValue elements of type String, Number, or
- // Binary. These attributes are compared against an existing attribute of
- // an item. If any elements of the input are equal to the item attribute,
- // the expression evaluates to true.
- //
- // * BETWEEN : Greater than or equal to the first value, and less than or
- // equal to the second value. AttributeValueList must contain two AttributeValue
- // elements of the same type, either String, Number, or Binary (not a set
- // type). A target attribute matches if the target value is greater than,
- // or equal to, the first element and less than, or equal to, the second
- // element. If an item contains an AttributeValue element of a different
- // type than the one provided in the request, the value does not match. For
- // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does
- // not compare to {"NS":["6", "2", "1"]}
- //
- // For usage examples of AttributeValueList and ComparisonOperator, see Legacy
- // Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // ComparisonOperator is a required field
- ComparisonOperator *string `type:"string" required:"true" enum:"ComparisonOperator"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Condition) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Condition) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Condition) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Condition"}
- if s.ComparisonOperator == nil {
- invalidParams.Add(request.NewErrParamRequired("ComparisonOperator"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributeValueList sets the AttributeValueList field's value.
-func (s *Condition) SetAttributeValueList(v []*AttributeValue) *Condition {
- s.AttributeValueList = v
- return s
-}
-
-// SetComparisonOperator sets the ComparisonOperator field's value.
-func (s *Condition) SetComparisonOperator(v string) *Condition {
- s.ComparisonOperator = &v
- return s
-}
-
-// Represents a request to perform a check that an item exists or to check the
-// condition of specific attributes of the item.
-type ConditionCheck struct {
- _ struct{} `type:"structure"`
-
- // A condition that must be satisfied in order for a conditional update to succeed.
- // For more information, see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // ConditionExpression is a required field
- ConditionExpression *string `type:"string" required:"true"`
-
- // One or more substitution tokens for attribute names in an expression. For
- // more information, see Expression attribute names (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ExpressionAttributeNames.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression. For more information,
- // see Condition expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.ConditionExpressions.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // The primary key of the item to be checked. Each element consists of an attribute
- // name and a value for that attribute.
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-
- // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
- // ConditionCheck condition fails. For ReturnValuesOnConditionCheckFailure,
- // the valid values are: NONE and ALL_OLD.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // Name of the table for the check item request. You can also provide the Amazon
- // Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ConditionCheck) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ConditionCheck) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ConditionCheck) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ConditionCheck"}
- if s.ConditionExpression == nil {
- invalidParams.Add(request.NewErrParamRequired("ConditionExpression"))
- }
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConditionExpression sets the ConditionExpression field's value.
-func (s *ConditionCheck) SetConditionExpression(v string) *ConditionCheck {
- s.ConditionExpression = &v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *ConditionCheck) SetExpressionAttributeNames(v map[string]*string) *ConditionCheck {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *ConditionCheck) SetExpressionAttributeValues(v map[string]*AttributeValue) *ConditionCheck {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetKey sets the Key field's value.
-func (s *ConditionCheck) SetKey(v map[string]*AttributeValue) *ConditionCheck {
- s.Key = v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *ConditionCheck) SetReturnValuesOnConditionCheckFailure(v string) *ConditionCheck {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *ConditionCheck) SetTableName(v string) *ConditionCheck {
- s.TableName = &v
- return s
-}
-
-// A condition specified in the operation could not be evaluated.
-type ConditionalCheckFailedException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Item which caused the ConditionalCheckFailedException.
- Item map[string]*AttributeValue `type:"map"`
-
- // The conditional request failed.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ConditionalCheckFailedException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ConditionalCheckFailedException) GoString() string {
- return s.String()
-}
-
-func newErrorConditionalCheckFailedException(v protocol.ResponseMetadata) error {
- return &ConditionalCheckFailedException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ConditionalCheckFailedException) Code() string {
- return "ConditionalCheckFailedException"
-}
-
-// Message returns the exception's message.
-func (s *ConditionalCheckFailedException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ConditionalCheckFailedException) OrigErr() error {
- return nil
-}
-
-func (s *ConditionalCheckFailedException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ConditionalCheckFailedException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ConditionalCheckFailedException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The capacity units consumed by an operation. The data returned includes the
-// total provisioned throughput consumed, along with statistics for the table
-// and any indexes involved in the operation. ConsumedCapacity is only returned
-// if the request asked for it. For more information, see Provisioned capacity
-// mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html)
-// in the Amazon DynamoDB Developer Guide.
-type ConsumedCapacity struct {
- _ struct{} `type:"structure"`
-
- // The total number of capacity units consumed by the operation.
- CapacityUnits *float64 `type:"double"`
-
- // The amount of throughput consumed on each global index affected by the operation.
- GlobalSecondaryIndexes map[string]*Capacity `type:"map"`
-
- // The amount of throughput consumed on each local index affected by the operation.
- LocalSecondaryIndexes map[string]*Capacity `type:"map"`
-
- // The total number of read capacity units consumed by the operation.
- ReadCapacityUnits *float64 `type:"double"`
-
- // The amount of throughput consumed on the table affected by the operation.
- Table *Capacity `type:"structure"`
-
- // The name of the table that was affected by the operation. If you had specified
- // the Amazon Resource Name (ARN) of a table in the input, you'll see the table
- // ARN in the response.
- TableName *string `min:"1" type:"string"`
-
- // The total number of write capacity units consumed by the operation.
- WriteCapacityUnits *float64 `type:"double"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ConsumedCapacity) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ConsumedCapacity) GoString() string {
- return s.String()
-}
-
-// SetCapacityUnits sets the CapacityUnits field's value.
-func (s *ConsumedCapacity) SetCapacityUnits(v float64) *ConsumedCapacity {
- s.CapacityUnits = &v
- return s
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *ConsumedCapacity) SetGlobalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
-func (s *ConsumedCapacity) SetLocalSecondaryIndexes(v map[string]*Capacity) *ConsumedCapacity {
- s.LocalSecondaryIndexes = v
- return s
-}
-
-// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
-func (s *ConsumedCapacity) SetReadCapacityUnits(v float64) *ConsumedCapacity {
- s.ReadCapacityUnits = &v
- return s
-}
-
-// SetTable sets the Table field's value.
-func (s *ConsumedCapacity) SetTable(v *Capacity) *ConsumedCapacity {
- s.Table = v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *ConsumedCapacity) SetTableName(v string) *ConsumedCapacity {
- s.TableName = &v
- return s
-}
-
-// SetWriteCapacityUnits sets the WriteCapacityUnits field's value.
-func (s *ConsumedCapacity) SetWriteCapacityUnits(v float64) *ConsumedCapacity {
- s.WriteCapacityUnits = &v
- return s
-}
-
-// Represents the continuous backups and point in time recovery settings on
-// the table.
-type ContinuousBackupsDescription struct {
- _ struct{} `type:"structure"`
-
- // ContinuousBackupsStatus can be one of the following states: ENABLED, DISABLED
- //
- // ContinuousBackupsStatus is a required field
- ContinuousBackupsStatus *string `type:"string" required:"true" enum:"ContinuousBackupsStatus"`
-
- // The description of the point in time recovery settings applied to the table.
- PointInTimeRecoveryDescription *PointInTimeRecoveryDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ContinuousBackupsDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ContinuousBackupsDescription) GoString() string {
- return s.String()
-}
-
-// SetContinuousBackupsStatus sets the ContinuousBackupsStatus field's value.
-func (s *ContinuousBackupsDescription) SetContinuousBackupsStatus(v string) *ContinuousBackupsDescription {
- s.ContinuousBackupsStatus = &v
- return s
-}
-
-// SetPointInTimeRecoveryDescription sets the PointInTimeRecoveryDescription field's value.
-func (s *ContinuousBackupsDescription) SetPointInTimeRecoveryDescription(v *PointInTimeRecoveryDescription) *ContinuousBackupsDescription {
- s.PointInTimeRecoveryDescription = v
- return s
-}
-
-// Backups have not yet been enabled for this table.
-type ContinuousBackupsUnavailableException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ContinuousBackupsUnavailableException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ContinuousBackupsUnavailableException) GoString() string {
- return s.String()
-}
-
-func newErrorContinuousBackupsUnavailableException(v protocol.ResponseMetadata) error {
- return &ContinuousBackupsUnavailableException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ContinuousBackupsUnavailableException) Code() string {
- return "ContinuousBackupsUnavailableException"
-}
-
-// Message returns the exception's message.
-func (s *ContinuousBackupsUnavailableException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ContinuousBackupsUnavailableException) OrigErr() error {
- return nil
-}
-
-func (s *ContinuousBackupsUnavailableException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ContinuousBackupsUnavailableException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ContinuousBackupsUnavailableException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Represents a Contributor Insights summary entry.
-type ContributorInsightsSummary struct {
- _ struct{} `type:"structure"`
-
- // Describes the current status for contributor insights for the given table
- // and index, if applicable.
- ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"`
-
- // Name of the index associated with the summary, if any.
- IndexName *string `min:"3" type:"string"`
-
- // Name of the table associated with the summary.
- TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ContributorInsightsSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ContributorInsightsSummary) GoString() string {
- return s.String()
-}
-
-// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value.
-func (s *ContributorInsightsSummary) SetContributorInsightsStatus(v string) *ContributorInsightsSummary {
- s.ContributorInsightsStatus = &v
- return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *ContributorInsightsSummary) SetIndexName(v string) *ContributorInsightsSummary {
- s.IndexName = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *ContributorInsightsSummary) SetTableName(v string) *ContributorInsightsSummary {
- s.TableName = &v
- return s
-}
-
-type CreateBackupInput struct {
- _ struct{} `type:"structure"`
-
- // Specified name for the backup.
- //
- // BackupName is a required field
- BackupName *string `min:"3" type:"string" required:"true"`
-
- // The name of the table. You can also provide the Amazon Resource Name (ARN)
- // of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateBackupInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateBackupInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateBackupInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateBackupInput"}
- if s.BackupName == nil {
- invalidParams.Add(request.NewErrParamRequired("BackupName"))
- }
- if s.BackupName != nil && len(*s.BackupName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("BackupName", 3))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetBackupName sets the BackupName field's value.
-func (s *CreateBackupInput) SetBackupName(v string) *CreateBackupInput {
- s.BackupName = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *CreateBackupInput) SetTableName(v string) *CreateBackupInput {
- s.TableName = &v
- return s
-}
-
-type CreateBackupOutput struct {
- _ struct{} `type:"structure"`
-
- // Contains the details of the backup created for the table.
- BackupDetails *BackupDetails `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateBackupOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateBackupOutput) GoString() string {
- return s.String()
-}
-
-// SetBackupDetails sets the BackupDetails field's value.
-func (s *CreateBackupOutput) SetBackupDetails(v *BackupDetails) *CreateBackupOutput {
- s.BackupDetails = v
- return s
-}
-
-// Represents a new global secondary index to be added to an existing table.
-type CreateGlobalSecondaryIndexAction struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index to be created.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-
- // The key schema for the global secondary index.
- //
- // KeySchema is a required field
- KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
- // The maximum number of read and write units for the global secondary index
- // being created. If you use this parameter, you must specify MaxReadRequestUnits,
- // MaxWriteRequestUnits, or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // Represents attributes that are copied (projected) from the table into an
- // index. These are in addition to the primary key attributes and index key
- // attributes, which are automatically projected.
- //
- // Projection is a required field
- Projection *Projection `type:"structure" required:"true"`
-
- // Represents the provisioned throughput settings for the specified global secondary
- // index.
- //
- // For current minimum and maximum provisioned throughput values, see Service,
- // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
- // in the Amazon DynamoDB Developer Guide.
- ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateGlobalSecondaryIndexAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateGlobalSecondaryIndexAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateGlobalSecondaryIndexAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateGlobalSecondaryIndexAction"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.KeySchema == nil {
- invalidParams.Add(request.NewErrParamRequired("KeySchema"))
- }
- if s.KeySchema != nil && len(s.KeySchema) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
- }
- if s.Projection == nil {
- invalidParams.Add(request.NewErrParamRequired("Projection"))
- }
- if s.KeySchema != nil {
- for i, v := range s.KeySchema {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.Projection != nil {
- if err := s.Projection.Validate(); err != nil {
- invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
- }
- }
- if s.ProvisionedThroughput != nil {
- if err := s.ProvisionedThroughput.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *CreateGlobalSecondaryIndexAction) SetIndexName(v string) *CreateGlobalSecondaryIndexAction {
- s.IndexName = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *CreateGlobalSecondaryIndexAction) SetKeySchema(v []*KeySchemaElement) *CreateGlobalSecondaryIndexAction {
- s.KeySchema = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *CreateGlobalSecondaryIndexAction) SetOnDemandThroughput(v *OnDemandThroughput) *CreateGlobalSecondaryIndexAction {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *CreateGlobalSecondaryIndexAction) SetProjection(v *Projection) *CreateGlobalSecondaryIndexAction {
- s.Projection = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *CreateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateGlobalSecondaryIndexAction {
- s.ProvisionedThroughput = v
- return s
-}
-
-type CreateGlobalTableInput struct {
- _ struct{} `type:"structure"`
-
- // The global table name.
- //
- // GlobalTableName is a required field
- GlobalTableName *string `min:"3" type:"string" required:"true"`
-
- // The Regions where the global table needs to be created.
- //
- // ReplicationGroup is a required field
- ReplicationGroup []*Replica `type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateGlobalTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateGlobalTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateGlobalTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateGlobalTableInput"}
- if s.GlobalTableName == nil {
- invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
- }
- if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
- }
- if s.ReplicationGroup == nil {
- invalidParams.Add(request.NewErrParamRequired("ReplicationGroup"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *CreateGlobalTableInput) SetGlobalTableName(v string) *CreateGlobalTableInput {
- s.GlobalTableName = &v
- return s
-}
-
-// SetReplicationGroup sets the ReplicationGroup field's value.
-func (s *CreateGlobalTableInput) SetReplicationGroup(v []*Replica) *CreateGlobalTableInput {
- s.ReplicationGroup = v
- return s
-}
-
-type CreateGlobalTableOutput struct {
- _ struct{} `type:"structure"`
-
- // Contains the details of the global table.
- GlobalTableDescription *GlobalTableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateGlobalTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateGlobalTableOutput) GoString() string {
- return s.String()
-}
-
-// SetGlobalTableDescription sets the GlobalTableDescription field's value.
-func (s *CreateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *CreateGlobalTableOutput {
- s.GlobalTableDescription = v
- return s
-}
-
-// Represents a replica to be added.
-type CreateReplicaAction struct {
- _ struct{} `type:"structure"`
-
- // The Region of the replica to be added.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateReplicaAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateReplicaAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateReplicaAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateReplicaAction"}
- if s.RegionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RegionName"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *CreateReplicaAction) SetRegionName(v string) *CreateReplicaAction {
- s.RegionName = &v
- return s
-}
-
-// Represents a replica to be created.
-type CreateReplicationGroupMemberAction struct {
- _ struct{} `type:"structure"`
-
- // Replica-specific global secondary index settings.
- GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"`
-
- // The KMS key that should be used for KMS encryption in the new replica. To
- // specify a key, use its key ID, Amazon Resource Name (ARN), alias name, or
- // alias ARN. Note that you should only provide this parameter if the key is
- // different from the default DynamoDB KMS key alias/aws/dynamodb.
- KMSMasterKeyId *string `type:"string"`
-
- // The maximum on-demand throughput settings for the specified replica table
- // being created. You can only modify MaxReadRequestUnits, because you can't
- // modify MaxWriteRequestUnits for individual replica tables.
- OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"`
-
- // Replica-specific provisioned throughput. If not specified, uses the source
- // table's provisioned throughput settings.
- ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"`
-
- // The Region where the new replica will be created.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-
- // Replica-specific table class. If not specified, uses the source table's table
- // class.
- TableClassOverride *string `type:"string" enum:"TableClass"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateReplicationGroupMemberAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateReplicationGroupMemberAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateReplicationGroupMemberAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateReplicationGroupMemberAction"}
- if s.GlobalSecondaryIndexes != nil && len(s.GlobalSecondaryIndexes) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexes", 1))
- }
- if s.RegionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RegionName"))
- }
- if s.GlobalSecondaryIndexes != nil {
- for i, v := range s.GlobalSecondaryIndexes {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvisionedThroughputOverride != nil {
- if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *CreateReplicationGroupMemberAction) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndex) *CreateReplicationGroupMemberAction {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetKMSMasterKeyId sets the KMSMasterKeyId field's value.
-func (s *CreateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *CreateReplicationGroupMemberAction {
- s.KMSMasterKeyId = &v
- return s
-}
-
-// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value.
-func (s *CreateReplicationGroupMemberAction) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *CreateReplicationGroupMemberAction {
- s.OnDemandThroughputOverride = v
- return s
-}
-
-// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
-func (s *CreateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *CreateReplicationGroupMemberAction {
- s.ProvisionedThroughputOverride = v
- return s
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *CreateReplicationGroupMemberAction) SetRegionName(v string) *CreateReplicationGroupMemberAction {
- s.RegionName = &v
- return s
-}
-
-// SetTableClassOverride sets the TableClassOverride field's value.
-func (s *CreateReplicationGroupMemberAction) SetTableClassOverride(v string) *CreateReplicationGroupMemberAction {
- s.TableClassOverride = &v
- return s
-}
-
-// Represents the input of a CreateTable operation.
-type CreateTableInput struct {
- _ struct{} `type:"structure"`
-
- // An array of attributes that describe the key schema for the table and indexes.
- //
- // AttributeDefinitions is a required field
- AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"`
-
- // Controls how you are charged for read and write throughput and how you manage
- // capacity. This setting can be changed later.
- //
- // * PROVISIONED - We recommend using PROVISIONED for predictable workloads.
- // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html).
- //
- // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable
- // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity
- // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html).
- BillingMode *string `type:"string" enum:"BillingMode"`
-
- // Indicates whether deletion protection is to be enabled (true) or disabled
- // (false) on the table.
- DeletionProtectionEnabled *bool `type:"boolean"`
-
- // One or more global secondary indexes (the maximum is 20) to be created on
- // the table. Each global secondary index in the array includes the following:
- //
- // * IndexName - The name of the global secondary index. Must be unique only
- // for this table.
- //
- // * KeySchema - Specifies the key schema for the global secondary index.
- //
- // * Projection - Specifies attributes that are copied (projected) from the
- // table into the index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected. Each attribute
- // specification is composed of: ProjectionType - One of the following: KEYS_ONLY
- // - Only the index and primary keys are projected into the index. INCLUDE
- // - Only the specified table attributes are projected into the index. The
- // list of projected attributes is in NonKeyAttributes. ALL - All of the
- // table attributes are projected into the index. NonKeyAttributes - A list
- // of one or more non-key attribute names that are projected into the secondary
- // index. The total count of attributes provided in NonKeyAttributes, summed
- // across all of the secondary indexes, must not exceed 100. If you project
- // the same attribute into two different indexes, this counts as two distinct
- // attributes when determining the total.
- //
- // * ProvisionedThroughput - The provisioned throughput settings for the
- // global secondary index, consisting of read and write capacity units.
- GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"`
-
- // Specifies the attributes that make up the primary key for a table or an index.
- // The attributes in KeySchema must also be defined in the AttributeDefinitions
- // array. For more information, see Data Model (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // Each KeySchemaElement in the array is composed of:
- //
- // * AttributeName - The name of this key attribute.
- //
- // * KeyType - The role that the key attribute will assume: HASH - partition
- // key RANGE - sort key
- //
- // The partition key of an item is also known as its hash attribute. The term
- // "hash attribute" derives from the DynamoDB usage of an internal hash function
- // to evenly distribute data items across partitions, based on their partition
- // key values.
- //
- // The sort key of an item is also known as its range attribute. The term "range
- // attribute" derives from the way DynamoDB stores items with the same partition
- // key physically close together, in sorted order by the sort key value.
- //
- // For a simple primary key (partition key), you must provide exactly one element
- // with a KeyType of HASH.
- //
- // For a composite primary key (partition key and sort key), you must provide
- // exactly two elements, in this order: The first element must have a KeyType
- // of HASH, and the second element must have a KeyType of RANGE.
- //
- // For more information, see Working with Tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#WorkingWithTables.primary.key)
- // in the Amazon DynamoDB Developer Guide.
- //
- // KeySchema is a required field
- KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
- // One or more local secondary indexes (the maximum is 5) to be created on the
- // table. Each index is scoped to a given partition key value. There is a 10
- // GB size limit per partition key value; otherwise, the size of a local secondary
- // index is unconstrained.
- //
- // Each local secondary index in the array includes the following:
- //
- // * IndexName - The name of the local secondary index. Must be unique only
- // for this table.
- //
- // * KeySchema - Specifies the key schema for the local secondary index.
- // The key schema must begin with the same partition key as the table.
- //
- // * Projection - Specifies attributes that are copied (projected) from the
- // table into the index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected. Each attribute
- // specification is composed of: ProjectionType - One of the following: KEYS_ONLY
- // - Only the index and primary keys are projected into the index. INCLUDE
- // - Only the specified table attributes are projected into the index. The
- // list of projected attributes is in NonKeyAttributes. ALL - All of the
- // table attributes are projected into the index. NonKeyAttributes - A list
- // of one or more non-key attribute names that are projected into the secondary
- // index. The total count of attributes provided in NonKeyAttributes, summed
- // across all of the secondary indexes, must not exceed 100. If you project
- // the same attribute into two different indexes, this counts as two distinct
- // attributes when determining the total.
- LocalSecondaryIndexes []*LocalSecondaryIndex `type:"list"`
-
- // Sets the maximum number of read and write units for the specified table in
- // on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits,
- // MaxWriteRequestUnits, or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // Represents the provisioned throughput settings for a specified table or index.
- // The settings can be modified using the UpdateTable operation.
- //
- // If you set BillingMode as PROVISIONED, you must specify this property. If
- // you set BillingMode as PAY_PER_REQUEST, you cannot specify this property.
- //
- // For current minimum and maximum provisioned throughput values, see Service,
- // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
- // in the Amazon DynamoDB Developer Guide.
- ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
-
- // An Amazon Web Services resource-based policy document in JSON format that
- // will be attached to the table.
- //
- // When you attach a resource-based policy while creating a table, the policy
- // application is strongly consistent.
- //
- // The maximum size supported for a resource-based policy document is 20 KB.
- // DynamoDB counts whitespaces when calculating the size of a policy against
- // this limit. For a full list of all considerations that apply for resource-based
- // policies, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html).
- //
- // You need to specify the CreateTable and PutResourcePolicy IAM actions for
- // authorizing a user to create a table with a resource-based policy.
- ResourcePolicy *string `type:"string"`
-
- // Represents the settings used to enable server-side encryption.
- SSESpecification *SSESpecification `type:"structure"`
-
- // The settings for DynamoDB Streams on the table. These settings consist of:
- //
- // * StreamEnabled - Indicates whether DynamoDB Streams is to be enabled
- // (true) or disabled (false).
- //
- // * StreamViewType - When an item in the table is modified, StreamViewType
- // determines what information is written to the table's stream. Valid values
- // for StreamViewType are: KEYS_ONLY - Only the key attributes of the modified
- // item are written to the stream. NEW_IMAGE - The entire item, as it appears
- // after it was modified, is written to the stream. OLD_IMAGE - The entire
- // item, as it appeared before it was modified, is written to the stream.
- // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item
- // are written to the stream.
- StreamSpecification *StreamSpecification `type:"structure"`
-
- // The table class of the new table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- TableClass *string `type:"string" enum:"TableClass"`
-
- // The name of the table to create. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-
- // A list of key-value pairs to label the table. For more information, see Tagging
- // for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html).
- Tags []*Tag `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateTableInput"}
- if s.AttributeDefinitions == nil {
- invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions"))
- }
- if s.KeySchema == nil {
- invalidParams.Add(request.NewErrParamRequired("KeySchema"))
- }
- if s.KeySchema != nil && len(s.KeySchema) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
- if s.AttributeDefinitions != nil {
- for i, v := range s.AttributeDefinitions {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.GlobalSecondaryIndexes != nil {
- for i, v := range s.GlobalSecondaryIndexes {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.KeySchema != nil {
- for i, v := range s.KeySchema {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.LocalSecondaryIndexes != nil {
- for i, v := range s.LocalSecondaryIndexes {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexes", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvisionedThroughput != nil {
- if err := s.ProvisionedThroughput.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
- }
- }
- if s.StreamSpecification != nil {
- if err := s.StreamSpecification.Validate(); err != nil {
- invalidParams.AddNested("StreamSpecification", err.(request.ErrInvalidParams))
- }
- }
- if s.Tags != nil {
- for i, v := range s.Tags {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributeDefinitions sets the AttributeDefinitions field's value.
-func (s *CreateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *CreateTableInput {
- s.AttributeDefinitions = v
- return s
-}
-
-// SetBillingMode sets the BillingMode field's value.
-func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput {
- s.BillingMode = &v
- return s
-}
-
-// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value.
-func (s *CreateTableInput) SetDeletionProtectionEnabled(v bool) *CreateTableInput {
- s.DeletionProtectionEnabled = &v
- return s
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *CreateTableInput) SetKeySchema(v []*KeySchemaElement) *CreateTableInput {
- s.KeySchema = v
- return s
-}
-
-// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
-func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *CreateTableInput {
- s.LocalSecondaryIndexes = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *CreateTableInput) SetOnDemandThroughput(v *OnDemandThroughput) *CreateTableInput {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput {
- s.ProvisionedThroughput = v
- return s
-}
-
-// SetResourcePolicy sets the ResourcePolicy field's value.
-func (s *CreateTableInput) SetResourcePolicy(v string) *CreateTableInput {
- s.ResourcePolicy = &v
- return s
-}
-
-// SetSSESpecification sets the SSESpecification field's value.
-func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput {
- s.SSESpecification = v
- return s
-}
-
-// SetStreamSpecification sets the StreamSpecification field's value.
-func (s *CreateTableInput) SetStreamSpecification(v *StreamSpecification) *CreateTableInput {
- s.StreamSpecification = v
- return s
-}
-
-// SetTableClass sets the TableClass field's value.
-func (s *CreateTableInput) SetTableClass(v string) *CreateTableInput {
- s.TableClass = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *CreateTableInput) SetTableName(v string) *CreateTableInput {
- s.TableName = &v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *CreateTableInput) SetTags(v []*Tag) *CreateTableInput {
- s.Tags = v
- return s
-}
-
-// Represents the output of a CreateTable operation.
-type CreateTableOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of the table.
- TableDescription *TableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTableOutput) GoString() string {
- return s.String()
-}
-
-// SetTableDescription sets the TableDescription field's value.
-func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTableOutput {
- s.TableDescription = v
- return s
-}
-
-// Processing options for the CSV file being imported.
-type CsvOptions struct {
- _ struct{} `type:"structure"`
-
- // The delimiter used for separating items in the CSV file being imported.
- Delimiter *string `min:"1" type:"string"`
-
- // List of the headers used to specify a common header for all source CSV files
- // being imported. If this field is specified then the first line of each CSV
- // file is treated as data instead of the header. If this field is not specified
- // the the first line of each CSV file is treated as the header.
- HeaderList []*string `min:"1" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CsvOptions) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CsvOptions) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CsvOptions) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CsvOptions"}
- if s.Delimiter != nil && len(*s.Delimiter) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Delimiter", 1))
- }
- if s.HeaderList != nil && len(s.HeaderList) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("HeaderList", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDelimiter sets the Delimiter field's value.
-func (s *CsvOptions) SetDelimiter(v string) *CsvOptions {
- s.Delimiter = &v
- return s
-}
-
-// SetHeaderList sets the HeaderList field's value.
-func (s *CsvOptions) SetHeaderList(v []*string) *CsvOptions {
- s.HeaderList = v
- return s
-}
-
-// Represents a request to perform a DeleteItem operation.
-type Delete struct {
- _ struct{} `type:"structure"`
-
- // A condition that must be satisfied in order for a conditional delete to succeed.
- ConditionExpression *string `type:"string"`
-
- // One or more substitution tokens for attribute names in an expression.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // The primary key of the item to be deleted. Each element consists of an attribute
- // name and a value for that attribute.
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-
- // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
- // Delete condition fails. For ReturnValuesOnConditionCheckFailure, the valid
- // values are: NONE and ALL_OLD.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // Name of the table in which the item to be deleted resides. You can also provide
- // the Amazon Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Delete) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Delete) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Delete) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Delete"}
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConditionExpression sets the ConditionExpression field's value.
-func (s *Delete) SetConditionExpression(v string) *Delete {
- s.ConditionExpression = &v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *Delete) SetExpressionAttributeNames(v map[string]*string) *Delete {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *Delete) SetExpressionAttributeValues(v map[string]*AttributeValue) *Delete {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetKey sets the Key field's value.
-func (s *Delete) SetKey(v map[string]*AttributeValue) *Delete {
- s.Key = v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *Delete) SetReturnValuesOnConditionCheckFailure(v string) *Delete {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *Delete) SetTableName(v string) *Delete {
- s.TableName = &v
- return s
-}
-
-type DeleteBackupInput struct {
- _ struct{} `type:"structure"`
-
- // The ARN associated with the backup.
- //
- // BackupArn is a required field
- BackupArn *string `min:"37" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteBackupInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteBackupInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteBackupInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"}
- if s.BackupArn == nil {
- invalidParams.Add(request.NewErrParamRequired("BackupArn"))
- }
- if s.BackupArn != nil && len(*s.BackupArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetBackupArn sets the BackupArn field's value.
-func (s *DeleteBackupInput) SetBackupArn(v string) *DeleteBackupInput {
- s.BackupArn = &v
- return s
-}
-
-type DeleteBackupOutput struct {
- _ struct{} `type:"structure"`
-
- // Contains the description of the backup created for the table.
- BackupDescription *BackupDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteBackupOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteBackupOutput) GoString() string {
- return s.String()
-}
-
-// SetBackupDescription sets the BackupDescription field's value.
-func (s *DeleteBackupOutput) SetBackupDescription(v *BackupDescription) *DeleteBackupOutput {
- s.BackupDescription = v
- return s
-}
-
-// Represents a global secondary index to be deleted from an existing table.
-type DeleteGlobalSecondaryIndexAction struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index to be deleted.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteGlobalSecondaryIndexAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteGlobalSecondaryIndexAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteGlobalSecondaryIndexAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *DeleteGlobalSecondaryIndexAction) SetIndexName(v string) *DeleteGlobalSecondaryIndexAction {
- s.IndexName = &v
- return s
-}
-
-// Represents the input of a DeleteItem operation.
-type DeleteItemInput struct {
- _ struct{} `type:"structure"`
-
- // A condition that must be satisfied in order for a conditional DeleteItem
- // to succeed.
- //
- // An expression can contain any of the following:
- //
- // * Functions: attribute_exists | attribute_not_exists | attribute_type
- // | contains | begins_with | size These function names are case-sensitive.
- //
- // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
- //
- // * Logical operators: AND | OR | NOT
- //
- // For more information about condition expressions, see Condition Expressions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionExpression *string `type:"string"`
-
- // This is a legacy parameter. Use ConditionExpression instead. For more information,
- // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
-
- // This is a legacy parameter. Use ConditionExpression instead. For more information,
- // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
- // in the Amazon DynamoDB Developer Guide.
- Expected map[string]*ExpectedAttributeValue `type:"map"`
-
- // One or more substitution tokens for attribute names in an expression. The
- // following are some use cases for using ExpressionAttributeNames:
- //
- // * To access an attribute whose name conflicts with a DynamoDB reserved
- // word.
- //
- // * To create a placeholder for repeating occurrences of an attribute name
- // in an expression.
- //
- // * To prevent special characters in an attribute name from being misinterpreted
- // in an expression.
- //
- // Use the # character in an expression to dereference an attribute name. For
- // example, consider the following attribute name:
- //
- // * Percentile
- //
- // The name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
- // the following for ExpressionAttributeNames:
- //
- // * {"#P":"Percentile"}
- //
- // You could then use this substitution in an expression, as in this example:
- //
- // * #P = :val
- //
- // Tokens that begin with the : character are expression attribute values, which
- // are placeholders for the actual value at runtime.
- //
- // For more information on expression attribute names, see Specifying Item Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- //
- // Use the : (colon) character in an expression to dereference an attribute
- // value. For example, suppose that you wanted to check whether the value of
- // the ProductStatus attribute was one of the following:
- //
- // Available | Backordered | Discontinued
- //
- // You would first need to specify ExpressionAttributeValues as follows:
- //
- // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
- // }
- //
- // You could then use these values in an expression, such as this:
- //
- // ProductStatus IN (:avail, :back, :disc)
- //
- // For more information on expression attribute values, see Condition Expressions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // A map of attribute names to AttributeValue objects, representing the primary
- // key of the item to delete.
- //
- // For the primary key, you must provide all of the key attributes. For example,
- // with a simple primary key, you only need to provide a value for the partition
- // key. For a composite primary key, you must provide values for both the partition
- // key and the sort key.
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // Determines whether item collection metrics are returned. If set to SIZE,
- // the response includes statistics about item collections, if any, that were
- // modified during the operation are returned in the response. If set to NONE
- // (the default), no statistics are returned.
- ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
-
- // Use ReturnValues if you want to get the item attributes as they appeared
- // before they were deleted. For DeleteItem, the valid values are:
- //
- // * NONE - If ReturnValues is not specified, or if its value is NONE, then
- // nothing is returned. (This setting is the default for ReturnValues.)
- //
- // * ALL_OLD - The content of the old item is returned.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- //
- // The ReturnValues parameter is used by several DynamoDB operations; however,
- // DeleteItem does not recognize any values other than NONE or ALL_OLD.
- ReturnValues *string `type:"string" enum:"ReturnValue"`
-
- // An optional parameter that returns the item attributes for a DeleteItem operation
- // that failed a condition check.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // The name of the table from which to delete the item. You can also provide
- // the Amazon Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteItemInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteItemInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteItemInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"}
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConditionExpression sets the ConditionExpression field's value.
-func (s *DeleteItemInput) SetConditionExpression(v string) *DeleteItemInput {
- s.ConditionExpression = &v
- return s
-}
-
-// SetConditionalOperator sets the ConditionalOperator field's value.
-func (s *DeleteItemInput) SetConditionalOperator(v string) *DeleteItemInput {
- s.ConditionalOperator = &v
- return s
-}
-
-// SetExpected sets the Expected field's value.
-func (s *DeleteItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *DeleteItemInput {
- s.Expected = v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *DeleteItemInput) SetExpressionAttributeNames(v map[string]*string) *DeleteItemInput {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *DeleteItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *DeleteItemInput {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetKey sets the Key field's value.
-func (s *DeleteItemInput) SetKey(v map[string]*AttributeValue) *DeleteItemInput {
- s.Key = v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *DeleteItemInput) SetReturnConsumedCapacity(v string) *DeleteItemInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
-func (s *DeleteItemInput) SetReturnItemCollectionMetrics(v string) *DeleteItemInput {
- s.ReturnItemCollectionMetrics = &v
- return s
-}
-
-// SetReturnValues sets the ReturnValues field's value.
-func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput {
- s.ReturnValues = &v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *DeleteItemInput) SetReturnValuesOnConditionCheckFailure(v string) *DeleteItemInput {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput {
- s.TableName = &v
- return s
-}
-
-// Represents the output of a DeleteItem operation.
-type DeleteItemOutput struct {
- _ struct{} `type:"structure"`
-
- // A map of attribute names to AttributeValue objects, representing the item
- // as it appeared before the DeleteItem operation. This map appears in the response
- // only if ReturnValues was specified as ALL_OLD in the request.
- Attributes map[string]*AttributeValue `type:"map"`
-
- // The capacity units consumed by the DeleteItem operation. The data returned
- // includes the total provisioned throughput consumed, along with statistics
- // for the table and any indexes involved in the operation. ConsumedCapacity
- // is only returned if the ReturnConsumedCapacity parameter was specified. For
- // more information, see Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // Information about item collections, if any, that were affected by the DeleteItem
- // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
- // parameter was specified. If the table does not have any local secondary indexes,
- // this information is not returned in the response.
- //
- // Each ItemCollectionMetrics element consists of:
- //
- // * ItemCollectionKey - The partition key value of the item collection.
- // This is the same as the partition key value of the item itself.
- //
- // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
- // This value is a two-element array containing a lower bound and an upper
- // bound for the estimate. The estimate includes the size of all the items
- // in the table, plus the size of all attributes projected into all of the
- // local secondary indexes on that table. Use this estimate to measure whether
- // a local secondary index is approaching its size limit. The estimate is
- // subject to change over time; therefore, do not rely on the precision or
- // accuracy of the estimate.
- ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteItemOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteItemOutput) GoString() string {
- return s.String()
-}
-
-// SetAttributes sets the Attributes field's value.
-func (s *DeleteItemOutput) SetAttributes(v map[string]*AttributeValue) *DeleteItemOutput {
- s.Attributes = v
- return s
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *DeleteItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *DeleteItemOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
-func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *DeleteItemOutput {
- s.ItemCollectionMetrics = v
- return s
-}
-
-// Represents a replica to be removed.
-type DeleteReplicaAction struct {
- _ struct{} `type:"structure"`
-
- // The Region of the replica to be removed.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteReplicaAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteReplicaAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteReplicaAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteReplicaAction"}
- if s.RegionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RegionName"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *DeleteReplicaAction) SetRegionName(v string) *DeleteReplicaAction {
- s.RegionName = &v
- return s
-}
-
-// Represents a replica to be deleted.
-type DeleteReplicationGroupMemberAction struct {
- _ struct{} `type:"structure"`
-
- // The Region where the replica exists.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteReplicationGroupMemberAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteReplicationGroupMemberAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteReplicationGroupMemberAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupMemberAction"}
- if s.RegionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RegionName"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *DeleteReplicationGroupMemberAction) SetRegionName(v string) *DeleteReplicationGroupMemberAction {
- s.RegionName = &v
- return s
-}
-
-// Represents a request to perform a DeleteItem operation on an item.
-type DeleteRequest struct {
- _ struct{} `type:"structure"`
-
- // A map of attribute name to attribute values, representing the primary key
- // of the item to delete. All of the table's primary key attributes must be
- // specified, and their data types must match those of the table's key schema.
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRequest) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteRequest) GoString() string {
- return s.String()
-}
-
-// SetKey sets the Key field's value.
-func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest {
- s.Key = v
- return s
-}
-
-type DeleteResourcePolicyInput struct {
- _ struct{} `type:"structure"`
-
- // A string value that you can use to conditionally delete your policy. When
- // you provide an expected revision ID, if the revision ID of the existing policy
- // on the resource doesn't match or if there's no policy attached to the resource,
- // the request will fail and return a PolicyNotFoundException.
- ExpectedRevisionId *string `min:"1" type:"string"`
-
- // The Amazon Resource Name (ARN) of the DynamoDB resource from which the policy
- // will be removed. The resources you can specify include tables and streams.
- // If you remove the policy of a table, it will also remove the permissions
- // for the table's indexes defined in that policy document. This is because
- // index permissions are defined in the table's policy.
- //
- // ResourceArn is a required field
- ResourceArn *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteResourcePolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteResourcePolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteResourcePolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteResourcePolicyInput"}
- if s.ExpectedRevisionId != nil && len(*s.ExpectedRevisionId) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ExpectedRevisionId", 1))
- }
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
- if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetExpectedRevisionId sets the ExpectedRevisionId field's value.
-func (s *DeleteResourcePolicyInput) SetExpectedRevisionId(v string) *DeleteResourcePolicyInput {
- s.ExpectedRevisionId = &v
- return s
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *DeleteResourcePolicyInput) SetResourceArn(v string) *DeleteResourcePolicyInput {
- s.ResourceArn = &v
- return s
-}
-
-type DeleteResourcePolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // A unique string that represents the revision ID of the policy. If you're
- // comparing revision IDs, make sure to always use string comparison logic.
- //
- // This value will be empty if you make a request against a resource without
- // a policy.
- RevisionId *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteResourcePolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteResourcePolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetRevisionId sets the RevisionId field's value.
-func (s *DeleteResourcePolicyOutput) SetRevisionId(v string) *DeleteResourcePolicyOutput {
- s.RevisionId = &v
- return s
-}
-
-// Represents the input of a DeleteTable operation.
-type DeleteTableInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the table to delete. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"}
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DeleteTableInput) SetTableName(v string) *DeleteTableInput {
- s.TableName = &v
- return s
-}
-
-// Represents the output of a DeleteTable operation.
-type DeleteTableOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of a table.
- TableDescription *TableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DeleteTableOutput) GoString() string {
- return s.String()
-}
-
-// SetTableDescription sets the TableDescription field's value.
-func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTableOutput {
- s.TableDescription = v
- return s
-}
-
-type DescribeBackupInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) associated with the backup.
- //
- // BackupArn is a required field
- BackupArn *string `min:"37" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeBackupInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeBackupInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeBackupInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeBackupInput"}
- if s.BackupArn == nil {
- invalidParams.Add(request.NewErrParamRequired("BackupArn"))
- }
- if s.BackupArn != nil && len(*s.BackupArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetBackupArn sets the BackupArn field's value.
-func (s *DescribeBackupInput) SetBackupArn(v string) *DescribeBackupInput {
- s.BackupArn = &v
- return s
-}
-
-type DescribeBackupOutput struct {
- _ struct{} `type:"structure"`
-
- // Contains the description of the backup created for the table.
- BackupDescription *BackupDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeBackupOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeBackupOutput) GoString() string {
- return s.String()
-}
-
-// SetBackupDescription sets the BackupDescription field's value.
-func (s *DescribeBackupOutput) SetBackupDescription(v *BackupDescription) *DescribeBackupOutput {
- s.BackupDescription = v
- return s
-}
-
-type DescribeContinuousBackupsInput struct {
- _ struct{} `type:"structure"`
-
- // Name of the table for which the customer wants to check the continuous backups
- // and point in time recovery settings.
- //
- // You can also provide the Amazon Resource Name (ARN) of the table in this
- // parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeContinuousBackupsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeContinuousBackupsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeContinuousBackupsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"}
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeContinuousBackupsInput) SetTableName(v string) *DescribeContinuousBackupsInput {
- s.TableName = &v
- return s
-}
-
-type DescribeContinuousBackupsOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the continuous backups and point in time recovery settings on
- // the table.
- ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeContinuousBackupsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeContinuousBackupsOutput) GoString() string {
- return s.String()
-}
-
-// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value.
-func (s *DescribeContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *DescribeContinuousBackupsOutput {
- s.ContinuousBackupsDescription = v
- return s
-}
-
-type DescribeContributorInsightsInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index to describe, if applicable.
- IndexName *string `min:"3" type:"string"`
-
- // The name of the table to describe. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeContributorInsightsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeContributorInsightsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeContributorInsightsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeContributorInsightsInput"}
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *DescribeContributorInsightsInput) SetIndexName(v string) *DescribeContributorInsightsInput {
- s.IndexName = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeContributorInsightsInput) SetTableName(v string) *DescribeContributorInsightsInput {
- s.TableName = &v
- return s
-}
-
-type DescribeContributorInsightsOutput struct {
- _ struct{} `type:"structure"`
-
- // List of names of the associated contributor insights rules.
- ContributorInsightsRuleList []*string `type:"list"`
-
- // Current status of contributor insights.
- ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"`
-
- // Returns information about the last failure that was encountered.
- //
- // The most common exceptions for a FAILED status are:
- //
- // * LimitExceededException - Per-account Amazon CloudWatch Contributor Insights
- // rule limit reached. Please disable Contributor Insights for other tables/indexes
- // OR disable Contributor Insights rules before retrying.
- //
- // * AccessDeniedException - Amazon CloudWatch Contributor Insights rules
- // cannot be modified due to insufficient permissions.
- //
- // * AccessDeniedException - Failed to create service-linked role for Contributor
- // Insights due to insufficient permissions.
- //
- // * InternalServerError - Failed to create Amazon CloudWatch Contributor
- // Insights rules. Please retry request.
- FailureException *FailureException `type:"structure"`
-
- // The name of the global secondary index being described.
- IndexName *string `min:"3" type:"string"`
-
- // Timestamp of the last time the status was changed.
- LastUpdateDateTime *time.Time `type:"timestamp"`
-
- // The name of the table being described.
- TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeContributorInsightsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeContributorInsightsOutput) GoString() string {
- return s.String()
-}
-
-// SetContributorInsightsRuleList sets the ContributorInsightsRuleList field's value.
-func (s *DescribeContributorInsightsOutput) SetContributorInsightsRuleList(v []*string) *DescribeContributorInsightsOutput {
- s.ContributorInsightsRuleList = v
- return s
-}
-
-// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value.
-func (s *DescribeContributorInsightsOutput) SetContributorInsightsStatus(v string) *DescribeContributorInsightsOutput {
- s.ContributorInsightsStatus = &v
- return s
-}
-
-// SetFailureException sets the FailureException field's value.
-func (s *DescribeContributorInsightsOutput) SetFailureException(v *FailureException) *DescribeContributorInsightsOutput {
- s.FailureException = v
- return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *DescribeContributorInsightsOutput) SetIndexName(v string) *DescribeContributorInsightsOutput {
- s.IndexName = &v
- return s
-}
-
-// SetLastUpdateDateTime sets the LastUpdateDateTime field's value.
-func (s *DescribeContributorInsightsOutput) SetLastUpdateDateTime(v time.Time) *DescribeContributorInsightsOutput {
- s.LastUpdateDateTime = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeContributorInsightsOutput) SetTableName(v string) *DescribeContributorInsightsOutput {
- s.TableName = &v
- return s
-}
-
-type DescribeEndpointsInput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeEndpointsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeEndpointsInput) GoString() string {
- return s.String()
-}
-
-type DescribeEndpointsOutput struct {
- _ struct{} `type:"structure"`
-
- // List of endpoints.
- //
- // Endpoints is a required field
- Endpoints []*Endpoint `type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeEndpointsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeEndpointsOutput) GoString() string {
- return s.String()
-}
-
-// SetEndpoints sets the Endpoints field's value.
-func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput {
- s.Endpoints = v
- return s
-}
-
-type DescribeExportInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) associated with the export.
- //
- // ExportArn is a required field
- ExportArn *string `min:"37" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeExportInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeExportInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeExportInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeExportInput"}
- if s.ExportArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ExportArn"))
- }
- if s.ExportArn != nil && len(*s.ExportArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("ExportArn", 37))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetExportArn sets the ExportArn field's value.
-func (s *DescribeExportInput) SetExportArn(v string) *DescribeExportInput {
- s.ExportArn = &v
- return s
-}
-
-type DescribeExportOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of the export.
- ExportDescription *ExportDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeExportOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeExportOutput) GoString() string {
- return s.String()
-}
-
-// SetExportDescription sets the ExportDescription field's value.
-func (s *DescribeExportOutput) SetExportDescription(v *ExportDescription) *DescribeExportOutput {
- s.ExportDescription = v
- return s
-}
-
-type DescribeGlobalTableInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the global table.
- //
- // GlobalTableName is a required field
- GlobalTableName *string `min:"3" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeGlobalTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableInput"}
- if s.GlobalTableName == nil {
- invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
- }
- if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *DescribeGlobalTableInput) SetGlobalTableName(v string) *DescribeGlobalTableInput {
- s.GlobalTableName = &v
- return s
-}
-
-type DescribeGlobalTableOutput struct {
- _ struct{} `type:"structure"`
-
- // Contains the details of the global table.
- GlobalTableDescription *GlobalTableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableOutput) GoString() string {
- return s.String()
-}
-
-// SetGlobalTableDescription sets the GlobalTableDescription field's value.
-func (s *DescribeGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *DescribeGlobalTableOutput {
- s.GlobalTableDescription = v
- return s
-}
-
-type DescribeGlobalTableSettingsInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the global table to describe.
- //
- // GlobalTableName is a required field
- GlobalTableName *string `min:"3" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableSettingsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableSettingsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeGlobalTableSettingsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"}
- if s.GlobalTableName == nil {
- invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
- }
- if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *DescribeGlobalTableSettingsInput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsInput {
- s.GlobalTableName = &v
- return s
-}
-
-type DescribeGlobalTableSettingsOutput struct {
- _ struct{} `type:"structure"`
-
- // The name of the global table.
- GlobalTableName *string `min:"3" type:"string"`
-
- // The Region-specific settings for the global table.
- ReplicaSettings []*ReplicaSettingsDescription `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableSettingsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeGlobalTableSettingsOutput) GoString() string {
- return s.String()
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *DescribeGlobalTableSettingsOutput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsOutput {
- s.GlobalTableName = &v
- return s
-}
-
-// SetReplicaSettings sets the ReplicaSettings field's value.
-func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *DescribeGlobalTableSettingsOutput {
- s.ReplicaSettings = v
- return s
-}
-
-type DescribeImportInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) associated with the table you're importing
- // to.
- //
- // ImportArn is a required field
- ImportArn *string `min:"37" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImportInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImportInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeImportInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeImportInput"}
- if s.ImportArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ImportArn"))
- }
- if s.ImportArn != nil && len(*s.ImportArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("ImportArn", 37))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetImportArn sets the ImportArn field's value.
-func (s *DescribeImportInput) SetImportArn(v string) *DescribeImportInput {
- s.ImportArn = &v
- return s
-}
-
-type DescribeImportOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of the table created for the import, and parameters
- // of the import. The import parameters include import status, how many items
- // were processed, and how many errors were encountered.
- //
- // ImportTableDescription is a required field
- ImportTableDescription *ImportTableDescription `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImportOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeImportOutput) GoString() string {
- return s.String()
-}
-
-// SetImportTableDescription sets the ImportTableDescription field's value.
-func (s *DescribeImportOutput) SetImportTableDescription(v *ImportTableDescription) *DescribeImportOutput {
- s.ImportTableDescription = v
- return s
-}
-
-type DescribeKinesisStreamingDestinationInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the table being described. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeKinesisStreamingDestinationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeKinesisStreamingDestinationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeKinesisStreamingDestinationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeKinesisStreamingDestinationInput"}
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeKinesisStreamingDestinationInput) SetTableName(v string) *DescribeKinesisStreamingDestinationInput {
- s.TableName = &v
- return s
-}
-
-type DescribeKinesisStreamingDestinationOutput struct {
- _ struct{} `type:"structure"`
-
- // The list of replica structures for the table being described.
- KinesisDataStreamDestinations []*KinesisDataStreamDestination `type:"list"`
-
- // The name of the table being described.
- TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeKinesisStreamingDestinationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeKinesisStreamingDestinationOutput) GoString() string {
- return s.String()
-}
-
-// SetKinesisDataStreamDestinations sets the KinesisDataStreamDestinations field's value.
-func (s *DescribeKinesisStreamingDestinationOutput) SetKinesisDataStreamDestinations(v []*KinesisDataStreamDestination) *DescribeKinesisStreamingDestinationOutput {
- s.KinesisDataStreamDestinations = v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeKinesisStreamingDestinationOutput) SetTableName(v string) *DescribeKinesisStreamingDestinationOutput {
- s.TableName = &v
- return s
-}
-
-// Represents the input of a DescribeLimits operation. Has no content.
-type DescribeLimitsInput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeLimitsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeLimitsInput) GoString() string {
- return s.String()
-}
-
-// Represents the output of a DescribeLimits operation.
-type DescribeLimitsOutput struct {
- _ struct{} `type:"structure"`
-
- // The maximum total read capacity units that your account allows you to provision
- // across all of your tables in this Region.
- AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"`
-
- // The maximum total write capacity units that your account allows you to provision
- // across all of your tables in this Region.
- AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"`
-
- // The maximum read capacity units that your account allows you to provision
- // for a new table that you are creating in this Region, including the read
- // capacity units provisioned for its global secondary indexes (GSIs).
- TableMaxReadCapacityUnits *int64 `min:"1" type:"long"`
-
- // The maximum write capacity units that your account allows you to provision
- // for a new table that you are creating in this Region, including the write
- // capacity units provisioned for its global secondary indexes (GSIs).
- TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeLimitsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeLimitsOutput) GoString() string {
- return s.String()
-}
-
-// SetAccountMaxReadCapacityUnits sets the AccountMaxReadCapacityUnits field's value.
-func (s *DescribeLimitsOutput) SetAccountMaxReadCapacityUnits(v int64) *DescribeLimitsOutput {
- s.AccountMaxReadCapacityUnits = &v
- return s
-}
-
-// SetAccountMaxWriteCapacityUnits sets the AccountMaxWriteCapacityUnits field's value.
-func (s *DescribeLimitsOutput) SetAccountMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput {
- s.AccountMaxWriteCapacityUnits = &v
- return s
-}
-
-// SetTableMaxReadCapacityUnits sets the TableMaxReadCapacityUnits field's value.
-func (s *DescribeLimitsOutput) SetTableMaxReadCapacityUnits(v int64) *DescribeLimitsOutput {
- s.TableMaxReadCapacityUnits = &v
- return s
-}
-
-// SetTableMaxWriteCapacityUnits sets the TableMaxWriteCapacityUnits field's value.
-func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput {
- s.TableMaxWriteCapacityUnits = &v
- return s
-}
-
-// Represents the input of a DescribeTable operation.
-type DescribeTableInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the table to describe. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"}
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeTableInput) SetTableName(v string) *DescribeTableInput {
- s.TableName = &v
- return s
-}
-
-// Represents the output of a DescribeTable operation.
-type DescribeTableOutput struct {
- _ struct{} `type:"structure"`
-
- // The properties of the table.
- Table *TableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTableOutput) GoString() string {
- return s.String()
-}
-
-// SetTable sets the Table field's value.
-func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput {
- s.Table = v
- return s
-}
-
-type DescribeTableReplicaAutoScalingInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the table. You can also provide the Amazon Resource Name (ARN)
- // of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTableReplicaAutoScalingInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTableReplicaAutoScalingInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeTableReplicaAutoScalingInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeTableReplicaAutoScalingInput"}
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeTableReplicaAutoScalingInput) SetTableName(v string) *DescribeTableReplicaAutoScalingInput {
- s.TableName = &v
- return s
-}
-
-type DescribeTableReplicaAutoScalingOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the auto scaling properties of the table.
- TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTableReplicaAutoScalingOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTableReplicaAutoScalingOutput) GoString() string {
- return s.String()
-}
-
-// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value.
-func (s *DescribeTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *DescribeTableReplicaAutoScalingOutput {
- s.TableAutoScalingDescription = v
- return s
-}
-
-type DescribeTimeToLiveInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the table to be described. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTimeToLiveInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTimeToLiveInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeTimeToLiveInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DescribeTimeToLiveInput"}
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DescribeTimeToLiveInput) SetTableName(v string) *DescribeTimeToLiveInput {
- s.TableName = &v
- return s
-}
-
-type DescribeTimeToLiveOutput struct {
- _ struct{} `type:"structure"`
-
- // The description of the Time to Live (TTL) status on the specified table.
- TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTimeToLiveOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DescribeTimeToLiveOutput) GoString() string {
- return s.String()
-}
-
-// SetTimeToLiveDescription sets the TimeToLiveDescription field's value.
-func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescription) *DescribeTimeToLiveOutput {
- s.TimeToLiveDescription = v
- return s
-}
-
-type DisableKinesisStreamingDestinationInput struct {
- _ struct{} `type:"structure"`
-
- // The source for the Kinesis streaming information that is being enabled.
- EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"`
-
- // The ARN for a Kinesis data stream.
- //
- // StreamArn is a required field
- StreamArn *string `min:"37" type:"string" required:"true"`
-
- // The name of the DynamoDB table. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DisableKinesisStreamingDestinationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DisableKinesisStreamingDestinationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DisableKinesisStreamingDestinationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DisableKinesisStreamingDestinationInput"}
- if s.StreamArn == nil {
- invalidParams.Add(request.NewErrParamRequired("StreamArn"))
- }
- if s.StreamArn != nil && len(*s.StreamArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value.
-func (s *DisableKinesisStreamingDestinationInput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *DisableKinesisStreamingDestinationInput {
- s.EnableKinesisStreamingConfiguration = v
- return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *DisableKinesisStreamingDestinationInput) SetStreamArn(v string) *DisableKinesisStreamingDestinationInput {
- s.StreamArn = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DisableKinesisStreamingDestinationInput) SetTableName(v string) *DisableKinesisStreamingDestinationInput {
- s.TableName = &v
- return s
-}
-
-type DisableKinesisStreamingDestinationOutput struct {
- _ struct{} `type:"structure"`
-
- // The current status of the replication.
- DestinationStatus *string `type:"string" enum:"DestinationStatus"`
-
- // The destination for the Kinesis streaming information that is being enabled.
- EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"`
-
- // The ARN for the specific Kinesis data stream.
- StreamArn *string `min:"37" type:"string"`
-
- // The name of the table being modified.
- TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DisableKinesisStreamingDestinationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DisableKinesisStreamingDestinationOutput) GoString() string {
- return s.String()
-}
-
-// SetDestinationStatus sets the DestinationStatus field's value.
-func (s *DisableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *DisableKinesisStreamingDestinationOutput {
- s.DestinationStatus = &v
- return s
-}
-
-// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value.
-func (s *DisableKinesisStreamingDestinationOutput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *DisableKinesisStreamingDestinationOutput {
- s.EnableKinesisStreamingConfiguration = v
- return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *DisableKinesisStreamingDestinationOutput) SetStreamArn(v string) *DisableKinesisStreamingDestinationOutput {
- s.StreamArn = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *DisableKinesisStreamingDestinationOutput) SetTableName(v string) *DisableKinesisStreamingDestinationOutput {
- s.TableName = &v
- return s
-}
-
-// There was an attempt to insert an item with the same primary key as an item
-// that already exists in the DynamoDB table.
-type DuplicateItemException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DuplicateItemException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DuplicateItemException) GoString() string {
- return s.String()
-}
-
-func newErrorDuplicateItemException(v protocol.ResponseMetadata) error {
- return &DuplicateItemException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *DuplicateItemException) Code() string {
- return "DuplicateItemException"
-}
-
-// Message returns the exception's message.
-func (s *DuplicateItemException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *DuplicateItemException) OrigErr() error {
- return nil
-}
-
-func (s *DuplicateItemException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *DuplicateItemException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *DuplicateItemException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Enables setting the configuration for Kinesis Streaming.
-type EnableKinesisStreamingConfiguration struct {
- _ struct{} `type:"structure"`
-
- // Toggle for the precision of Kinesis data stream timestamp. The values are
- // either MILLISECOND or MICROSECOND.
- ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnableKinesisStreamingConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnableKinesisStreamingConfiguration) GoString() string {
- return s.String()
-}
-
-// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value.
-func (s *EnableKinesisStreamingConfiguration) SetApproximateCreationDateTimePrecision(v string) *EnableKinesisStreamingConfiguration {
- s.ApproximateCreationDateTimePrecision = &v
- return s
-}
-
-type EnableKinesisStreamingDestinationInput struct {
- _ struct{} `type:"structure"`
-
- // The source for the Kinesis streaming information that is being enabled.
- EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"`
-
- // The ARN for a Kinesis data stream.
- //
- // StreamArn is a required field
- StreamArn *string `min:"37" type:"string" required:"true"`
-
- // The name of the DynamoDB table. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnableKinesisStreamingDestinationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnableKinesisStreamingDestinationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *EnableKinesisStreamingDestinationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "EnableKinesisStreamingDestinationInput"}
- if s.StreamArn == nil {
- invalidParams.Add(request.NewErrParamRequired("StreamArn"))
- }
- if s.StreamArn != nil && len(*s.StreamArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value.
-func (s *EnableKinesisStreamingDestinationInput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *EnableKinesisStreamingDestinationInput {
- s.EnableKinesisStreamingConfiguration = v
- return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *EnableKinesisStreamingDestinationInput) SetStreamArn(v string) *EnableKinesisStreamingDestinationInput {
- s.StreamArn = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *EnableKinesisStreamingDestinationInput) SetTableName(v string) *EnableKinesisStreamingDestinationInput {
- s.TableName = &v
- return s
-}
-
-type EnableKinesisStreamingDestinationOutput struct {
- _ struct{} `type:"structure"`
-
- // The current status of the replication.
- DestinationStatus *string `type:"string" enum:"DestinationStatus"`
-
- // The destination for the Kinesis streaming information that is being enabled.
- EnableKinesisStreamingConfiguration *EnableKinesisStreamingConfiguration `type:"structure"`
-
- // The ARN for the specific Kinesis data stream.
- StreamArn *string `min:"37" type:"string"`
-
- // The name of the table being modified.
- TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnableKinesisStreamingDestinationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s EnableKinesisStreamingDestinationOutput) GoString() string {
- return s.String()
-}
-
-// SetDestinationStatus sets the DestinationStatus field's value.
-func (s *EnableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *EnableKinesisStreamingDestinationOutput {
- s.DestinationStatus = &v
- return s
-}
-
-// SetEnableKinesisStreamingConfiguration sets the EnableKinesisStreamingConfiguration field's value.
-func (s *EnableKinesisStreamingDestinationOutput) SetEnableKinesisStreamingConfiguration(v *EnableKinesisStreamingConfiguration) *EnableKinesisStreamingDestinationOutput {
- s.EnableKinesisStreamingConfiguration = v
- return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *EnableKinesisStreamingDestinationOutput) SetStreamArn(v string) *EnableKinesisStreamingDestinationOutput {
- s.StreamArn = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *EnableKinesisStreamingDestinationOutput) SetTableName(v string) *EnableKinesisStreamingDestinationOutput {
- s.TableName = &v
- return s
-}
-
-// An endpoint information details.
-type Endpoint struct {
- _ struct{} `type:"structure"`
-
- // IP address of the endpoint.
- //
- // Address is a required field
- Address *string `type:"string" required:"true"`
-
- // Endpoint cache time to live (TTL) value.
- //
- // CachePeriodInMinutes is a required field
- CachePeriodInMinutes *int64 `type:"long" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Endpoint) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Endpoint) GoString() string {
- return s.String()
-}
-
-// SetAddress sets the Address field's value.
-func (s *Endpoint) SetAddress(v string) *Endpoint {
- s.Address = &v
- return s
-}
-
-// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value.
-func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint {
- s.CachePeriodInMinutes = &v
- return s
-}
-
-type ExecuteStatementInput struct {
- _ struct{} `type:"structure"`
-
- // The consistency of a read operation. If set to true, then a strongly consistent
- // read is used; otherwise, an eventually consistent read is used.
- ConsistentRead *bool `type:"boolean"`
-
- // The maximum number of items to evaluate (not necessarily the number of matching
- // items). If DynamoDB processes the number of items up to the limit while processing
- // the results, it stops the operation and returns the matching values up to
- // that point, along with a key in LastEvaluatedKey to apply in a subsequent
- // operation so you can pick up where you left off. Also, if the processed dataset
- // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
- // and returns the matching values up to the limit, and a key in LastEvaluatedKey
- // to apply in a subsequent operation to continue the operation.
- Limit *int64 `min:"1" type:"integer"`
-
- // Set this value to get remaining results, if NextToken was returned in the
- // statement response.
- NextToken *string `min:"1" type:"string"`
-
- // The parameters for the PartiQL statement, if any.
- Parameters []*AttributeValue `min:"1" type:"list"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // An optional parameter that returns the item attributes for an ExecuteStatement
- // operation that failed a condition check.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // The PartiQL statement representing the operation to run.
- //
- // Statement is a required field
- Statement *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteStatementInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteStatementInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ExecuteStatementInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ExecuteStatementInput"}
- if s.Limit != nil && *s.Limit < 1 {
- invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
- }
- if s.NextToken != nil && len(*s.NextToken) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
- }
- if s.Parameters != nil && len(s.Parameters) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Parameters", 1))
- }
- if s.Statement == nil {
- invalidParams.Add(request.NewErrParamRequired("Statement"))
- }
- if s.Statement != nil && len(*s.Statement) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Statement", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConsistentRead sets the ConsistentRead field's value.
-func (s *ExecuteStatementInput) SetConsistentRead(v bool) *ExecuteStatementInput {
- s.ConsistentRead = &v
- return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *ExecuteStatementInput) SetLimit(v int64) *ExecuteStatementInput {
- s.Limit = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ExecuteStatementInput) SetNextToken(v string) *ExecuteStatementInput {
- s.NextToken = &v
- return s
-}
-
-// SetParameters sets the Parameters field's value.
-func (s *ExecuteStatementInput) SetParameters(v []*AttributeValue) *ExecuteStatementInput {
- s.Parameters = v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *ExecuteStatementInput) SetReturnConsumedCapacity(v string) *ExecuteStatementInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *ExecuteStatementInput) SetReturnValuesOnConditionCheckFailure(v string) *ExecuteStatementInput {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetStatement sets the Statement field's value.
-func (s *ExecuteStatementInput) SetStatement(v string) *ExecuteStatementInput {
- s.Statement = &v
- return s
-}
-
-type ExecuteStatementOutput struct {
- _ struct{} `type:"structure"`
-
- // The capacity units consumed by an operation. The data returned includes the
- // total provisioned throughput consumed, along with statistics for the table
- // and any indexes involved in the operation. ConsumedCapacity is only returned
- // if the request asked for it. For more information, see Provisioned capacity
- // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // If a read operation was used, this property will contain the result of the
- // read operation; a map of attribute names and their values. For the write
- // operations this value will be empty.
- Items []map[string]*AttributeValue `type:"list"`
-
- // The primary key of the item where the operation stopped, inclusive of the
- // previous result set. Use this value to start a new operation, excluding this
- // value in the new request. If LastEvaluatedKey is empty, then the "last page"
- // of results has been processed and there is no more data to be retrieved.
- // If LastEvaluatedKey is not empty, it does not necessarily mean that there
- // is more data in the result set. The only way to know when you have reached
- // the end of the result set is when LastEvaluatedKey is empty.
- LastEvaluatedKey map[string]*AttributeValue `type:"map"`
-
- // If the response of a read request exceeds the response payload limit DynamoDB
- // will set this value in the response. If set, you can use that this value
- // in the subsequent request to get the remaining results.
- NextToken *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteStatementOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteStatementOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *ExecuteStatementOutput) SetConsumedCapacity(v *ConsumedCapacity) *ExecuteStatementOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItems sets the Items field's value.
-func (s *ExecuteStatementOutput) SetItems(v []map[string]*AttributeValue) *ExecuteStatementOutput {
- s.Items = v
- return s
-}
-
-// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
-func (s *ExecuteStatementOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ExecuteStatementOutput {
- s.LastEvaluatedKey = v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ExecuteStatementOutput) SetNextToken(v string) *ExecuteStatementOutput {
- s.NextToken = &v
- return s
-}
-
-type ExecuteTransactionInput struct {
- _ struct{} `type:"structure"`
-
- // Set this value to get remaining results, if NextToken was returned in the
- // statement response.
- ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response. For more information, see TransactGetItems
- // (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactGetItems.html)
- // and TransactWriteItems (https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html).
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // The list of PartiQL statements representing the transaction to run.
- //
- // TransactStatements is a required field
- TransactStatements []*ParameterizedStatement `min:"1" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteTransactionInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteTransactionInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ExecuteTransactionInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ExecuteTransactionInput"}
- if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
- }
- if s.TransactStatements == nil {
- invalidParams.Add(request.NewErrParamRequired("TransactStatements"))
- }
- if s.TransactStatements != nil && len(s.TransactStatements) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TransactStatements", 1))
- }
- if s.TransactStatements != nil {
- for i, v := range s.TransactStatements {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactStatements", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetClientRequestToken sets the ClientRequestToken field's value.
-func (s *ExecuteTransactionInput) SetClientRequestToken(v string) *ExecuteTransactionInput {
- s.ClientRequestToken = &v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *ExecuteTransactionInput) SetReturnConsumedCapacity(v string) *ExecuteTransactionInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetTransactStatements sets the TransactStatements field's value.
-func (s *ExecuteTransactionInput) SetTransactStatements(v []*ParameterizedStatement) *ExecuteTransactionInput {
- s.TransactStatements = v
- return s
-}
-
-type ExecuteTransactionOutput struct {
- _ struct{} `type:"structure"`
-
- // The capacity units consumed by the entire operation. The values of the list
- // are ordered according to the ordering of the statements.
- ConsumedCapacity []*ConsumedCapacity `type:"list"`
-
- // The response to a PartiQL transaction.
- Responses []*ItemResponse `min:"1" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteTransactionOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExecuteTransactionOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *ExecuteTransactionOutput) SetConsumedCapacity(v []*ConsumedCapacity) *ExecuteTransactionOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetResponses sets the Responses field's value.
-func (s *ExecuteTransactionOutput) SetResponses(v []*ItemResponse) *ExecuteTransactionOutput {
- s.Responses = v
- return s
-}
-
-// Represents a condition to be compared with an attribute value. This condition
-// can be used with DeleteItem, PutItem, or UpdateItem operations; if the comparison
-// evaluates to true, the operation succeeds; if not, the operation fails. You
-// can use ExpectedAttributeValue in one of two different ways:
-//
-// - Use AttributeValueList to specify one or more values to compare against
-// an attribute. Use ComparisonOperator to specify how you want to perform
-// the comparison. If the comparison evaluates to true, then the conditional
-// operation succeeds.
-//
-// - Use Value to specify a value that DynamoDB will compare against an attribute.
-// If the values match, then ExpectedAttributeValue evaluates to true and
-// the conditional operation succeeds. Optionally, you can also set Exists
-// to false, indicating that you do not expect to find the attribute value
-// in the table. In this case, the conditional operation succeeds only if
-// the comparison evaluates to false.
-//
-// Value and Exists are incompatible with AttributeValueList and ComparisonOperator.
-// Note that if you use both sets of parameters at once, DynamoDB will return
-// a ValidationException exception.
-type ExpectedAttributeValue struct {
- _ struct{} `type:"structure"`
-
- // One or more values to evaluate against the supplied attribute. The number
- // of values in the list depends on the ComparisonOperator being used.
- //
- // For type Number, value comparisons are numeric.
- //
- // String value comparisons for greater than, equals, or less than are based
- // on ASCII character code values. For example, a is greater than A, and a is
- // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters
- // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters).
- //
- // For Binary, DynamoDB treats each byte of the binary data as unsigned when
- // it compares binary values.
- //
- // For information on specifying data types in JSON, see JSON Data Format (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html)
- // in the Amazon DynamoDB Developer Guide.
- AttributeValueList []*AttributeValue `type:"list"`
-
- // A comparator for evaluating attributes in the AttributeValueList. For example,
- // equals, greater than, less than, etc.
- //
- // The following comparison operators are available:
- //
- // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS |
- // BEGINS_WITH | IN | BETWEEN
- //
- // The following are descriptions of each comparison operator.
- //
- // * EQ : Equal. EQ is supported for all data types, including lists and
- // maps. AttributeValueList can contain only one AttributeValue element of
- // type String, Number, Binary, String Set, Number Set, or Binary Set. If
- // an item contains an AttributeValue element of a different type than the
- // one provided in the request, the value does not match. For example, {"S":"6"}
- // does not equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2",
- // "1"]}.
- //
- // * NE : Not equal. NE is supported for all data types, including lists
- // and maps. AttributeValueList can contain only one AttributeValue of type
- // String, Number, Binary, String Set, Number Set, or Binary Set. If an item
- // contains an AttributeValue of a different type than the one provided in
- // the request, the value does not match. For example, {"S":"6"} does not
- // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}.
- //
- // * LE : Less than or equal. AttributeValueList can contain only one AttributeValue
- // element of type String, Number, or Binary (not a set type). If an item
- // contains an AttributeValue element of a different type than the one provided
- // in the request, the value does not match. For example, {"S":"6"} does
- // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
- // "1"]}.
- //
- // * LT : Less than. AttributeValueList can contain only one AttributeValue
- // of type String, Number, or Binary (not a set type). If an item contains
- // an AttributeValue element of a different type than the one provided in
- // the request, the value does not match. For example, {"S":"6"} does not
- // equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
- // "1"]}.
- //
- // * GE : Greater than or equal. AttributeValueList can contain only one
- // AttributeValue element of type String, Number, or Binary (not a set type).
- // If an item contains an AttributeValue element of a different type than
- // the one provided in the request, the value does not match. For example,
- // {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} does not compare to
- // {"NS":["6", "2", "1"]}.
- //
- // * GT : Greater than. AttributeValueList can contain only one AttributeValue
- // element of type String, Number, or Binary (not a set type). If an item
- // contains an AttributeValue element of a different type than the one provided
- // in the request, the value does not match. For example, {"S":"6"} does
- // not equal {"N":"6"}. Also, {"N":"6"} does not compare to {"NS":["6", "2",
- // "1"]}.
- //
- // * NOT_NULL : The attribute exists. NOT_NULL is supported for all data
- // types, including lists and maps. This operator tests for the existence
- // of an attribute, not its data type. If the data type of attribute "a"
- // is null, and you evaluate it using NOT_NULL, the result is a Boolean true.
- // This result is because the attribute "a" exists; its data type is not
- // relevant to the NOT_NULL comparison operator.
- //
- // * NULL : The attribute does not exist. NULL is supported for all data
- // types, including lists and maps. This operator tests for the nonexistence
- // of an attribute, not its data type. If the data type of attribute "a"
- // is null, and you evaluate it using NULL, the result is a Boolean false.
- // This is because the attribute "a" exists; its data type is not relevant
- // to the NULL comparison operator.
- //
- // * CONTAINS : Checks for a subsequence, or value in a set. AttributeValueList
- // can contain only one AttributeValue element of type String, Number, or
- // Binary (not a set type). If the target attribute of the comparison is
- // of type String, then the operator checks for a substring match. If the
- // target attribute of the comparison is of type Binary, then the operator
- // looks for a subsequence of the target that matches the input. If the target
- // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator
- // evaluates to true if it finds an exact match with any member of the set.
- // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can
- // be a list; however, "b" cannot be a set, a map, or a list.
- //
- // * NOT_CONTAINS : Checks for absence of a subsequence, or absence of a
- // value in a set. AttributeValueList can contain only one AttributeValue
- // element of type String, Number, or Binary (not a set type). If the target
- // attribute of the comparison is a String, then the operator checks for
- // the absence of a substring match. If the target attribute of the comparison
- // is Binary, then the operator checks for the absence of a subsequence of
- // the target that matches the input. If the target attribute of the comparison
- // is a set ("SS", "NS", or "BS"), then the operator evaluates to true if
- // it does not find an exact match with any member of the set. NOT_CONTAINS
- // is supported for lists: When evaluating "a NOT CONTAINS b", "a" can be
- // a list; however, "b" cannot be a set, a map, or a list.
- //
- // * BEGINS_WITH : Checks for a prefix. AttributeValueList can contain only
- // one AttributeValue of type String or Binary (not a Number or a set type).
- // The target attribute of the comparison must be of type String or Binary
- // (not a Number or a set type).
- //
- // * IN : Checks for matching elements in a list. AttributeValueList can
- // contain one or more AttributeValue elements of type String, Number, or
- // Binary. These attributes are compared against an existing attribute of
- // an item. If any elements of the input are equal to the item attribute,
- // the expression evaluates to true.
- //
- // * BETWEEN : Greater than or equal to the first value, and less than or
- // equal to the second value. AttributeValueList must contain two AttributeValue
- // elements of the same type, either String, Number, or Binary (not a set
- // type). A target attribute matches if the target value is greater than,
- // or equal to, the first element and less than, or equal to, the second
- // element. If an item contains an AttributeValue element of a different
- // type than the one provided in the request, the value does not match. For
- // example, {"S":"6"} does not compare to {"N":"6"}. Also, {"N":"6"} does
- // not compare to {"NS":["6", "2", "1"]}
- ComparisonOperator *string `type:"string" enum:"ComparisonOperator"`
-
- // Causes DynamoDB to evaluate the value before attempting a conditional operation:
- //
- // * If Exists is true, DynamoDB will check to see if that attribute value
- // already exists in the table. If it is found, then the operation succeeds.
- // If it is not found, the operation fails with a ConditionCheckFailedException.
- //
- // * If Exists is false, DynamoDB assumes that the attribute value does not
- // exist in the table. If in fact the value does not exist, then the assumption
- // is valid and the operation succeeds. If the value is found, despite the
- // assumption that it does not exist, the operation fails with a ConditionCheckFailedException.
- //
- // The default setting for Exists is true. If you supply a Value all by itself,
- // DynamoDB assumes the attribute exists: You don't have to set Exists to true,
- // because it is implied.
- //
- // DynamoDB returns a ValidationException if:
- //
- // * Exists is true but there is no Value to check. (You expect a value to
- // exist, but don't specify what that value is.)
- //
- // * Exists is false but you also provide a Value. (You cannot expect an
- // attribute to have a value, while also expecting it not to exist.)
- Exists *bool `type:"boolean"`
-
- // Represents the data for the expected attribute.
- //
- // Each attribute value is described as a name-value pair. The name is the data
- // type, and the value is the data itself.
- //
- // For more information, see Data Types (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.NamingRulesDataTypes.html#HowItWorks.DataTypes)
- // in the Amazon DynamoDB Developer Guide.
- Value *AttributeValue `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExpectedAttributeValue) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExpectedAttributeValue) GoString() string {
- return s.String()
-}
-
-// SetAttributeValueList sets the AttributeValueList field's value.
-func (s *ExpectedAttributeValue) SetAttributeValueList(v []*AttributeValue) *ExpectedAttributeValue {
- s.AttributeValueList = v
- return s
-}
-
-// SetComparisonOperator sets the ComparisonOperator field's value.
-func (s *ExpectedAttributeValue) SetComparisonOperator(v string) *ExpectedAttributeValue {
- s.ComparisonOperator = &v
- return s
-}
-
-// SetExists sets the Exists field's value.
-func (s *ExpectedAttributeValue) SetExists(v bool) *ExpectedAttributeValue {
- s.Exists = &v
- return s
-}
-
-// SetValue sets the Value field's value.
-func (s *ExpectedAttributeValue) SetValue(v *AttributeValue) *ExpectedAttributeValue {
- s.Value = v
- return s
-}
-
-// There was a conflict when writing to the specified S3 bucket.
-type ExportConflictException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportConflictException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportConflictException) GoString() string {
- return s.String()
-}
-
-func newErrorExportConflictException(v protocol.ResponseMetadata) error {
- return &ExportConflictException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ExportConflictException) Code() string {
- return "ExportConflictException"
-}
-
-// Message returns the exception's message.
-func (s *ExportConflictException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ExportConflictException) OrigErr() error {
- return nil
-}
-
-func (s *ExportConflictException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ExportConflictException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ExportConflictException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Represents the properties of the exported table.
-type ExportDescription struct {
- _ struct{} `type:"structure"`
-
- // The billable size of the table export.
- BilledSizeBytes *int64 `type:"long"`
-
- // The client token that was provided for the export task. A client token makes
- // calls to ExportTableToPointInTimeInput idempotent, meaning that multiple
- // identical calls have the same effect as one single call.
- ClientToken *string `type:"string"`
-
- // The time at which the export task completed.
- EndTime *time.Time `type:"timestamp"`
-
- // The Amazon Resource Name (ARN) of the table export.
- ExportArn *string `min:"37" type:"string"`
-
- // The format of the exported data. Valid values for ExportFormat are DYNAMODB_JSON
- // or ION.
- ExportFormat *string `type:"string" enum:"ExportFormat"`
-
- // The name of the manifest file for the export task.
- ExportManifest *string `type:"string"`
-
- // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or
- // FAILED.
- ExportStatus *string `type:"string" enum:"ExportStatus"`
-
- // Point in time from which table data was exported.
- ExportTime *time.Time `type:"timestamp"`
-
- // The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT.
- ExportType *string `type:"string" enum:"ExportType"`
-
- // Status code for the result of the failed export.
- FailureCode *string `type:"string"`
-
- // Export failure reason description.
- FailureMessage *string `type:"string"`
-
- // Optional object containing the parameters specific to an incremental export.
- IncrementalExportSpecification *IncrementalExportSpecification `type:"structure"`
-
- // The number of items exported.
- ItemCount *int64 `type:"long"`
-
- // The name of the Amazon S3 bucket containing the export.
- S3Bucket *string `type:"string"`
-
- // The ID of the Amazon Web Services account that owns the bucket containing
- // the export.
- S3BucketOwner *string `type:"string"`
-
- // The Amazon S3 bucket prefix used as the file name and path of the exported
- // snapshot.
- S3Prefix *string `type:"string"`
-
- // Type of encryption used on the bucket where export data is stored. Valid
- // values for S3SseAlgorithm are:
- //
- // * AES256 - server-side encryption with Amazon S3 managed keys
- //
- // * KMS - server-side encryption with KMS managed keys
- S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"`
-
- // The ID of the KMS managed key used to encrypt the S3 bucket where export
- // data is stored (if applicable).
- S3SseKmsKeyId *string `min:"1" type:"string"`
-
- // The time at which the export task began.
- StartTime *time.Time `type:"timestamp"`
-
- // The Amazon Resource Name (ARN) of the table that was exported.
- TableArn *string `min:"1" type:"string"`
-
- // Unique ID of the table that was exported.
- TableId *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportDescription) GoString() string {
- return s.String()
-}
-
-// SetBilledSizeBytes sets the BilledSizeBytes field's value.
-func (s *ExportDescription) SetBilledSizeBytes(v int64) *ExportDescription {
- s.BilledSizeBytes = &v
- return s
-}
-
-// SetClientToken sets the ClientToken field's value.
-func (s *ExportDescription) SetClientToken(v string) *ExportDescription {
- s.ClientToken = &v
- return s
-}
-
-// SetEndTime sets the EndTime field's value.
-func (s *ExportDescription) SetEndTime(v time.Time) *ExportDescription {
- s.EndTime = &v
- return s
-}
-
-// SetExportArn sets the ExportArn field's value.
-func (s *ExportDescription) SetExportArn(v string) *ExportDescription {
- s.ExportArn = &v
- return s
-}
-
-// SetExportFormat sets the ExportFormat field's value.
-func (s *ExportDescription) SetExportFormat(v string) *ExportDescription {
- s.ExportFormat = &v
- return s
-}
-
-// SetExportManifest sets the ExportManifest field's value.
-func (s *ExportDescription) SetExportManifest(v string) *ExportDescription {
- s.ExportManifest = &v
- return s
-}
-
-// SetExportStatus sets the ExportStatus field's value.
-func (s *ExportDescription) SetExportStatus(v string) *ExportDescription {
- s.ExportStatus = &v
- return s
-}
-
-// SetExportTime sets the ExportTime field's value.
-func (s *ExportDescription) SetExportTime(v time.Time) *ExportDescription {
- s.ExportTime = &v
- return s
-}
-
-// SetExportType sets the ExportType field's value.
-func (s *ExportDescription) SetExportType(v string) *ExportDescription {
- s.ExportType = &v
- return s
-}
-
-// SetFailureCode sets the FailureCode field's value.
-func (s *ExportDescription) SetFailureCode(v string) *ExportDescription {
- s.FailureCode = &v
- return s
-}
-
-// SetFailureMessage sets the FailureMessage field's value.
-func (s *ExportDescription) SetFailureMessage(v string) *ExportDescription {
- s.FailureMessage = &v
- return s
-}
-
-// SetIncrementalExportSpecification sets the IncrementalExportSpecification field's value.
-func (s *ExportDescription) SetIncrementalExportSpecification(v *IncrementalExportSpecification) *ExportDescription {
- s.IncrementalExportSpecification = v
- return s
-}
-
-// SetItemCount sets the ItemCount field's value.
-func (s *ExportDescription) SetItemCount(v int64) *ExportDescription {
- s.ItemCount = &v
- return s
-}
-
-// SetS3Bucket sets the S3Bucket field's value.
-func (s *ExportDescription) SetS3Bucket(v string) *ExportDescription {
- s.S3Bucket = &v
- return s
-}
-
-// SetS3BucketOwner sets the S3BucketOwner field's value.
-func (s *ExportDescription) SetS3BucketOwner(v string) *ExportDescription {
- s.S3BucketOwner = &v
- return s
-}
-
-// SetS3Prefix sets the S3Prefix field's value.
-func (s *ExportDescription) SetS3Prefix(v string) *ExportDescription {
- s.S3Prefix = &v
- return s
-}
-
-// SetS3SseAlgorithm sets the S3SseAlgorithm field's value.
-func (s *ExportDescription) SetS3SseAlgorithm(v string) *ExportDescription {
- s.S3SseAlgorithm = &v
- return s
-}
-
-// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value.
-func (s *ExportDescription) SetS3SseKmsKeyId(v string) *ExportDescription {
- s.S3SseKmsKeyId = &v
- return s
-}
-
-// SetStartTime sets the StartTime field's value.
-func (s *ExportDescription) SetStartTime(v time.Time) *ExportDescription {
- s.StartTime = &v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *ExportDescription) SetTableArn(v string) *ExportDescription {
- s.TableArn = &v
- return s
-}
-
-// SetTableId sets the TableId field's value.
-func (s *ExportDescription) SetTableId(v string) *ExportDescription {
- s.TableId = &v
- return s
-}
-
-// The specified export was not found.
-type ExportNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorExportNotFoundException(v protocol.ResponseMetadata) error {
- return &ExportNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ExportNotFoundException) Code() string {
- return "ExportNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *ExportNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ExportNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *ExportNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ExportNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ExportNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Summary information about an export task.
-type ExportSummary struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) of the export.
- ExportArn *string `min:"37" type:"string"`
-
- // Export can be in one of the following states: IN_PROGRESS, COMPLETED, or
- // FAILED.
- ExportStatus *string `type:"string" enum:"ExportStatus"`
-
- // The type of export that was performed. Valid values are FULL_EXPORT or INCREMENTAL_EXPORT.
- ExportType *string `type:"string" enum:"ExportType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportSummary) GoString() string {
- return s.String()
-}
-
-// SetExportArn sets the ExportArn field's value.
-func (s *ExportSummary) SetExportArn(v string) *ExportSummary {
- s.ExportArn = &v
- return s
-}
-
-// SetExportStatus sets the ExportStatus field's value.
-func (s *ExportSummary) SetExportStatus(v string) *ExportSummary {
- s.ExportStatus = &v
- return s
-}
-
-// SetExportType sets the ExportType field's value.
-func (s *ExportSummary) SetExportType(v string) *ExportSummary {
- s.ExportType = &v
- return s
-}
-
-type ExportTableToPointInTimeInput struct {
- _ struct{} `type:"structure"`
-
- // Providing a ClientToken makes the call to ExportTableToPointInTimeInput idempotent,
- // meaning that multiple identical calls have the same effect as one single
- // call.
- //
- // A client token is valid for 8 hours after the first request that uses it
- // is completed. After 8 hours, any request with the same client token is treated
- // as a new request. Do not resubmit the same request with the same client token
- // for more than 8 hours, or the result might not be idempotent.
- //
- // If you submit a request with the same client token but a change in other
- // parameters within the 8-hour idempotency window, DynamoDB returns an ImportConflictException.
- ClientToken *string `type:"string" idempotencyToken:"true"`
-
- // The format for the exported data. Valid values for ExportFormat are DYNAMODB_JSON
- // or ION.
- ExportFormat *string `type:"string" enum:"ExportFormat"`
-
- // Time in the past from which to export table data, counted in seconds from
- // the start of the Unix epoch. The table export will be a snapshot of the table's
- // state at this point in time.
- ExportTime *time.Time `type:"timestamp"`
-
- // Choice of whether to execute as a full export or incremental export. Valid
- // values are FULL_EXPORT or INCREMENTAL_EXPORT. The default value is FULL_EXPORT.
- // If INCREMENTAL_EXPORT is provided, the IncrementalExportSpecification must
- // also be used.
- ExportType *string `type:"string" enum:"ExportType"`
-
- // Optional object containing the parameters specific to an incremental export.
- IncrementalExportSpecification *IncrementalExportSpecification `type:"structure"`
-
- // The name of the Amazon S3 bucket to export the snapshot to.
- //
- // S3Bucket is a required field
- S3Bucket *string `type:"string" required:"true"`
-
- // The ID of the Amazon Web Services account that owns the bucket the export
- // will be stored in.
- //
- // S3BucketOwner is a required parameter when exporting to a S3 bucket in another
- // account.
- S3BucketOwner *string `type:"string"`
-
- // The Amazon S3 bucket prefix to use as the file name and path of the exported
- // snapshot.
- S3Prefix *string `type:"string"`
-
- // Type of encryption used on the bucket where export data will be stored. Valid
- // values for S3SseAlgorithm are:
- //
- // * AES256 - server-side encryption with Amazon S3 managed keys
- //
- // * KMS - server-side encryption with KMS managed keys
- S3SseAlgorithm *string `type:"string" enum:"S3SseAlgorithm"`
-
- // The ID of the KMS managed key used to encrypt the S3 bucket where export
- // data will be stored (if applicable).
- S3SseKmsKeyId *string `min:"1" type:"string"`
-
- // The Amazon Resource Name (ARN) associated with the table to export.
- //
- // TableArn is a required field
- TableArn *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportTableToPointInTimeInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportTableToPointInTimeInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ExportTableToPointInTimeInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ExportTableToPointInTimeInput"}
- if s.S3Bucket == nil {
- invalidParams.Add(request.NewErrParamRequired("S3Bucket"))
- }
- if s.S3SseKmsKeyId != nil && len(*s.S3SseKmsKeyId) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("S3SseKmsKeyId", 1))
- }
- if s.TableArn == nil {
- invalidParams.Add(request.NewErrParamRequired("TableArn"))
- }
- if s.TableArn != nil && len(*s.TableArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableArn", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetClientToken sets the ClientToken field's value.
-func (s *ExportTableToPointInTimeInput) SetClientToken(v string) *ExportTableToPointInTimeInput {
- s.ClientToken = &v
- return s
-}
-
-// SetExportFormat sets the ExportFormat field's value.
-func (s *ExportTableToPointInTimeInput) SetExportFormat(v string) *ExportTableToPointInTimeInput {
- s.ExportFormat = &v
- return s
-}
-
-// SetExportTime sets the ExportTime field's value.
-func (s *ExportTableToPointInTimeInput) SetExportTime(v time.Time) *ExportTableToPointInTimeInput {
- s.ExportTime = &v
- return s
-}
-
-// SetExportType sets the ExportType field's value.
-func (s *ExportTableToPointInTimeInput) SetExportType(v string) *ExportTableToPointInTimeInput {
- s.ExportType = &v
- return s
-}
-
-// SetIncrementalExportSpecification sets the IncrementalExportSpecification field's value.
-func (s *ExportTableToPointInTimeInput) SetIncrementalExportSpecification(v *IncrementalExportSpecification) *ExportTableToPointInTimeInput {
- s.IncrementalExportSpecification = v
- return s
-}
-
-// SetS3Bucket sets the S3Bucket field's value.
-func (s *ExportTableToPointInTimeInput) SetS3Bucket(v string) *ExportTableToPointInTimeInput {
- s.S3Bucket = &v
- return s
-}
-
-// SetS3BucketOwner sets the S3BucketOwner field's value.
-func (s *ExportTableToPointInTimeInput) SetS3BucketOwner(v string) *ExportTableToPointInTimeInput {
- s.S3BucketOwner = &v
- return s
-}
-
-// SetS3Prefix sets the S3Prefix field's value.
-func (s *ExportTableToPointInTimeInput) SetS3Prefix(v string) *ExportTableToPointInTimeInput {
- s.S3Prefix = &v
- return s
-}
-
-// SetS3SseAlgorithm sets the S3SseAlgorithm field's value.
-func (s *ExportTableToPointInTimeInput) SetS3SseAlgorithm(v string) *ExportTableToPointInTimeInput {
- s.S3SseAlgorithm = &v
- return s
-}
-
-// SetS3SseKmsKeyId sets the S3SseKmsKeyId field's value.
-func (s *ExportTableToPointInTimeInput) SetS3SseKmsKeyId(v string) *ExportTableToPointInTimeInput {
- s.S3SseKmsKeyId = &v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *ExportTableToPointInTimeInput) SetTableArn(v string) *ExportTableToPointInTimeInput {
- s.TableArn = &v
- return s
-}
-
-type ExportTableToPointInTimeOutput struct {
- _ struct{} `type:"structure"`
-
- // Contains a description of the table export.
- ExportDescription *ExportDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportTableToPointInTimeOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExportTableToPointInTimeOutput) GoString() string {
- return s.String()
-}
-
-// SetExportDescription sets the ExportDescription field's value.
-func (s *ExportTableToPointInTimeOutput) SetExportDescription(v *ExportDescription) *ExportTableToPointInTimeOutput {
- s.ExportDescription = v
- return s
-}
-
-// Represents a failure a contributor insights operation.
-type FailureException struct {
- _ struct{} `type:"structure"`
-
- // Description of the failure.
- ExceptionDescription *string `type:"string"`
-
- // Exception name.
- ExceptionName *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s FailureException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s FailureException) GoString() string {
- return s.String()
-}
-
-// SetExceptionDescription sets the ExceptionDescription field's value.
-func (s *FailureException) SetExceptionDescription(v string) *FailureException {
- s.ExceptionDescription = &v
- return s
-}
-
-// SetExceptionName sets the ExceptionName field's value.
-func (s *FailureException) SetExceptionName(v string) *FailureException {
- s.ExceptionName = &v
- return s
-}
-
-// Specifies an item and related attribute values to retrieve in a TransactGetItem
-// object.
-type Get struct {
- _ struct{} `type:"structure"`
-
- // One or more substitution tokens for attribute names in the ProjectionExpression
- // parameter.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // A map of attribute names to AttributeValue objects that specifies the primary
- // key of the item to retrieve.
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-
- // A string that identifies one or more attributes of the specified item to
- // retrieve from the table. The attributes in the expression must be separated
- // by commas. If no attribute names are specified, then all attributes of the
- // specified item are returned. If any of the requested attributes are not found,
- // they do not appear in the result.
- ProjectionExpression *string `type:"string"`
-
- // The name of the table from which to retrieve the specified item. You can
- // also provide the Amazon Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Get) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Get) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Get) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Get"}
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *Get) SetExpressionAttributeNames(v map[string]*string) *Get {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetKey sets the Key field's value.
-func (s *Get) SetKey(v map[string]*AttributeValue) *Get {
- s.Key = v
- return s
-}
-
-// SetProjectionExpression sets the ProjectionExpression field's value.
-func (s *Get) SetProjectionExpression(v string) *Get {
- s.ProjectionExpression = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *Get) SetTableName(v string) *Get {
- s.TableName = &v
- return s
-}
-
-// Represents the input of a GetItem operation.
-type GetItemInput struct {
- _ struct{} `type:"structure"`
-
- // This is a legacy parameter. Use ProjectionExpression instead. For more information,
- // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
- // in the Amazon DynamoDB Developer Guide.
- AttributesToGet []*string `min:"1" type:"list"`
-
- // Determines the read consistency model: If set to true, then the operation
- // uses strongly consistent reads; otherwise, the operation uses eventually
- // consistent reads.
- ConsistentRead *bool `type:"boolean"`
-
- // One or more substitution tokens for attribute names in an expression. The
- // following are some use cases for using ExpressionAttributeNames:
- //
- // * To access an attribute whose name conflicts with a DynamoDB reserved
- // word.
- //
- // * To create a placeholder for repeating occurrences of an attribute name
- // in an expression.
- //
- // * To prevent special characters in an attribute name from being misinterpreted
- // in an expression.
- //
- // Use the # character in an expression to dereference an attribute name. For
- // example, consider the following attribute name:
- //
- // * Percentile
- //
- // The name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
- // the following for ExpressionAttributeNames:
- //
- // * {"#P":"Percentile"}
- //
- // You could then use this substitution in an expression, as in this example:
- //
- // * #P = :val
- //
- // Tokens that begin with the : character are expression attribute values, which
- // are placeholders for the actual value at runtime.
- //
- // For more information on expression attribute names, see Specifying Item Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // A map of attribute names to AttributeValue objects, representing the primary
- // key of the item to retrieve.
- //
- // For the primary key, you must provide all of the attributes. For example,
- // with a simple primary key, you only need to provide a value for the partition
- // key. For a composite primary key, you must provide values for both the partition
- // key and the sort key.
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-
- // A string that identifies one or more attributes to retrieve from the table.
- // These attributes can include scalars, sets, or elements of a JSON document.
- // The attributes in the expression must be separated by commas.
- //
- // If no attribute names are specified, then all attributes are returned. If
- // any of the requested attributes are not found, they do not appear in the
- // result.
- //
- // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ProjectionExpression *string `type:"string"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // The name of the table containing the requested item. You can also provide
- // the Amazon Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetItemInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetItemInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetItemInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetItemInput"}
- if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
- }
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributesToGet sets the AttributesToGet field's value.
-func (s *GetItemInput) SetAttributesToGet(v []*string) *GetItemInput {
- s.AttributesToGet = v
- return s
-}
-
-// SetConsistentRead sets the ConsistentRead field's value.
-func (s *GetItemInput) SetConsistentRead(v bool) *GetItemInput {
- s.ConsistentRead = &v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *GetItemInput) SetExpressionAttributeNames(v map[string]*string) *GetItemInput {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetKey sets the Key field's value.
-func (s *GetItemInput) SetKey(v map[string]*AttributeValue) *GetItemInput {
- s.Key = v
- return s
-}
-
-// SetProjectionExpression sets the ProjectionExpression field's value.
-func (s *GetItemInput) SetProjectionExpression(v string) *GetItemInput {
- s.ProjectionExpression = &v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *GetItemInput) SetReturnConsumedCapacity(v string) *GetItemInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *GetItemInput) SetTableName(v string) *GetItemInput {
- s.TableName = &v
- return s
-}
-
-// Represents the output of a GetItem operation.
-type GetItemOutput struct {
- _ struct{} `type:"structure"`
-
- // The capacity units consumed by the GetItem operation. The data returned includes
- // the total provisioned throughput consumed, along with statistics for the
- // table and any indexes involved in the operation. ConsumedCapacity is only
- // returned if the ReturnConsumedCapacity parameter was specified. For more
- // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // A map of attribute names to AttributeValue objects, as specified by ProjectionExpression.
- Item map[string]*AttributeValue `type:"map"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetItemOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetItemOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *GetItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *GetItemOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItem sets the Item field's value.
-func (s *GetItemOutput) SetItem(v map[string]*AttributeValue) *GetItemOutput {
- s.Item = v
- return s
-}
-
-type GetResourcePolicyInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy
- // is attached. The resources you can specify include tables and streams.
- //
- // ResourceArn is a required field
- ResourceArn *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetResourcePolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetResourcePolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetResourcePolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetResourcePolicyInput"}
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
- if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *GetResourcePolicyInput) SetResourceArn(v string) *GetResourcePolicyInput {
- s.ResourceArn = &v
- return s
-}
-
-type GetResourcePolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // The resource-based policy document attached to the resource, which can be
- // a table or stream, in JSON format.
- Policy *string `type:"string"`
-
- // A unique string that represents the revision ID of the policy. If you're
- // comparing revision IDs, make sure to always use string comparison logic.
- RevisionId *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetResourcePolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetResourcePolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetPolicy sets the Policy field's value.
-func (s *GetResourcePolicyOutput) SetPolicy(v string) *GetResourcePolicyOutput {
- s.Policy = &v
- return s
-}
-
-// SetRevisionId sets the RevisionId field's value.
-func (s *GetResourcePolicyOutput) SetRevisionId(v string) *GetResourcePolicyOutput {
- s.RevisionId = &v
- return s
-}
-
-// Represents the properties of a global secondary index.
-type GlobalSecondaryIndex struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index. The name must be unique among all
- // other indexes on this table.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-
- // The complete key schema for a global secondary index, which consists of one
- // or more pairs of attribute names and key types:
- //
- // * HASH - partition key
- //
- // * RANGE - sort key
- //
- // The partition key of an item is also known as its hash attribute. The term
- // "hash attribute" derives from DynamoDB's usage of an internal hash function
- // to evenly distribute data items across partitions, based on their partition
- // key values.
- //
- // The sort key of an item is also known as its range attribute. The term "range
- // attribute" derives from the way DynamoDB stores items with the same partition
- // key physically close together, in sorted order by the sort key value.
- //
- // KeySchema is a required field
- KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
- // The maximum number of read and write units for the specified global secondary
- // index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
- // or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // Represents attributes that are copied (projected) from the table into the
- // global secondary index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected.
- //
- // Projection is a required field
- Projection *Projection `type:"structure" required:"true"`
-
- // Represents the provisioned throughput settings for the specified global secondary
- // index.
- //
- // For current minimum and maximum provisioned throughput values, see Service,
- // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
- // in the Amazon DynamoDB Developer Guide.
- ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndex) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndex) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GlobalSecondaryIndex) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndex"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.KeySchema == nil {
- invalidParams.Add(request.NewErrParamRequired("KeySchema"))
- }
- if s.KeySchema != nil && len(s.KeySchema) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
- }
- if s.Projection == nil {
- invalidParams.Add(request.NewErrParamRequired("Projection"))
- }
- if s.KeySchema != nil {
- for i, v := range s.KeySchema {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.Projection != nil {
- if err := s.Projection.Validate(); err != nil {
- invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
- }
- }
- if s.ProvisionedThroughput != nil {
- if err := s.ProvisionedThroughput.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *GlobalSecondaryIndex) SetIndexName(v string) *GlobalSecondaryIndex {
- s.IndexName = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *GlobalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndex {
- s.KeySchema = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *GlobalSecondaryIndex) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndex {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *GlobalSecondaryIndex) SetProjection(v *Projection) *GlobalSecondaryIndex {
- s.Projection = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *GlobalSecondaryIndex) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndex {
- s.ProvisionedThroughput = v
- return s
-}
-
-// Represents the auto scaling settings of a global secondary index for a global
-// table that will be modified.
-type GlobalSecondaryIndexAutoScalingUpdate struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index.
- IndexName *string `min:"3" type:"string"`
-
- // Represents the auto scaling settings to be modified for a global table or
- // global secondary index.
- ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndexAutoScalingUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndexAutoScalingUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GlobalSecondaryIndexAutoScalingUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexAutoScalingUpdate"}
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.ProvisionedWriteCapacityAutoScalingUpdate != nil {
- if err := s.ProvisionedWriteCapacityAutoScalingUpdate.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *GlobalSecondaryIndexAutoScalingUpdate) SetIndexName(v string) *GlobalSecondaryIndexAutoScalingUpdate {
- s.IndexName = &v
- return s
-}
-
-// SetProvisionedWriteCapacityAutoScalingUpdate sets the ProvisionedWriteCapacityAutoScalingUpdate field's value.
-func (s *GlobalSecondaryIndexAutoScalingUpdate) SetProvisionedWriteCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *GlobalSecondaryIndexAutoScalingUpdate {
- s.ProvisionedWriteCapacityAutoScalingUpdate = v
- return s
-}
-
-// Represents the properties of a global secondary index.
-type GlobalSecondaryIndexDescription struct {
- _ struct{} `type:"structure"`
-
- // Indicates whether the index is currently backfilling. Backfilling is the
- // process of reading items from the table and determining whether they can
- // be added to the index. (Not all items will qualify: For example, a partition
- // key cannot have any duplicate values.) If an item can be added to the index,
- // DynamoDB will do so. After all items have been processed, the backfilling
- // operation is complete and Backfilling is false.
- //
- // You can delete an index that is being created during the Backfilling phase
- // when IndexStatus is set to CREATING and Backfilling is true. You can't delete
- // the index that is being created when IndexStatus is set to CREATING and Backfilling
- // is false.
- //
- // For indexes that were created during a CreateTable operation, the Backfilling
- // attribute does not appear in the DescribeTable output.
- Backfilling *bool `type:"boolean"`
-
- // The Amazon Resource Name (ARN) that uniquely identifies the index.
- IndexArn *string `type:"string"`
-
- // The name of the global secondary index.
- IndexName *string `min:"3" type:"string"`
-
- // The total size of the specified index, in bytes. DynamoDB updates this value
- // approximately every six hours. Recent changes might not be reflected in this
- // value.
- IndexSizeBytes *int64 `type:"long"`
-
- // The current state of the global secondary index:
- //
- // * CREATING - The index is being created.
- //
- // * UPDATING - The index is being updated.
- //
- // * DELETING - The index is being deleted.
- //
- // * ACTIVE - The index is ready for use.
- IndexStatus *string `type:"string" enum:"IndexStatus"`
-
- // The number of items in the specified index. DynamoDB updates this value approximately
- // every six hours. Recent changes might not be reflected in this value.
- ItemCount *int64 `type:"long"`
-
- // The complete key schema for a global secondary index, which consists of one
- // or more pairs of attribute names and key types:
- //
- // * HASH - partition key
- //
- // * RANGE - sort key
- //
- // The partition key of an item is also known as its hash attribute. The term
- // "hash attribute" derives from DynamoDB's usage of an internal hash function
- // to evenly distribute data items across partitions, based on their partition
- // key values.
- //
- // The sort key of an item is also known as its range attribute. The term "range
- // attribute" derives from the way DynamoDB stores items with the same partition
- // key physically close together, in sorted order by the sort key value.
- KeySchema []*KeySchemaElement `min:"1" type:"list"`
-
- // The maximum number of read and write units for the specified global secondary
- // index. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
- // or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // Represents attributes that are copied (projected) from the table into the
- // global secondary index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected.
- Projection *Projection `type:"structure"`
-
- // Represents the provisioned throughput settings for the specified global secondary
- // index.
- //
- // For current minimum and maximum provisioned throughput values, see Service,
- // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
- // in the Amazon DynamoDB Developer Guide.
- ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndexDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndexDescription) GoString() string {
- return s.String()
-}
-
-// SetBackfilling sets the Backfilling field's value.
-func (s *GlobalSecondaryIndexDescription) SetBackfilling(v bool) *GlobalSecondaryIndexDescription {
- s.Backfilling = &v
- return s
-}
-
-// SetIndexArn sets the IndexArn field's value.
-func (s *GlobalSecondaryIndexDescription) SetIndexArn(v string) *GlobalSecondaryIndexDescription {
- s.IndexArn = &v
- return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *GlobalSecondaryIndexDescription) SetIndexName(v string) *GlobalSecondaryIndexDescription {
- s.IndexName = &v
- return s
-}
-
-// SetIndexSizeBytes sets the IndexSizeBytes field's value.
-func (s *GlobalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *GlobalSecondaryIndexDescription {
- s.IndexSizeBytes = &v
- return s
-}
-
-// SetIndexStatus sets the IndexStatus field's value.
-func (s *GlobalSecondaryIndexDescription) SetIndexStatus(v string) *GlobalSecondaryIndexDescription {
- s.IndexStatus = &v
- return s
-}
-
-// SetItemCount sets the ItemCount field's value.
-func (s *GlobalSecondaryIndexDescription) SetItemCount(v int64) *GlobalSecondaryIndexDescription {
- s.ItemCount = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *GlobalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexDescription {
- s.KeySchema = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *GlobalSecondaryIndexDescription) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndexDescription {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *GlobalSecondaryIndexDescription) SetProjection(v *Projection) *GlobalSecondaryIndexDescription {
- s.Projection = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *GlobalSecondaryIndexDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *GlobalSecondaryIndexDescription {
- s.ProvisionedThroughput = v
- return s
-}
-
-// Represents the properties of a global secondary index for the table when
-// the backup was created.
-type GlobalSecondaryIndexInfo struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index.
- IndexName *string `min:"3" type:"string"`
-
- // The complete key schema for a global secondary index, which consists of one
- // or more pairs of attribute names and key types:
- //
- // * HASH - partition key
- //
- // * RANGE - sort key
- //
- // The partition key of an item is also known as its hash attribute. The term
- // "hash attribute" derives from DynamoDB's usage of an internal hash function
- // to evenly distribute data items across partitions, based on their partition
- // key values.
- //
- // The sort key of an item is also known as its range attribute. The term "range
- // attribute" derives from the way DynamoDB stores items with the same partition
- // key physically close together, in sorted order by the sort key value.
- KeySchema []*KeySchemaElement `min:"1" type:"list"`
-
- // Sets the maximum number of read and write units for the specified on-demand
- // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
- // or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // Represents attributes that are copied (projected) from the table into the
- // global secondary index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected.
- Projection *Projection `type:"structure"`
-
- // Represents the provisioned throughput settings for the specified global secondary
- // index.
- ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndexInfo) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndexInfo) GoString() string {
- return s.String()
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *GlobalSecondaryIndexInfo) SetIndexName(v string) *GlobalSecondaryIndexInfo {
- s.IndexName = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *GlobalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *GlobalSecondaryIndexInfo {
- s.KeySchema = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *GlobalSecondaryIndexInfo) SetOnDemandThroughput(v *OnDemandThroughput) *GlobalSecondaryIndexInfo {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *GlobalSecondaryIndexInfo) SetProjection(v *Projection) *GlobalSecondaryIndexInfo {
- s.Projection = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *GlobalSecondaryIndexInfo) SetProvisionedThroughput(v *ProvisionedThroughput) *GlobalSecondaryIndexInfo {
- s.ProvisionedThroughput = v
- return s
-}
-
-// Represents one of the following:
-//
-// - A new global secondary index to be added to an existing table.
-//
-// - New provisioned throughput parameters for an existing global secondary
-// index.
-//
-// - An existing global secondary index to be removed from an existing table.
-type GlobalSecondaryIndexUpdate struct {
- _ struct{} `type:"structure"`
-
- // The parameters required for creating a global secondary index on an existing
- // table:
- //
- // * IndexName
- //
- // * KeySchema
- //
- // * AttributeDefinitions
- //
- // * Projection
- //
- // * ProvisionedThroughput
- Create *CreateGlobalSecondaryIndexAction `type:"structure"`
-
- // The name of an existing global secondary index to be removed.
- Delete *DeleteGlobalSecondaryIndexAction `type:"structure"`
-
- // The name of an existing global secondary index, along with new provisioned
- // throughput settings to be applied to that index.
- Update *UpdateGlobalSecondaryIndexAction `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndexUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalSecondaryIndexUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GlobalSecondaryIndexUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GlobalSecondaryIndexUpdate"}
- if s.Create != nil {
- if err := s.Create.Validate(); err != nil {
- invalidParams.AddNested("Create", err.(request.ErrInvalidParams))
- }
- }
- if s.Delete != nil {
- if err := s.Delete.Validate(); err != nil {
- invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
- }
- }
- if s.Update != nil {
- if err := s.Update.Validate(); err != nil {
- invalidParams.AddNested("Update", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetCreate sets the Create field's value.
-func (s *GlobalSecondaryIndexUpdate) SetCreate(v *CreateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate {
- s.Create = v
- return s
-}
-
-// SetDelete sets the Delete field's value.
-func (s *GlobalSecondaryIndexUpdate) SetDelete(v *DeleteGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate {
- s.Delete = v
- return s
-}
-
-// SetUpdate sets the Update field's value.
-func (s *GlobalSecondaryIndexUpdate) SetUpdate(v *UpdateGlobalSecondaryIndexAction) *GlobalSecondaryIndexUpdate {
- s.Update = v
- return s
-}
-
-// Represents the properties of a global table.
-type GlobalTable struct {
- _ struct{} `type:"structure"`
-
- // The global table name.
- GlobalTableName *string `min:"3" type:"string"`
-
- // The Regions where the global table has replicas.
- ReplicationGroup []*Replica `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTable) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTable) GoString() string {
- return s.String()
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *GlobalTable) SetGlobalTableName(v string) *GlobalTable {
- s.GlobalTableName = &v
- return s
-}
-
-// SetReplicationGroup sets the ReplicationGroup field's value.
-func (s *GlobalTable) SetReplicationGroup(v []*Replica) *GlobalTable {
- s.ReplicationGroup = v
- return s
-}
-
-// The specified global table already exists.
-type GlobalTableAlreadyExistsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTableAlreadyExistsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTableAlreadyExistsException) GoString() string {
- return s.String()
-}
-
-func newErrorGlobalTableAlreadyExistsException(v protocol.ResponseMetadata) error {
- return &GlobalTableAlreadyExistsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *GlobalTableAlreadyExistsException) Code() string {
- return "GlobalTableAlreadyExistsException"
-}
-
-// Message returns the exception's message.
-func (s *GlobalTableAlreadyExistsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *GlobalTableAlreadyExistsException) OrigErr() error {
- return nil
-}
-
-func (s *GlobalTableAlreadyExistsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *GlobalTableAlreadyExistsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *GlobalTableAlreadyExistsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Contains details about the global table.
-type GlobalTableDescription struct {
- _ struct{} `type:"structure"`
-
- // The creation time of the global table.
- CreationDateTime *time.Time `type:"timestamp"`
-
- // The unique identifier of the global table.
- GlobalTableArn *string `type:"string"`
-
- // The global table name.
- GlobalTableName *string `min:"3" type:"string"`
-
- // The current state of the global table:
- //
- // * CREATING - The global table is being created.
- //
- // * UPDATING - The global table is being updated.
- //
- // * DELETING - The global table is being deleted.
- //
- // * ACTIVE - The global table is ready for use.
- GlobalTableStatus *string `type:"string" enum:"GlobalTableStatus"`
-
- // The Regions where the global table has replicas.
- ReplicationGroup []*ReplicaDescription `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTableDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTableDescription) GoString() string {
- return s.String()
-}
-
-// SetCreationDateTime sets the CreationDateTime field's value.
-func (s *GlobalTableDescription) SetCreationDateTime(v time.Time) *GlobalTableDescription {
- s.CreationDateTime = &v
- return s
-}
-
-// SetGlobalTableArn sets the GlobalTableArn field's value.
-func (s *GlobalTableDescription) SetGlobalTableArn(v string) *GlobalTableDescription {
- s.GlobalTableArn = &v
- return s
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *GlobalTableDescription) SetGlobalTableName(v string) *GlobalTableDescription {
- s.GlobalTableName = &v
- return s
-}
-
-// SetGlobalTableStatus sets the GlobalTableStatus field's value.
-func (s *GlobalTableDescription) SetGlobalTableStatus(v string) *GlobalTableDescription {
- s.GlobalTableStatus = &v
- return s
-}
-
-// SetReplicationGroup sets the ReplicationGroup field's value.
-func (s *GlobalTableDescription) SetReplicationGroup(v []*ReplicaDescription) *GlobalTableDescription {
- s.ReplicationGroup = v
- return s
-}
-
-// Represents the settings of a global secondary index for a global table that
-// will be modified.
-type GlobalTableGlobalSecondaryIndexSettingsUpdate struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index. The name must be unique among all
- // other indexes on this table.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-
- // Auto scaling settings for managing a global secondary index's write capacity
- // units.
- ProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
-
- // The maximum number of writes consumed per second before DynamoDB returns
- // a ThrottlingException.
- ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTableGlobalSecondaryIndexSettingsUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GlobalTableGlobalSecondaryIndexSettingsUpdate"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.ProvisionedWriteCapacityUnits != nil && *s.ProvisionedWriteCapacityUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("ProvisionedWriteCapacityUnits", 1))
- }
- if s.ProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
- if err := s.ProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *GlobalTableGlobalSecondaryIndexSettingsUpdate {
- s.IndexName = &v
- return s
-}
-
-// SetProvisionedWriteCapacityAutoScalingSettingsUpdate sets the ProvisionedWriteCapacityAutoScalingSettingsUpdate field's value.
-func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *GlobalTableGlobalSecondaryIndexSettingsUpdate {
- s.ProvisionedWriteCapacityAutoScalingSettingsUpdate = v
- return s
-}
-
-// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value.
-func (s *GlobalTableGlobalSecondaryIndexSettingsUpdate) SetProvisionedWriteCapacityUnits(v int64) *GlobalTableGlobalSecondaryIndexSettingsUpdate {
- s.ProvisionedWriteCapacityUnits = &v
- return s
-}
-
-// The specified global table does not exist.
-type GlobalTableNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTableNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GlobalTableNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorGlobalTableNotFoundException(v protocol.ResponseMetadata) error {
- return &GlobalTableNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *GlobalTableNotFoundException) Code() string {
- return "GlobalTableNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *GlobalTableNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *GlobalTableNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *GlobalTableNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *GlobalTableNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *GlobalTableNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// DynamoDB rejected the request because you retried a request with a different
-// payload but with an idempotent token that was already used.
-type IdempotentParameterMismatchException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"Message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IdempotentParameterMismatchException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IdempotentParameterMismatchException) GoString() string {
- return s.String()
-}
-
-func newErrorIdempotentParameterMismatchException(v protocol.ResponseMetadata) error {
- return &IdempotentParameterMismatchException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *IdempotentParameterMismatchException) Code() string {
- return "IdempotentParameterMismatchException"
-}
-
-// Message returns the exception's message.
-func (s *IdempotentParameterMismatchException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *IdempotentParameterMismatchException) OrigErr() error {
- return nil
-}
-
-func (s *IdempotentParameterMismatchException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *IdempotentParameterMismatchException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *IdempotentParameterMismatchException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// There was a conflict when importing from the specified S3 source. This can
-// occur when the current import conflicts with a previous import request that
-// had the same client token.
-type ImportConflictException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportConflictException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportConflictException) GoString() string {
- return s.String()
-}
-
-func newErrorImportConflictException(v protocol.ResponseMetadata) error {
- return &ImportConflictException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ImportConflictException) Code() string {
- return "ImportConflictException"
-}
-
-// Message returns the exception's message.
-func (s *ImportConflictException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ImportConflictException) OrigErr() error {
- return nil
-}
-
-func (s *ImportConflictException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ImportConflictException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ImportConflictException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The specified import was not found.
-type ImportNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorImportNotFoundException(v protocol.ResponseMetadata) error {
- return &ImportNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ImportNotFoundException) Code() string {
- return "ImportNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *ImportNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ImportNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *ImportNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ImportNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ImportNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Summary information about the source file for the import.
-type ImportSummary struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with
- // this import task.
- CloudWatchLogGroupArn *string `min:"1" type:"string"`
-
- // The time at which this import task ended. (Does this include the successful
- // complete creation of the table it was imported to?)
- EndTime *time.Time `type:"timestamp"`
-
- // The Amazon Resource Number (ARN) corresponding to the import request.
- ImportArn *string `min:"37" type:"string"`
-
- // The status of the import operation.
- ImportStatus *string `type:"string" enum:"ImportStatus"`
-
- // The format of the source data. Valid values are CSV, DYNAMODB_JSON or ION.
- InputFormat *string `type:"string" enum:"InputFormat"`
-
- // The path and S3 bucket of the source file that is being imported. This includes
- // the S3Bucket (required), S3KeyPrefix (optional) and S3BucketOwner (optional
- // if the bucket is owned by the requester).
- S3BucketSource *S3BucketSource `type:"structure"`
-
- // The time at which this import task began.
- StartTime *time.Time `type:"timestamp"`
-
- // The Amazon Resource Number (ARN) of the table being imported into.
- TableArn *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportSummary) GoString() string {
- return s.String()
-}
-
-// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value.
-func (s *ImportSummary) SetCloudWatchLogGroupArn(v string) *ImportSummary {
- s.CloudWatchLogGroupArn = &v
- return s
-}
-
-// SetEndTime sets the EndTime field's value.
-func (s *ImportSummary) SetEndTime(v time.Time) *ImportSummary {
- s.EndTime = &v
- return s
-}
-
-// SetImportArn sets the ImportArn field's value.
-func (s *ImportSummary) SetImportArn(v string) *ImportSummary {
- s.ImportArn = &v
- return s
-}
-
-// SetImportStatus sets the ImportStatus field's value.
-func (s *ImportSummary) SetImportStatus(v string) *ImportSummary {
- s.ImportStatus = &v
- return s
-}
-
-// SetInputFormat sets the InputFormat field's value.
-func (s *ImportSummary) SetInputFormat(v string) *ImportSummary {
- s.InputFormat = &v
- return s
-}
-
-// SetS3BucketSource sets the S3BucketSource field's value.
-func (s *ImportSummary) SetS3BucketSource(v *S3BucketSource) *ImportSummary {
- s.S3BucketSource = v
- return s
-}
-
-// SetStartTime sets the StartTime field's value.
-func (s *ImportSummary) SetStartTime(v time.Time) *ImportSummary {
- s.StartTime = &v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *ImportSummary) SetTableArn(v string) *ImportSummary {
- s.TableArn = &v
- return s
-}
-
-// Represents the properties of the table being imported into.
-type ImportTableDescription struct {
- _ struct{} `type:"structure"`
-
- // The client token that was provided for the import task. Reusing the client
- // token on retry makes a call to ImportTable idempotent.
- ClientToken *string `type:"string"`
-
- // The Amazon Resource Number (ARN) of the Cloudwatch Log Group associated with
- // the target table.
- CloudWatchLogGroupArn *string `min:"1" type:"string"`
-
- // The time at which the creation of the table associated with this import task
- // completed.
- EndTime *time.Time `type:"timestamp"`
-
- // The number of errors occurred on importing the source file into the target
- // table.
- ErrorCount *int64 `type:"long"`
-
- // The error code corresponding to the failure that the import job ran into
- // during execution.
- FailureCode *string `type:"string"`
-
- // The error message corresponding to the failure that the import job ran into
- // during execution.
- FailureMessage *string `type:"string"`
-
- // The Amazon Resource Number (ARN) corresponding to the import request.
- ImportArn *string `min:"37" type:"string"`
-
- // The status of the import.
- ImportStatus *string `type:"string" enum:"ImportStatus"`
-
- // The number of items successfully imported into the new table.
- ImportedItemCount *int64 `type:"long"`
-
- // The compression options for the data that has been imported into the target
- // table. The values are NONE, GZIP, or ZSTD.
- InputCompressionType *string `type:"string" enum:"InputCompressionType"`
-
- // The format of the source data going into the target table.
- InputFormat *string `type:"string" enum:"InputFormat"`
-
- // The format options for the data that was imported into the target table.
- // There is one value, CsvOption.
- InputFormatOptions *InputFormatOptions `type:"structure"`
-
- // The total number of items processed from the source file.
- ProcessedItemCount *int64 `type:"long"`
-
- // The total size of data processed from the source file, in Bytes.
- ProcessedSizeBytes *int64 `type:"long"`
-
- // Values for the S3 bucket the source file is imported from. Includes bucket
- // name (required), key prefix (optional) and bucket account owner ID (optional).
- S3BucketSource *S3BucketSource `type:"structure"`
-
- // The time when this import task started.
- StartTime *time.Time `type:"timestamp"`
-
- // The Amazon Resource Number (ARN) of the table being imported into.
- TableArn *string `min:"1" type:"string"`
-
- // The parameters for the new table that is being imported into.
- TableCreationParameters *TableCreationParameters `type:"structure"`
-
- // The table id corresponding to the table created by import table process.
- TableId *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportTableDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportTableDescription) GoString() string {
- return s.String()
-}
-
-// SetClientToken sets the ClientToken field's value.
-func (s *ImportTableDescription) SetClientToken(v string) *ImportTableDescription {
- s.ClientToken = &v
- return s
-}
-
-// SetCloudWatchLogGroupArn sets the CloudWatchLogGroupArn field's value.
-func (s *ImportTableDescription) SetCloudWatchLogGroupArn(v string) *ImportTableDescription {
- s.CloudWatchLogGroupArn = &v
- return s
-}
-
-// SetEndTime sets the EndTime field's value.
-func (s *ImportTableDescription) SetEndTime(v time.Time) *ImportTableDescription {
- s.EndTime = &v
- return s
-}
-
-// SetErrorCount sets the ErrorCount field's value.
-func (s *ImportTableDescription) SetErrorCount(v int64) *ImportTableDescription {
- s.ErrorCount = &v
- return s
-}
-
-// SetFailureCode sets the FailureCode field's value.
-func (s *ImportTableDescription) SetFailureCode(v string) *ImportTableDescription {
- s.FailureCode = &v
- return s
-}
-
-// SetFailureMessage sets the FailureMessage field's value.
-func (s *ImportTableDescription) SetFailureMessage(v string) *ImportTableDescription {
- s.FailureMessage = &v
- return s
-}
-
-// SetImportArn sets the ImportArn field's value.
-func (s *ImportTableDescription) SetImportArn(v string) *ImportTableDescription {
- s.ImportArn = &v
- return s
-}
-
-// SetImportStatus sets the ImportStatus field's value.
-func (s *ImportTableDescription) SetImportStatus(v string) *ImportTableDescription {
- s.ImportStatus = &v
- return s
-}
-
-// SetImportedItemCount sets the ImportedItemCount field's value.
-func (s *ImportTableDescription) SetImportedItemCount(v int64) *ImportTableDescription {
- s.ImportedItemCount = &v
- return s
-}
-
-// SetInputCompressionType sets the InputCompressionType field's value.
-func (s *ImportTableDescription) SetInputCompressionType(v string) *ImportTableDescription {
- s.InputCompressionType = &v
- return s
-}
-
-// SetInputFormat sets the InputFormat field's value.
-func (s *ImportTableDescription) SetInputFormat(v string) *ImportTableDescription {
- s.InputFormat = &v
- return s
-}
-
-// SetInputFormatOptions sets the InputFormatOptions field's value.
-func (s *ImportTableDescription) SetInputFormatOptions(v *InputFormatOptions) *ImportTableDescription {
- s.InputFormatOptions = v
- return s
-}
-
-// SetProcessedItemCount sets the ProcessedItemCount field's value.
-func (s *ImportTableDescription) SetProcessedItemCount(v int64) *ImportTableDescription {
- s.ProcessedItemCount = &v
- return s
-}
-
-// SetProcessedSizeBytes sets the ProcessedSizeBytes field's value.
-func (s *ImportTableDescription) SetProcessedSizeBytes(v int64) *ImportTableDescription {
- s.ProcessedSizeBytes = &v
- return s
-}
-
-// SetS3BucketSource sets the S3BucketSource field's value.
-func (s *ImportTableDescription) SetS3BucketSource(v *S3BucketSource) *ImportTableDescription {
- s.S3BucketSource = v
- return s
-}
-
-// SetStartTime sets the StartTime field's value.
-func (s *ImportTableDescription) SetStartTime(v time.Time) *ImportTableDescription {
- s.StartTime = &v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *ImportTableDescription) SetTableArn(v string) *ImportTableDescription {
- s.TableArn = &v
- return s
-}
-
-// SetTableCreationParameters sets the TableCreationParameters field's value.
-func (s *ImportTableDescription) SetTableCreationParameters(v *TableCreationParameters) *ImportTableDescription {
- s.TableCreationParameters = v
- return s
-}
-
-// SetTableId sets the TableId field's value.
-func (s *ImportTableDescription) SetTableId(v string) *ImportTableDescription {
- s.TableId = &v
- return s
-}
-
-type ImportTableInput struct {
- _ struct{} `type:"structure"`
-
- // Providing a ClientToken makes the call to ImportTableInput idempotent, meaning
- // that multiple identical calls have the same effect as one single call.
- //
- // A client token is valid for 8 hours after the first request that uses it
- // is completed. After 8 hours, any request with the same client token is treated
- // as a new request. Do not resubmit the same request with the same client token
- // for more than 8 hours, or the result might not be idempotent.
- //
- // If you submit a request with the same client token but a change in other
- // parameters within the 8-hour idempotency window, DynamoDB returns an IdempotentParameterMismatch
- // exception.
- ClientToken *string `type:"string" idempotencyToken:"true"`
-
- // Type of compression to be used on the input coming from the imported table.
- InputCompressionType *string `type:"string" enum:"InputCompressionType"`
-
- // The format of the source data. Valid values for ImportFormat are CSV, DYNAMODB_JSON
- // or ION.
- //
- // InputFormat is a required field
- InputFormat *string `type:"string" required:"true" enum:"InputFormat"`
-
- // Additional properties that specify how the input is formatted,
- InputFormatOptions *InputFormatOptions `type:"structure"`
-
- // The S3 bucket that provides the source for the import.
- //
- // S3BucketSource is a required field
- S3BucketSource *S3BucketSource `type:"structure" required:"true"`
-
- // Parameters for the table to import the data into.
- //
- // TableCreationParameters is a required field
- TableCreationParameters *TableCreationParameters `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ImportTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ImportTableInput"}
- if s.InputFormat == nil {
- invalidParams.Add(request.NewErrParamRequired("InputFormat"))
- }
- if s.S3BucketSource == nil {
- invalidParams.Add(request.NewErrParamRequired("S3BucketSource"))
- }
- if s.TableCreationParameters == nil {
- invalidParams.Add(request.NewErrParamRequired("TableCreationParameters"))
- }
- if s.InputFormatOptions != nil {
- if err := s.InputFormatOptions.Validate(); err != nil {
- invalidParams.AddNested("InputFormatOptions", err.(request.ErrInvalidParams))
- }
- }
- if s.S3BucketSource != nil {
- if err := s.S3BucketSource.Validate(); err != nil {
- invalidParams.AddNested("S3BucketSource", err.(request.ErrInvalidParams))
- }
- }
- if s.TableCreationParameters != nil {
- if err := s.TableCreationParameters.Validate(); err != nil {
- invalidParams.AddNested("TableCreationParameters", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetClientToken sets the ClientToken field's value.
-func (s *ImportTableInput) SetClientToken(v string) *ImportTableInput {
- s.ClientToken = &v
- return s
-}
-
-// SetInputCompressionType sets the InputCompressionType field's value.
-func (s *ImportTableInput) SetInputCompressionType(v string) *ImportTableInput {
- s.InputCompressionType = &v
- return s
-}
-
-// SetInputFormat sets the InputFormat field's value.
-func (s *ImportTableInput) SetInputFormat(v string) *ImportTableInput {
- s.InputFormat = &v
- return s
-}
-
-// SetInputFormatOptions sets the InputFormatOptions field's value.
-func (s *ImportTableInput) SetInputFormatOptions(v *InputFormatOptions) *ImportTableInput {
- s.InputFormatOptions = v
- return s
-}
-
-// SetS3BucketSource sets the S3BucketSource field's value.
-func (s *ImportTableInput) SetS3BucketSource(v *S3BucketSource) *ImportTableInput {
- s.S3BucketSource = v
- return s
-}
-
-// SetTableCreationParameters sets the TableCreationParameters field's value.
-func (s *ImportTableInput) SetTableCreationParameters(v *TableCreationParameters) *ImportTableInput {
- s.TableCreationParameters = v
- return s
-}
-
-type ImportTableOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of the table created for the import, and parameters
- // of the import. The import parameters include import status, how many items
- // were processed, and how many errors were encountered.
- //
- // ImportTableDescription is a required field
- ImportTableDescription *ImportTableDescription `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ImportTableOutput) GoString() string {
- return s.String()
-}
-
-// SetImportTableDescription sets the ImportTableDescription field's value.
-func (s *ImportTableOutput) SetImportTableDescription(v *ImportTableDescription) *ImportTableOutput {
- s.ImportTableDescription = v
- return s
-}
-
-// Optional object containing the parameters specific to an incremental export.
-type IncrementalExportSpecification struct {
- _ struct{} `type:"structure"`
-
- // Time in the past which provides the inclusive start range for the export
- // table's data, counted in seconds from the start of the Unix epoch. The incremental
- // export will reflect the table's state including and after this point in time.
- ExportFromTime *time.Time `type:"timestamp"`
-
- // Time in the past which provides the exclusive end range for the export table's
- // data, counted in seconds from the start of the Unix epoch. The incremental
- // export will reflect the table's state just prior to this point in time. If
- // this is not provided, the latest time with data available will be used.
- ExportToTime *time.Time `type:"timestamp"`
-
- // The view type that was chosen for the export. Valid values are NEW_AND_OLD_IMAGES
- // and NEW_IMAGES. The default value is NEW_AND_OLD_IMAGES.
- ExportViewType *string `type:"string" enum:"ExportViewType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IncrementalExportSpecification) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IncrementalExportSpecification) GoString() string {
- return s.String()
-}
-
-// SetExportFromTime sets the ExportFromTime field's value.
-func (s *IncrementalExportSpecification) SetExportFromTime(v time.Time) *IncrementalExportSpecification {
- s.ExportFromTime = &v
- return s
-}
-
-// SetExportToTime sets the ExportToTime field's value.
-func (s *IncrementalExportSpecification) SetExportToTime(v time.Time) *IncrementalExportSpecification {
- s.ExportToTime = &v
- return s
-}
-
-// SetExportViewType sets the ExportViewType field's value.
-func (s *IncrementalExportSpecification) SetExportViewType(v string) *IncrementalExportSpecification {
- s.ExportViewType = &v
- return s
-}
-
-// The operation tried to access a nonexistent index.
-type IndexNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IndexNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s IndexNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorIndexNotFoundException(v protocol.ResponseMetadata) error {
- return &IndexNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *IndexNotFoundException) Code() string {
- return "IndexNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *IndexNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *IndexNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *IndexNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *IndexNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *IndexNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The format options for the data that was imported into the target table.
-// There is one value, CsvOption.
-type InputFormatOptions struct {
- _ struct{} `type:"structure"`
-
- // The options for imported source files in CSV format. The values are Delimiter
- // and HeaderList.
- Csv *CsvOptions `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InputFormatOptions) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InputFormatOptions) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *InputFormatOptions) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "InputFormatOptions"}
- if s.Csv != nil {
- if err := s.Csv.Validate(); err != nil {
- invalidParams.AddNested("Csv", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetCsv sets the Csv field's value.
-func (s *InputFormatOptions) SetCsv(v *CsvOptions) *InputFormatOptions {
- s.Csv = v
- return s
-}
-
-// An error occurred on the server side.
-type InternalServerError struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The server encountered an internal error trying to fulfill the request.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InternalServerError) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InternalServerError) GoString() string {
- return s.String()
-}
-
-func newErrorInternalServerError(v protocol.ResponseMetadata) error {
- return &InternalServerError{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InternalServerError) Code() string {
- return "InternalServerError"
-}
-
-// Message returns the exception's message.
-func (s *InternalServerError) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InternalServerError) OrigErr() error {
- return nil
-}
-
-func (s *InternalServerError) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InternalServerError) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InternalServerError) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The specified ExportTime is outside of the point in time recovery window.
-type InvalidExportTimeException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidExportTimeException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidExportTimeException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidExportTimeException(v protocol.ResponseMetadata) error {
- return &InvalidExportTimeException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidExportTimeException) Code() string {
- return "InvalidExportTimeException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidExportTimeException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidExportTimeException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidExportTimeException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidExportTimeException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidExportTimeException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
-// and LatestRestorableDateTime.
-type InvalidRestoreTimeException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRestoreTimeException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRestoreTimeException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidRestoreTimeException(v protocol.ResponseMetadata) error {
- return &InvalidRestoreTimeException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidRestoreTimeException) Code() string {
- return "InvalidRestoreTimeException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidRestoreTimeException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidRestoreTimeException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidRestoreTimeException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidRestoreTimeException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidRestoreTimeException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Information about item collections, if any, that were affected by the operation.
-// ItemCollectionMetrics is only returned if the request asked for it. If the
-// table does not have any local secondary indexes, this information is not
-// returned in the response.
-type ItemCollectionMetrics struct {
- _ struct{} `type:"structure"`
-
- // The partition key value of the item collection. This value is the same as
- // the partition key value of the item.
- ItemCollectionKey map[string]*AttributeValue `type:"map"`
-
- // An estimate of item collection size, in gigabytes. This value is a two-element
- // array containing a lower bound and an upper bound for the estimate. The estimate
- // includes the size of all the items in the table, plus the size of all attributes
- // projected into all of the local secondary indexes on that table. Use this
- // estimate to measure whether a local secondary index is approaching its size
- // limit.
- //
- // The estimate is subject to change over time; therefore, do not rely on the
- // precision or accuracy of the estimate.
- SizeEstimateRangeGB []*float64 `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ItemCollectionMetrics) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ItemCollectionMetrics) GoString() string {
- return s.String()
-}
-
-// SetItemCollectionKey sets the ItemCollectionKey field's value.
-func (s *ItemCollectionMetrics) SetItemCollectionKey(v map[string]*AttributeValue) *ItemCollectionMetrics {
- s.ItemCollectionKey = v
- return s
-}
-
-// SetSizeEstimateRangeGB sets the SizeEstimateRangeGB field's value.
-func (s *ItemCollectionMetrics) SetSizeEstimateRangeGB(v []*float64) *ItemCollectionMetrics {
- s.SizeEstimateRangeGB = v
- return s
-}
-
-// An item collection is too large. This exception is only returned for tables
-// that have one or more local secondary indexes.
-type ItemCollectionSizeLimitExceededException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The total size of an item collection has exceeded the maximum limit of 10
- // gigabytes.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ItemCollectionSizeLimitExceededException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ItemCollectionSizeLimitExceededException) GoString() string {
- return s.String()
-}
-
-func newErrorItemCollectionSizeLimitExceededException(v protocol.ResponseMetadata) error {
- return &ItemCollectionSizeLimitExceededException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ItemCollectionSizeLimitExceededException) Code() string {
- return "ItemCollectionSizeLimitExceededException"
-}
-
-// Message returns the exception's message.
-func (s *ItemCollectionSizeLimitExceededException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ItemCollectionSizeLimitExceededException) OrigErr() error {
- return nil
-}
-
-func (s *ItemCollectionSizeLimitExceededException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ItemCollectionSizeLimitExceededException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ItemCollectionSizeLimitExceededException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Details for the requested item.
-type ItemResponse struct {
- _ struct{} `type:"structure"`
-
- // Map of attribute data consisting of the data type and attribute value.
- Item map[string]*AttributeValue `type:"map"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ItemResponse) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ItemResponse) GoString() string {
- return s.String()
-}
-
-// SetItem sets the Item field's value.
-func (s *ItemResponse) SetItem(v map[string]*AttributeValue) *ItemResponse {
- s.Item = v
- return s
-}
-
-// Represents a single element of a key schema. A key schema specifies the attributes
-// that make up the primary key of a table, or the key attributes of an index.
-//
-// A KeySchemaElement represents exactly one attribute of the primary key. For
-// example, a simple primary key would be represented by one KeySchemaElement
-// (for the partition key). A composite primary key would require one KeySchemaElement
-// for the partition key, and another KeySchemaElement for the sort key.
-//
-// A KeySchemaElement must be a scalar, top-level attribute (not a nested attribute).
-// The data type must be one of String, Number, or Binary. The attribute cannot
-// be nested within a List or a Map.
-type KeySchemaElement struct {
- _ struct{} `type:"structure"`
-
- // The name of a key attribute.
- //
- // AttributeName is a required field
- AttributeName *string `min:"1" type:"string" required:"true"`
-
- // The role that this key attribute will assume:
- //
- // * HASH - partition key
- //
- // * RANGE - sort key
- //
- // The partition key of an item is also known as its hash attribute. The term
- // "hash attribute" derives from DynamoDB's usage of an internal hash function
- // to evenly distribute data items across partitions, based on their partition
- // key values.
- //
- // The sort key of an item is also known as its range attribute. The term "range
- // attribute" derives from the way DynamoDB stores items with the same partition
- // key physically close together, in sorted order by the sort key value.
- //
- // KeyType is a required field
- KeyType *string `type:"string" required:"true" enum:"KeyType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KeySchemaElement) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KeySchemaElement) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *KeySchemaElement) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "KeySchemaElement"}
- if s.AttributeName == nil {
- invalidParams.Add(request.NewErrParamRequired("AttributeName"))
- }
- if s.AttributeName != nil && len(*s.AttributeName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
- }
- if s.KeyType == nil {
- invalidParams.Add(request.NewErrParamRequired("KeyType"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributeName sets the AttributeName field's value.
-func (s *KeySchemaElement) SetAttributeName(v string) *KeySchemaElement {
- s.AttributeName = &v
- return s
-}
-
-// SetKeyType sets the KeyType field's value.
-func (s *KeySchemaElement) SetKeyType(v string) *KeySchemaElement {
- s.KeyType = &v
- return s
-}
-
-// Represents a set of primary keys and, for each key, the attributes to retrieve
-// from the table.
-//
-// For each primary key, you must provide all of the key attributes. For example,
-// with a simple primary key, you only need to provide the partition key. For
-// a composite primary key, you must provide both the partition key and the
-// sort key.
-type KeysAndAttributes struct {
- _ struct{} `type:"structure"`
-
- // This is a legacy parameter. Use ProjectionExpression instead. For more information,
- // see Legacy Conditional Parameters (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.html)
- // in the Amazon DynamoDB Developer Guide.
- AttributesToGet []*string `min:"1" type:"list"`
-
- // The consistency of a read operation. If set to true, then a strongly consistent
- // read is used; otherwise, an eventually consistent read is used.
- ConsistentRead *bool `type:"boolean"`
-
- // One or more substitution tokens for attribute names in an expression. The
- // following are some use cases for using ExpressionAttributeNames:
- //
- // * To access an attribute whose name conflicts with a DynamoDB reserved
- // word.
- //
- // * To create a placeholder for repeating occurrences of an attribute name
- // in an expression.
- //
- // * To prevent special characters in an attribute name from being misinterpreted
- // in an expression.
- //
- // Use the # character in an expression to dereference an attribute name. For
- // example, consider the following attribute name:
- //
- // * Percentile
- //
- // The name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
- // the following for ExpressionAttributeNames:
- //
- // * {"#P":"Percentile"}
- //
- // You could then use this substitution in an expression, as in this example:
- //
- // * #P = :val
- //
- // Tokens that begin with the : character are expression attribute values, which
- // are placeholders for the actual value at runtime.
- //
- // For more information on expression attribute names, see Accessing Item Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // The primary key attribute values that define the items and the attributes
- // associated with the items.
- //
- // Keys is a required field
- Keys []map[string]*AttributeValue `min:"1" type:"list" required:"true"`
-
- // A string that identifies one or more attributes to retrieve from the table.
- // These attributes can include scalars, sets, or elements of a JSON document.
- // The attributes in the ProjectionExpression must be separated by commas.
- //
- // If no attribute names are specified, then all attributes will be returned.
- // If any of the requested attributes are not found, they will not appear in
- // the result.
- //
- // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ProjectionExpression *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KeysAndAttributes) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KeysAndAttributes) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *KeysAndAttributes) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "KeysAndAttributes"}
- if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
- }
- if s.Keys == nil {
- invalidParams.Add(request.NewErrParamRequired("Keys"))
- }
- if s.Keys != nil && len(s.Keys) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Keys", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributesToGet sets the AttributesToGet field's value.
-func (s *KeysAndAttributes) SetAttributesToGet(v []*string) *KeysAndAttributes {
- s.AttributesToGet = v
- return s
-}
-
-// SetConsistentRead sets the ConsistentRead field's value.
-func (s *KeysAndAttributes) SetConsistentRead(v bool) *KeysAndAttributes {
- s.ConsistentRead = &v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *KeysAndAttributes) SetExpressionAttributeNames(v map[string]*string) *KeysAndAttributes {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetKeys sets the Keys field's value.
-func (s *KeysAndAttributes) SetKeys(v []map[string]*AttributeValue) *KeysAndAttributes {
- s.Keys = v
- return s
-}
-
-// SetProjectionExpression sets the ProjectionExpression field's value.
-func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes {
- s.ProjectionExpression = &v
- return s
-}
-
-// Describes a Kinesis data stream destination.
-type KinesisDataStreamDestination struct {
- _ struct{} `type:"structure"`
-
- // The precision of the Kinesis data stream timestamp. The values are either
- // MILLISECOND or MICROSECOND.
- ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"`
-
- // The current status of replication.
- DestinationStatus *string `type:"string" enum:"DestinationStatus"`
-
- // The human-readable string that corresponds to the replica status.
- DestinationStatusDescription *string `type:"string"`
-
- // The ARN for a specific Kinesis data stream.
- StreamArn *string `min:"37" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KinesisDataStreamDestination) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s KinesisDataStreamDestination) GoString() string {
- return s.String()
-}
-
-// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value.
-func (s *KinesisDataStreamDestination) SetApproximateCreationDateTimePrecision(v string) *KinesisDataStreamDestination {
- s.ApproximateCreationDateTimePrecision = &v
- return s
-}
-
-// SetDestinationStatus sets the DestinationStatus field's value.
-func (s *KinesisDataStreamDestination) SetDestinationStatus(v string) *KinesisDataStreamDestination {
- s.DestinationStatus = &v
- return s
-}
-
-// SetDestinationStatusDescription sets the DestinationStatusDescription field's value.
-func (s *KinesisDataStreamDestination) SetDestinationStatusDescription(v string) *KinesisDataStreamDestination {
- s.DestinationStatusDescription = &v
- return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *KinesisDataStreamDestination) SetStreamArn(v string) *KinesisDataStreamDestination {
- s.StreamArn = &v
- return s
-}
-
-// There is no limit to the number of daily on-demand backups that can be taken.
-//
-// For most purposes, up to 500 simultaneous table operations are allowed per
-// account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
-// RestoreTableFromBackup, and RestoreTableToPointInTime.
-//
-// When you are creating a table with one or more secondary indexes, you can
-// have up to 250 such requests running at a time. However, if the table or
-// index specifications are complex, then DynamoDB might temporarily reduce
-// the number of concurrent operations.
-//
-// When importing into DynamoDB, up to 50 simultaneous import table operations
-// are allowed per account.
-//
-// There is a soft account quota of 2,500 tables.
-//
-// GetRecords was called with a value of more than 1000 for the limit request
-// parameter.
-//
-// More than 2 processes are reading from the same streams shard at the same
-// time. Exceeding this limit may result in request throttling.
-type LimitExceededException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Too many operations for a given subscriber.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LimitExceededException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LimitExceededException) GoString() string {
- return s.String()
-}
-
-func newErrorLimitExceededException(v protocol.ResponseMetadata) error {
- return &LimitExceededException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *LimitExceededException) Code() string {
- return "LimitExceededException"
-}
-
-// Message returns the exception's message.
-func (s *LimitExceededException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *LimitExceededException) OrigErr() error {
- return nil
-}
-
-func (s *LimitExceededException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *LimitExceededException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *LimitExceededException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type ListBackupsInput struct {
- _ struct{} `type:"structure"`
-
- // The backups from the table specified by BackupType are listed.
- //
- // Where BackupType can be:
- //
- // * USER - On-demand backup created by you. (The default setting if no other
- // backup types are specified.)
- //
- // * SYSTEM - On-demand backup automatically created by DynamoDB.
- //
- // * ALL - All types of on-demand backups (USER and SYSTEM).
- BackupType *string `type:"string" enum:"BackupTypeFilter"`
-
- // LastEvaluatedBackupArn is the Amazon Resource Name (ARN) of the backup last
- // evaluated when the current page of results was returned, inclusive of the
- // current page of results. This value may be specified as the ExclusiveStartBackupArn
- // of a new ListBackups operation in order to fetch the next page of results.
- ExclusiveStartBackupArn *string `min:"37" type:"string"`
-
- // Maximum number of backups to return at once.
- Limit *int64 `min:"1" type:"integer"`
-
- // Lists the backups from the table specified in TableName. You can also provide
- // the Amazon Resource Name (ARN) of the table in this parameter.
- TableName *string `min:"1" type:"string"`
-
- // Only backups created after this time are listed. TimeRangeLowerBound is inclusive.
- TimeRangeLowerBound *time.Time `type:"timestamp"`
-
- // Only backups created before this time are listed. TimeRangeUpperBound is
- // exclusive.
- TimeRangeUpperBound *time.Time `type:"timestamp"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListBackupsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListBackupsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListBackupsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListBackupsInput"}
- if s.ExclusiveStartBackupArn != nil && len(*s.ExclusiveStartBackupArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartBackupArn", 37))
- }
- if s.Limit != nil && *s.Limit < 1 {
- invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetBackupType sets the BackupType field's value.
-func (s *ListBackupsInput) SetBackupType(v string) *ListBackupsInput {
- s.BackupType = &v
- return s
-}
-
-// SetExclusiveStartBackupArn sets the ExclusiveStartBackupArn field's value.
-func (s *ListBackupsInput) SetExclusiveStartBackupArn(v string) *ListBackupsInput {
- s.ExclusiveStartBackupArn = &v
- return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *ListBackupsInput) SetLimit(v int64) *ListBackupsInput {
- s.Limit = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *ListBackupsInput) SetTableName(v string) *ListBackupsInput {
- s.TableName = &v
- return s
-}
-
-// SetTimeRangeLowerBound sets the TimeRangeLowerBound field's value.
-func (s *ListBackupsInput) SetTimeRangeLowerBound(v time.Time) *ListBackupsInput {
- s.TimeRangeLowerBound = &v
- return s
-}
-
-// SetTimeRangeUpperBound sets the TimeRangeUpperBound field's value.
-func (s *ListBackupsInput) SetTimeRangeUpperBound(v time.Time) *ListBackupsInput {
- s.TimeRangeUpperBound = &v
- return s
-}
-
-type ListBackupsOutput struct {
- _ struct{} `type:"structure"`
-
- // List of BackupSummary objects.
- BackupSummaries []*BackupSummary `type:"list"`
-
- // The ARN of the backup last evaluated when the current page of results was
- // returned, inclusive of the current page of results. This value may be specified
- // as the ExclusiveStartBackupArn of a new ListBackups operation in order to
- // fetch the next page of results.
- //
- // If LastEvaluatedBackupArn is empty, then the last page of results has been
- // processed and there are no more results to be retrieved.
- //
- // If LastEvaluatedBackupArn is not empty, this may or may not indicate that
- // there is more data to be returned. All results are guaranteed to have been
- // returned if and only if no value for LastEvaluatedBackupArn is returned.
- LastEvaluatedBackupArn *string `min:"37" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListBackupsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListBackupsOutput) GoString() string {
- return s.String()
-}
-
-// SetBackupSummaries sets the BackupSummaries field's value.
-func (s *ListBackupsOutput) SetBackupSummaries(v []*BackupSummary) *ListBackupsOutput {
- s.BackupSummaries = v
- return s
-}
-
-// SetLastEvaluatedBackupArn sets the LastEvaluatedBackupArn field's value.
-func (s *ListBackupsOutput) SetLastEvaluatedBackupArn(v string) *ListBackupsOutput {
- s.LastEvaluatedBackupArn = &v
- return s
-}
-
-type ListContributorInsightsInput struct {
- _ struct{} `type:"structure"`
-
- // Maximum number of results to return per page.
- MaxResults *int64 `type:"integer"`
-
- // A token to for the desired page, if there is one.
- NextToken *string `type:"string"`
-
- // The name of the table. You can also provide the Amazon Resource Name (ARN)
- // of the table in this parameter.
- TableName *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListContributorInsightsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListContributorInsightsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListContributorInsightsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListContributorInsightsInput"}
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *ListContributorInsightsInput) SetMaxResults(v int64) *ListContributorInsightsInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListContributorInsightsInput) SetNextToken(v string) *ListContributorInsightsInput {
- s.NextToken = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *ListContributorInsightsInput) SetTableName(v string) *ListContributorInsightsInput {
- s.TableName = &v
- return s
-}
-
-type ListContributorInsightsOutput struct {
- _ struct{} `type:"structure"`
-
- // A list of ContributorInsightsSummary.
- ContributorInsightsSummaries []*ContributorInsightsSummary `type:"list"`
-
- // A token to go to the next page if there is one.
- NextToken *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListContributorInsightsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListContributorInsightsOutput) GoString() string {
- return s.String()
-}
-
-// SetContributorInsightsSummaries sets the ContributorInsightsSummaries field's value.
-func (s *ListContributorInsightsOutput) SetContributorInsightsSummaries(v []*ContributorInsightsSummary) *ListContributorInsightsOutput {
- s.ContributorInsightsSummaries = v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListContributorInsightsOutput) SetNextToken(v string) *ListContributorInsightsOutput {
- s.NextToken = &v
- return s
-}
-
-type ListExportsInput struct {
- _ struct{} `type:"structure"`
-
- // Maximum number of results to return per page.
- MaxResults *int64 `min:"1" type:"integer"`
-
- // An optional string that, if supplied, must be copied from the output of a
- // previous call to ListExports. When provided in this manner, the API fetches
- // the next page of results.
- NextToken *string `type:"string"`
-
- // The Amazon Resource Name (ARN) associated with the exported table.
- TableArn *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListExportsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListExportsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListExportsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListExportsInput"}
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
- if s.TableArn != nil && len(*s.TableArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableArn", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *ListExportsInput) SetMaxResults(v int64) *ListExportsInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListExportsInput) SetNextToken(v string) *ListExportsInput {
- s.NextToken = &v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *ListExportsInput) SetTableArn(v string) *ListExportsInput {
- s.TableArn = &v
- return s
-}
-
-type ListExportsOutput struct {
- _ struct{} `type:"structure"`
-
- // A list of ExportSummary objects.
- ExportSummaries []*ExportSummary `type:"list"`
-
- // If this value is returned, there are additional results to be displayed.
- // To retrieve them, call ListExports again, with NextToken set to this value.
- NextToken *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListExportsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListExportsOutput) GoString() string {
- return s.String()
-}
-
-// SetExportSummaries sets the ExportSummaries field's value.
-func (s *ListExportsOutput) SetExportSummaries(v []*ExportSummary) *ListExportsOutput {
- s.ExportSummaries = v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListExportsOutput) SetNextToken(v string) *ListExportsOutput {
- s.NextToken = &v
- return s
-}
-
-type ListGlobalTablesInput struct {
- _ struct{} `type:"structure"`
-
- // The first global table name that this operation will evaluate.
- ExclusiveStartGlobalTableName *string `min:"3" type:"string"`
-
- // The maximum number of table names to return, if the parameter is not specified
- // DynamoDB defaults to 100.
- //
- // If the number of global tables DynamoDB finds reaches this limit, it stops
- // the operation and returns the table names collected up to that point, with
- // a table name in the LastEvaluatedGlobalTableName to apply in a subsequent
- // operation to the ExclusiveStartGlobalTableName parameter.
- Limit *int64 `min:"1" type:"integer"`
-
- // Lists the global tables in a specific Region.
- RegionName *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListGlobalTablesInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListGlobalTablesInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListGlobalTablesInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListGlobalTablesInput"}
- if s.ExclusiveStartGlobalTableName != nil && len(*s.ExclusiveStartGlobalTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartGlobalTableName", 3))
- }
- if s.Limit != nil && *s.Limit < 1 {
- invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetExclusiveStartGlobalTableName sets the ExclusiveStartGlobalTableName field's value.
-func (s *ListGlobalTablesInput) SetExclusiveStartGlobalTableName(v string) *ListGlobalTablesInput {
- s.ExclusiveStartGlobalTableName = &v
- return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *ListGlobalTablesInput) SetLimit(v int64) *ListGlobalTablesInput {
- s.Limit = &v
- return s
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *ListGlobalTablesInput) SetRegionName(v string) *ListGlobalTablesInput {
- s.RegionName = &v
- return s
-}
-
-type ListGlobalTablesOutput struct {
- _ struct{} `type:"structure"`
-
- // List of global table names.
- GlobalTables []*GlobalTable `type:"list"`
-
- // Last evaluated global table name.
- LastEvaluatedGlobalTableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListGlobalTablesOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListGlobalTablesOutput) GoString() string {
- return s.String()
-}
-
-// SetGlobalTables sets the GlobalTables field's value.
-func (s *ListGlobalTablesOutput) SetGlobalTables(v []*GlobalTable) *ListGlobalTablesOutput {
- s.GlobalTables = v
- return s
-}
-
-// SetLastEvaluatedGlobalTableName sets the LastEvaluatedGlobalTableName field's value.
-func (s *ListGlobalTablesOutput) SetLastEvaluatedGlobalTableName(v string) *ListGlobalTablesOutput {
- s.LastEvaluatedGlobalTableName = &v
- return s
-}
-
-type ListImportsInput struct {
- _ struct{} `type:"structure"`
-
- // An optional string that, if supplied, must be copied from the output of a
- // previous call to ListImports. When provided in this manner, the API fetches
- // the next page of results.
- NextToken *string `min:"112" type:"string"`
-
- // The number of ImportSummary objects returned in a single page.
- PageSize *int64 `min:"1" type:"integer"`
-
- // The Amazon Resource Name (ARN) associated with the table that was imported
- // to.
- TableArn *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImportsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImportsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListImportsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListImportsInput"}
- if s.NextToken != nil && len(*s.NextToken) < 112 {
- invalidParams.Add(request.NewErrParamMinLen("NextToken", 112))
- }
- if s.PageSize != nil && *s.PageSize < 1 {
- invalidParams.Add(request.NewErrParamMinValue("PageSize", 1))
- }
- if s.TableArn != nil && len(*s.TableArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableArn", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListImportsInput) SetNextToken(v string) *ListImportsInput {
- s.NextToken = &v
- return s
-}
-
-// SetPageSize sets the PageSize field's value.
-func (s *ListImportsInput) SetPageSize(v int64) *ListImportsInput {
- s.PageSize = &v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *ListImportsInput) SetTableArn(v string) *ListImportsInput {
- s.TableArn = &v
- return s
-}
-
-type ListImportsOutput struct {
- _ struct{} `type:"structure"`
-
- // A list of ImportSummary objects.
- ImportSummaryList []*ImportSummary `type:"list"`
-
- // If this value is returned, there are additional results to be displayed.
- // To retrieve them, call ListImports again, with NextToken set to this value.
- NextToken *string `min:"112" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImportsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListImportsOutput) GoString() string {
- return s.String()
-}
-
-// SetImportSummaryList sets the ImportSummaryList field's value.
-func (s *ListImportsOutput) SetImportSummaryList(v []*ImportSummary) *ListImportsOutput {
- s.ImportSummaryList = v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListImportsOutput) SetNextToken(v string) *ListImportsOutput {
- s.NextToken = &v
- return s
-}
-
-// Represents the input of a ListTables operation.
-type ListTablesInput struct {
- _ struct{} `type:"structure"`
-
- // The first table name that this operation will evaluate. Use the value that
- // was returned for LastEvaluatedTableName in a previous operation, so that
- // you can obtain the next page of results.
- ExclusiveStartTableName *string `min:"3" type:"string"`
-
- // A maximum number of table names to return. If this parameter is not specified,
- // the limit is 100.
- Limit *int64 `min:"1" type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTablesInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTablesInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListTablesInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListTablesInput"}
- if s.ExclusiveStartTableName != nil && len(*s.ExclusiveStartTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("ExclusiveStartTableName", 3))
- }
- if s.Limit != nil && *s.Limit < 1 {
- invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetExclusiveStartTableName sets the ExclusiveStartTableName field's value.
-func (s *ListTablesInput) SetExclusiveStartTableName(v string) *ListTablesInput {
- s.ExclusiveStartTableName = &v
- return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *ListTablesInput) SetLimit(v int64) *ListTablesInput {
- s.Limit = &v
- return s
-}
-
-// Represents the output of a ListTables operation.
-type ListTablesOutput struct {
- _ struct{} `type:"structure"`
-
- // The name of the last table in the current page of results. Use this value
- // as the ExclusiveStartTableName in a new request to obtain the next page of
- // results, until all the table names are returned.
- //
- // If you do not receive a LastEvaluatedTableName value in the response, this
- // means that there are no more table names to be retrieved.
- LastEvaluatedTableName *string `min:"3" type:"string"`
-
- // The names of the tables associated with the current account at the current
- // endpoint. The maximum size of this array is 100.
- //
- // If LastEvaluatedTableName also appears in the output, you can use this value
- // as the ExclusiveStartTableName parameter in a subsequent ListTables request
- // and obtain the next page of results.
- TableNames []*string `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTablesOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTablesOutput) GoString() string {
- return s.String()
-}
-
-// SetLastEvaluatedTableName sets the LastEvaluatedTableName field's value.
-func (s *ListTablesOutput) SetLastEvaluatedTableName(v string) *ListTablesOutput {
- s.LastEvaluatedTableName = &v
- return s
-}
-
-// SetTableNames sets the TableNames field's value.
-func (s *ListTablesOutput) SetTableNames(v []*string) *ListTablesOutput {
- s.TableNames = v
- return s
-}
-
-type ListTagsOfResourceInput struct {
- _ struct{} `type:"structure"`
-
- // An optional string that, if supplied, must be copied from the output of a
- // previous call to ListTagOfResource. When provided in this manner, this API
- // fetches the next page of results.
- NextToken *string `type:"string"`
-
- // The Amazon DynamoDB resource with tags to be listed. This value is an Amazon
- // Resource Name (ARN).
- //
- // ResourceArn is a required field
- ResourceArn *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsOfResourceInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsOfResourceInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListTagsOfResourceInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListTagsOfResourceInput"}
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
- if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListTagsOfResourceInput) SetNextToken(v string) *ListTagsOfResourceInput {
- s.NextToken = &v
- return s
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *ListTagsOfResourceInput) SetResourceArn(v string) *ListTagsOfResourceInput {
- s.ResourceArn = &v
- return s
-}
-
-type ListTagsOfResourceOutput struct {
- _ struct{} `type:"structure"`
-
- // If this value is returned, there are additional results to be displayed.
- // To retrieve them, call ListTagsOfResource again, with NextToken set to this
- // value.
- NextToken *string `type:"string"`
-
- // The tags currently associated with the Amazon DynamoDB resource.
- Tags []*Tag `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsOfResourceOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListTagsOfResourceOutput) GoString() string {
- return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListTagsOfResourceOutput) SetNextToken(v string) *ListTagsOfResourceOutput {
- s.NextToken = &v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *ListTagsOfResourceOutput) SetTags(v []*Tag) *ListTagsOfResourceOutput {
- s.Tags = v
- return s
-}
-
-// Represents the properties of a local secondary index.
-type LocalSecondaryIndex struct {
- _ struct{} `type:"structure"`
-
- // The name of the local secondary index. The name must be unique among all
- // other indexes on this table.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-
- // The complete key schema for the local secondary index, consisting of one
- // or more pairs of attribute names and key types:
- //
- // * HASH - partition key
- //
- // * RANGE - sort key
- //
- // The partition key of an item is also known as its hash attribute. The term
- // "hash attribute" derives from DynamoDB's usage of an internal hash function
- // to evenly distribute data items across partitions, based on their partition
- // key values.
- //
- // The sort key of an item is also known as its range attribute. The term "range
- // attribute" derives from the way DynamoDB stores items with the same partition
- // key physically close together, in sorted order by the sort key value.
- //
- // KeySchema is a required field
- KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
- // Represents attributes that are copied (projected) from the table into the
- // local secondary index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected.
- //
- // Projection is a required field
- Projection *Projection `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndex) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndex) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *LocalSecondaryIndex) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "LocalSecondaryIndex"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.KeySchema == nil {
- invalidParams.Add(request.NewErrParamRequired("KeySchema"))
- }
- if s.KeySchema != nil && len(s.KeySchema) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
- }
- if s.Projection == nil {
- invalidParams.Add(request.NewErrParamRequired("Projection"))
- }
- if s.KeySchema != nil {
- for i, v := range s.KeySchema {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.Projection != nil {
- if err := s.Projection.Validate(); err != nil {
- invalidParams.AddNested("Projection", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *LocalSecondaryIndex) SetIndexName(v string) *LocalSecondaryIndex {
- s.IndexName = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *LocalSecondaryIndex) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndex {
- s.KeySchema = v
- return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *LocalSecondaryIndex) SetProjection(v *Projection) *LocalSecondaryIndex {
- s.Projection = v
- return s
-}
-
-// Represents the properties of a local secondary index.
-type LocalSecondaryIndexDescription struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) that uniquely identifies the index.
- IndexArn *string `type:"string"`
-
- // Represents the name of the local secondary index.
- IndexName *string `min:"3" type:"string"`
-
- // The total size of the specified index, in bytes. DynamoDB updates this value
- // approximately every six hours. Recent changes might not be reflected in this
- // value.
- IndexSizeBytes *int64 `type:"long"`
-
- // The number of items in the specified index. DynamoDB updates this value approximately
- // every six hours. Recent changes might not be reflected in this value.
- ItemCount *int64 `type:"long"`
-
- // The complete key schema for the local secondary index, consisting of one
- // or more pairs of attribute names and key types:
- //
- // * HASH - partition key
- //
- // * RANGE - sort key
- //
- // The partition key of an item is also known as its hash attribute. The term
- // "hash attribute" derives from DynamoDB's usage of an internal hash function
- // to evenly distribute data items across partitions, based on their partition
- // key values.
- //
- // The sort key of an item is also known as its range attribute. The term "range
- // attribute" derives from the way DynamoDB stores items with the same partition
- // key physically close together, in sorted order by the sort key value.
- KeySchema []*KeySchemaElement `min:"1" type:"list"`
-
- // Represents attributes that are copied (projected) from the table into the
- // global secondary index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected.
- Projection *Projection `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndexDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndexDescription) GoString() string {
- return s.String()
-}
-
-// SetIndexArn sets the IndexArn field's value.
-func (s *LocalSecondaryIndexDescription) SetIndexArn(v string) *LocalSecondaryIndexDescription {
- s.IndexArn = &v
- return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *LocalSecondaryIndexDescription) SetIndexName(v string) *LocalSecondaryIndexDescription {
- s.IndexName = &v
- return s
-}
-
-// SetIndexSizeBytes sets the IndexSizeBytes field's value.
-func (s *LocalSecondaryIndexDescription) SetIndexSizeBytes(v int64) *LocalSecondaryIndexDescription {
- s.IndexSizeBytes = &v
- return s
-}
-
-// SetItemCount sets the ItemCount field's value.
-func (s *LocalSecondaryIndexDescription) SetItemCount(v int64) *LocalSecondaryIndexDescription {
- s.ItemCount = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *LocalSecondaryIndexDescription) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexDescription {
- s.KeySchema = v
- return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *LocalSecondaryIndexDescription) SetProjection(v *Projection) *LocalSecondaryIndexDescription {
- s.Projection = v
- return s
-}
-
-// Represents the properties of a local secondary index for the table when the
-// backup was created.
-type LocalSecondaryIndexInfo struct {
- _ struct{} `type:"structure"`
-
- // Represents the name of the local secondary index.
- IndexName *string `min:"3" type:"string"`
-
- // The complete key schema for a local secondary index, which consists of one
- // or more pairs of attribute names and key types:
- //
- // * HASH - partition key
- //
- // * RANGE - sort key
- //
- // The partition key of an item is also known as its hash attribute. The term
- // "hash attribute" derives from DynamoDB's usage of an internal hash function
- // to evenly distribute data items across partitions, based on their partition
- // key values.
- //
- // The sort key of an item is also known as its range attribute. The term "range
- // attribute" derives from the way DynamoDB stores items with the same partition
- // key physically close together, in sorted order by the sort key value.
- KeySchema []*KeySchemaElement `min:"1" type:"list"`
-
- // Represents attributes that are copied (projected) from the table into the
- // global secondary index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected.
- Projection *Projection `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndexInfo) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LocalSecondaryIndexInfo) GoString() string {
- return s.String()
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *LocalSecondaryIndexInfo) SetIndexName(v string) *LocalSecondaryIndexInfo {
- s.IndexName = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *LocalSecondaryIndexInfo) SetKeySchema(v []*KeySchemaElement) *LocalSecondaryIndexInfo {
- s.KeySchema = v
- return s
-}
-
-// SetProjection sets the Projection field's value.
-func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIndexInfo {
- s.Projection = v
- return s
-}
-
-// Sets the maximum number of read and write units for the specified on-demand
-// table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
-// or both.
-type OnDemandThroughput struct {
- _ struct{} `type:"structure"`
-
- // Maximum number of read request units for the specified table.
- //
- // To specify a maximum OnDemandThroughput on your table, set the value of MaxReadRequestUnits
- // as greater than or equal to 1. To remove the maximum OnDemandThroughput that
- // is currently set on your table, set the value of MaxReadRequestUnits to -1.
- MaxReadRequestUnits *int64 `type:"long"`
-
- // Maximum number of write request units for the specified table.
- //
- // To specify a maximum OnDemandThroughput on your table, set the value of MaxWriteRequestUnits
- // as greater than or equal to 1. To remove the maximum OnDemandThroughput that
- // is currently set on your table, set the value of MaxWriteRequestUnits to
- // -1.
- MaxWriteRequestUnits *int64 `type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s OnDemandThroughput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s OnDemandThroughput) GoString() string {
- return s.String()
-}
-
-// SetMaxReadRequestUnits sets the MaxReadRequestUnits field's value.
-func (s *OnDemandThroughput) SetMaxReadRequestUnits(v int64) *OnDemandThroughput {
- s.MaxReadRequestUnits = &v
- return s
-}
-
-// SetMaxWriteRequestUnits sets the MaxWriteRequestUnits field's value.
-func (s *OnDemandThroughput) SetMaxWriteRequestUnits(v int64) *OnDemandThroughput {
- s.MaxWriteRequestUnits = &v
- return s
-}
-
-// Overrides the on-demand throughput settings for this replica table. If you
-// don't specify a value for this parameter, it uses the source table's on-demand
-// throughput settings.
-type OnDemandThroughputOverride struct {
- _ struct{} `type:"structure"`
-
- // Maximum number of read request units for the specified replica table.
- MaxReadRequestUnits *int64 `type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s OnDemandThroughputOverride) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s OnDemandThroughputOverride) GoString() string {
- return s.String()
-}
-
-// SetMaxReadRequestUnits sets the MaxReadRequestUnits field's value.
-func (s *OnDemandThroughputOverride) SetMaxReadRequestUnits(v int64) *OnDemandThroughputOverride {
- s.MaxReadRequestUnits = &v
- return s
-}
-
-// Represents a PartiQL statement that uses parameters.
-type ParameterizedStatement struct {
- _ struct{} `type:"structure"`
-
- // The parameter values.
- Parameters []*AttributeValue `min:"1" type:"list"`
-
- // An optional parameter that returns the item attributes for a PartiQL ParameterizedStatement
- // operation that failed a condition check.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // A PartiQL statement that uses parameters.
- //
- // Statement is a required field
- Statement *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ParameterizedStatement) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ParameterizedStatement) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ParameterizedStatement) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ParameterizedStatement"}
- if s.Parameters != nil && len(s.Parameters) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Parameters", 1))
- }
- if s.Statement == nil {
- invalidParams.Add(request.NewErrParamRequired("Statement"))
- }
- if s.Statement != nil && len(*s.Statement) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Statement", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetParameters sets the Parameters field's value.
-func (s *ParameterizedStatement) SetParameters(v []*AttributeValue) *ParameterizedStatement {
- s.Parameters = v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *ParameterizedStatement) SetReturnValuesOnConditionCheckFailure(v string) *ParameterizedStatement {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetStatement sets the Statement field's value.
-func (s *ParameterizedStatement) SetStatement(v string) *ParameterizedStatement {
- s.Statement = &v
- return s
-}
-
-// The description of the point in time settings applied to the table.
-type PointInTimeRecoveryDescription struct {
- _ struct{} `type:"structure"`
-
- // Specifies the earliest point in time you can restore your table to. You can
- // restore your table to any point in time during the last 35 days.
- EarliestRestorableDateTime *time.Time `type:"timestamp"`
-
- // LatestRestorableDateTime is typically 5 minutes before the current time.
- LatestRestorableDateTime *time.Time `type:"timestamp"`
-
- // The current state of point in time recovery:
- //
- // * ENABLED - Point in time recovery is enabled.
- //
- // * DISABLED - Point in time recovery is disabled.
- PointInTimeRecoveryStatus *string `type:"string" enum:"PointInTimeRecoveryStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoveryDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoveryDescription) GoString() string {
- return s.String()
-}
-
-// SetEarliestRestorableDateTime sets the EarliestRestorableDateTime field's value.
-func (s *PointInTimeRecoveryDescription) SetEarliestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription {
- s.EarliestRestorableDateTime = &v
- return s
-}
-
-// SetLatestRestorableDateTime sets the LatestRestorableDateTime field's value.
-func (s *PointInTimeRecoveryDescription) SetLatestRestorableDateTime(v time.Time) *PointInTimeRecoveryDescription {
- s.LatestRestorableDateTime = &v
- return s
-}
-
-// SetPointInTimeRecoveryStatus sets the PointInTimeRecoveryStatus field's value.
-func (s *PointInTimeRecoveryDescription) SetPointInTimeRecoveryStatus(v string) *PointInTimeRecoveryDescription {
- s.PointInTimeRecoveryStatus = &v
- return s
-}
-
-// Represents the settings used to enable point in time recovery.
-type PointInTimeRecoverySpecification struct {
- _ struct{} `type:"structure"`
-
- // Indicates whether point in time recovery is enabled (true) or disabled (false)
- // on the table.
- //
- // PointInTimeRecoveryEnabled is a required field
- PointInTimeRecoveryEnabled *bool `type:"boolean" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoverySpecification) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoverySpecification) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PointInTimeRecoverySpecification) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PointInTimeRecoverySpecification"}
- if s.PointInTimeRecoveryEnabled == nil {
- invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoveryEnabled"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetPointInTimeRecoveryEnabled sets the PointInTimeRecoveryEnabled field's value.
-func (s *PointInTimeRecoverySpecification) SetPointInTimeRecoveryEnabled(v bool) *PointInTimeRecoverySpecification {
- s.PointInTimeRecoveryEnabled = &v
- return s
-}
-
-// Point in time recovery has not yet been enabled for this source table.
-type PointInTimeRecoveryUnavailableException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoveryUnavailableException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PointInTimeRecoveryUnavailableException) GoString() string {
- return s.String()
-}
-
-func newErrorPointInTimeRecoveryUnavailableException(v protocol.ResponseMetadata) error {
- return &PointInTimeRecoveryUnavailableException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *PointInTimeRecoveryUnavailableException) Code() string {
- return "PointInTimeRecoveryUnavailableException"
-}
-
-// Message returns the exception's message.
-func (s *PointInTimeRecoveryUnavailableException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *PointInTimeRecoveryUnavailableException) OrigErr() error {
- return nil
-}
-
-func (s *PointInTimeRecoveryUnavailableException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *PointInTimeRecoveryUnavailableException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *PointInTimeRecoveryUnavailableException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The operation tried to access a nonexistent resource-based policy.
-//
-// If you specified an ExpectedRevisionId, it's possible that a policy is present
-// for the resource but its revision ID didn't match the expected value.
-type PolicyNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PolicyNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PolicyNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorPolicyNotFoundException(v protocol.ResponseMetadata) error {
- return &PolicyNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *PolicyNotFoundException) Code() string {
- return "PolicyNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *PolicyNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *PolicyNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *PolicyNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *PolicyNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *PolicyNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Represents attributes that are copied (projected) from the table into an
-// index. These are in addition to the primary key attributes and index key
-// attributes, which are automatically projected.
-type Projection struct {
- _ struct{} `type:"structure"`
-
- // Represents the non-key attribute names which will be projected into the index.
- //
- // For local secondary indexes, the total count of NonKeyAttributes summed across
- // all of the local secondary indexes, must not exceed 100. If you project the
- // same attribute into two different indexes, this counts as two distinct attributes
- // when determining the total.
- NonKeyAttributes []*string `min:"1" type:"list"`
-
- // The set of attributes that are projected into the index:
- //
- // * KEYS_ONLY - Only the index and primary keys are projected into the index.
- //
- // * INCLUDE - In addition to the attributes described in KEYS_ONLY, the
- // secondary index will include other non-key attributes that you specify.
- //
- // * ALL - All of the table attributes are projected into the index.
- //
- // When using the DynamoDB console, ALL is selected by default.
- ProjectionType *string `type:"string" enum:"ProjectionType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Projection) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Projection) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Projection) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Projection"}
- if s.NonKeyAttributes != nil && len(s.NonKeyAttributes) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("NonKeyAttributes", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetNonKeyAttributes sets the NonKeyAttributes field's value.
-func (s *Projection) SetNonKeyAttributes(v []*string) *Projection {
- s.NonKeyAttributes = v
- return s
-}
-
-// SetProjectionType sets the ProjectionType field's value.
-func (s *Projection) SetProjectionType(v string) *Projection {
- s.ProjectionType = &v
- return s
-}
-
-// Represents the provisioned throughput settings for a specified table or index.
-// The settings can be modified using the UpdateTable operation.
-//
-// For current minimum and maximum provisioned throughput values, see Service,
-// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
-// in the Amazon DynamoDB Developer Guide.
-type ProvisionedThroughput struct {
- _ struct{} `type:"structure"`
-
- // The maximum number of strongly consistent reads consumed per second before
- // DynamoDB returns a ThrottlingException. For more information, see Specifying
- // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- //
- // ReadCapacityUnits is a required field
- ReadCapacityUnits *int64 `min:"1" type:"long" required:"true"`
-
- // The maximum number of writes consumed per second before DynamoDB returns
- // a ThrottlingException. For more information, see Specifying Read and Write
- // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughput.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // If read/write capacity mode is PAY_PER_REQUEST the value is set to 0.
- //
- // WriteCapacityUnits is a required field
- WriteCapacityUnits *int64 `min:"1" type:"long" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ProvisionedThroughput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughput"}
- if s.ReadCapacityUnits == nil {
- invalidParams.Add(request.NewErrParamRequired("ReadCapacityUnits"))
- }
- if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1))
- }
- if s.WriteCapacityUnits == nil {
- invalidParams.Add(request.NewErrParamRequired("WriteCapacityUnits"))
- }
- if s.WriteCapacityUnits != nil && *s.WriteCapacityUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("WriteCapacityUnits", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
-func (s *ProvisionedThroughput) SetReadCapacityUnits(v int64) *ProvisionedThroughput {
- s.ReadCapacityUnits = &v
- return s
-}
-
-// SetWriteCapacityUnits sets the WriteCapacityUnits field's value.
-func (s *ProvisionedThroughput) SetWriteCapacityUnits(v int64) *ProvisionedThroughput {
- s.WriteCapacityUnits = &v
- return s
-}
-
-// Represents the provisioned throughput settings for the table, consisting
-// of read and write capacity units, along with data about increases and decreases.
-type ProvisionedThroughputDescription struct {
- _ struct{} `type:"structure"`
-
- // The date and time of the last provisioned throughput decrease for this table.
- LastDecreaseDateTime *time.Time `type:"timestamp"`
-
- // The date and time of the last provisioned throughput increase for this table.
- LastIncreaseDateTime *time.Time `type:"timestamp"`
-
- // The number of provisioned throughput decreases for this table during this
- // UTC calendar day. For current maximums on provisioned throughput decreases,
- // see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
- // in the Amazon DynamoDB Developer Guide.
- NumberOfDecreasesToday *int64 `min:"1" type:"long"`
-
- // The maximum number of strongly consistent reads consumed per second before
- // DynamoDB returns a ThrottlingException. Eventually consistent reads require
- // less effort than strongly consistent reads, so a setting of 50 ReadCapacityUnits
- // per second provides 100 eventually consistent ReadCapacityUnits per second.
- ReadCapacityUnits *int64 `type:"long"`
-
- // The maximum number of writes consumed per second before DynamoDB returns
- // a ThrottlingException.
- WriteCapacityUnits *int64 `type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughputDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughputDescription) GoString() string {
- return s.String()
-}
-
-// SetLastDecreaseDateTime sets the LastDecreaseDateTime field's value.
-func (s *ProvisionedThroughputDescription) SetLastDecreaseDateTime(v time.Time) *ProvisionedThroughputDescription {
- s.LastDecreaseDateTime = &v
- return s
-}
-
-// SetLastIncreaseDateTime sets the LastIncreaseDateTime field's value.
-func (s *ProvisionedThroughputDescription) SetLastIncreaseDateTime(v time.Time) *ProvisionedThroughputDescription {
- s.LastIncreaseDateTime = &v
- return s
-}
-
-// SetNumberOfDecreasesToday sets the NumberOfDecreasesToday field's value.
-func (s *ProvisionedThroughputDescription) SetNumberOfDecreasesToday(v int64) *ProvisionedThroughputDescription {
- s.NumberOfDecreasesToday = &v
- return s
-}
-
-// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
-func (s *ProvisionedThroughputDescription) SetReadCapacityUnits(v int64) *ProvisionedThroughputDescription {
- s.ReadCapacityUnits = &v
- return s
-}
-
-// SetWriteCapacityUnits sets the WriteCapacityUnits field's value.
-func (s *ProvisionedThroughputDescription) SetWriteCapacityUnits(v int64) *ProvisionedThroughputDescription {
- s.WriteCapacityUnits = &v
- return s
-}
-
-// Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
-// automatically retry requests that receive this exception. Your request is
-// eventually successful, unless your retry queue is too large to finish. Reduce
-// the frequency of requests and use exponential backoff. For more information,
-// go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
-type ProvisionedThroughputExceededException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // You exceeded your maximum allowed provisioned throughput.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughputExceededException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughputExceededException) GoString() string {
- return s.String()
-}
-
-func newErrorProvisionedThroughputExceededException(v protocol.ResponseMetadata) error {
- return &ProvisionedThroughputExceededException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ProvisionedThroughputExceededException) Code() string {
- return "ProvisionedThroughputExceededException"
-}
-
-// Message returns the exception's message.
-func (s *ProvisionedThroughputExceededException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ProvisionedThroughputExceededException) OrigErr() error {
- return nil
-}
-
-func (s *ProvisionedThroughputExceededException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ProvisionedThroughputExceededException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ProvisionedThroughputExceededException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Replica-specific provisioned throughput settings. If not specified, uses
-// the source table's provisioned throughput settings.
-type ProvisionedThroughputOverride struct {
- _ struct{} `type:"structure"`
-
- // Replica-specific read capacity units. If not specified, uses the source table's
- // read capacity settings.
- ReadCapacityUnits *int64 `min:"1" type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughputOverride) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvisionedThroughputOverride) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ProvisionedThroughputOverride) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ProvisionedThroughputOverride"}
- if s.ReadCapacityUnits != nil && *s.ReadCapacityUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("ReadCapacityUnits", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetReadCapacityUnits sets the ReadCapacityUnits field's value.
-func (s *ProvisionedThroughputOverride) SetReadCapacityUnits(v int64) *ProvisionedThroughputOverride {
- s.ReadCapacityUnits = &v
- return s
-}
-
-// Represents a request to perform a PutItem operation.
-type Put struct {
- _ struct{} `type:"structure"`
-
- // A condition that must be satisfied in order for a conditional update to succeed.
- ConditionExpression *string `type:"string"`
-
- // One or more substitution tokens for attribute names in an expression.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // A map of attribute name to attribute values, representing the primary key
- // of the item to be written by PutItem. All of the table's primary key attributes
- // must be specified, and their data types must match those of the table's key
- // schema. If any attributes are present in the item that are part of an index
- // key schema for the table, their types must match the index key schema.
- //
- // Item is a required field
- Item map[string]*AttributeValue `type:"map" required:"true"`
-
- // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
- // Put condition fails. For ReturnValuesOnConditionCheckFailure, the valid values
- // are: NONE and ALL_OLD.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // Name of the table in which to write the item. You can also provide the Amazon
- // Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Put) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Put) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Put) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Put"}
- if s.Item == nil {
- invalidParams.Add(request.NewErrParamRequired("Item"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConditionExpression sets the ConditionExpression field's value.
-func (s *Put) SetConditionExpression(v string) *Put {
- s.ConditionExpression = &v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *Put) SetExpressionAttributeNames(v map[string]*string) *Put {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *Put) SetExpressionAttributeValues(v map[string]*AttributeValue) *Put {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetItem sets the Item field's value.
-func (s *Put) SetItem(v map[string]*AttributeValue) *Put {
- s.Item = v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *Put) SetReturnValuesOnConditionCheckFailure(v string) *Put {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *Put) SetTableName(v string) *Put {
- s.TableName = &v
- return s
-}
-
-// Represents the input of a PutItem operation.
-type PutItemInput struct {
- _ struct{} `type:"structure"`
-
- // A condition that must be satisfied in order for a conditional PutItem operation
- // to succeed.
- //
- // An expression can contain any of the following:
- //
- // * Functions: attribute_exists | attribute_not_exists | attribute_type
- // | contains | begins_with | size These function names are case-sensitive.
- //
- // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
- //
- // * Logical operators: AND | OR | NOT
- //
- // For more information on condition expressions, see Condition Expressions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionExpression *string `type:"string"`
-
- // This is a legacy parameter. Use ConditionExpression instead. For more information,
- // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
-
- // This is a legacy parameter. Use ConditionExpression instead. For more information,
- // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
- // in the Amazon DynamoDB Developer Guide.
- Expected map[string]*ExpectedAttributeValue `type:"map"`
-
- // One or more substitution tokens for attribute names in an expression. The
- // following are some use cases for using ExpressionAttributeNames:
- //
- // * To access an attribute whose name conflicts with a DynamoDB reserved
- // word.
- //
- // * To create a placeholder for repeating occurrences of an attribute name
- // in an expression.
- //
- // * To prevent special characters in an attribute name from being misinterpreted
- // in an expression.
- //
- // Use the # character in an expression to dereference an attribute name. For
- // example, consider the following attribute name:
- //
- // * Percentile
- //
- // The name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
- // the following for ExpressionAttributeNames:
- //
- // * {"#P":"Percentile"}
- //
- // You could then use this substitution in an expression, as in this example:
- //
- // * #P = :val
- //
- // Tokens that begin with the : character are expression attribute values, which
- // are placeholders for the actual value at runtime.
- //
- // For more information on expression attribute names, see Specifying Item Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- //
- // Use the : (colon) character in an expression to dereference an attribute
- // value. For example, suppose that you wanted to check whether the value of
- // the ProductStatus attribute was one of the following:
- //
- // Available | Backordered | Discontinued
- //
- // You would first need to specify ExpressionAttributeValues as follows:
- //
- // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
- // }
- //
- // You could then use these values in an expression, such as this:
- //
- // ProductStatus IN (:avail, :back, :disc)
- //
- // For more information on expression attribute values, see Condition Expressions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // A map of attribute name/value pairs, one for each attribute. Only the primary
- // key attributes are required; you can optionally provide other attribute name-value
- // pairs for the item.
- //
- // You must provide all of the attributes for the primary key. For example,
- // with a simple primary key, you only need to provide a value for the partition
- // key. For a composite primary key, you must provide both values for both the
- // partition key and the sort key.
- //
- // If you specify any attributes that are part of an index key, then the data
- // types for those attributes must match those of the schema in the table's
- // attribute definition.
- //
- // Empty String and Binary attribute values are allowed. Attribute values of
- // type String and Binary must have a length greater than zero if the attribute
- // is used as a key attribute for a table or index.
- //
- // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/HowItWorks.CoreComponents.html#HowItWorks.CoreComponents.PrimaryKey)
- // in the Amazon DynamoDB Developer Guide.
- //
- // Each element in the Item map is an AttributeValue object.
- //
- // Item is a required field
- Item map[string]*AttributeValue `type:"map" required:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // Determines whether item collection metrics are returned. If set to SIZE,
- // the response includes statistics about item collections, if any, that were
- // modified during the operation are returned in the response. If set to NONE
- // (the default), no statistics are returned.
- ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
-
- // Use ReturnValues if you want to get the item attributes as they appeared
- // before they were updated with the PutItem request. For PutItem, the valid
- // values are:
- //
- // * NONE - If ReturnValues is not specified, or if its value is NONE, then
- // nothing is returned. (This setting is the default for ReturnValues.)
- //
- // * ALL_OLD - If PutItem overwrote an attribute name-value pair, then the
- // content of the old item is returned.
- //
- // The values returned are strongly consistent.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- //
- // The ReturnValues parameter is used by several DynamoDB operations; however,
- // PutItem does not recognize any values other than NONE or ALL_OLD.
- ReturnValues *string `type:"string" enum:"ReturnValue"`
-
- // An optional parameter that returns the item attributes for a PutItem operation
- // that failed a condition check.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // The name of the table to contain the item. You can also provide the Amazon
- // Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutItemInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutItemInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutItemInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutItemInput"}
- if s.Item == nil {
- invalidParams.Add(request.NewErrParamRequired("Item"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConditionExpression sets the ConditionExpression field's value.
-func (s *PutItemInput) SetConditionExpression(v string) *PutItemInput {
- s.ConditionExpression = &v
- return s
-}
-
-// SetConditionalOperator sets the ConditionalOperator field's value.
-func (s *PutItemInput) SetConditionalOperator(v string) *PutItemInput {
- s.ConditionalOperator = &v
- return s
-}
-
-// SetExpected sets the Expected field's value.
-func (s *PutItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *PutItemInput {
- s.Expected = v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *PutItemInput) SetExpressionAttributeNames(v map[string]*string) *PutItemInput {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *PutItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *PutItemInput {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetItem sets the Item field's value.
-func (s *PutItemInput) SetItem(v map[string]*AttributeValue) *PutItemInput {
- s.Item = v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *PutItemInput) SetReturnConsumedCapacity(v string) *PutItemInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
-func (s *PutItemInput) SetReturnItemCollectionMetrics(v string) *PutItemInput {
- s.ReturnItemCollectionMetrics = &v
- return s
-}
-
-// SetReturnValues sets the ReturnValues field's value.
-func (s *PutItemInput) SetReturnValues(v string) *PutItemInput {
- s.ReturnValues = &v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *PutItemInput) SetReturnValuesOnConditionCheckFailure(v string) *PutItemInput {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *PutItemInput) SetTableName(v string) *PutItemInput {
- s.TableName = &v
- return s
-}
-
-// Represents the output of a PutItem operation.
-type PutItemOutput struct {
- _ struct{} `type:"structure"`
-
- // The attribute values as they appeared before the PutItem operation, but only
- // if ReturnValues is specified as ALL_OLD in the request. Each element consists
- // of an attribute name and an attribute value.
- Attributes map[string]*AttributeValue `type:"map"`
-
- // The capacity units consumed by the PutItem operation. The data returned includes
- // the total provisioned throughput consumed, along with statistics for the
- // table and any indexes involved in the operation. ConsumedCapacity is only
- // returned if the ReturnConsumedCapacity parameter was specified. For more
- // information, see Capacity unity consumption for write operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // Information about item collections, if any, that were affected by the PutItem
- // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
- // parameter was specified. If the table does not have any local secondary indexes,
- // this information is not returned in the response.
- //
- // Each ItemCollectionMetrics element consists of:
- //
- // * ItemCollectionKey - The partition key value of the item collection.
- // This is the same as the partition key value of the item itself.
- //
- // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
- // This value is a two-element array containing a lower bound and an upper
- // bound for the estimate. The estimate includes the size of all the items
- // in the table, plus the size of all attributes projected into all of the
- // local secondary indexes on that table. Use this estimate to measure whether
- // a local secondary index is approaching its size limit. The estimate is
- // subject to change over time; therefore, do not rely on the precision or
- // accuracy of the estimate.
- ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutItemOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutItemOutput) GoString() string {
- return s.String()
-}
-
-// SetAttributes sets the Attributes field's value.
-func (s *PutItemOutput) SetAttributes(v map[string]*AttributeValue) *PutItemOutput {
- s.Attributes = v
- return s
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *PutItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *PutItemOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
-func (s *PutItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *PutItemOutput {
- s.ItemCollectionMetrics = v
- return s
-}
-
-// Represents a request to perform a PutItem operation on an item.
-type PutRequest struct {
- _ struct{} `type:"structure"`
-
- // A map of attribute name to attribute values, representing the primary key
- // of an item to be processed by PutItem. All of the table's primary key attributes
- // must be specified, and their data types must match those of the table's key
- // schema. If any attributes are present in the item that are part of an index
- // key schema for the table, their types must match the index key schema.
- //
- // Item is a required field
- Item map[string]*AttributeValue `type:"map" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRequest) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutRequest) GoString() string {
- return s.String()
-}
-
-// SetItem sets the Item field's value.
-func (s *PutRequest) SetItem(v map[string]*AttributeValue) *PutRequest {
- s.Item = v
- return s
-}
-
-type PutResourcePolicyInput struct {
- _ struct{} `type:"structure"`
-
- // Set this parameter to true to confirm that you want to remove your permissions
- // to change the policy of this resource in the future.
- ConfirmRemoveSelfResourceAccess *bool `type:"boolean"`
-
- // A string value that you can use to conditionally update your policy. You
- // can provide the revision ID of your existing policy to make mutating requests
- // against that policy.
- //
- // When you provide an expected revision ID, if the revision ID of the existing
- // policy on the resource doesn't match or if there's no policy attached to
- // the resource, your request will be rejected with a PolicyNotFoundException.
- //
- // To conditionally attach a policy when no policy exists for the resource,
- // specify NO_POLICY for the revision ID.
- ExpectedRevisionId *string `min:"1" type:"string"`
-
- // An Amazon Web Services resource-based policy document in JSON format.
- //
- // * The maximum size supported for a resource-based policy document is 20
- // KB. DynamoDB counts whitespaces when calculating the size of a policy
- // against this limit.
- //
- // * Within a resource-based policy, if the action for a DynamoDB service-linked
- // role (SLR) to replicate data for a global table is denied, adding or deleting
- // a replica will fail with an error.
- //
- // For a full list of all considerations that apply while attaching a resource-based
- // policy, see Resource-based policy considerations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/rbac-considerations.html).
- //
- // Policy is a required field
- Policy *string `type:"string" required:"true"`
-
- // The Amazon Resource Name (ARN) of the DynamoDB resource to which the policy
- // will be attached. The resources you can specify include tables and streams.
- //
- // You can control index permissions using the base table's policy. To specify
- // the same permission level for your table and its indexes, you can provide
- // both the table and index Amazon Resource Name (ARN)s in the Resource field
- // of a given Statement in your policy document. Alternatively, to specify different
- // permissions for your table, indexes, or both, you can define multiple Statement
- // fields in your policy document.
- //
- // ResourceArn is a required field
- ResourceArn *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutResourcePolicyInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutResourcePolicyInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PutResourcePolicyInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PutResourcePolicyInput"}
- if s.ExpectedRevisionId != nil && len(*s.ExpectedRevisionId) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ExpectedRevisionId", 1))
- }
- if s.Policy == nil {
- invalidParams.Add(request.NewErrParamRequired("Policy"))
- }
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
- if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConfirmRemoveSelfResourceAccess sets the ConfirmRemoveSelfResourceAccess field's value.
-func (s *PutResourcePolicyInput) SetConfirmRemoveSelfResourceAccess(v bool) *PutResourcePolicyInput {
- s.ConfirmRemoveSelfResourceAccess = &v
- return s
-}
-
-// SetExpectedRevisionId sets the ExpectedRevisionId field's value.
-func (s *PutResourcePolicyInput) SetExpectedRevisionId(v string) *PutResourcePolicyInput {
- s.ExpectedRevisionId = &v
- return s
-}
-
-// SetPolicy sets the Policy field's value.
-func (s *PutResourcePolicyInput) SetPolicy(v string) *PutResourcePolicyInput {
- s.Policy = &v
- return s
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *PutResourcePolicyInput) SetResourceArn(v string) *PutResourcePolicyInput {
- s.ResourceArn = &v
- return s
-}
-
-type PutResourcePolicyOutput struct {
- _ struct{} `type:"structure"`
-
- // A unique string that represents the revision ID of the policy. If you're
- // comparing revision IDs, make sure to always use string comparison logic.
- RevisionId *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutResourcePolicyOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PutResourcePolicyOutput) GoString() string {
- return s.String()
-}
-
-// SetRevisionId sets the RevisionId field's value.
-func (s *PutResourcePolicyOutput) SetRevisionId(v string) *PutResourcePolicyOutput {
- s.RevisionId = &v
- return s
-}
-
-// Represents the input of a Query operation.
-type QueryInput struct {
- _ struct{} `type:"structure"`
-
- // This is a legacy parameter. Use ProjectionExpression instead. For more information,
- // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
- // in the Amazon DynamoDB Developer Guide.
- AttributesToGet []*string `min:"1" type:"list"`
-
- // This is a legacy parameter. Use FilterExpression instead. For more information,
- // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
-
- // Determines the read consistency model: If set to true, then the operation
- // uses strongly consistent reads; otherwise, the operation uses eventually
- // consistent reads.
- //
- // Strongly consistent reads are not supported on global secondary indexes.
- // If you query a global secondary index with ConsistentRead set to true, you
- // will receive a ValidationException.
- ConsistentRead *bool `type:"boolean"`
-
- // The primary key of the first item that this operation will evaluate. Use
- // the value that was returned for LastEvaluatedKey in the previous operation.
- //
- // The data type for ExclusiveStartKey must be String, Number, or Binary. No
- // set data types are allowed.
- ExclusiveStartKey map[string]*AttributeValue `type:"map"`
-
- // One or more substitution tokens for attribute names in an expression. The
- // following are some use cases for using ExpressionAttributeNames:
- //
- // * To access an attribute whose name conflicts with a DynamoDB reserved
- // word.
- //
- // * To create a placeholder for repeating occurrences of an attribute name
- // in an expression.
- //
- // * To prevent special characters in an attribute name from being misinterpreted
- // in an expression.
- //
- // Use the # character in an expression to dereference an attribute name. For
- // example, consider the following attribute name:
- //
- // * Percentile
- //
- // The name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
- // the following for ExpressionAttributeNames:
- //
- // * {"#P":"Percentile"}
- //
- // You could then use this substitution in an expression, as in this example:
- //
- // * #P = :val
- //
- // Tokens that begin with the : character are expression attribute values, which
- // are placeholders for the actual value at runtime.
- //
- // For more information on expression attribute names, see Specifying Item Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- //
- // Use the : (colon) character in an expression to dereference an attribute
- // value. For example, suppose that you wanted to check whether the value of
- // the ProductStatus attribute was one of the following:
- //
- // Available | Backordered | Discontinued
- //
- // You would first need to specify ExpressionAttributeValues as follows:
- //
- // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
- // }
- //
- // You could then use these values in an expression, such as this:
- //
- // ProductStatus IN (:avail, :back, :disc)
- //
- // For more information on expression attribute values, see Specifying Conditions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // A string that contains conditions that DynamoDB applies after the Query operation,
- // but before the data is returned to you. Items that do not satisfy the FilterExpression
- // criteria are not returned.
- //
- // A FilterExpression does not allow key attributes. You cannot define a filter
- // expression based on a partition key or a sort key.
- //
- // A FilterExpression is applied after the items have already been read; the
- // process of filtering does not consume any additional read capacity units.
- //
- // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.FilterExpression.html)
- // in the Amazon DynamoDB Developer Guide.
- FilterExpression *string `type:"string"`
-
- // The name of an index to query. This index can be any local secondary index
- // or global secondary index on the table. Note that if you use the IndexName
- // parameter, you must also provide TableName.
- IndexName *string `min:"3" type:"string"`
-
- // The condition that specifies the key values for items to be retrieved by
- // the Query action.
- //
- // The condition must perform an equality test on a single partition key value.
- //
- // The condition can optionally perform one of several comparison tests on a
- // single sort key value. This allows Query to retrieve one item with a given
- // partition key value and sort key value, or several items that have the same
- // partition key value but different sort key values.
- //
- // The partition key equality test is required, and must be specified in the
- // following format:
- //
- // partitionKeyName = :partitionkeyval
- //
- // If you also want to provide a condition for the sort key, it must be combined
- // using AND with the condition for the sort key. Following is an example, using
- // the = comparison operator for the sort key:
- //
- // partitionKeyName = :partitionkeyval AND sortKeyName = :sortkeyval
- //
- // Valid comparisons for the sort key condition are as follows:
- //
- // * sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval.
- //
- // * sortKeyName < :sortkeyval - true if the sort key value is less than
- // :sortkeyval.
- //
- // * sortKeyName <= :sortkeyval - true if the sort key value is less than
- // or equal to :sortkeyval.
- //
- // * sortKeyName > :sortkeyval - true if the sort key value is greater than
- // :sortkeyval.
- //
- // * sortKeyName >= :sortkeyval - true if the sort key value is greater than
- // or equal to :sortkeyval.
- //
- // * sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort
- // key value is greater than or equal to :sortkeyval1, and less than or equal
- // to :sortkeyval2.
- //
- // * begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value
- // begins with a particular operand. (You cannot use this function with a
- // sort key that is of type Number.) Note that the function name begins_with
- // is case-sensitive.
- //
- // Use the ExpressionAttributeValues parameter to replace tokens such as :partitionval
- // and :sortval with actual values at runtime.
- //
- // You can optionally use the ExpressionAttributeNames parameter to replace
- // the names of the partition key and sort key with placeholder tokens. This
- // option might be necessary if an attribute name conflicts with a DynamoDB
- // reserved word. For example, the following KeyConditionExpression parameter
- // causes an error because Size is a reserved word:
- //
- // * Size = :myval
- //
- // To work around this, define a placeholder (such a #S) to represent the attribute
- // name Size. KeyConditionExpression then is as follows:
- //
- // * #S = :myval
- //
- // For a list of reserved words, see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide.
- //
- // For more information on ExpressionAttributeNames and ExpressionAttributeValues,
- // see Using Placeholders for Attribute Names and Values (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html)
- // in the Amazon DynamoDB Developer Guide.
- KeyConditionExpression *string `type:"string"`
-
- // This is a legacy parameter. Use KeyConditionExpression instead. For more
- // information, see KeyConditions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.KeyConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- KeyConditions map[string]*Condition `type:"map"`
-
- // The maximum number of items to evaluate (not necessarily the number of matching
- // items). If DynamoDB processes the number of items up to the limit while processing
- // the results, it stops the operation and returns the matching values up to
- // that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
- // so that you can pick up where you left off. Also, if the processed dataset
- // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
- // and returns the matching values up to the limit, and a key in LastEvaluatedKey
- // to apply in a subsequent operation to continue the operation. For more information,
- // see Query and Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
- // in the Amazon DynamoDB Developer Guide.
- Limit *int64 `min:"1" type:"integer"`
-
- // A string that identifies one or more attributes to retrieve from the table.
- // These attributes can include scalars, sets, or elements of a JSON document.
- // The attributes in the expression must be separated by commas.
- //
- // If no attribute names are specified, then all attributes will be returned.
- // If any of the requested attributes are not found, they will not appear in
- // the result.
- //
- // For more information, see Accessing Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ProjectionExpression *string `type:"string"`
-
- // This is a legacy parameter. Use FilterExpression instead. For more information,
- // see QueryFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.QueryFilter.html)
- // in the Amazon DynamoDB Developer Guide.
- QueryFilter map[string]*Condition `type:"map"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // Specifies the order for index traversal: If true (default), the traversal
- // is performed in ascending order; if false, the traversal is performed in
- // descending order.
- //
- // Items with the same partition key value are stored in sorted order by sort
- // key. If the sort key data type is Number, the results are stored in numeric
- // order. For type String, the results are stored in order of UTF-8 bytes. For
- // type Binary, DynamoDB treats each byte of the binary data as unsigned.
- //
- // If ScanIndexForward is true, DynamoDB returns the results in the order in
- // which they are stored (by sort key value). This is the default behavior.
- // If ScanIndexForward is false, DynamoDB reads the results in reverse order
- // by sort key value, and then returns the results to the client.
- ScanIndexForward *bool `type:"boolean"`
-
- // The attributes to be returned in the result. You can retrieve all item attributes,
- // specific item attributes, the count of matching items, or in the case of
- // an index, some or all of the attributes projected into the index.
- //
- // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified
- // table or index. If you query a local secondary index, then for each matching
- // item in the index, DynamoDB fetches the entire item from the parent table.
- // If the index is configured to project all item attributes, then all of
- // the data can be obtained from the local secondary index, and no fetching
- // is required.
- //
- // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
- // all attributes that have been projected into the index. If the index is
- // configured to project all attributes, this return value is equivalent
- // to specifying ALL_ATTRIBUTES.
- //
- // * COUNT - Returns the number of matching items, rather than the matching
- // items themselves. Note that this uses the same quantity of read capacity
- // units as getting the items, and is subject to the same item size calculations.
- //
- // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression.
- // This return value is equivalent to specifying ProjectionExpression without
- // specifying any value for Select. If you query or scan a local secondary
- // index and request only attributes that are projected into that index,
- // the operation will read only the index and not the table. If any of the
- // requested attributes are not projected into the local secondary index,
- // DynamoDB fetches each of these attributes from the parent table. This
- // extra fetching incurs additional throughput cost and latency. If you query
- // or scan a global secondary index, you can only request attributes that
- // are projected into the index. Global secondary index queries cannot fetch
- // attributes from the parent table.
- //
- // If neither Select nor ProjectionExpression are specified, DynamoDB defaults
- // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when
- // accessing an index. You cannot use both Select and ProjectionExpression together
- // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES.
- // (This usage is equivalent to specifying ProjectionExpression without any
- // value for Select.)
- //
- // If you use the ProjectionExpression parameter, then the value for Select
- // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an
- // error.
- Select *string `type:"string" enum:"Select"`
-
- // The name of the table containing the requested items. You can also provide
- // the Amazon Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s QueryInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s QueryInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *QueryInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "QueryInput"}
- if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.Limit != nil && *s.Limit < 1 {
- invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
- if s.KeyConditions != nil {
- for i, v := range s.KeyConditions {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeyConditions", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.QueryFilter != nil {
- for i, v := range s.QueryFilter {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueryFilter", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributesToGet sets the AttributesToGet field's value.
-func (s *QueryInput) SetAttributesToGet(v []*string) *QueryInput {
- s.AttributesToGet = v
- return s
-}
-
-// SetConditionalOperator sets the ConditionalOperator field's value.
-func (s *QueryInput) SetConditionalOperator(v string) *QueryInput {
- s.ConditionalOperator = &v
- return s
-}
-
-// SetConsistentRead sets the ConsistentRead field's value.
-func (s *QueryInput) SetConsistentRead(v bool) *QueryInput {
- s.ConsistentRead = &v
- return s
-}
-
-// SetExclusiveStartKey sets the ExclusiveStartKey field's value.
-func (s *QueryInput) SetExclusiveStartKey(v map[string]*AttributeValue) *QueryInput {
- s.ExclusiveStartKey = v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *QueryInput) SetExpressionAttributeNames(v map[string]*string) *QueryInput {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *QueryInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *QueryInput {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetFilterExpression sets the FilterExpression field's value.
-func (s *QueryInput) SetFilterExpression(v string) *QueryInput {
- s.FilterExpression = &v
- return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *QueryInput) SetIndexName(v string) *QueryInput {
- s.IndexName = &v
- return s
-}
-
-// SetKeyConditionExpression sets the KeyConditionExpression field's value.
-func (s *QueryInput) SetKeyConditionExpression(v string) *QueryInput {
- s.KeyConditionExpression = &v
- return s
-}
-
-// SetKeyConditions sets the KeyConditions field's value.
-func (s *QueryInput) SetKeyConditions(v map[string]*Condition) *QueryInput {
- s.KeyConditions = v
- return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *QueryInput) SetLimit(v int64) *QueryInput {
- s.Limit = &v
- return s
-}
-
-// SetProjectionExpression sets the ProjectionExpression field's value.
-func (s *QueryInput) SetProjectionExpression(v string) *QueryInput {
- s.ProjectionExpression = &v
- return s
-}
-
-// SetQueryFilter sets the QueryFilter field's value.
-func (s *QueryInput) SetQueryFilter(v map[string]*Condition) *QueryInput {
- s.QueryFilter = v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *QueryInput) SetReturnConsumedCapacity(v string) *QueryInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetScanIndexForward sets the ScanIndexForward field's value.
-func (s *QueryInput) SetScanIndexForward(v bool) *QueryInput {
- s.ScanIndexForward = &v
- return s
-}
-
-// SetSelect sets the Select field's value.
-func (s *QueryInput) SetSelect(v string) *QueryInput {
- s.Select = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *QueryInput) SetTableName(v string) *QueryInput {
- s.TableName = &v
- return s
-}
-
-// Represents the output of a Query operation.
-type QueryOutput struct {
- _ struct{} `type:"structure"`
-
- // The capacity units consumed by the Query operation. The data returned includes
- // the total provisioned throughput consumed, along with statistics for the
- // table and any indexes involved in the operation. ConsumedCapacity is only
- // returned if the ReturnConsumedCapacity parameter was specified. For more
- // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // The number of items in the response.
- //
- // If you used a QueryFilter in the request, then Count is the number of items
- // returned after the filter was applied, and ScannedCount is the number of
- // matching items before the filter was applied.
- //
- // If you did not use a filter in the request, then Count and ScannedCount are
- // the same.
- Count *int64 `type:"integer"`
-
- // An array of item attributes that match the query criteria. Each element in
- // this array consists of an attribute name and the value for that attribute.
- Items []map[string]*AttributeValue `type:"list"`
-
- // The primary key of the item where the operation stopped, inclusive of the
- // previous result set. Use this value to start a new operation, excluding this
- // value in the new request.
- //
- // If LastEvaluatedKey is empty, then the "last page" of results has been processed
- // and there is no more data to be retrieved.
- //
- // If LastEvaluatedKey is not empty, it does not necessarily mean that there
- // is more data in the result set. The only way to know when you have reached
- // the end of the result set is when LastEvaluatedKey is empty.
- LastEvaluatedKey map[string]*AttributeValue `type:"map"`
-
- // The number of items evaluated, before any QueryFilter is applied. A high
- // ScannedCount value with few, or no, Count results indicates an inefficient
- // Query operation. For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Count)
- // in the Amazon DynamoDB Developer Guide.
- //
- // If you did not use a filter in the request, then ScannedCount is the same
- // as Count.
- ScannedCount *int64 `type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s QueryOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s QueryOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *QueryOutput) SetConsumedCapacity(v *ConsumedCapacity) *QueryOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetCount sets the Count field's value.
-func (s *QueryOutput) SetCount(v int64) *QueryOutput {
- s.Count = &v
- return s
-}
-
-// SetItems sets the Items field's value.
-func (s *QueryOutput) SetItems(v []map[string]*AttributeValue) *QueryOutput {
- s.Items = v
- return s
-}
-
-// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
-func (s *QueryOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *QueryOutput {
- s.LastEvaluatedKey = v
- return s
-}
-
-// SetScannedCount sets the ScannedCount field's value.
-func (s *QueryOutput) SetScannedCount(v int64) *QueryOutput {
- s.ScannedCount = &v
- return s
-}
-
-// Represents the properties of a replica.
-type Replica struct {
- _ struct{} `type:"structure"`
-
- // The Region where the replica needs to be created.
- RegionName *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Replica) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Replica) GoString() string {
- return s.String()
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *Replica) SetRegionName(v string) *Replica {
- s.RegionName = &v
- return s
-}
-
-// The specified replica is already part of the global table.
-type ReplicaAlreadyExistsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaAlreadyExistsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaAlreadyExistsException) GoString() string {
- return s.String()
-}
-
-func newErrorReplicaAlreadyExistsException(v protocol.ResponseMetadata) error {
- return &ReplicaAlreadyExistsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ReplicaAlreadyExistsException) Code() string {
- return "ReplicaAlreadyExistsException"
-}
-
-// Message returns the exception's message.
-func (s *ReplicaAlreadyExistsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ReplicaAlreadyExistsException) OrigErr() error {
- return nil
-}
-
-func (s *ReplicaAlreadyExistsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ReplicaAlreadyExistsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ReplicaAlreadyExistsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Represents the auto scaling settings of the replica.
-type ReplicaAutoScalingDescription struct {
- _ struct{} `type:"structure"`
-
- // Replica-specific global secondary index auto scaling settings.
- GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexAutoScalingDescription `type:"list"`
-
- // The Region where the replica exists.
- RegionName *string `type:"string"`
-
- // Represents the auto scaling settings for a global table or global secondary
- // index.
- ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
-
- // Represents the auto scaling settings for a global table or global secondary
- // index.
- ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
-
- // The current state of the replica:
- //
- // * CREATING - The replica is being created.
- //
- // * UPDATING - The replica is being updated.
- //
- // * DELETING - The replica is being deleted.
- //
- // * ACTIVE - The replica is ready for use.
- ReplicaStatus *string `type:"string" enum:"ReplicaStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaAutoScalingDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaAutoScalingDescription) GoString() string {
- return s.String()
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *ReplicaAutoScalingDescription) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndexAutoScalingDescription) *ReplicaAutoScalingDescription {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *ReplicaAutoScalingDescription) SetRegionName(v string) *ReplicaAutoScalingDescription {
- s.RegionName = &v
- return s
-}
-
-// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value.
-func (s *ReplicaAutoScalingDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaAutoScalingDescription {
- s.ReplicaProvisionedReadCapacityAutoScalingSettings = v
- return s
-}
-
-// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value.
-func (s *ReplicaAutoScalingDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaAutoScalingDescription {
- s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v
- return s
-}
-
-// SetReplicaStatus sets the ReplicaStatus field's value.
-func (s *ReplicaAutoScalingDescription) SetReplicaStatus(v string) *ReplicaAutoScalingDescription {
- s.ReplicaStatus = &v
- return s
-}
-
-// Represents the auto scaling settings of a replica that will be modified.
-type ReplicaAutoScalingUpdate struct {
- _ struct{} `type:"structure"`
-
- // The Region where the replica exists.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-
- // Represents the auto scaling settings of global secondary indexes that will
- // be modified.
- ReplicaGlobalSecondaryIndexUpdates []*ReplicaGlobalSecondaryIndexAutoScalingUpdate `type:"list"`
-
- // Represents the auto scaling settings to be modified for a global table or
- // global secondary index.
- ReplicaProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaAutoScalingUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaAutoScalingUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicaAutoScalingUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicaAutoScalingUpdate"}
- if s.RegionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RegionName"))
- }
- if s.ReplicaGlobalSecondaryIndexUpdates != nil {
- for i, v := range s.ReplicaGlobalSecondaryIndexUpdates {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ReplicaProvisionedReadCapacityAutoScalingUpdate != nil {
- if err := s.ReplicaProvisionedReadCapacityAutoScalingUpdate.Validate(); err != nil {
- invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingUpdate", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *ReplicaAutoScalingUpdate) SetRegionName(v string) *ReplicaAutoScalingUpdate {
- s.RegionName = &v
- return s
-}
-
-// SetReplicaGlobalSecondaryIndexUpdates sets the ReplicaGlobalSecondaryIndexUpdates field's value.
-func (s *ReplicaAutoScalingUpdate) SetReplicaGlobalSecondaryIndexUpdates(v []*ReplicaGlobalSecondaryIndexAutoScalingUpdate) *ReplicaAutoScalingUpdate {
- s.ReplicaGlobalSecondaryIndexUpdates = v
- return s
-}
-
-// SetReplicaProvisionedReadCapacityAutoScalingUpdate sets the ReplicaProvisionedReadCapacityAutoScalingUpdate field's value.
-func (s *ReplicaAutoScalingUpdate) SetReplicaProvisionedReadCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *ReplicaAutoScalingUpdate {
- s.ReplicaProvisionedReadCapacityAutoScalingUpdate = v
- return s
-}
-
-// Contains the details of the replica.
-type ReplicaDescription struct {
- _ struct{} `type:"structure"`
-
- // Replica-specific global secondary index settings.
- GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndexDescription `type:"list"`
-
- // The KMS key of the replica that will be used for KMS encryption.
- KMSMasterKeyId *string `type:"string"`
-
- // Overrides the maximum on-demand throughput settings for the specified replica
- // table.
- OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"`
-
- // Replica-specific provisioned throughput. If not described, uses the source
- // table's provisioned throughput settings.
- ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"`
-
- // The name of the Region.
- RegionName *string `type:"string"`
-
- // The time at which the replica was first detected as inaccessible. To determine
- // cause of inaccessibility check the ReplicaStatus property.
- ReplicaInaccessibleDateTime *time.Time `type:"timestamp"`
-
- // The current state of the replica:
- //
- // * CREATING - The replica is being created.
- //
- // * UPDATING - The replica is being updated.
- //
- // * DELETING - The replica is being deleted.
- //
- // * ACTIVE - The replica is ready for use.
- //
- // * REGION_DISABLED - The replica is inaccessible because the Amazon Web
- // Services Region has been disabled. If the Amazon Web Services Region remains
- // inaccessible for more than 20 hours, DynamoDB will remove this replica
- // from the replication group. The replica will not be deleted and replication
- // will stop from and to this region.
- //
- // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the
- // table is inaccessible. If the KMS key remains inaccessible for more than
- // 20 hours, DynamoDB will remove this replica from the replication group.
- // The replica will not be deleted and replication will stop from and to
- // this region.
- ReplicaStatus *string `type:"string" enum:"ReplicaStatus"`
-
- // Detailed information about the replica status.
- ReplicaStatusDescription *string `type:"string"`
-
- // Specifies the progress of a Create, Update, or Delete action on the replica
- // as a percentage.
- ReplicaStatusPercentProgress *string `type:"string"`
-
- // Contains details of the table class.
- ReplicaTableClassSummary *TableClassSummary `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaDescription) GoString() string {
- return s.String()
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *ReplicaDescription) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndexDescription) *ReplicaDescription {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetKMSMasterKeyId sets the KMSMasterKeyId field's value.
-func (s *ReplicaDescription) SetKMSMasterKeyId(v string) *ReplicaDescription {
- s.KMSMasterKeyId = &v
- return s
-}
-
-// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value.
-func (s *ReplicaDescription) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaDescription {
- s.OnDemandThroughputOverride = v
- return s
-}
-
-// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
-func (s *ReplicaDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaDescription {
- s.ProvisionedThroughputOverride = v
- return s
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *ReplicaDescription) SetRegionName(v string) *ReplicaDescription {
- s.RegionName = &v
- return s
-}
-
-// SetReplicaInaccessibleDateTime sets the ReplicaInaccessibleDateTime field's value.
-func (s *ReplicaDescription) SetReplicaInaccessibleDateTime(v time.Time) *ReplicaDescription {
- s.ReplicaInaccessibleDateTime = &v
- return s
-}
-
-// SetReplicaStatus sets the ReplicaStatus field's value.
-func (s *ReplicaDescription) SetReplicaStatus(v string) *ReplicaDescription {
- s.ReplicaStatus = &v
- return s
-}
-
-// SetReplicaStatusDescription sets the ReplicaStatusDescription field's value.
-func (s *ReplicaDescription) SetReplicaStatusDescription(v string) *ReplicaDescription {
- s.ReplicaStatusDescription = &v
- return s
-}
-
-// SetReplicaStatusPercentProgress sets the ReplicaStatusPercentProgress field's value.
-func (s *ReplicaDescription) SetReplicaStatusPercentProgress(v string) *ReplicaDescription {
- s.ReplicaStatusPercentProgress = &v
- return s
-}
-
-// SetReplicaTableClassSummary sets the ReplicaTableClassSummary field's value.
-func (s *ReplicaDescription) SetReplicaTableClassSummary(v *TableClassSummary) *ReplicaDescription {
- s.ReplicaTableClassSummary = v
- return s
-}
-
-// Represents the properties of a replica global secondary index.
-type ReplicaGlobalSecondaryIndex struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-
- // Overrides the maximum on-demand throughput settings for the specified global
- // secondary index in the specified replica table.
- OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"`
-
- // Replica table GSI-specific provisioned throughput. If not specified, uses
- // the source table GSI's read capacity settings.
- ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndex) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndex) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicaGlobalSecondaryIndex) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndex"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.ProvisionedThroughputOverride != nil {
- if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *ReplicaGlobalSecondaryIndex) SetIndexName(v string) *ReplicaGlobalSecondaryIndex {
- s.IndexName = &v
- return s
-}
-
-// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value.
-func (s *ReplicaGlobalSecondaryIndex) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaGlobalSecondaryIndex {
- s.OnDemandThroughputOverride = v
- return s
-}
-
-// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
-func (s *ReplicaGlobalSecondaryIndex) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndex {
- s.ProvisionedThroughputOverride = v
- return s
-}
-
-// Represents the auto scaling configuration for a replica global secondary
-// index.
-type ReplicaGlobalSecondaryIndexAutoScalingDescription struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index.
- IndexName *string `min:"3" type:"string"`
-
- // The current state of the replica global secondary index:
- //
- // * CREATING - The index is being created.
- //
- // * UPDATING - The table/index configuration is being updated. The table/index
- // remains available for data operations when UPDATING
- //
- // * DELETING - The index is being deleted.
- //
- // * ACTIVE - The index is ready for use.
- IndexStatus *string `type:"string" enum:"IndexStatus"`
-
- // Represents the auto scaling settings for a global table or global secondary
- // index.
- ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
-
- // Represents the auto scaling settings for a global table or global secondary
- // index.
- ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexAutoScalingDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexAutoScalingDescription) GoString() string {
- return s.String()
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexAutoScalingDescription {
- s.IndexName = &v
- return s
-}
-
-// SetIndexStatus sets the IndexStatus field's value.
-func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexAutoScalingDescription {
- s.IndexStatus = &v
- return s
-}
-
-// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value.
-func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexAutoScalingDescription {
- s.ProvisionedReadCapacityAutoScalingSettings = v
- return s
-}
-
-// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value.
-func (s *ReplicaGlobalSecondaryIndexAutoScalingDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexAutoScalingDescription {
- s.ProvisionedWriteCapacityAutoScalingSettings = v
- return s
-}
-
-// Represents the auto scaling settings of a global secondary index for a replica
-// that will be modified.
-type ReplicaGlobalSecondaryIndexAutoScalingUpdate struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index.
- IndexName *string `min:"3" type:"string"`
-
- // Represents the auto scaling settings to be modified for a global table or
- // global secondary index.
- ProvisionedReadCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexAutoScalingUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexAutoScalingUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexAutoScalingUpdate"}
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.ProvisionedReadCapacityAutoScalingUpdate != nil {
- if err := s.ProvisionedReadCapacityAutoScalingUpdate.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedReadCapacityAutoScalingUpdate", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexAutoScalingUpdate {
- s.IndexName = &v
- return s
-}
-
-// SetProvisionedReadCapacityAutoScalingUpdate sets the ProvisionedReadCapacityAutoScalingUpdate field's value.
-func (s *ReplicaGlobalSecondaryIndexAutoScalingUpdate) SetProvisionedReadCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexAutoScalingUpdate {
- s.ProvisionedReadCapacityAutoScalingUpdate = v
- return s
-}
-
-// Represents the properties of a replica global secondary index.
-type ReplicaGlobalSecondaryIndexDescription struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index.
- IndexName *string `min:"3" type:"string"`
-
- // Overrides the maximum on-demand throughput for the specified global secondary
- // index in the specified replica table.
- OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"`
-
- // If not described, uses the source table GSI's read capacity settings.
- ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexDescription) GoString() string {
- return s.String()
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *ReplicaGlobalSecondaryIndexDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexDescription {
- s.IndexName = &v
- return s
-}
-
-// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value.
-func (s *ReplicaGlobalSecondaryIndexDescription) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *ReplicaGlobalSecondaryIndexDescription {
- s.OnDemandThroughputOverride = v
- return s
-}
-
-// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
-func (s *ReplicaGlobalSecondaryIndexDescription) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *ReplicaGlobalSecondaryIndexDescription {
- s.ProvisionedThroughputOverride = v
- return s
-}
-
-// Represents the properties of a global secondary index.
-type ReplicaGlobalSecondaryIndexSettingsDescription struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index. The name must be unique among all
- // other indexes on this table.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-
- // The current status of the global secondary index:
- //
- // * CREATING - The global secondary index is being created.
- //
- // * UPDATING - The global secondary index is being updated.
- //
- // * DELETING - The global secondary index is being deleted.
- //
- // * ACTIVE - The global secondary index is ready for use.
- IndexStatus *string `type:"string" enum:"IndexStatus"`
-
- // Auto scaling settings for a global secondary index replica's read capacity
- // units.
- ProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
-
- // The maximum number of strongly consistent reads consumed per second before
- // DynamoDB returns a ThrottlingException.
- ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`
-
- // Auto scaling settings for a global secondary index replica's write capacity
- // units.
- ProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
-
- // The maximum number of writes consumed per second before DynamoDB returns
- // a ThrottlingException.
- ProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexSettingsDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexSettingsDescription) GoString() string {
- return s.String()
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsDescription {
- s.IndexName = &v
- return s
-}
-
-// SetIndexStatus sets the IndexStatus field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetIndexStatus(v string) *ReplicaGlobalSecondaryIndexSettingsDescription {
- s.IndexStatus = &v
- return s
-}
-
-// SetProvisionedReadCapacityAutoScalingSettings sets the ProvisionedReadCapacityAutoScalingSettings field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription {
- s.ProvisionedReadCapacityAutoScalingSettings = v
- return s
-}
-
-// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription {
- s.ProvisionedReadCapacityUnits = &v
- return s
-}
-
-// SetProvisionedWriteCapacityAutoScalingSettings sets the ProvisionedWriteCapacityAutoScalingSettings field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaGlobalSecondaryIndexSettingsDescription {
- s.ProvisionedWriteCapacityAutoScalingSettings = v
- return s
-}
-
-// SetProvisionedWriteCapacityUnits sets the ProvisionedWriteCapacityUnits field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsDescription) SetProvisionedWriteCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsDescription {
- s.ProvisionedWriteCapacityUnits = &v
- return s
-}
-
-// Represents the settings of a global secondary index for a global table that
-// will be modified.
-type ReplicaGlobalSecondaryIndexSettingsUpdate struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index. The name must be unique among all
- // other indexes on this table.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-
- // Auto scaling settings for managing a global secondary index replica's read
- // capacity units.
- ProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
-
- // The maximum number of strongly consistent reads consumed per second before
- // DynamoDB returns a ThrottlingException.
- ProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexSettingsUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaGlobalSecondaryIndexSettingsUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicaGlobalSecondaryIndexSettingsUpdate"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.ProvisionedReadCapacityUnits != nil && *s.ProvisionedReadCapacityUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("ProvisionedReadCapacityUnits", 1))
- }
- if s.ProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
- if err := s.ProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetIndexName(v string) *ReplicaGlobalSecondaryIndexSettingsUpdate {
- s.IndexName = &v
- return s
-}
-
-// SetProvisionedReadCapacityAutoScalingSettingsUpdate sets the ProvisionedReadCapacityAutoScalingSettingsUpdate field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaGlobalSecondaryIndexSettingsUpdate {
- s.ProvisionedReadCapacityAutoScalingSettingsUpdate = v
- return s
-}
-
-// SetProvisionedReadCapacityUnits sets the ProvisionedReadCapacityUnits field's value.
-func (s *ReplicaGlobalSecondaryIndexSettingsUpdate) SetProvisionedReadCapacityUnits(v int64) *ReplicaGlobalSecondaryIndexSettingsUpdate {
- s.ProvisionedReadCapacityUnits = &v
- return s
-}
-
-// The specified replica is no longer part of the global table.
-type ReplicaNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorReplicaNotFoundException(v protocol.ResponseMetadata) error {
- return &ReplicaNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ReplicaNotFoundException) Code() string {
- return "ReplicaNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *ReplicaNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ReplicaNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *ReplicaNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ReplicaNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ReplicaNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Represents the properties of a replica.
-type ReplicaSettingsDescription struct {
- _ struct{} `type:"structure"`
-
- // The Region name of the replica.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-
- // The read/write capacity mode of the replica.
- ReplicaBillingModeSummary *BillingModeSummary `type:"structure"`
-
- // Replica global secondary index settings for the global table.
- ReplicaGlobalSecondaryIndexSettings []*ReplicaGlobalSecondaryIndexSettingsDescription `type:"list"`
-
- // Auto scaling settings for a global table replica's read capacity units.
- ReplicaProvisionedReadCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
-
- // The maximum number of strongly consistent reads consumed per second before
- // DynamoDB returns a ThrottlingException. For more information, see Specifying
- // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
- // in the Amazon DynamoDB Developer Guide.
- ReplicaProvisionedReadCapacityUnits *int64 `type:"long"`
-
- // Auto scaling settings for a global table replica's write capacity units.
- ReplicaProvisionedWriteCapacityAutoScalingSettings *AutoScalingSettingsDescription `type:"structure"`
-
- // The maximum number of writes consumed per second before DynamoDB returns
- // a ThrottlingException. For more information, see Specifying Read and Write
- // Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
- // in the Amazon DynamoDB Developer Guide.
- ReplicaProvisionedWriteCapacityUnits *int64 `type:"long"`
-
- // The current state of the Region:
- //
- // * CREATING - The Region is being created.
- //
- // * UPDATING - The Region is being updated.
- //
- // * DELETING - The Region is being deleted.
- //
- // * ACTIVE - The Region is ready for use.
- ReplicaStatus *string `type:"string" enum:"ReplicaStatus"`
-
- // Contains details of the table class.
- ReplicaTableClassSummary *TableClassSummary `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaSettingsDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaSettingsDescription) GoString() string {
- return s.String()
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *ReplicaSettingsDescription) SetRegionName(v string) *ReplicaSettingsDescription {
- s.RegionName = &v
- return s
-}
-
-// SetReplicaBillingModeSummary sets the ReplicaBillingModeSummary field's value.
-func (s *ReplicaSettingsDescription) SetReplicaBillingModeSummary(v *BillingModeSummary) *ReplicaSettingsDescription {
- s.ReplicaBillingModeSummary = v
- return s
-}
-
-// SetReplicaGlobalSecondaryIndexSettings sets the ReplicaGlobalSecondaryIndexSettings field's value.
-func (s *ReplicaSettingsDescription) SetReplicaGlobalSecondaryIndexSettings(v []*ReplicaGlobalSecondaryIndexSettingsDescription) *ReplicaSettingsDescription {
- s.ReplicaGlobalSecondaryIndexSettings = v
- return s
-}
-
-// SetReplicaProvisionedReadCapacityAutoScalingSettings sets the ReplicaProvisionedReadCapacityAutoScalingSettings field's value.
-func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription {
- s.ReplicaProvisionedReadCapacityAutoScalingSettings = v
- return s
-}
-
-// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value.
-func (s *ReplicaSettingsDescription) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsDescription {
- s.ReplicaProvisionedReadCapacityUnits = &v
- return s
-}
-
-// SetReplicaProvisionedWriteCapacityAutoScalingSettings sets the ReplicaProvisionedWriteCapacityAutoScalingSettings field's value.
-func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityAutoScalingSettings(v *AutoScalingSettingsDescription) *ReplicaSettingsDescription {
- s.ReplicaProvisionedWriteCapacityAutoScalingSettings = v
- return s
-}
-
-// SetReplicaProvisionedWriteCapacityUnits sets the ReplicaProvisionedWriteCapacityUnits field's value.
-func (s *ReplicaSettingsDescription) SetReplicaProvisionedWriteCapacityUnits(v int64) *ReplicaSettingsDescription {
- s.ReplicaProvisionedWriteCapacityUnits = &v
- return s
-}
-
-// SetReplicaStatus sets the ReplicaStatus field's value.
-func (s *ReplicaSettingsDescription) SetReplicaStatus(v string) *ReplicaSettingsDescription {
- s.ReplicaStatus = &v
- return s
-}
-
-// SetReplicaTableClassSummary sets the ReplicaTableClassSummary field's value.
-func (s *ReplicaSettingsDescription) SetReplicaTableClassSummary(v *TableClassSummary) *ReplicaSettingsDescription {
- s.ReplicaTableClassSummary = v
- return s
-}
-
-// Represents the settings for a global table in a Region that will be modified.
-type ReplicaSettingsUpdate struct {
- _ struct{} `type:"structure"`
-
- // The Region of the replica to be added.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-
- // Represents the settings of a global secondary index for a global table that
- // will be modified.
- ReplicaGlobalSecondaryIndexSettingsUpdate []*ReplicaGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"`
-
- // Auto scaling settings for managing a global table replica's read capacity
- // units.
- ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
-
- // The maximum number of strongly consistent reads consumed per second before
- // DynamoDB returns a ThrottlingException. For more information, see Specifying
- // Read and Write Requirements (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html#ProvisionedThroughput)
- // in the Amazon DynamoDB Developer Guide.
- ReplicaProvisionedReadCapacityUnits *int64 `min:"1" type:"long"`
-
- // Replica-specific table class. If not specified, uses the source table's table
- // class.
- ReplicaTableClass *string `type:"string" enum:"TableClass"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaSettingsUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaSettingsUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicaSettingsUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicaSettingsUpdate"}
- if s.RegionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RegionName"))
- }
- if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil && len(s.ReplicaGlobalSecondaryIndexSettingsUpdate) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ReplicaGlobalSecondaryIndexSettingsUpdate", 1))
- }
- if s.ReplicaProvisionedReadCapacityUnits != nil && *s.ReplicaProvisionedReadCapacityUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("ReplicaProvisionedReadCapacityUnits", 1))
- }
- if s.ReplicaGlobalSecondaryIndexSettingsUpdate != nil {
- for i, v := range s.ReplicaGlobalSecondaryIndexSettingsUpdate {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate != nil {
- if err := s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
- invalidParams.AddNested("ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *ReplicaSettingsUpdate) SetRegionName(v string) *ReplicaSettingsUpdate {
- s.RegionName = &v
- return s
-}
-
-// SetReplicaGlobalSecondaryIndexSettingsUpdate sets the ReplicaGlobalSecondaryIndexSettingsUpdate field's value.
-func (s *ReplicaSettingsUpdate) SetReplicaGlobalSecondaryIndexSettingsUpdate(v []*ReplicaGlobalSecondaryIndexSettingsUpdate) *ReplicaSettingsUpdate {
- s.ReplicaGlobalSecondaryIndexSettingsUpdate = v
- return s
-}
-
-// SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate sets the ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate field's value.
-func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *ReplicaSettingsUpdate {
- s.ReplicaProvisionedReadCapacityAutoScalingSettingsUpdate = v
- return s
-}
-
-// SetReplicaProvisionedReadCapacityUnits sets the ReplicaProvisionedReadCapacityUnits field's value.
-func (s *ReplicaSettingsUpdate) SetReplicaProvisionedReadCapacityUnits(v int64) *ReplicaSettingsUpdate {
- s.ReplicaProvisionedReadCapacityUnits = &v
- return s
-}
-
-// SetReplicaTableClass sets the ReplicaTableClass field's value.
-func (s *ReplicaSettingsUpdate) SetReplicaTableClass(v string) *ReplicaSettingsUpdate {
- s.ReplicaTableClass = &v
- return s
-}
-
-// Represents one of the following:
-//
-// - A new replica to be added to an existing global table.
-//
-// - New parameters for an existing replica.
-//
-// - An existing replica to be removed from an existing global table.
-type ReplicaUpdate struct {
- _ struct{} `type:"structure"`
-
- // The parameters required for creating a replica on an existing global table.
- Create *CreateReplicaAction `type:"structure"`
-
- // The name of the existing replica to be removed.
- Delete *DeleteReplicaAction `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicaUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicaUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicaUpdate"}
- if s.Create != nil {
- if err := s.Create.Validate(); err != nil {
- invalidParams.AddNested("Create", err.(request.ErrInvalidParams))
- }
- }
- if s.Delete != nil {
- if err := s.Delete.Validate(); err != nil {
- invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetCreate sets the Create field's value.
-func (s *ReplicaUpdate) SetCreate(v *CreateReplicaAction) *ReplicaUpdate {
- s.Create = v
- return s
-}
-
-// SetDelete sets the Delete field's value.
-func (s *ReplicaUpdate) SetDelete(v *DeleteReplicaAction) *ReplicaUpdate {
- s.Delete = v
- return s
-}
-
-// Represents one of the following:
-//
-// - A new replica to be added to an existing regional table or global table.
-// This request invokes the CreateTableReplica action in the destination
-// Region.
-//
-// - New parameters for an existing replica. This request invokes the UpdateTable
-// action in the destination Region.
-//
-// - An existing replica to be deleted. The request invokes the DeleteTableReplica
-// action in the destination Region, deleting the replica and all if its
-// items in the destination Region.
-//
-// When you manually remove a table or global table replica, you do not automatically
-// remove any associated scalable targets, scaling policies, or CloudWatch alarms.
-type ReplicationGroupUpdate struct {
- _ struct{} `type:"structure"`
-
- // The parameters required for creating a replica for the table.
- Create *CreateReplicationGroupMemberAction `type:"structure"`
-
- // The parameters required for deleting a replica for the table.
- Delete *DeleteReplicationGroupMemberAction `type:"structure"`
-
- // The parameters required for updating a replica for the table.
- Update *UpdateReplicationGroupMemberAction `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicationGroupUpdate) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ReplicationGroupUpdate) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ReplicationGroupUpdate) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ReplicationGroupUpdate"}
- if s.Create != nil {
- if err := s.Create.Validate(); err != nil {
- invalidParams.AddNested("Create", err.(request.ErrInvalidParams))
- }
- }
- if s.Delete != nil {
- if err := s.Delete.Validate(); err != nil {
- invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
- }
- }
- if s.Update != nil {
- if err := s.Update.Validate(); err != nil {
- invalidParams.AddNested("Update", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetCreate sets the Create field's value.
-func (s *ReplicationGroupUpdate) SetCreate(v *CreateReplicationGroupMemberAction) *ReplicationGroupUpdate {
- s.Create = v
- return s
-}
-
-// SetDelete sets the Delete field's value.
-func (s *ReplicationGroupUpdate) SetDelete(v *DeleteReplicationGroupMemberAction) *ReplicationGroupUpdate {
- s.Delete = v
- return s
-}
-
-// SetUpdate sets the Update field's value.
-func (s *ReplicationGroupUpdate) SetUpdate(v *UpdateReplicationGroupMemberAction) *ReplicationGroupUpdate {
- s.Update = v
- return s
-}
-
-// Throughput exceeds the current throughput quota for your account. Please
-// contact Amazon Web Services Support (https://aws.amazon.com/support) to request
-// a quota increase.
-type RequestLimitExceeded struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RequestLimitExceeded) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RequestLimitExceeded) GoString() string {
- return s.String()
-}
-
-func newErrorRequestLimitExceeded(v protocol.ResponseMetadata) error {
- return &RequestLimitExceeded{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *RequestLimitExceeded) Code() string {
- return "RequestLimitExceeded"
-}
-
-// Message returns the exception's message.
-func (s *RequestLimitExceeded) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *RequestLimitExceeded) OrigErr() error {
- return nil
-}
-
-func (s *RequestLimitExceeded) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *RequestLimitExceeded) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *RequestLimitExceeded) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The operation conflicts with the resource's availability. For example, you
-// attempted to recreate an existing table, or tried to delete a table currently
-// in the CREATING state.
-type ResourceInUseException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The resource which is being attempted to be changed is in use.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ResourceInUseException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ResourceInUseException) GoString() string {
- return s.String()
-}
-
-func newErrorResourceInUseException(v protocol.ResponseMetadata) error {
- return &ResourceInUseException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ResourceInUseException) Code() string {
- return "ResourceInUseException"
-}
-
-// Message returns the exception's message.
-func (s *ResourceInUseException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ResourceInUseException) OrigErr() error {
- return nil
-}
-
-func (s *ResourceInUseException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ResourceInUseException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ResourceInUseException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
-type ResourceNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // The resource which is being requested does not exist.
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ResourceNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ResourceNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
- return &ResourceNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ResourceNotFoundException) Code() string {
- return "ResourceNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *ResourceNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ResourceNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *ResourceNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ResourceNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ResourceNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Contains details for the restore.
-type RestoreSummary struct {
- _ struct{} `type:"structure"`
-
- // Point in time or source backup time.
- //
- // RestoreDateTime is a required field
- RestoreDateTime *time.Time `type:"timestamp" required:"true"`
-
- // Indicates if a restore is in progress or not.
- //
- // RestoreInProgress is a required field
- RestoreInProgress *bool `type:"boolean" required:"true"`
-
- // The Amazon Resource Name (ARN) of the backup from which the table was restored.
- SourceBackupArn *string `min:"37" type:"string"`
-
- // The ARN of the source table of the backup that is being restored.
- SourceTableArn *string `min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreSummary) GoString() string {
- return s.String()
-}
-
-// SetRestoreDateTime sets the RestoreDateTime field's value.
-func (s *RestoreSummary) SetRestoreDateTime(v time.Time) *RestoreSummary {
- s.RestoreDateTime = &v
- return s
-}
-
-// SetRestoreInProgress sets the RestoreInProgress field's value.
-func (s *RestoreSummary) SetRestoreInProgress(v bool) *RestoreSummary {
- s.RestoreInProgress = &v
- return s
-}
-
-// SetSourceBackupArn sets the SourceBackupArn field's value.
-func (s *RestoreSummary) SetSourceBackupArn(v string) *RestoreSummary {
- s.SourceBackupArn = &v
- return s
-}
-
-// SetSourceTableArn sets the SourceTableArn field's value.
-func (s *RestoreSummary) SetSourceTableArn(v string) *RestoreSummary {
- s.SourceTableArn = &v
- return s
-}
-
-type RestoreTableFromBackupInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) associated with the backup.
- //
- // BackupArn is a required field
- BackupArn *string `min:"37" type:"string" required:"true"`
-
- // The billing mode of the restored table.
- BillingModeOverride *string `type:"string" enum:"BillingMode"`
-
- // List of global secondary indexes for the restored table. The indexes provided
- // should match existing secondary indexes. You can choose to exclude some or
- // all of the indexes at the time of restore.
- GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"`
-
- // List of local secondary indexes for the restored table. The indexes provided
- // should match existing secondary indexes. You can choose to exclude some or
- // all of the indexes at the time of restore.
- LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"`
-
- // Sets the maximum number of read and write units for the specified on-demand
- // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
- // or both.
- OnDemandThroughputOverride *OnDemandThroughput `type:"structure"`
-
- // Provisioned throughput settings for the restored table.
- ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"`
-
- // The new server-side encryption settings for the restored table.
- SSESpecificationOverride *SSESpecification `type:"structure"`
-
- // The name of the new table to which the backup must be restored.
- //
- // TargetTableName is a required field
- TargetTableName *string `min:"3" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableFromBackupInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableFromBackupInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RestoreTableFromBackupInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "RestoreTableFromBackupInput"}
- if s.BackupArn == nil {
- invalidParams.Add(request.NewErrParamRequired("BackupArn"))
- }
- if s.BackupArn != nil && len(*s.BackupArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37))
- }
- if s.TargetTableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TargetTableName"))
- }
- if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3))
- }
- if s.GlobalSecondaryIndexOverride != nil {
- for i, v := range s.GlobalSecondaryIndexOverride {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.LocalSecondaryIndexOverride != nil {
- for i, v := range s.LocalSecondaryIndexOverride {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvisionedThroughputOverride != nil {
- if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetBackupArn sets the BackupArn field's value.
-func (s *RestoreTableFromBackupInput) SetBackupArn(v string) *RestoreTableFromBackupInput {
- s.BackupArn = &v
- return s
-}
-
-// SetBillingModeOverride sets the BillingModeOverride field's value.
-func (s *RestoreTableFromBackupInput) SetBillingModeOverride(v string) *RestoreTableFromBackupInput {
- s.BillingModeOverride = &v
- return s
-}
-
-// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value.
-func (s *RestoreTableFromBackupInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableFromBackupInput {
- s.GlobalSecondaryIndexOverride = v
- return s
-}
-
-// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value.
-func (s *RestoreTableFromBackupInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableFromBackupInput {
- s.LocalSecondaryIndexOverride = v
- return s
-}
-
-// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value.
-func (s *RestoreTableFromBackupInput) SetOnDemandThroughputOverride(v *OnDemandThroughput) *RestoreTableFromBackupInput {
- s.OnDemandThroughputOverride = v
- return s
-}
-
-// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
-func (s *RestoreTableFromBackupInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableFromBackupInput {
- s.ProvisionedThroughputOverride = v
- return s
-}
-
-// SetSSESpecificationOverride sets the SSESpecificationOverride field's value.
-func (s *RestoreTableFromBackupInput) SetSSESpecificationOverride(v *SSESpecification) *RestoreTableFromBackupInput {
- s.SSESpecificationOverride = v
- return s
-}
-
-// SetTargetTableName sets the TargetTableName field's value.
-func (s *RestoreTableFromBackupInput) SetTargetTableName(v string) *RestoreTableFromBackupInput {
- s.TargetTableName = &v
- return s
-}
-
-type RestoreTableFromBackupOutput struct {
- _ struct{} `type:"structure"`
-
- // The description of the table created from an existing backup.
- TableDescription *TableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableFromBackupOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableFromBackupOutput) GoString() string {
- return s.String()
-}
-
-// SetTableDescription sets the TableDescription field's value.
-func (s *RestoreTableFromBackupOutput) SetTableDescription(v *TableDescription) *RestoreTableFromBackupOutput {
- s.TableDescription = v
- return s
-}
-
-type RestoreTableToPointInTimeInput struct {
- _ struct{} `type:"structure"`
-
- // The billing mode of the restored table.
- BillingModeOverride *string `type:"string" enum:"BillingMode"`
-
- // List of global secondary indexes for the restored table. The indexes provided
- // should match existing secondary indexes. You can choose to exclude some or
- // all of the indexes at the time of restore.
- GlobalSecondaryIndexOverride []*GlobalSecondaryIndex `type:"list"`
-
- // List of local secondary indexes for the restored table. The indexes provided
- // should match existing secondary indexes. You can choose to exclude some or
- // all of the indexes at the time of restore.
- LocalSecondaryIndexOverride []*LocalSecondaryIndex `type:"list"`
-
- // Sets the maximum number of read and write units for the specified on-demand
- // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
- // or both.
- OnDemandThroughputOverride *OnDemandThroughput `type:"structure"`
-
- // Provisioned throughput settings for the restored table.
- ProvisionedThroughputOverride *ProvisionedThroughput `type:"structure"`
-
- // Time in the past to restore the table to.
- RestoreDateTime *time.Time `type:"timestamp"`
-
- // The new server-side encryption settings for the restored table.
- SSESpecificationOverride *SSESpecification `type:"structure"`
-
- // The DynamoDB table that will be restored. This value is an Amazon Resource
- // Name (ARN).
- SourceTableArn *string `min:"1" type:"string"`
-
- // Name of the source table that is being restored.
- SourceTableName *string `min:"3" type:"string"`
-
- // The name of the new table to which it must be restored to.
- //
- // TargetTableName is a required field
- TargetTableName *string `min:"3" type:"string" required:"true"`
-
- // Restore the table to the latest possible time. LatestRestorableDateTime is
- // typically 5 minutes before the current time.
- UseLatestRestorableTime *bool `type:"boolean"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableToPointInTimeInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableToPointInTimeInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RestoreTableToPointInTimeInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "RestoreTableToPointInTimeInput"}
- if s.SourceTableArn != nil && len(*s.SourceTableArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("SourceTableArn", 1))
- }
- if s.SourceTableName != nil && len(*s.SourceTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("SourceTableName", 3))
- }
- if s.TargetTableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TargetTableName"))
- }
- if s.TargetTableName != nil && len(*s.TargetTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("TargetTableName", 3))
- }
- if s.GlobalSecondaryIndexOverride != nil {
- for i, v := range s.GlobalSecondaryIndexOverride {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.LocalSecondaryIndexOverride != nil {
- for i, v := range s.LocalSecondaryIndexOverride {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LocalSecondaryIndexOverride", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvisionedThroughputOverride != nil {
- if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetBillingModeOverride sets the BillingModeOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetBillingModeOverride(v string) *RestoreTableToPointInTimeInput {
- s.BillingModeOverride = &v
- return s
-}
-
-// SetGlobalSecondaryIndexOverride sets the GlobalSecondaryIndexOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetGlobalSecondaryIndexOverride(v []*GlobalSecondaryIndex) *RestoreTableToPointInTimeInput {
- s.GlobalSecondaryIndexOverride = v
- return s
-}
-
-// SetLocalSecondaryIndexOverride sets the LocalSecondaryIndexOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetLocalSecondaryIndexOverride(v []*LocalSecondaryIndex) *RestoreTableToPointInTimeInput {
- s.LocalSecondaryIndexOverride = v
- return s
-}
-
-// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetOnDemandThroughputOverride(v *OnDemandThroughput) *RestoreTableToPointInTimeInput {
- s.OnDemandThroughputOverride = v
- return s
-}
-
-// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetProvisionedThroughputOverride(v *ProvisionedThroughput) *RestoreTableToPointInTimeInput {
- s.ProvisionedThroughputOverride = v
- return s
-}
-
-// SetRestoreDateTime sets the RestoreDateTime field's value.
-func (s *RestoreTableToPointInTimeInput) SetRestoreDateTime(v time.Time) *RestoreTableToPointInTimeInput {
- s.RestoreDateTime = &v
- return s
-}
-
-// SetSSESpecificationOverride sets the SSESpecificationOverride field's value.
-func (s *RestoreTableToPointInTimeInput) SetSSESpecificationOverride(v *SSESpecification) *RestoreTableToPointInTimeInput {
- s.SSESpecificationOverride = v
- return s
-}
-
-// SetSourceTableArn sets the SourceTableArn field's value.
-func (s *RestoreTableToPointInTimeInput) SetSourceTableArn(v string) *RestoreTableToPointInTimeInput {
- s.SourceTableArn = &v
- return s
-}
-
-// SetSourceTableName sets the SourceTableName field's value.
-func (s *RestoreTableToPointInTimeInput) SetSourceTableName(v string) *RestoreTableToPointInTimeInput {
- s.SourceTableName = &v
- return s
-}
-
-// SetTargetTableName sets the TargetTableName field's value.
-func (s *RestoreTableToPointInTimeInput) SetTargetTableName(v string) *RestoreTableToPointInTimeInput {
- s.TargetTableName = &v
- return s
-}
-
-// SetUseLatestRestorableTime sets the UseLatestRestorableTime field's value.
-func (s *RestoreTableToPointInTimeInput) SetUseLatestRestorableTime(v bool) *RestoreTableToPointInTimeInput {
- s.UseLatestRestorableTime = &v
- return s
-}
-
-type RestoreTableToPointInTimeOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of a table.
- TableDescription *TableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableToPointInTimeOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RestoreTableToPointInTimeOutput) GoString() string {
- return s.String()
-}
-
-// SetTableDescription sets the TableDescription field's value.
-func (s *RestoreTableToPointInTimeOutput) SetTableDescription(v *TableDescription) *RestoreTableToPointInTimeOutput {
- s.TableDescription = v
- return s
-}
-
-// The S3 bucket that is being imported from.
-type S3BucketSource struct {
- _ struct{} `type:"structure"`
-
- // The S3 bucket that is being imported from.
- //
- // S3Bucket is a required field
- S3Bucket *string `type:"string" required:"true"`
-
- // The account number of the S3 bucket that is being imported from. If the bucket
- // is owned by the requester this is optional.
- S3BucketOwner *string `type:"string"`
-
- // The key prefix shared by all S3 Objects that are being imported.
- S3KeyPrefix *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s S3BucketSource) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s S3BucketSource) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *S3BucketSource) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "S3BucketSource"}
- if s.S3Bucket == nil {
- invalidParams.Add(request.NewErrParamRequired("S3Bucket"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetS3Bucket sets the S3Bucket field's value.
-func (s *S3BucketSource) SetS3Bucket(v string) *S3BucketSource {
- s.S3Bucket = &v
- return s
-}
-
-// SetS3BucketOwner sets the S3BucketOwner field's value.
-func (s *S3BucketSource) SetS3BucketOwner(v string) *S3BucketSource {
- s.S3BucketOwner = &v
- return s
-}
-
-// SetS3KeyPrefix sets the S3KeyPrefix field's value.
-func (s *S3BucketSource) SetS3KeyPrefix(v string) *S3BucketSource {
- s.S3KeyPrefix = &v
- return s
-}
-
-// The description of the server-side encryption status on the specified table.
-type SSEDescription struct {
- _ struct{} `type:"structure"`
-
- // Indicates the time, in UNIX epoch date format, when DynamoDB detected that
- // the table's KMS key was inaccessible. This attribute will automatically be
- // cleared when DynamoDB detects that the table's KMS key is accessible again.
- // DynamoDB will initiate the table archival process when table's KMS key remains
- // inaccessible for more than seven days from this date.
- InaccessibleEncryptionDateTime *time.Time `type:"timestamp"`
-
- // The KMS key ARN used for the KMS encryption.
- KMSMasterKeyArn *string `type:"string"`
-
- // Server-side encryption type. The only supported value is:
- //
- // * KMS - Server-side encryption that uses Key Management Service. The key
- // is stored in your account and is managed by KMS (KMS charges apply).
- SSEType *string `type:"string" enum:"SSEType"`
-
- // Represents the current state of server-side encryption. The only supported
- // values are:
- //
- // * ENABLED - Server-side encryption is enabled.
- //
- // * UPDATING - Server-side encryption is being updated.
- Status *string `type:"string" enum:"SSEStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SSEDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SSEDescription) GoString() string {
- return s.String()
-}
-
-// SetInaccessibleEncryptionDateTime sets the InaccessibleEncryptionDateTime field's value.
-func (s *SSEDescription) SetInaccessibleEncryptionDateTime(v time.Time) *SSEDescription {
- s.InaccessibleEncryptionDateTime = &v
- return s
-}
-
-// SetKMSMasterKeyArn sets the KMSMasterKeyArn field's value.
-func (s *SSEDescription) SetKMSMasterKeyArn(v string) *SSEDescription {
- s.KMSMasterKeyArn = &v
- return s
-}
-
-// SetSSEType sets the SSEType field's value.
-func (s *SSEDescription) SetSSEType(v string) *SSEDescription {
- s.SSEType = &v
- return s
-}
-
-// SetStatus sets the Status field's value.
-func (s *SSEDescription) SetStatus(v string) *SSEDescription {
- s.Status = &v
- return s
-}
-
-// Represents the settings used to enable server-side encryption.
-type SSESpecification struct {
- _ struct{} `type:"structure"`
-
- // Indicates whether server-side encryption is done using an Amazon Web Services
- // managed key or an Amazon Web Services owned key. If enabled (true), server-side
- // encryption type is set to KMS and an Amazon Web Services managed key is used
- // (KMS charges apply). If disabled (false) or not specified, server-side encryption
- // is set to Amazon Web Services owned key.
- Enabled *bool `type:"boolean"`
-
- // The KMS key that should be used for the KMS encryption. To specify a key,
- // use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN. Note
- // that you should only provide this parameter if the key is different from
- // the default DynamoDB key alias/aws/dynamodb.
- KMSMasterKeyId *string `type:"string"`
-
- // Server-side encryption type. The only supported value is:
- //
- // * KMS - Server-side encryption that uses Key Management Service. The key
- // is stored in your account and is managed by KMS (KMS charges apply).
- SSEType *string `type:"string" enum:"SSEType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SSESpecification) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SSESpecification) GoString() string {
- return s.String()
-}
-
-// SetEnabled sets the Enabled field's value.
-func (s *SSESpecification) SetEnabled(v bool) *SSESpecification {
- s.Enabled = &v
- return s
-}
-
-// SetKMSMasterKeyId sets the KMSMasterKeyId field's value.
-func (s *SSESpecification) SetKMSMasterKeyId(v string) *SSESpecification {
- s.KMSMasterKeyId = &v
- return s
-}
-
-// SetSSEType sets the SSEType field's value.
-func (s *SSESpecification) SetSSEType(v string) *SSESpecification {
- s.SSEType = &v
- return s
-}
-
-// Represents the input of a Scan operation.
-type ScanInput struct {
- _ struct{} `type:"structure"`
-
- // This is a legacy parameter. Use ProjectionExpression instead. For more information,
- // see AttributesToGet (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributesToGet.html)
- // in the Amazon DynamoDB Developer Guide.
- AttributesToGet []*string `min:"1" type:"list"`
-
- // This is a legacy parameter. Use FilterExpression instead. For more information,
- // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
-
- // A Boolean value that determines the read consistency model during the scan:
- //
- // * If ConsistentRead is false, then the data returned from Scan might not
- // contain the results from other recently completed write operations (PutItem,
- // UpdateItem, or DeleteItem).
- //
- // * If ConsistentRead is true, then all of the write operations that completed
- // before the Scan began are guaranteed to be contained in the Scan response.
- //
- // The default setting for ConsistentRead is false.
- //
- // The ConsistentRead parameter is not supported on global secondary indexes.
- // If you scan a global secondary index with ConsistentRead set to true, you
- // will receive a ValidationException.
- ConsistentRead *bool `type:"boolean"`
-
- // The primary key of the first item that this operation will evaluate. Use
- // the value that was returned for LastEvaluatedKey in the previous operation.
- //
- // The data type for ExclusiveStartKey must be String, Number or Binary. No
- // set data types are allowed.
- //
- // In a parallel scan, a Scan request that includes ExclusiveStartKey must specify
- // the same segment whose previous Scan returned the corresponding value of
- // LastEvaluatedKey.
- ExclusiveStartKey map[string]*AttributeValue `type:"map"`
-
- // One or more substitution tokens for attribute names in an expression. The
- // following are some use cases for using ExpressionAttributeNames:
- //
- // * To access an attribute whose name conflicts with a DynamoDB reserved
- // word.
- //
- // * To create a placeholder for repeating occurrences of an attribute name
- // in an expression.
- //
- // * To prevent special characters in an attribute name from being misinterpreted
- // in an expression.
- //
- // Use the # character in an expression to dereference an attribute name. For
- // example, consider the following attribute name:
- //
- // * Percentile
- //
- // The name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide). To work around this, you could specify
- // the following for ExpressionAttributeNames:
- //
- // * {"#P":"Percentile"}
- //
- // You could then use this substitution in an expression, as in this example:
- //
- // * #P = :val
- //
- // Tokens that begin with the : character are expression attribute values, which
- // are placeholders for the actual value at runtime.
- //
- // For more information on expression attribute names, see Specifying Item Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- //
- // Use the : (colon) character in an expression to dereference an attribute
- // value. For example, suppose that you wanted to check whether the value of
- // the ProductStatus attribute was one of the following:
- //
- // Available | Backordered | Discontinued
- //
- // You would first need to specify ExpressionAttributeValues as follows:
- //
- // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
- // }
- //
- // You could then use these values in an expression, such as this:
- //
- // ProductStatus IN (:avail, :back, :disc)
- //
- // For more information on expression attribute values, see Condition Expressions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // A string that contains conditions that DynamoDB applies after the Scan operation,
- // but before the data is returned to you. Items that do not satisfy the FilterExpression
- // criteria are not returned.
- //
- // A FilterExpression is applied after the items have already been read; the
- // process of filtering does not consume any additional read capacity units.
- //
- // For more information, see Filter Expressions (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.FilterExpression)
- // in the Amazon DynamoDB Developer Guide.
- FilterExpression *string `type:"string"`
-
- // The name of a secondary index to scan. This index can be any local secondary
- // index or global secondary index. Note that if you use the IndexName parameter,
- // you must also provide TableName.
- IndexName *string `min:"3" type:"string"`
-
- // The maximum number of items to evaluate (not necessarily the number of matching
- // items). If DynamoDB processes the number of items up to the limit while processing
- // the results, it stops the operation and returns the matching values up to
- // that point, and a key in LastEvaluatedKey to apply in a subsequent operation,
- // so that you can pick up where you left off. Also, if the processed dataset
- // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation
- // and returns the matching values up to the limit, and a key in LastEvaluatedKey
- // to apply in a subsequent operation to continue the operation. For more information,
- // see Working with Queries (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
- // in the Amazon DynamoDB Developer Guide.
- Limit *int64 `min:"1" type:"integer"`
-
- // A string that identifies one or more attributes to retrieve from the specified
- // table or index. These attributes can include scalars, sets, or elements of
- // a JSON document. The attributes in the expression must be separated by commas.
- //
- // If no attribute names are specified, then all attributes will be returned.
- // If any of the requested attributes are not found, they will not appear in
- // the result.
- //
- // For more information, see Specifying Item Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ProjectionExpression *string `type:"string"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // This is a legacy parameter. Use FilterExpression instead. For more information,
- // see ScanFilter (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ScanFilter.html)
- // in the Amazon DynamoDB Developer Guide.
- ScanFilter map[string]*Condition `type:"map"`
-
- // For a parallel Scan request, Segment identifies an individual segment to
- // be scanned by an application worker.
- //
- // Segment IDs are zero-based, so the first segment is always 0. For example,
- // if you want to use four application threads to scan a table or an index,
- // then the first thread specifies a Segment value of 0, the second thread specifies
- // 1, and so on.
- //
- // The value of LastEvaluatedKey returned from a parallel Scan request must
- // be used as ExclusiveStartKey with the same segment ID in a subsequent Scan
- // operation.
- //
- // The value for Segment must be greater than or equal to 0, and less than the
- // value provided for TotalSegments.
- //
- // If you provide Segment, you must also provide TotalSegments.
- Segment *int64 `type:"integer"`
-
- // The attributes to be returned in the result. You can retrieve all item attributes,
- // specific item attributes, the count of matching items, or in the case of
- // an index, some or all of the attributes projected into the index.
- //
- // * ALL_ATTRIBUTES - Returns all of the item attributes from the specified
- // table or index. If you query a local secondary index, then for each matching
- // item in the index, DynamoDB fetches the entire item from the parent table.
- // If the index is configured to project all item attributes, then all of
- // the data can be obtained from the local secondary index, and no fetching
- // is required.
- //
- // * ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves
- // all attributes that have been projected into the index. If the index is
- // configured to project all attributes, this return value is equivalent
- // to specifying ALL_ATTRIBUTES.
- //
- // * COUNT - Returns the number of matching items, rather than the matching
- // items themselves. Note that this uses the same quantity of read capacity
- // units as getting the items, and is subject to the same item size calculations.
- //
- // * SPECIFIC_ATTRIBUTES - Returns only the attributes listed in ProjectionExpression.
- // This return value is equivalent to specifying ProjectionExpression without
- // specifying any value for Select. If you query or scan a local secondary
- // index and request only attributes that are projected into that index,
- // the operation reads only the index and not the table. If any of the requested
- // attributes are not projected into the local secondary index, DynamoDB
- // fetches each of these attributes from the parent table. This extra fetching
- // incurs additional throughput cost and latency. If you query or scan a
- // global secondary index, you can only request attributes that are projected
- // into the index. Global secondary index queries cannot fetch attributes
- // from the parent table.
- //
- // If neither Select nor ProjectionExpression are specified, DynamoDB defaults
- // to ALL_ATTRIBUTES when accessing a table, and ALL_PROJECTED_ATTRIBUTES when
- // accessing an index. You cannot use both Select and ProjectionExpression together
- // in a single request, unless the value for Select is SPECIFIC_ATTRIBUTES.
- // (This usage is equivalent to specifying ProjectionExpression without any
- // value for Select.)
- //
- // If you use the ProjectionExpression parameter, then the value for Select
- // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an
- // error.
- Select *string `type:"string" enum:"Select"`
-
- // The name of the table containing the requested items or if you provide IndexName,
- // the name of the table to which that index belongs.
- //
- // You can also provide the Amazon Resource Name (ARN) of the table in this
- // parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-
- // For a parallel Scan request, TotalSegments represents the total number of
- // segments into which the Scan operation will be divided. The value of TotalSegments
- // corresponds to the number of application workers that will perform the parallel
- // scan. For example, if you want to use four application threads to scan a
- // table or an index, specify a TotalSegments value of 4.
- //
- // The value for TotalSegments must be greater than or equal to 1, and less
- // than or equal to 1000000. If you specify a TotalSegments value of 1, the
- // Scan operation will be sequential rather than parallel.
- //
- // If you specify TotalSegments, you must also specify Segment.
- TotalSegments *int64 `min:"1" type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ScanInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ScanInput"}
- if s.AttributesToGet != nil && len(s.AttributesToGet) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AttributesToGet", 1))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.Limit != nil && *s.Limit < 1 {
- invalidParams.Add(request.NewErrParamMinValue("Limit", 1))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
- if s.TotalSegments != nil && *s.TotalSegments < 1 {
- invalidParams.Add(request.NewErrParamMinValue("TotalSegments", 1))
- }
- if s.ScanFilter != nil {
- for i, v := range s.ScanFilter {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ScanFilter", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributesToGet sets the AttributesToGet field's value.
-func (s *ScanInput) SetAttributesToGet(v []*string) *ScanInput {
- s.AttributesToGet = v
- return s
-}
-
-// SetConditionalOperator sets the ConditionalOperator field's value.
-func (s *ScanInput) SetConditionalOperator(v string) *ScanInput {
- s.ConditionalOperator = &v
- return s
-}
-
-// SetConsistentRead sets the ConsistentRead field's value.
-func (s *ScanInput) SetConsistentRead(v bool) *ScanInput {
- s.ConsistentRead = &v
- return s
-}
-
-// SetExclusiveStartKey sets the ExclusiveStartKey field's value.
-func (s *ScanInput) SetExclusiveStartKey(v map[string]*AttributeValue) *ScanInput {
- s.ExclusiveStartKey = v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *ScanInput) SetExpressionAttributeNames(v map[string]*string) *ScanInput {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *ScanInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *ScanInput {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetFilterExpression sets the FilterExpression field's value.
-func (s *ScanInput) SetFilterExpression(v string) *ScanInput {
- s.FilterExpression = &v
- return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *ScanInput) SetIndexName(v string) *ScanInput {
- s.IndexName = &v
- return s
-}
-
-// SetLimit sets the Limit field's value.
-func (s *ScanInput) SetLimit(v int64) *ScanInput {
- s.Limit = &v
- return s
-}
-
-// SetProjectionExpression sets the ProjectionExpression field's value.
-func (s *ScanInput) SetProjectionExpression(v string) *ScanInput {
- s.ProjectionExpression = &v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *ScanInput) SetReturnConsumedCapacity(v string) *ScanInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetScanFilter sets the ScanFilter field's value.
-func (s *ScanInput) SetScanFilter(v map[string]*Condition) *ScanInput {
- s.ScanFilter = v
- return s
-}
-
-// SetSegment sets the Segment field's value.
-func (s *ScanInput) SetSegment(v int64) *ScanInput {
- s.Segment = &v
- return s
-}
-
-// SetSelect sets the Select field's value.
-func (s *ScanInput) SetSelect(v string) *ScanInput {
- s.Select = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *ScanInput) SetTableName(v string) *ScanInput {
- s.TableName = &v
- return s
-}
-
-// SetTotalSegments sets the TotalSegments field's value.
-func (s *ScanInput) SetTotalSegments(v int64) *ScanInput {
- s.TotalSegments = &v
- return s
-}
-
-// Represents the output of a Scan operation.
-type ScanOutput struct {
- _ struct{} `type:"structure"`
-
- // The capacity units consumed by the Scan operation. The data returned includes
- // the total provisioned throughput consumed, along with statistics for the
- // table and any indexes involved in the operation. ConsumedCapacity is only
- // returned if the ReturnConsumedCapacity parameter was specified. For more
- // information, see Capacity unit consumption for read operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#read-operation-consumption)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // The number of items in the response.
- //
- // If you set ScanFilter in the request, then Count is the number of items returned
- // after the filter was applied, and ScannedCount is the number of matching
- // items before the filter was applied.
- //
- // If you did not use a filter in the request, then Count is the same as ScannedCount.
- Count *int64 `type:"integer"`
-
- // An array of item attributes that match the scan criteria. Each element in
- // this array consists of an attribute name and the value for that attribute.
- Items []map[string]*AttributeValue `type:"list"`
-
- // The primary key of the item where the operation stopped, inclusive of the
- // previous result set. Use this value to start a new operation, excluding this
- // value in the new request.
- //
- // If LastEvaluatedKey is empty, then the "last page" of results has been processed
- // and there is no more data to be retrieved.
- //
- // If LastEvaluatedKey is not empty, it does not necessarily mean that there
- // is more data in the result set. The only way to know when you have reached
- // the end of the result set is when LastEvaluatedKey is empty.
- LastEvaluatedKey map[string]*AttributeValue `type:"map"`
-
- // The number of items evaluated, before any ScanFilter is applied. A high ScannedCount
- // value with few, or no, Count results indicates an inefficient Scan operation.
- // For more information, see Count and ScannedCount (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#Count)
- // in the Amazon DynamoDB Developer Guide.
- //
- // If you did not use a filter in the request, then ScannedCount is the same
- // as Count.
- ScannedCount *int64 `type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ScanOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *ScanOutput) SetConsumedCapacity(v *ConsumedCapacity) *ScanOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetCount sets the Count field's value.
-func (s *ScanOutput) SetCount(v int64) *ScanOutput {
- s.Count = &v
- return s
-}
-
-// SetItems sets the Items field's value.
-func (s *ScanOutput) SetItems(v []map[string]*AttributeValue) *ScanOutput {
- s.Items = v
- return s
-}
-
-// SetLastEvaluatedKey sets the LastEvaluatedKey field's value.
-func (s *ScanOutput) SetLastEvaluatedKey(v map[string]*AttributeValue) *ScanOutput {
- s.LastEvaluatedKey = v
- return s
-}
-
-// SetScannedCount sets the ScannedCount field's value.
-func (s *ScanOutput) SetScannedCount(v int64) *ScanOutput {
- s.ScannedCount = &v
- return s
-}
-
-// Contains the details of the table when the backup was created.
-type SourceTableDetails struct {
- _ struct{} `type:"structure"`
-
- // Controls how you are charged for read and write throughput and how you manage
- // capacity. This setting can be changed later.
- //
- // * PROVISIONED - Sets the read/write capacity mode to PROVISIONED. We recommend
- // using PROVISIONED for predictable workloads.
- //
- // * PAY_PER_REQUEST - Sets the read/write capacity mode to PAY_PER_REQUEST.
- // We recommend using PAY_PER_REQUEST for unpredictable workloads.
- BillingMode *string `type:"string" enum:"BillingMode"`
-
- // Number of items in the table. Note that this is an approximate value.
- ItemCount *int64 `type:"long"`
-
- // Schema of the table.
- //
- // KeySchema is a required field
- KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
- // Sets the maximum number of read and write units for the specified on-demand
- // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
- // or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // Read IOPs and Write IOPS on the table when the backup was created.
- //
- // ProvisionedThroughput is a required field
- ProvisionedThroughput *ProvisionedThroughput `type:"structure" required:"true"`
-
- // ARN of the table for which backup was created.
- TableArn *string `min:"1" type:"string"`
-
- // Time when the source table was created.
- //
- // TableCreationDateTime is a required field
- TableCreationDateTime *time.Time `type:"timestamp" required:"true"`
-
- // Unique identifier for the table for which the backup was created.
- //
- // TableId is a required field
- TableId *string `type:"string" required:"true"`
-
- // The name of the table for which the backup was created.
- //
- // TableName is a required field
- TableName *string `min:"3" type:"string" required:"true"`
-
- // Size of the table in bytes. Note that this is an approximate value.
- TableSizeBytes *int64 `type:"long"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SourceTableDetails) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SourceTableDetails) GoString() string {
- return s.String()
-}
-
-// SetBillingMode sets the BillingMode field's value.
-func (s *SourceTableDetails) SetBillingMode(v string) *SourceTableDetails {
- s.BillingMode = &v
- return s
-}
-
-// SetItemCount sets the ItemCount field's value.
-func (s *SourceTableDetails) SetItemCount(v int64) *SourceTableDetails {
- s.ItemCount = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *SourceTableDetails) SetKeySchema(v []*KeySchemaElement) *SourceTableDetails {
- s.KeySchema = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *SourceTableDetails) SetOnDemandThroughput(v *OnDemandThroughput) *SourceTableDetails {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *SourceTableDetails) SetProvisionedThroughput(v *ProvisionedThroughput) *SourceTableDetails {
- s.ProvisionedThroughput = v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *SourceTableDetails) SetTableArn(v string) *SourceTableDetails {
- s.TableArn = &v
- return s
-}
-
-// SetTableCreationDateTime sets the TableCreationDateTime field's value.
-func (s *SourceTableDetails) SetTableCreationDateTime(v time.Time) *SourceTableDetails {
- s.TableCreationDateTime = &v
- return s
-}
-
-// SetTableId sets the TableId field's value.
-func (s *SourceTableDetails) SetTableId(v string) *SourceTableDetails {
- s.TableId = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *SourceTableDetails) SetTableName(v string) *SourceTableDetails {
- s.TableName = &v
- return s
-}
-
-// SetTableSizeBytes sets the TableSizeBytes field's value.
-func (s *SourceTableDetails) SetTableSizeBytes(v int64) *SourceTableDetails {
- s.TableSizeBytes = &v
- return s
-}
-
-// Contains the details of the features enabled on the table when the backup
-// was created. For example, LSIs, GSIs, streams, TTL.
-type SourceTableFeatureDetails struct {
- _ struct{} `type:"structure"`
-
- // Represents the GSI properties for the table when the backup was created.
- // It includes the IndexName, KeySchema, Projection, and ProvisionedThroughput
- // for the GSIs on the table at the time of backup.
- GlobalSecondaryIndexes []*GlobalSecondaryIndexInfo `type:"list"`
-
- // Represents the LSI properties for the table when the backup was created.
- // It includes the IndexName, KeySchema and Projection for the LSIs on the table
- // at the time of backup.
- LocalSecondaryIndexes []*LocalSecondaryIndexInfo `type:"list"`
-
- // The description of the server-side encryption status on the table when the
- // backup was created.
- SSEDescription *SSEDescription `type:"structure"`
-
- // Stream settings on the table when the backup was created.
- StreamDescription *StreamSpecification `type:"structure"`
-
- // Time to Live settings on the table when the backup was created.
- TimeToLiveDescription *TimeToLiveDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SourceTableFeatureDetails) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SourceTableFeatureDetails) GoString() string {
- return s.String()
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *SourceTableFeatureDetails) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexInfo) *SourceTableFeatureDetails {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
-func (s *SourceTableFeatureDetails) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexInfo) *SourceTableFeatureDetails {
- s.LocalSecondaryIndexes = v
- return s
-}
-
-// SetSSEDescription sets the SSEDescription field's value.
-func (s *SourceTableFeatureDetails) SetSSEDescription(v *SSEDescription) *SourceTableFeatureDetails {
- s.SSEDescription = v
- return s
-}
-
-// SetStreamDescription sets the StreamDescription field's value.
-func (s *SourceTableFeatureDetails) SetStreamDescription(v *StreamSpecification) *SourceTableFeatureDetails {
- s.StreamDescription = v
- return s
-}
-
-// SetTimeToLiveDescription sets the TimeToLiveDescription field's value.
-func (s *SourceTableFeatureDetails) SetTimeToLiveDescription(v *TimeToLiveDescription) *SourceTableFeatureDetails {
- s.TimeToLiveDescription = v
- return s
-}
-
-// Represents the DynamoDB Streams configuration for a table in DynamoDB.
-type StreamSpecification struct {
- _ struct{} `type:"structure"`
-
- // Indicates whether DynamoDB Streams is enabled (true) or disabled (false)
- // on the table.
- //
- // StreamEnabled is a required field
- StreamEnabled *bool `type:"boolean" required:"true"`
-
- // When an item in the table is modified, StreamViewType determines what information
- // is written to the stream for this table. Valid values for StreamViewType
- // are:
- //
- // * KEYS_ONLY - Only the key attributes of the modified item are written
- // to the stream.
- //
- // * NEW_IMAGE - The entire item, as it appears after it was modified, is
- // written to the stream.
- //
- // * OLD_IMAGE - The entire item, as it appeared before it was modified,
- // is written to the stream.
- //
- // * NEW_AND_OLD_IMAGES - Both the new and the old item images of the item
- // are written to the stream.
- StreamViewType *string `type:"string" enum:"StreamViewType"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StreamSpecification) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StreamSpecification) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *StreamSpecification) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "StreamSpecification"}
- if s.StreamEnabled == nil {
- invalidParams.Add(request.NewErrParamRequired("StreamEnabled"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetStreamEnabled sets the StreamEnabled field's value.
-func (s *StreamSpecification) SetStreamEnabled(v bool) *StreamSpecification {
- s.StreamEnabled = &v
- return s
-}
-
-// SetStreamViewType sets the StreamViewType field's value.
-func (s *StreamSpecification) SetStreamViewType(v string) *StreamSpecification {
- s.StreamViewType = &v
- return s
-}
-
-// A target table with the specified name already exists.
-type TableAlreadyExistsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableAlreadyExistsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableAlreadyExistsException) GoString() string {
- return s.String()
-}
-
-func newErrorTableAlreadyExistsException(v protocol.ResponseMetadata) error {
- return &TableAlreadyExistsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *TableAlreadyExistsException) Code() string {
- return "TableAlreadyExistsException"
-}
-
-// Message returns the exception's message.
-func (s *TableAlreadyExistsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *TableAlreadyExistsException) OrigErr() error {
- return nil
-}
-
-func (s *TableAlreadyExistsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *TableAlreadyExistsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *TableAlreadyExistsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Represents the auto scaling configuration for a global table.
-type TableAutoScalingDescription struct {
- _ struct{} `type:"structure"`
-
- // Represents replicas of the global table.
- Replicas []*ReplicaAutoScalingDescription `type:"list"`
-
- // The name of the table.
- TableName *string `min:"3" type:"string"`
-
- // The current state of the table:
- //
- // * CREATING - The table is being created.
- //
- // * UPDATING - The table is being updated.
- //
- // * DELETING - The table is being deleted.
- //
- // * ACTIVE - The table is ready for use.
- TableStatus *string `type:"string" enum:"TableStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableAutoScalingDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableAutoScalingDescription) GoString() string {
- return s.String()
-}
-
-// SetReplicas sets the Replicas field's value.
-func (s *TableAutoScalingDescription) SetReplicas(v []*ReplicaAutoScalingDescription) *TableAutoScalingDescription {
- s.Replicas = v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *TableAutoScalingDescription) SetTableName(v string) *TableAutoScalingDescription {
- s.TableName = &v
- return s
-}
-
-// SetTableStatus sets the TableStatus field's value.
-func (s *TableAutoScalingDescription) SetTableStatus(v string) *TableAutoScalingDescription {
- s.TableStatus = &v
- return s
-}
-
-// Contains details of the table class.
-type TableClassSummary struct {
- _ struct{} `type:"structure"`
-
- // The date and time at which the table class was last updated.
- LastUpdateDateTime *time.Time `type:"timestamp"`
-
- // The table class of the specified table. Valid values are STANDARD and STANDARD_INFREQUENT_ACCESS.
- TableClass *string `type:"string" enum:"TableClass"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableClassSummary) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableClassSummary) GoString() string {
- return s.String()
-}
-
-// SetLastUpdateDateTime sets the LastUpdateDateTime field's value.
-func (s *TableClassSummary) SetLastUpdateDateTime(v time.Time) *TableClassSummary {
- s.LastUpdateDateTime = &v
- return s
-}
-
-// SetTableClass sets the TableClass field's value.
-func (s *TableClassSummary) SetTableClass(v string) *TableClassSummary {
- s.TableClass = &v
- return s
-}
-
-// The parameters for the table created as part of the import operation.
-type TableCreationParameters struct {
- _ struct{} `type:"structure"`
-
- // The attributes of the table created as part of the import operation.
- //
- // AttributeDefinitions is a required field
- AttributeDefinitions []*AttributeDefinition `type:"list" required:"true"`
-
- // The billing mode for provisioning the table created as part of the import
- // operation.
- BillingMode *string `type:"string" enum:"BillingMode"`
-
- // The Global Secondary Indexes (GSI) of the table to be created as part of
- // the import operation.
- GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"`
-
- // The primary key and option sort key of the table created as part of the import
- // operation.
- //
- // KeySchema is a required field
- KeySchema []*KeySchemaElement `min:"1" type:"list" required:"true"`
-
- // Sets the maximum number of read and write units for the specified on-demand
- // table. If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
- // or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // Represents the provisioned throughput settings for a specified table or index.
- // The settings can be modified using the UpdateTable operation.
- //
- // For current minimum and maximum provisioned throughput values, see Service,
- // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
- // in the Amazon DynamoDB Developer Guide.
- ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
-
- // Represents the settings used to enable server-side encryption.
- SSESpecification *SSESpecification `type:"structure"`
-
- // The name of the table created as part of the import operation.
- //
- // TableName is a required field
- TableName *string `min:"3" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableCreationParameters) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableCreationParameters) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TableCreationParameters) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "TableCreationParameters"}
- if s.AttributeDefinitions == nil {
- invalidParams.Add(request.NewErrParamRequired("AttributeDefinitions"))
- }
- if s.KeySchema == nil {
- invalidParams.Add(request.NewErrParamRequired("KeySchema"))
- }
- if s.KeySchema != nil && len(s.KeySchema) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("KeySchema", 1))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
- }
- if s.AttributeDefinitions != nil {
- for i, v := range s.AttributeDefinitions {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.GlobalSecondaryIndexes != nil {
- for i, v := range s.GlobalSecondaryIndexes {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.KeySchema != nil {
- for i, v := range s.KeySchema {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "KeySchema", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvisionedThroughput != nil {
- if err := s.ProvisionedThroughput.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributeDefinitions sets the AttributeDefinitions field's value.
-func (s *TableCreationParameters) SetAttributeDefinitions(v []*AttributeDefinition) *TableCreationParameters {
- s.AttributeDefinitions = v
- return s
-}
-
-// SetBillingMode sets the BillingMode field's value.
-func (s *TableCreationParameters) SetBillingMode(v string) *TableCreationParameters {
- s.BillingMode = &v
- return s
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *TableCreationParameters) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *TableCreationParameters {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *TableCreationParameters) SetKeySchema(v []*KeySchemaElement) *TableCreationParameters {
- s.KeySchema = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *TableCreationParameters) SetOnDemandThroughput(v *OnDemandThroughput) *TableCreationParameters {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *TableCreationParameters) SetProvisionedThroughput(v *ProvisionedThroughput) *TableCreationParameters {
- s.ProvisionedThroughput = v
- return s
-}
-
-// SetSSESpecification sets the SSESpecification field's value.
-func (s *TableCreationParameters) SetSSESpecification(v *SSESpecification) *TableCreationParameters {
- s.SSESpecification = v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *TableCreationParameters) SetTableName(v string) *TableCreationParameters {
- s.TableName = &v
- return s
-}
-
-// Represents the properties of a table.
-type TableDescription struct {
- _ struct{} `type:"structure"`
-
- // Contains information about the table archive.
- ArchivalSummary *ArchivalSummary `type:"structure"`
-
- // An array of AttributeDefinition objects. Each of these objects describes
- // one attribute in the table and index key schema.
- //
- // Each AttributeDefinition object in this array is composed of:
- //
- // * AttributeName - The name of the attribute.
- //
- // * AttributeType - The data type for the attribute.
- AttributeDefinitions []*AttributeDefinition `type:"list"`
-
- // Contains the details for the read/write capacity mode.
- BillingModeSummary *BillingModeSummary `type:"structure"`
-
- // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/)
- // format.
- CreationDateTime *time.Time `type:"timestamp"`
-
- // Indicates whether deletion protection is enabled (true) or disabled (false)
- // on the table.
- DeletionProtectionEnabled *bool `type:"boolean"`
-
- // The global secondary indexes, if any, on the table. Each index is scoped
- // to a given partition key value. Each element is composed of:
- //
- // * Backfilling - If true, then the index is currently in the backfilling
- // phase. Backfilling occurs only when a new global secondary index is added
- // to the table. It is the process by which DynamoDB populates the new index
- // with data from the table. (This attribute does not appear for indexes
- // that were created during a CreateTable operation.) You can delete an index
- // that is being created during the Backfilling phase when IndexStatus is
- // set to CREATING and Backfilling is true. You can't delete the index that
- // is being created when IndexStatus is set to CREATING and Backfilling is
- // false. (This attribute does not appear for indexes that were created during
- // a CreateTable operation.)
- //
- // * IndexName - The name of the global secondary index.
- //
- // * IndexSizeBytes - The total size of the global secondary index, in bytes.
- // DynamoDB updates this value approximately every six hours. Recent changes
- // might not be reflected in this value.
- //
- // * IndexStatus - The current status of the global secondary index: CREATING
- // - The index is being created. UPDATING - The index is being updated. DELETING
- // - The index is being deleted. ACTIVE - The index is ready for use.
- //
- // * ItemCount - The number of items in the global secondary index. DynamoDB
- // updates this value approximately every six hours. Recent changes might
- // not be reflected in this value.
- //
- // * KeySchema - Specifies the complete index key schema. The attribute names
- // in the key schema must be between 1 and 255 characters (inclusive). The
- // key schema must begin with the same partition key as the table.
- //
- // * Projection - Specifies attributes that are copied (projected) from the
- // table into the index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected. Each attribute
- // specification is composed of: ProjectionType - One of the following: KEYS_ONLY
- // - Only the index and primary keys are projected into the index. INCLUDE
- // - In addition to the attributes described in KEYS_ONLY, the secondary
- // index will include other non-key attributes that you specify. ALL - All
- // of the table attributes are projected into the index. NonKeyAttributes
- // - A list of one or more non-key attribute names that are projected into
- // the secondary index. The total count of attributes provided in NonKeyAttributes,
- // summed across all of the secondary indexes, must not exceed 100. If you
- // project the same attribute into two different indexes, this counts as
- // two distinct attributes when determining the total.
- //
- // * ProvisionedThroughput - The provisioned throughput settings for the
- // global secondary index, consisting of read and write capacity units, along
- // with data about increases and decreases.
- //
- // If the table is in the DELETING state, no information about indexes will
- // be returned.
- GlobalSecondaryIndexes []*GlobalSecondaryIndexDescription `type:"list"`
-
- // Represents the version of global tables (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GlobalTables.html)
- // in use, if the table is replicated across Amazon Web Services Regions.
- GlobalTableVersion *string `type:"string"`
-
- // The number of items in the specified table. DynamoDB updates this value approximately
- // every six hours. Recent changes might not be reflected in this value.
- ItemCount *int64 `type:"long"`
-
- // The primary key structure for the table. Each KeySchemaElement consists of:
- //
- // * AttributeName - The name of the attribute.
- //
- // * KeyType - The role of the attribute: HASH - partition key RANGE - sort
- // key The partition key of an item is also known as its hash attribute.
- // The term "hash attribute" derives from DynamoDB's usage of an internal
- // hash function to evenly distribute data items across partitions, based
- // on their partition key values. The sort key of an item is also known as
- // its range attribute. The term "range attribute" derives from the way DynamoDB
- // stores items with the same partition key physically close together, in
- // sorted order by the sort key value.
- //
- // For more information about primary keys, see Primary Key (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey)
- // in the Amazon DynamoDB Developer Guide.
- KeySchema []*KeySchemaElement `min:"1" type:"list"`
-
- // The Amazon Resource Name (ARN) that uniquely identifies the latest stream
- // for this table.
- LatestStreamArn *string `min:"37" type:"string"`
-
- // A timestamp, in ISO 8601 format, for this stream.
- //
- // Note that LatestStreamLabel is not a unique identifier for the stream, because
- // it is possible that a stream from another table might have the same timestamp.
- // However, the combination of the following three elements is guaranteed to
- // be unique:
- //
- // * Amazon Web Services customer ID
- //
- // * Table name
- //
- // * StreamLabel
- LatestStreamLabel *string `type:"string"`
-
- // Represents one or more local secondary indexes on the table. Each index is
- // scoped to a given partition key value. Tables with one or more local secondary
- // indexes are subject to an item collection size limit, where the amount of
- // data within a given item collection cannot exceed 10 GB. Each element is
- // composed of:
- //
- // * IndexName - The name of the local secondary index.
- //
- // * KeySchema - Specifies the complete index key schema. The attribute names
- // in the key schema must be between 1 and 255 characters (inclusive). The
- // key schema must begin with the same partition key as the table.
- //
- // * Projection - Specifies attributes that are copied (projected) from the
- // table into the index. These are in addition to the primary key attributes
- // and index key attributes, which are automatically projected. Each attribute
- // specification is composed of: ProjectionType - One of the following: KEYS_ONLY
- // - Only the index and primary keys are projected into the index. INCLUDE
- // - Only the specified table attributes are projected into the index. The
- // list of projected attributes is in NonKeyAttributes. ALL - All of the
- // table attributes are projected into the index. NonKeyAttributes - A list
- // of one or more non-key attribute names that are projected into the secondary
- // index. The total count of attributes provided in NonKeyAttributes, summed
- // across all of the secondary indexes, must not exceed 100. If you project
- // the same attribute into two different indexes, this counts as two distinct
- // attributes when determining the total.
- //
- // * IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB
- // updates this value approximately every six hours. Recent changes might
- // not be reflected in this value.
- //
- // * ItemCount - Represents the number of items in the index. DynamoDB updates
- // this value approximately every six hours. Recent changes might not be
- // reflected in this value.
- //
- // If the table is in the DELETING state, no information about indexes will
- // be returned.
- LocalSecondaryIndexes []*LocalSecondaryIndexDescription `type:"list"`
-
- // The maximum number of read and write units for the specified on-demand table.
- // If you use this parameter, you must specify MaxReadRequestUnits, MaxWriteRequestUnits,
- // or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // The provisioned throughput settings for the table, consisting of read and
- // write capacity units, along with data about increases and decreases.
- ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`
-
- // Represents replicas of the table.
- Replicas []*ReplicaDescription `type:"list"`
-
- // Contains details for the restore.
- RestoreSummary *RestoreSummary `type:"structure"`
-
- // The description of the server-side encryption status on the specified table.
- SSEDescription *SSEDescription `type:"structure"`
-
- // The current DynamoDB Streams configuration for the table.
- StreamSpecification *StreamSpecification `type:"structure"`
-
- // The Amazon Resource Name (ARN) that uniquely identifies the table.
- TableArn *string `type:"string"`
-
- // Contains details of the table class.
- TableClassSummary *TableClassSummary `type:"structure"`
-
- // Unique identifier for the table for which the backup was created.
- TableId *string `type:"string"`
-
- // The name of the table.
- TableName *string `min:"3" type:"string"`
-
- // The total size of the specified table, in bytes. DynamoDB updates this value
- // approximately every six hours. Recent changes might not be reflected in this
- // value.
- TableSizeBytes *int64 `type:"long"`
-
- // The current state of the table:
- //
- // * CREATING - The table is being created.
- //
- // * UPDATING - The table/index configuration is being updated. The table/index
- // remains available for data operations when UPDATING.
- //
- // * DELETING - The table is being deleted.
- //
- // * ACTIVE - The table is ready for use.
- //
- // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The KMS key used to encrypt the
- // table in inaccessible. Table operations may fail due to failure to use
- // the KMS key. DynamoDB will initiate the table archival process when a
- // table's KMS key remains inaccessible for more than seven days.
- //
- // * ARCHIVING - The table is being archived. Operations are not allowed
- // until archival is complete.
- //
- // * ARCHIVED - The table has been archived. See the ArchivalReason for more
- // information.
- TableStatus *string `type:"string" enum:"TableStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableDescription) GoString() string {
- return s.String()
-}
-
-// SetArchivalSummary sets the ArchivalSummary field's value.
-func (s *TableDescription) SetArchivalSummary(v *ArchivalSummary) *TableDescription {
- s.ArchivalSummary = v
- return s
-}
-
-// SetAttributeDefinitions sets the AttributeDefinitions field's value.
-func (s *TableDescription) SetAttributeDefinitions(v []*AttributeDefinition) *TableDescription {
- s.AttributeDefinitions = v
- return s
-}
-
-// SetBillingModeSummary sets the BillingModeSummary field's value.
-func (s *TableDescription) SetBillingModeSummary(v *BillingModeSummary) *TableDescription {
- s.BillingModeSummary = v
- return s
-}
-
-// SetCreationDateTime sets the CreationDateTime field's value.
-func (s *TableDescription) SetCreationDateTime(v time.Time) *TableDescription {
- s.CreationDateTime = &v
- return s
-}
-
-// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value.
-func (s *TableDescription) SetDeletionProtectionEnabled(v bool) *TableDescription {
- s.DeletionProtectionEnabled = &v
- return s
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *TableDescription) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndexDescription) *TableDescription {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetGlobalTableVersion sets the GlobalTableVersion field's value.
-func (s *TableDescription) SetGlobalTableVersion(v string) *TableDescription {
- s.GlobalTableVersion = &v
- return s
-}
-
-// SetItemCount sets the ItemCount field's value.
-func (s *TableDescription) SetItemCount(v int64) *TableDescription {
- s.ItemCount = &v
- return s
-}
-
-// SetKeySchema sets the KeySchema field's value.
-func (s *TableDescription) SetKeySchema(v []*KeySchemaElement) *TableDescription {
- s.KeySchema = v
- return s
-}
-
-// SetLatestStreamArn sets the LatestStreamArn field's value.
-func (s *TableDescription) SetLatestStreamArn(v string) *TableDescription {
- s.LatestStreamArn = &v
- return s
-}
-
-// SetLatestStreamLabel sets the LatestStreamLabel field's value.
-func (s *TableDescription) SetLatestStreamLabel(v string) *TableDescription {
- s.LatestStreamLabel = &v
- return s
-}
-
-// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value.
-func (s *TableDescription) SetLocalSecondaryIndexes(v []*LocalSecondaryIndexDescription) *TableDescription {
- s.LocalSecondaryIndexes = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *TableDescription) SetOnDemandThroughput(v *OnDemandThroughput) *TableDescription {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *TableDescription) SetProvisionedThroughput(v *ProvisionedThroughputDescription) *TableDescription {
- s.ProvisionedThroughput = v
- return s
-}
-
-// SetReplicas sets the Replicas field's value.
-func (s *TableDescription) SetReplicas(v []*ReplicaDescription) *TableDescription {
- s.Replicas = v
- return s
-}
-
-// SetRestoreSummary sets the RestoreSummary field's value.
-func (s *TableDescription) SetRestoreSummary(v *RestoreSummary) *TableDescription {
- s.RestoreSummary = v
- return s
-}
-
-// SetSSEDescription sets the SSEDescription field's value.
-func (s *TableDescription) SetSSEDescription(v *SSEDescription) *TableDescription {
- s.SSEDescription = v
- return s
-}
-
-// SetStreamSpecification sets the StreamSpecification field's value.
-func (s *TableDescription) SetStreamSpecification(v *StreamSpecification) *TableDescription {
- s.StreamSpecification = v
- return s
-}
-
-// SetTableArn sets the TableArn field's value.
-func (s *TableDescription) SetTableArn(v string) *TableDescription {
- s.TableArn = &v
- return s
-}
-
-// SetTableClassSummary sets the TableClassSummary field's value.
-func (s *TableDescription) SetTableClassSummary(v *TableClassSummary) *TableDescription {
- s.TableClassSummary = v
- return s
-}
-
-// SetTableId sets the TableId field's value.
-func (s *TableDescription) SetTableId(v string) *TableDescription {
- s.TableId = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *TableDescription) SetTableName(v string) *TableDescription {
- s.TableName = &v
- return s
-}
-
-// SetTableSizeBytes sets the TableSizeBytes field's value.
-func (s *TableDescription) SetTableSizeBytes(v int64) *TableDescription {
- s.TableSizeBytes = &v
- return s
-}
-
-// SetTableStatus sets the TableStatus field's value.
-func (s *TableDescription) SetTableStatus(v string) *TableDescription {
- s.TableStatus = &v
- return s
-}
-
-// A target table with the specified name is either being created or deleted.
-type TableInUseException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableInUseException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableInUseException) GoString() string {
- return s.String()
-}
-
-func newErrorTableInUseException(v protocol.ResponseMetadata) error {
- return &TableInUseException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *TableInUseException) Code() string {
- return "TableInUseException"
-}
-
-// Message returns the exception's message.
-func (s *TableInUseException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *TableInUseException) OrigErr() error {
- return nil
-}
-
-func (s *TableInUseException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *TableInUseException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *TableInUseException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// A source table with the name TableName does not currently exist within the
-// subscriber's account or the subscriber is operating in the wrong Amazon Web
-// Services Region.
-type TableNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TableNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorTableNotFoundException(v protocol.ResponseMetadata) error {
- return &TableNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *TableNotFoundException) Code() string {
- return "TableNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *TableNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *TableNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *TableNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *TableNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *TableNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Describes a tag. A tag is a key-value pair. You can add up to 50 tags to
-// a single DynamoDB table.
-//
-// Amazon Web Services-assigned tag names and values are automatically assigned
-// the aws: prefix, which the user cannot assign. Amazon Web Services-assigned
-// tag names do not count towards the tag limit of 50. User-assigned tag names
-// have the prefix user: in the Cost Allocation Report. You cannot backdate
-// the application of a tag.
-//
-// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
-// in the Amazon DynamoDB Developer Guide.
-type Tag struct {
- _ struct{} `type:"structure"`
-
- // The key of the tag. Tag keys are case sensitive. Each DynamoDB table can
- // only have up to one tag with the same key. If you try to add an existing
- // tag (same key), the existing tag value will be updated to the new value.
- //
- // Key is a required field
- Key *string `min:"1" type:"string" required:"true"`
-
- // The value of the tag. Tag values are case-sensitive and can be null.
- //
- // Value is a required field
- Value *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Tag) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Tag) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Tag) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Tag"}
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.Key != nil && len(*s.Key) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Key", 1))
- }
- if s.Value == nil {
- invalidParams.Add(request.NewErrParamRequired("Value"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetKey sets the Key field's value.
-func (s *Tag) SetKey(v string) *Tag {
- s.Key = &v
- return s
-}
-
-// SetValue sets the Value field's value.
-func (s *Tag) SetValue(v string) *Tag {
- s.Value = &v
- return s
-}
-
-type TagResourceInput struct {
- _ struct{} `type:"structure"`
-
- // Identifies the Amazon DynamoDB resource to which tags should be added. This
- // value is an Amazon Resource Name (ARN).
- //
- // ResourceArn is a required field
- ResourceArn *string `min:"1" type:"string" required:"true"`
-
- // The tags to be assigned to the Amazon DynamoDB resource.
- //
- // Tags is a required field
- Tags []*Tag `type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TagResourceInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TagResourceInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TagResourceInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"}
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
- if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
- }
- if s.Tags == nil {
- invalidParams.Add(request.NewErrParamRequired("Tags"))
- }
- if s.Tags != nil {
- for i, v := range s.Tags {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *TagResourceInput) SetResourceArn(v string) *TagResourceInput {
- s.ResourceArn = &v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *TagResourceInput) SetTags(v []*Tag) *TagResourceInput {
- s.Tags = v
- return s
-}
-
-type TagResourceOutput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TagResourceOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TagResourceOutput) GoString() string {
- return s.String()
-}
-
-// The description of the Time to Live (TTL) status on the specified table.
-type TimeToLiveDescription struct {
- _ struct{} `type:"structure"`
-
- // The name of the TTL attribute for items in the table.
- AttributeName *string `min:"1" type:"string"`
-
- // The TTL status for the table.
- TimeToLiveStatus *string `type:"string" enum:"TimeToLiveStatus"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TimeToLiveDescription) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TimeToLiveDescription) GoString() string {
- return s.String()
-}
-
-// SetAttributeName sets the AttributeName field's value.
-func (s *TimeToLiveDescription) SetAttributeName(v string) *TimeToLiveDescription {
- s.AttributeName = &v
- return s
-}
-
-// SetTimeToLiveStatus sets the TimeToLiveStatus field's value.
-func (s *TimeToLiveDescription) SetTimeToLiveStatus(v string) *TimeToLiveDescription {
- s.TimeToLiveStatus = &v
- return s
-}
-
-// Represents the settings used to enable or disable Time to Live (TTL) for
-// the specified table.
-type TimeToLiveSpecification struct {
- _ struct{} `type:"structure"`
-
- // The name of the TTL attribute used to store the expiration time for items
- // in the table.
- //
- // AttributeName is a required field
- AttributeName *string `min:"1" type:"string" required:"true"`
-
- // Indicates whether TTL is to be enabled (true) or disabled (false) on the
- // table.
- //
- // Enabled is a required field
- Enabled *bool `type:"boolean" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TimeToLiveSpecification) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TimeToLiveSpecification) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TimeToLiveSpecification) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "TimeToLiveSpecification"}
- if s.AttributeName == nil {
- invalidParams.Add(request.NewErrParamRequired("AttributeName"))
- }
- if s.AttributeName != nil && len(*s.AttributeName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("AttributeName", 1))
- }
- if s.Enabled == nil {
- invalidParams.Add(request.NewErrParamRequired("Enabled"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributeName sets the AttributeName field's value.
-func (s *TimeToLiveSpecification) SetAttributeName(v string) *TimeToLiveSpecification {
- s.AttributeName = &v
- return s
-}
-
-// SetEnabled sets the Enabled field's value.
-func (s *TimeToLiveSpecification) SetEnabled(v bool) *TimeToLiveSpecification {
- s.Enabled = &v
- return s
-}
-
-// Specifies an item to be retrieved as part of the transaction.
-type TransactGetItem struct {
- _ struct{} `type:"structure"`
-
- // Contains the primary key that identifies the item to get, together with the
- // name of the table that contains the item, and optionally the specific attributes
- // of the item to retrieve.
- //
- // Get is a required field
- Get *Get `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactGetItem) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactGetItem) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TransactGetItem) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "TransactGetItem"}
- if s.Get == nil {
- invalidParams.Add(request.NewErrParamRequired("Get"))
- }
- if s.Get != nil {
- if err := s.Get.Validate(); err != nil {
- invalidParams.AddNested("Get", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGet sets the Get field's value.
-func (s *TransactGetItem) SetGet(v *Get) *TransactGetItem {
- s.Get = v
- return s
-}
-
-type TransactGetItemsInput struct {
- _ struct{} `type:"structure"`
-
- // A value of TOTAL causes consumed capacity information to be returned, and
- // a value of NONE prevents that information from being returned. No other value
- // is valid.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // An ordered array of up to 100 TransactGetItem objects, each of which contains
- // a Get structure.
- //
- // TransactItems is a required field
- TransactItems []*TransactGetItem `min:"1" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactGetItemsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactGetItemsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TransactGetItemsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "TransactGetItemsInput"}
- if s.TransactItems == nil {
- invalidParams.Add(request.NewErrParamRequired("TransactItems"))
- }
- if s.TransactItems != nil && len(s.TransactItems) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1))
- }
- if s.TransactItems != nil {
- for i, v := range s.TransactItems {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *TransactGetItemsInput) SetReturnConsumedCapacity(v string) *TransactGetItemsInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetTransactItems sets the TransactItems field's value.
-func (s *TransactGetItemsInput) SetTransactItems(v []*TransactGetItem) *TransactGetItemsInput {
- s.TransactItems = v
- return s
-}
-
-type TransactGetItemsOutput struct {
- _ struct{} `type:"structure"`
-
- // If the ReturnConsumedCapacity value was TOTAL, this is an array of ConsumedCapacity
- // objects, one for each table addressed by TransactGetItem objects in the TransactItems
- // parameter. These ConsumedCapacity objects report the read-capacity units
- // consumed by the TransactGetItems call in that table.
- ConsumedCapacity []*ConsumedCapacity `type:"list"`
-
- // An ordered array of up to 100 ItemResponse objects, each of which corresponds
- // to the TransactGetItem object in the same position in the TransactItems array.
- // Each ItemResponse object contains a Map of the name-value pairs that are
- // the projected attributes of the requested item.
- //
- // If a requested item could not be retrieved, the corresponding ItemResponse
- // object is Null, or if the requested item has no projected attributes, the
- // corresponding ItemResponse object is an empty Map.
- Responses []*ItemResponse `min:"1" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactGetItemsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactGetItemsOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *TransactGetItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactGetItemsOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetResponses sets the Responses field's value.
-func (s *TransactGetItemsOutput) SetResponses(v []*ItemResponse) *TransactGetItemsOutput {
- s.Responses = v
- return s
-}
-
-// A list of requests that can perform update, put, delete, or check operations
-// on multiple items in one or more tables atomically.
-type TransactWriteItem struct {
- _ struct{} `type:"structure"`
-
- // A request to perform a check item operation.
- ConditionCheck *ConditionCheck `type:"structure"`
-
- // A request to perform a DeleteItem operation.
- Delete *Delete `type:"structure"`
-
- // A request to perform a PutItem operation.
- Put *Put `type:"structure"`
-
- // A request to perform an UpdateItem operation.
- Update *Update `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactWriteItem) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactWriteItem) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TransactWriteItem) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "TransactWriteItem"}
- if s.ConditionCheck != nil {
- if err := s.ConditionCheck.Validate(); err != nil {
- invalidParams.AddNested("ConditionCheck", err.(request.ErrInvalidParams))
- }
- }
- if s.Delete != nil {
- if err := s.Delete.Validate(); err != nil {
- invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
- }
- }
- if s.Put != nil {
- if err := s.Put.Validate(); err != nil {
- invalidParams.AddNested("Put", err.(request.ErrInvalidParams))
- }
- }
- if s.Update != nil {
- if err := s.Update.Validate(); err != nil {
- invalidParams.AddNested("Update", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConditionCheck sets the ConditionCheck field's value.
-func (s *TransactWriteItem) SetConditionCheck(v *ConditionCheck) *TransactWriteItem {
- s.ConditionCheck = v
- return s
-}
-
-// SetDelete sets the Delete field's value.
-func (s *TransactWriteItem) SetDelete(v *Delete) *TransactWriteItem {
- s.Delete = v
- return s
-}
-
-// SetPut sets the Put field's value.
-func (s *TransactWriteItem) SetPut(v *Put) *TransactWriteItem {
- s.Put = v
- return s
-}
-
-// SetUpdate sets the Update field's value.
-func (s *TransactWriteItem) SetUpdate(v *Update) *TransactWriteItem {
- s.Update = v
- return s
-}
-
-type TransactWriteItemsInput struct {
- _ struct{} `type:"structure"`
-
- // Providing a ClientRequestToken makes the call to TransactWriteItems idempotent,
- // meaning that multiple identical calls have the same effect as one single
- // call.
- //
- // Although multiple identical calls using the same client request token produce
- // the same result on the server (no side effects), the responses to the calls
- // might not be the same. If the ReturnConsumedCapacity parameter is set, then
- // the initial TransactWriteItems call returns the amount of write capacity
- // units consumed in making the changes. Subsequent TransactWriteItems calls
- // with the same client token return the number of read capacity units consumed
- // in reading the item.
- //
- // A client request token is valid for 10 minutes after the first request that
- // uses it is completed. After 10 minutes, any request with the same client
- // token is treated as a new request. Do not resubmit the same request with
- // the same client token for more than 10 minutes, or the result might not be
- // idempotent.
- //
- // If you submit a request with the same client token but a change in other
- // parameters within the 10-minute idempotency window, DynamoDB returns an IdempotentParameterMismatch
- // exception.
- ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // Determines whether item collection metrics are returned. If set to SIZE,
- // the response includes statistics about item collections (if any), that were
- // modified during the operation and are returned in the response. If set to
- // NONE (the default), no statistics are returned.
- ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
-
- // An ordered array of up to 100 TransactWriteItem objects, each of which contains
- // a ConditionCheck, Put, Update, or Delete object. These can operate on items
- // in different tables, but the tables must reside in the same Amazon Web Services
- // account and Region, and no two of them can operate on the same item.
- //
- // TransactItems is a required field
- TransactItems []*TransactWriteItem `min:"1" type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactWriteItemsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactWriteItemsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *TransactWriteItemsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "TransactWriteItemsInput"}
- if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
- }
- if s.TransactItems == nil {
- invalidParams.Add(request.NewErrParamRequired("TransactItems"))
- }
- if s.TransactItems != nil && len(s.TransactItems) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TransactItems", 1))
- }
- if s.TransactItems != nil {
- for i, v := range s.TransactItems {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactItems", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetClientRequestToken sets the ClientRequestToken field's value.
-func (s *TransactWriteItemsInput) SetClientRequestToken(v string) *TransactWriteItemsInput {
- s.ClientRequestToken = &v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *TransactWriteItemsInput) SetReturnConsumedCapacity(v string) *TransactWriteItemsInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
-func (s *TransactWriteItemsInput) SetReturnItemCollectionMetrics(v string) *TransactWriteItemsInput {
- s.ReturnItemCollectionMetrics = &v
- return s
-}
-
-// SetTransactItems sets the TransactItems field's value.
-func (s *TransactWriteItemsInput) SetTransactItems(v []*TransactWriteItem) *TransactWriteItemsInput {
- s.TransactItems = v
- return s
-}
-
-type TransactWriteItemsOutput struct {
- _ struct{} `type:"structure"`
-
- // The capacity units consumed by the entire TransactWriteItems operation. The
- // values of the list are ordered according to the ordering of the TransactItems
- // request parameter.
- ConsumedCapacity []*ConsumedCapacity `type:"list"`
-
- // A list of tables that were processed by TransactWriteItems and, for each
- // table, information about any item collections that were affected by individual
- // UpdateItem, PutItem, or DeleteItem operations.
- ItemCollectionMetrics map[string][]*ItemCollectionMetrics `type:"map"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactWriteItemsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactWriteItemsOutput) GoString() string {
- return s.String()
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *TransactWriteItemsOutput) SetConsumedCapacity(v []*ConsumedCapacity) *TransactWriteItemsOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
-func (s *TransactWriteItemsOutput) SetItemCollectionMetrics(v map[string][]*ItemCollectionMetrics) *TransactWriteItemsOutput {
- s.ItemCollectionMetrics = v
- return s
-}
-
-// The entire transaction request was canceled.
-//
-// DynamoDB cancels a TransactWriteItems request under the following circumstances:
-//
-// - A condition in one of the condition expressions is not met.
-//
-// - A table in the TransactWriteItems request is in a different account
-// or region.
-//
-// - More than one action in the TransactWriteItems operation targets the
-// same item.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - An item size becomes too large (larger than 400 KB), or a local secondary
-// index (LSI) becomes too large, or a similar validation error occurs because
-// of changes made by the transaction.
-//
-// - There is a user error, such as an invalid data format.
-//
-// - There is an ongoing TransactWriteItems operation that conflicts with
-// a concurrent TransactWriteItems request. In this case the TransactWriteItems
-// operation fails with a TransactionCanceledException.
-//
-// DynamoDB cancels a TransactGetItems request under the following circumstances:
-//
-// - There is an ongoing TransactGetItems operation that conflicts with a
-// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
-// In this case the TransactGetItems operation fails with a TransactionCanceledException.
-//
-// - A table in the TransactGetItems request is in a different account or
-// region.
-//
-// - There is insufficient provisioned capacity for the transaction to be
-// completed.
-//
-// - There is a user error, such as an invalid data format.
-//
-// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
-// property. This property is not set for other languages. Transaction cancellation
-// reasons are ordered in the order of requested items, if an item has no error
-// it will have None code and Null message.
-//
-// Cancellation reason codes and possible error messages:
-//
-// - No Errors: Code: None Message: null
-//
-// - Conditional Check Failed: Code: ConditionalCheckFailed Message: The
-// conditional request failed.
-//
-// - Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
-// Message: Collection size exceeded.
-//
-// - Transaction Conflict: Code: TransactionConflict Message: Transaction
-// is ongoing for the item.
-//
-// - Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
-// Messages: The level of configured provisioned throughput for the table
-// was exceeded. Consider increasing your provisioning level with the UpdateTable
-// API. This Message is received when provisioned throughput is exceeded
-// is on a provisioned DynamoDB table. The level of configured provisioned
-// throughput for one or more global secondary indexes of the table was exceeded.
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded is on a provisioned GSI.
-//
-// - Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index. DynamoDB is automatically
-// scaling your table or index so please try again shortly. If exceptions
-// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
-// This message is returned when writes get throttled on an On-Demand table
-// as DynamoDB is automatically scaling the table. Throughput exceeds the
-// current capacity for one or more global secondary indexes. DynamoDB is
-// automatically scaling your index so please try again shortly. This message
-// is returned when writes get throttled on an On-Demand GSI as DynamoDB
-// is automatically scaling the GSI.
-//
-// - Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
-type TransactionCanceledException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // A list of cancellation reasons.
- CancellationReasons []*CancellationReason `min:"1" type:"list"`
-
- Message_ *string `locationName:"Message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionCanceledException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionCanceledException) GoString() string {
- return s.String()
-}
-
-func newErrorTransactionCanceledException(v protocol.ResponseMetadata) error {
- return &TransactionCanceledException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *TransactionCanceledException) Code() string {
- return "TransactionCanceledException"
-}
-
-// Message returns the exception's message.
-func (s *TransactionCanceledException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *TransactionCanceledException) OrigErr() error {
- return nil
-}
-
-func (s *TransactionCanceledException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *TransactionCanceledException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *TransactionCanceledException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Operation was rejected because there is an ongoing transaction for the item.
-type TransactionConflictException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionConflictException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionConflictException) GoString() string {
- return s.String()
-}
-
-func newErrorTransactionConflictException(v protocol.ResponseMetadata) error {
- return &TransactionConflictException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *TransactionConflictException) Code() string {
- return "TransactionConflictException"
-}
-
-// Message returns the exception's message.
-func (s *TransactionConflictException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *TransactionConflictException) OrigErr() error {
- return nil
-}
-
-func (s *TransactionConflictException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *TransactionConflictException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *TransactionConflictException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// The transaction with the given request token is already in progress.
-//
-// # Recommended Settings
-//
-// This is a general recommendation for handling the TransactionInProgressException.
-// These settings help ensure that the client retries will trigger completion
-// of the ongoing TransactWriteItems request.
-//
-// - Set clientExecutionTimeout to a value that allows at least one retry
-// to be processed after 5 seconds have elapsed since the first attempt for
-// the TransactWriteItems operation.
-//
-// - Set socketTimeout to a value a little lower than the requestTimeout
-// setting.
-//
-// - requestTimeout should be set based on the time taken for the individual
-// retries of a single HTTP request for your use case, but setting it to
-// 1 second or higher should work well to reduce chances of retries and TransactionInProgressException
-// errors.
-//
-// - Use exponential backoff when retrying and tune backoff if needed.
-//
-// Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97),
-// example timeout settings based on the guidelines above are as follows:
-//
-// Example timeline:
-//
-// - 0-1000 first attempt
-//
-// - 1000-1500 first sleep/delay (default retry policy uses 500 ms as base
-// delay for 4xx errors)
-//
-// - 1500-2500 second attempt
-//
-// - 2500-3500 second sleep/delay (500 * 2, exponential backoff)
-//
-// - 3500-4500 third attempt
-//
-// - 4500-6500 third sleep/delay (500 * 2^2)
-//
-// - 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds
-// have elapsed since the first attempt reached TC)
-type TransactionInProgressException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"Message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionInProgressException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TransactionInProgressException) GoString() string {
- return s.String()
-}
-
-func newErrorTransactionInProgressException(v protocol.ResponseMetadata) error {
- return &TransactionInProgressException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *TransactionInProgressException) Code() string {
- return "TransactionInProgressException"
-}
-
-// Message returns the exception's message.
-func (s *TransactionInProgressException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *TransactionInProgressException) OrigErr() error {
- return nil
-}
-
-func (s *TransactionInProgressException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *TransactionInProgressException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *TransactionInProgressException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type UntagResourceInput struct {
- _ struct{} `type:"structure"`
-
- // The DynamoDB resource that the tags will be removed from. This value is an
- // Amazon Resource Name (ARN).
- //
- // ResourceArn is a required field
- ResourceArn *string `min:"1" type:"string" required:"true"`
-
- // A list of tag keys. Existing tags of the resource whose keys are members
- // of this list will be removed from the DynamoDB resource.
- //
- // TagKeys is a required field
- TagKeys []*string `type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UntagResourceInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"}
- if s.ResourceArn == nil {
- invalidParams.Add(request.NewErrParamRequired("ResourceArn"))
- }
- if s.ResourceArn != nil && len(*s.ResourceArn) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ResourceArn", 1))
- }
- if s.TagKeys == nil {
- invalidParams.Add(request.NewErrParamRequired("TagKeys"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetResourceArn sets the ResourceArn field's value.
-func (s *UntagResourceInput) SetResourceArn(v string) *UntagResourceInput {
- s.ResourceArn = &v
- return s
-}
-
-// SetTagKeys sets the TagKeys field's value.
-func (s *UntagResourceInput) SetTagKeys(v []*string) *UntagResourceInput {
- s.TagKeys = v
- return s
-}
-
-type UntagResourceOutput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UntagResourceOutput) GoString() string {
- return s.String()
-}
-
-// Represents a request to perform an UpdateItem operation.
-type Update struct {
- _ struct{} `type:"structure"`
-
- // A condition that must be satisfied in order for a conditional update to succeed.
- ConditionExpression *string `type:"string"`
-
- // One or more substitution tokens for attribute names in an expression.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // The primary key of the item to be updated. Each element consists of an attribute
- // name and a value for that attribute.
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-
- // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the
- // Update condition fails. For ReturnValuesOnConditionCheckFailure, the valid
- // values are: NONE and ALL_OLD.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // Name of the table for the UpdateItem request. You can also provide the Amazon
- // Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-
- // An expression that defines one or more attributes to be updated, the action
- // to be performed on them, and new value(s) for them.
- //
- // UpdateExpression is a required field
- UpdateExpression *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Update) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Update) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Update) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Update"}
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
- if s.UpdateExpression == nil {
- invalidParams.Add(request.NewErrParamRequired("UpdateExpression"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetConditionExpression sets the ConditionExpression field's value.
-func (s *Update) SetConditionExpression(v string) *Update {
- s.ConditionExpression = &v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *Update) SetExpressionAttributeNames(v map[string]*string) *Update {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *Update) SetExpressionAttributeValues(v map[string]*AttributeValue) *Update {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetKey sets the Key field's value.
-func (s *Update) SetKey(v map[string]*AttributeValue) *Update {
- s.Key = v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *Update) SetReturnValuesOnConditionCheckFailure(v string) *Update {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *Update) SetTableName(v string) *Update {
- s.TableName = &v
- return s
-}
-
-// SetUpdateExpression sets the UpdateExpression field's value.
-func (s *Update) SetUpdateExpression(v string) *Update {
- s.UpdateExpression = &v
- return s
-}
-
-type UpdateContinuousBackupsInput struct {
- _ struct{} `type:"structure"`
-
- // Represents the settings used to enable point in time recovery.
- //
- // PointInTimeRecoverySpecification is a required field
- PointInTimeRecoverySpecification *PointInTimeRecoverySpecification `type:"structure" required:"true"`
-
- // The name of the table. You can also provide the Amazon Resource Name (ARN)
- // of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateContinuousBackupsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateContinuousBackupsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateContinuousBackupsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateContinuousBackupsInput"}
- if s.PointInTimeRecoverySpecification == nil {
- invalidParams.Add(request.NewErrParamRequired("PointInTimeRecoverySpecification"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
- if s.PointInTimeRecoverySpecification != nil {
- if err := s.PointInTimeRecoverySpecification.Validate(); err != nil {
- invalidParams.AddNested("PointInTimeRecoverySpecification", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetPointInTimeRecoverySpecification sets the PointInTimeRecoverySpecification field's value.
-func (s *UpdateContinuousBackupsInput) SetPointInTimeRecoverySpecification(v *PointInTimeRecoverySpecification) *UpdateContinuousBackupsInput {
- s.PointInTimeRecoverySpecification = v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateContinuousBackupsInput) SetTableName(v string) *UpdateContinuousBackupsInput {
- s.TableName = &v
- return s
-}
-
-type UpdateContinuousBackupsOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the continuous backups and point in time recovery settings on
- // the table.
- ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateContinuousBackupsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateContinuousBackupsOutput) GoString() string {
- return s.String()
-}
-
-// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value.
-func (s *UpdateContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *UpdateContinuousBackupsOutput {
- s.ContinuousBackupsDescription = v
- return s
-}
-
-type UpdateContributorInsightsInput struct {
- _ struct{} `type:"structure"`
-
- // Represents the contributor insights action.
- //
- // ContributorInsightsAction is a required field
- ContributorInsightsAction *string `type:"string" required:"true" enum:"ContributorInsightsAction"`
-
- // The global secondary index name, if applicable.
- IndexName *string `min:"3" type:"string"`
-
- // The name of the table. You can also provide the Amazon Resource Name (ARN)
- // of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateContributorInsightsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateContributorInsightsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateContributorInsightsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateContributorInsightsInput"}
- if s.ContributorInsightsAction == nil {
- invalidParams.Add(request.NewErrParamRequired("ContributorInsightsAction"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetContributorInsightsAction sets the ContributorInsightsAction field's value.
-func (s *UpdateContributorInsightsInput) SetContributorInsightsAction(v string) *UpdateContributorInsightsInput {
- s.ContributorInsightsAction = &v
- return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *UpdateContributorInsightsInput) SetIndexName(v string) *UpdateContributorInsightsInput {
- s.IndexName = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateContributorInsightsInput) SetTableName(v string) *UpdateContributorInsightsInput {
- s.TableName = &v
- return s
-}
-
-type UpdateContributorInsightsOutput struct {
- _ struct{} `type:"structure"`
-
- // The status of contributor insights
- ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"`
-
- // The name of the global secondary index, if applicable.
- IndexName *string `min:"3" type:"string"`
-
- // The name of the table.
- TableName *string `min:"3" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateContributorInsightsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateContributorInsightsOutput) GoString() string {
- return s.String()
-}
-
-// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value.
-func (s *UpdateContributorInsightsOutput) SetContributorInsightsStatus(v string) *UpdateContributorInsightsOutput {
- s.ContributorInsightsStatus = &v
- return s
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *UpdateContributorInsightsOutput) SetIndexName(v string) *UpdateContributorInsightsOutput {
- s.IndexName = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateContributorInsightsOutput) SetTableName(v string) *UpdateContributorInsightsOutput {
- s.TableName = &v
- return s
-}
-
-// Represents the new provisioned throughput settings to be applied to a global
-// secondary index.
-type UpdateGlobalSecondaryIndexAction struct {
- _ struct{} `type:"structure"`
-
- // The name of the global secondary index to be updated.
- //
- // IndexName is a required field
- IndexName *string `min:"3" type:"string" required:"true"`
-
- // Updates the maximum number of read and write units for the specified global
- // secondary index. If you use this parameter, you must specify MaxReadRequestUnits,
- // MaxWriteRequestUnits, or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // Represents the provisioned throughput settings for the specified global secondary
- // index.
- //
- // For current minimum and maximum provisioned throughput values, see Service,
- // Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
- // in the Amazon DynamoDB Developer Guide.
- ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalSecondaryIndexAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalSecondaryIndexAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateGlobalSecondaryIndexAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalSecondaryIndexAction"}
- if s.IndexName == nil {
- invalidParams.Add(request.NewErrParamRequired("IndexName"))
- }
- if s.IndexName != nil && len(*s.IndexName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
- }
- if s.ProvisionedThroughput != nil {
- if err := s.ProvisionedThroughput.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetIndexName sets the IndexName field's value.
-func (s *UpdateGlobalSecondaryIndexAction) SetIndexName(v string) *UpdateGlobalSecondaryIndexAction {
- s.IndexName = &v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *UpdateGlobalSecondaryIndexAction) SetOnDemandThroughput(v *OnDemandThroughput) *UpdateGlobalSecondaryIndexAction {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *UpdateGlobalSecondaryIndexAction) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateGlobalSecondaryIndexAction {
- s.ProvisionedThroughput = v
- return s
-}
-
-type UpdateGlobalTableInput struct {
- _ struct{} `type:"structure"`
-
- // The global table name.
- //
- // GlobalTableName is a required field
- GlobalTableName *string `min:"3" type:"string" required:"true"`
-
- // A list of Regions that should be added or removed from the global table.
- //
- // ReplicaUpdates is a required field
- ReplicaUpdates []*ReplicaUpdate `type:"list" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateGlobalTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableInput"}
- if s.GlobalTableName == nil {
- invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
- }
- if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
- }
- if s.ReplicaUpdates == nil {
- invalidParams.Add(request.NewErrParamRequired("ReplicaUpdates"))
- }
- if s.ReplicaUpdates != nil {
- for i, v := range s.ReplicaUpdates {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *UpdateGlobalTableInput) SetGlobalTableName(v string) *UpdateGlobalTableInput {
- s.GlobalTableName = &v
- return s
-}
-
-// SetReplicaUpdates sets the ReplicaUpdates field's value.
-func (s *UpdateGlobalTableInput) SetReplicaUpdates(v []*ReplicaUpdate) *UpdateGlobalTableInput {
- s.ReplicaUpdates = v
- return s
-}
-
-type UpdateGlobalTableOutput struct {
- _ struct{} `type:"structure"`
-
- // Contains the details of the global table.
- GlobalTableDescription *GlobalTableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalTableOutput) GoString() string {
- return s.String()
-}
-
-// SetGlobalTableDescription sets the GlobalTableDescription field's value.
-func (s *UpdateGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *UpdateGlobalTableOutput {
- s.GlobalTableDescription = v
- return s
-}
-
-type UpdateGlobalTableSettingsInput struct {
- _ struct{} `type:"structure"`
-
- // The billing mode of the global table. If GlobalTableBillingMode is not specified,
- // the global table defaults to PROVISIONED capacity billing mode.
- //
- // * PROVISIONED - We recommend using PROVISIONED for predictable workloads.
- // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html).
- //
- // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable
- // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity
- // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html).
- GlobalTableBillingMode *string `type:"string" enum:"BillingMode"`
-
- // Represents the settings of a global secondary index for a global table that
- // will be modified.
- GlobalTableGlobalSecondaryIndexSettingsUpdate []*GlobalTableGlobalSecondaryIndexSettingsUpdate `min:"1" type:"list"`
-
- // The name of the global table
- //
- // GlobalTableName is a required field
- GlobalTableName *string `min:"3" type:"string" required:"true"`
-
- // Auto scaling settings for managing provisioned write capacity for the global
- // table.
- GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate *AutoScalingSettingsUpdate `type:"structure"`
-
- // The maximum number of writes consumed per second before DynamoDB returns
- // a ThrottlingException.
- GlobalTableProvisionedWriteCapacityUnits *int64 `min:"1" type:"long"`
-
- // Represents the settings for a global table in a Region that will be modified.
- ReplicaSettingsUpdate []*ReplicaSettingsUpdate `min:"1" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalTableSettingsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalTableSettingsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateGlobalTableSettingsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateGlobalTableSettingsInput"}
- if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil && len(s.GlobalTableGlobalSecondaryIndexSettingsUpdate) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalTableGlobalSecondaryIndexSettingsUpdate", 1))
- }
- if s.GlobalTableName == nil {
- invalidParams.Add(request.NewErrParamRequired("GlobalTableName"))
- }
- if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3))
- }
- if s.GlobalTableProvisionedWriteCapacityUnits != nil && *s.GlobalTableProvisionedWriteCapacityUnits < 1 {
- invalidParams.Add(request.NewErrParamMinValue("GlobalTableProvisionedWriteCapacityUnits", 1))
- }
- if s.ReplicaSettingsUpdate != nil && len(s.ReplicaSettingsUpdate) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ReplicaSettingsUpdate", 1))
- }
- if s.GlobalTableGlobalSecondaryIndexSettingsUpdate != nil {
- for i, v := range s.GlobalTableGlobalSecondaryIndexSettingsUpdate {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalTableGlobalSecondaryIndexSettingsUpdate", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate != nil {
- if err := s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate.Validate(); err != nil {
- invalidParams.AddNested("GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate", err.(request.ErrInvalidParams))
- }
- }
- if s.ReplicaSettingsUpdate != nil {
- for i, v := range s.ReplicaSettingsUpdate {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaSettingsUpdate", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalTableBillingMode sets the GlobalTableBillingMode field's value.
-func (s *UpdateGlobalTableSettingsInput) SetGlobalTableBillingMode(v string) *UpdateGlobalTableSettingsInput {
- s.GlobalTableBillingMode = &v
- return s
-}
-
-// SetGlobalTableGlobalSecondaryIndexSettingsUpdate sets the GlobalTableGlobalSecondaryIndexSettingsUpdate field's value.
-func (s *UpdateGlobalTableSettingsInput) SetGlobalTableGlobalSecondaryIndexSettingsUpdate(v []*GlobalTableGlobalSecondaryIndexSettingsUpdate) *UpdateGlobalTableSettingsInput {
- s.GlobalTableGlobalSecondaryIndexSettingsUpdate = v
- return s
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *UpdateGlobalTableSettingsInput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsInput {
- s.GlobalTableName = &v
- return s
-}
-
-// SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate sets the GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate field's value.
-func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate(v *AutoScalingSettingsUpdate) *UpdateGlobalTableSettingsInput {
- s.GlobalTableProvisionedWriteCapacityAutoScalingSettingsUpdate = v
- return s
-}
-
-// SetGlobalTableProvisionedWriteCapacityUnits sets the GlobalTableProvisionedWriteCapacityUnits field's value.
-func (s *UpdateGlobalTableSettingsInput) SetGlobalTableProvisionedWriteCapacityUnits(v int64) *UpdateGlobalTableSettingsInput {
- s.GlobalTableProvisionedWriteCapacityUnits = &v
- return s
-}
-
-// SetReplicaSettingsUpdate sets the ReplicaSettingsUpdate field's value.
-func (s *UpdateGlobalTableSettingsInput) SetReplicaSettingsUpdate(v []*ReplicaSettingsUpdate) *UpdateGlobalTableSettingsInput {
- s.ReplicaSettingsUpdate = v
- return s
-}
-
-type UpdateGlobalTableSettingsOutput struct {
- _ struct{} `type:"structure"`
-
- // The name of the global table.
- GlobalTableName *string `min:"3" type:"string"`
-
- // The Region-specific settings for the global table.
- ReplicaSettings []*ReplicaSettingsDescription `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalTableSettingsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateGlobalTableSettingsOutput) GoString() string {
- return s.String()
-}
-
-// SetGlobalTableName sets the GlobalTableName field's value.
-func (s *UpdateGlobalTableSettingsOutput) SetGlobalTableName(v string) *UpdateGlobalTableSettingsOutput {
- s.GlobalTableName = &v
- return s
-}
-
-// SetReplicaSettings sets the ReplicaSettings field's value.
-func (s *UpdateGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *UpdateGlobalTableSettingsOutput {
- s.ReplicaSettings = v
- return s
-}
-
-// Represents the input of an UpdateItem operation.
-type UpdateItemInput struct {
- _ struct{} `type:"structure"`
-
- // This is a legacy parameter. Use UpdateExpression instead. For more information,
- // see AttributeUpdates (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.AttributeUpdates.html)
- // in the Amazon DynamoDB Developer Guide.
- AttributeUpdates map[string]*AttributeValueUpdate `type:"map"`
-
- // A condition that must be satisfied in order for a conditional update to succeed.
- //
- // An expression can contain any of the following:
- //
- // * Functions: attribute_exists | attribute_not_exists | attribute_type
- // | contains | begins_with | size These function names are case-sensitive.
- //
- // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN
- //
- // * Logical operators: AND | OR | NOT
- //
- // For more information about condition expressions, see Specifying Conditions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionExpression *string `type:"string"`
-
- // This is a legacy parameter. Use ConditionExpression instead. For more information,
- // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html)
- // in the Amazon DynamoDB Developer Guide.
- ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
-
- // This is a legacy parameter. Use ConditionExpression instead. For more information,
- // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
- // in the Amazon DynamoDB Developer Guide.
- Expected map[string]*ExpectedAttributeValue `type:"map"`
-
- // One or more substitution tokens for attribute names in an expression. The
- // following are some use cases for using ExpressionAttributeNames:
- //
- // * To access an attribute whose name conflicts with a DynamoDB reserved
- // word.
- //
- // * To create a placeholder for repeating occurrences of an attribute name
- // in an expression.
- //
- // * To prevent special characters in an attribute name from being misinterpreted
- // in an expression.
- //
- // Use the # character in an expression to dereference an attribute name. For
- // example, consider the following attribute name:
- //
- // * Percentile
- //
- // The name of this attribute conflicts with a reserved word, so it cannot be
- // used directly in an expression. (For the complete list of reserved words,
- // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html)
- // in the Amazon DynamoDB Developer Guide.) To work around this, you could specify
- // the following for ExpressionAttributeNames:
- //
- // * {"#P":"Percentile"}
- //
- // You could then use this substitution in an expression, as in this example:
- //
- // * #P = :val
- //
- // Tokens that begin with the : character are expression attribute values, which
- // are placeholders for the actual value at runtime.
- //
- // For more information about expression attribute names, see Specifying Item
- // Attributes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeNames map[string]*string `type:"map"`
-
- // One or more values that can be substituted in an expression.
- //
- // Use the : (colon) character in an expression to dereference an attribute
- // value. For example, suppose that you wanted to check whether the value of
- // the ProductStatus attribute was one of the following:
- //
- // Available | Backordered | Discontinued
- //
- // You would first need to specify ExpressionAttributeValues as follows:
- //
- // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"}
- // }
- //
- // You could then use these values in an expression, such as this:
- //
- // ProductStatus IN (:avail, :back, :disc)
- //
- // For more information on expression attribute values, see Condition Expressions
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html)
- // in the Amazon DynamoDB Developer Guide.
- ExpressionAttributeValues map[string]*AttributeValue `type:"map"`
-
- // The primary key of the item to be updated. Each element consists of an attribute
- // name and a value for that attribute.
- //
- // For the primary key, you must provide all of the attributes. For example,
- // with a simple primary key, you only need to provide a value for the partition
- // key. For a composite primary key, you must provide values for both the partition
- // key and the sort key.
- //
- // Key is a required field
- Key map[string]*AttributeValue `type:"map" required:"true"`
-
- // Determines the level of detail about either provisioned or on-demand throughput
- // consumption that is returned in the response:
- //
- // * INDEXES - The response includes the aggregate ConsumedCapacity for the
- // operation, together with ConsumedCapacity for each table and secondary
- // index that was accessed. Note that some operations, such as GetItem and
- // BatchGetItem, do not access any indexes at all. In these cases, specifying
- // INDEXES will only return ConsumedCapacity information for table(s).
- //
- // * TOTAL - The response includes only the aggregate ConsumedCapacity for
- // the operation.
- //
- // * NONE - No ConsumedCapacity details are included in the response.
- ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"`
-
- // Determines whether item collection metrics are returned. If set to SIZE,
- // the response includes statistics about item collections, if any, that were
- // modified during the operation are returned in the response. If set to NONE
- // (the default), no statistics are returned.
- ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
-
- // Use ReturnValues if you want to get the item attributes as they appear before
- // or after they are successfully updated. For UpdateItem, the valid values
- // are:
- //
- // * NONE - If ReturnValues is not specified, or if its value is NONE, then
- // nothing is returned. (This setting is the default for ReturnValues.)
- //
- // * ALL_OLD - Returns all of the attributes of the item, as they appeared
- // before the UpdateItem operation.
- //
- // * UPDATED_OLD - Returns only the updated attributes, as they appeared
- // before the UpdateItem operation.
- //
- // * ALL_NEW - Returns all of the attributes of the item, as they appear
- // after the UpdateItem operation.
- //
- // * UPDATED_NEW - Returns only the updated attributes, as they appear after
- // the UpdateItem operation.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- //
- // The values returned are strongly consistent.
- ReturnValues *string `type:"string" enum:"ReturnValue"`
-
- // An optional parameter that returns the item attributes for an UpdateItem
- // operation that failed a condition check.
- //
- // There is no additional cost associated with requesting a return value aside
- // from the small network and processing overhead of receiving a larger response.
- // No read capacity units are consumed.
- ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"`
-
- // The name of the table containing the item to update. You can also provide
- // the Amazon Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-
- // An expression that defines one or more attributes to be updated, the action
- // to be performed on them, and new values for them.
- //
- // The following action values are available for UpdateExpression.
- //
- // * SET - Adds one or more attributes and values to an item. If any of these
- // attributes already exist, they are replaced by the new values. You can
- // also use SET to add or subtract from an attribute that is of type Number.
- // For example: SET myNum = myNum + :val SET supports the following functions:
- // if_not_exists (path, operand) - if the item does not contain an attribute
- // at the specified path, then if_not_exists evaluates to operand; otherwise,
- // it evaluates to path. You can use this function to avoid overwriting an
- // attribute that may already be present in the item. list_append (operand,
- // operand) - evaluates to a list with a new element added to it. You can
- // append the new element to the start or the end of the list by reversing
- // the order of the operands. These function names are case-sensitive.
- //
- // * REMOVE - Removes one or more attributes from an item.
- //
- // * ADD - Adds the specified value to the item, if the attribute does not
- // already exist. If the attribute does exist, then the behavior of ADD depends
- // on the data type of the attribute: If the existing attribute is a number,
- // and if Value is also a number, then Value is mathematically added to the
- // existing attribute. If Value is a negative number, then it is subtracted
- // from the existing attribute. If you use ADD to increment or decrement
- // a number value for an item that doesn't exist before the update, DynamoDB
- // uses 0 as the initial value. Similarly, if you use ADD for an existing
- // item to increment or decrement an attribute value that doesn't exist before
- // the update, DynamoDB uses 0 as the initial value. For example, suppose
- // that the item you want to update doesn't have an attribute named itemcount,
- // but you decide to ADD the number 3 to this attribute anyway. DynamoDB
- // will create the itemcount attribute, set its initial value to 0, and finally
- // add 3 to it. The result will be a new itemcount attribute in the item,
- // with a value of 3. If the existing data type is a set and if Value is
- // also a set, then Value is added to the existing set. For example, if the
- // attribute value is the set [1,2], and the ADD action specified [3], then
- // the final attribute value is [1,2,3]. An error occurs if an ADD action
- // is specified for a set attribute and the attribute type specified does
- // not match the existing set type. Both sets must have the same primitive
- // data type. For example, if the existing data type is a set of strings,
- // the Value must also be a set of strings. The ADD action only supports
- // Number and set data types. In addition, ADD can only be used on top-level
- // attributes, not nested attributes.
- //
- // * DELETE - Deletes an element from a set. If a set of values is specified,
- // then those values are subtracted from the old set. For example, if the
- // attribute value was the set [a,b,c] and the DELETE action specifies [a,c],
- // then the final attribute value is [b]. Specifying an empty set is an error.
- // The DELETE action only supports set data types. In addition, DELETE can
- // only be used on top-level attributes, not nested attributes.
- //
- // You can have many actions in a single expression, such as the following:
- // SET a=:value1, b=:value2 DELETE :value3, :value4, :value5
- //
- // For more information on update expressions, see Modifying Items and Attributes
- // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html)
- // in the Amazon DynamoDB Developer Guide.
- UpdateExpression *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateItemInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateItemInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateItemInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateItemInput"}
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributeUpdates sets the AttributeUpdates field's value.
-func (s *UpdateItemInput) SetAttributeUpdates(v map[string]*AttributeValueUpdate) *UpdateItemInput {
- s.AttributeUpdates = v
- return s
-}
-
-// SetConditionExpression sets the ConditionExpression field's value.
-func (s *UpdateItemInput) SetConditionExpression(v string) *UpdateItemInput {
- s.ConditionExpression = &v
- return s
-}
-
-// SetConditionalOperator sets the ConditionalOperator field's value.
-func (s *UpdateItemInput) SetConditionalOperator(v string) *UpdateItemInput {
- s.ConditionalOperator = &v
- return s
-}
-
-// SetExpected sets the Expected field's value.
-func (s *UpdateItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *UpdateItemInput {
- s.Expected = v
- return s
-}
-
-// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value.
-func (s *UpdateItemInput) SetExpressionAttributeNames(v map[string]*string) *UpdateItemInput {
- s.ExpressionAttributeNames = v
- return s
-}
-
-// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value.
-func (s *UpdateItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *UpdateItemInput {
- s.ExpressionAttributeValues = v
- return s
-}
-
-// SetKey sets the Key field's value.
-func (s *UpdateItemInput) SetKey(v map[string]*AttributeValue) *UpdateItemInput {
- s.Key = v
- return s
-}
-
-// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value.
-func (s *UpdateItemInput) SetReturnConsumedCapacity(v string) *UpdateItemInput {
- s.ReturnConsumedCapacity = &v
- return s
-}
-
-// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value.
-func (s *UpdateItemInput) SetReturnItemCollectionMetrics(v string) *UpdateItemInput {
- s.ReturnItemCollectionMetrics = &v
- return s
-}
-
-// SetReturnValues sets the ReturnValues field's value.
-func (s *UpdateItemInput) SetReturnValues(v string) *UpdateItemInput {
- s.ReturnValues = &v
- return s
-}
-
-// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value.
-func (s *UpdateItemInput) SetReturnValuesOnConditionCheckFailure(v string) *UpdateItemInput {
- s.ReturnValuesOnConditionCheckFailure = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateItemInput) SetTableName(v string) *UpdateItemInput {
- s.TableName = &v
- return s
-}
-
-// SetUpdateExpression sets the UpdateExpression field's value.
-func (s *UpdateItemInput) SetUpdateExpression(v string) *UpdateItemInput {
- s.UpdateExpression = &v
- return s
-}
-
-// Represents the output of an UpdateItem operation.
-type UpdateItemOutput struct {
- _ struct{} `type:"structure"`
-
- // A map of attribute values as they appear before or after the UpdateItem operation,
- // as determined by the ReturnValues parameter.
- //
- // The Attributes map is only present if the update was successful and ReturnValues
- // was specified as something other than NONE in the request. Each element represents
- // one attribute.
- Attributes map[string]*AttributeValue `type:"map"`
-
- // The capacity units consumed by the UpdateItem operation. The data returned
- // includes the total provisioned throughput consumed, along with statistics
- // for the table and any indexes involved in the operation. ConsumedCapacity
- // is only returned if the ReturnConsumedCapacity parameter was specified. For
- // more information, see Capacity unity consumption for write operations (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/read-write-operations.html#write-operation-consumption)
- // in the Amazon DynamoDB Developer Guide.
- ConsumedCapacity *ConsumedCapacity `type:"structure"`
-
- // Information about item collections, if any, that were affected by the UpdateItem
- // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics
- // parameter was specified. If the table does not have any local secondary indexes,
- // this information is not returned in the response.
- //
- // Each ItemCollectionMetrics element consists of:
- //
- // * ItemCollectionKey - The partition key value of the item collection.
- // This is the same as the partition key value of the item itself.
- //
- // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes.
- // This value is a two-element array containing a lower bound and an upper
- // bound for the estimate. The estimate includes the size of all the items
- // in the table, plus the size of all attributes projected into all of the
- // local secondary indexes on that table. Use this estimate to measure whether
- // a local secondary index is approaching its size limit. The estimate is
- // subject to change over time; therefore, do not rely on the precision or
- // accuracy of the estimate.
- ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateItemOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateItemOutput) GoString() string {
- return s.String()
-}
-
-// SetAttributes sets the Attributes field's value.
-func (s *UpdateItemOutput) SetAttributes(v map[string]*AttributeValue) *UpdateItemOutput {
- s.Attributes = v
- return s
-}
-
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *UpdateItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *UpdateItemOutput {
- s.ConsumedCapacity = v
- return s
-}
-
-// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value.
-func (s *UpdateItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *UpdateItemOutput {
- s.ItemCollectionMetrics = v
- return s
-}
-
-// Enables updating the configuration for Kinesis Streaming.
-type UpdateKinesisStreamingConfiguration struct {
- _ struct{} `type:"structure"`
-
- // Enables updating the precision of Kinesis data stream timestamp.
- ApproximateCreationDateTimePrecision *string `type:"string" enum:"ApproximateCreationDateTimePrecision"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateKinesisStreamingConfiguration) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateKinesisStreamingConfiguration) GoString() string {
- return s.String()
-}
-
-// SetApproximateCreationDateTimePrecision sets the ApproximateCreationDateTimePrecision field's value.
-func (s *UpdateKinesisStreamingConfiguration) SetApproximateCreationDateTimePrecision(v string) *UpdateKinesisStreamingConfiguration {
- s.ApproximateCreationDateTimePrecision = &v
- return s
-}
-
-type UpdateKinesisStreamingDestinationInput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) for the Kinesis stream input.
- //
- // StreamArn is a required field
- StreamArn *string `min:"37" type:"string" required:"true"`
-
- // The table name for the Kinesis streaming destination input. You can also
- // provide the ARN of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-
- // The command to update the Kinesis stream configuration.
- UpdateKinesisStreamingConfiguration *UpdateKinesisStreamingConfiguration `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateKinesisStreamingDestinationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateKinesisStreamingDestinationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateKinesisStreamingDestinationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateKinesisStreamingDestinationInput"}
- if s.StreamArn == nil {
- invalidParams.Add(request.NewErrParamRequired("StreamArn"))
- }
- if s.StreamArn != nil && len(*s.StreamArn) < 37 {
- invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *UpdateKinesisStreamingDestinationInput) SetStreamArn(v string) *UpdateKinesisStreamingDestinationInput {
- s.StreamArn = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateKinesisStreamingDestinationInput) SetTableName(v string) *UpdateKinesisStreamingDestinationInput {
- s.TableName = &v
- return s
-}
-
-// SetUpdateKinesisStreamingConfiguration sets the UpdateKinesisStreamingConfiguration field's value.
-func (s *UpdateKinesisStreamingDestinationInput) SetUpdateKinesisStreamingConfiguration(v *UpdateKinesisStreamingConfiguration) *UpdateKinesisStreamingDestinationInput {
- s.UpdateKinesisStreamingConfiguration = v
- return s
-}
-
-type UpdateKinesisStreamingDestinationOutput struct {
- _ struct{} `type:"structure"`
-
- // The status of the attempt to update the Kinesis streaming destination output.
- DestinationStatus *string `type:"string" enum:"DestinationStatus"`
-
- // The ARN for the Kinesis stream input.
- StreamArn *string `min:"37" type:"string"`
-
- // The table name for the Kinesis streaming destination output.
- TableName *string `min:"3" type:"string"`
-
- // The command to update the Kinesis streaming destination configuration.
- UpdateKinesisStreamingConfiguration *UpdateKinesisStreamingConfiguration `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateKinesisStreamingDestinationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateKinesisStreamingDestinationOutput) GoString() string {
- return s.String()
-}
-
-// SetDestinationStatus sets the DestinationStatus field's value.
-func (s *UpdateKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *UpdateKinesisStreamingDestinationOutput {
- s.DestinationStatus = &v
- return s
-}
-
-// SetStreamArn sets the StreamArn field's value.
-func (s *UpdateKinesisStreamingDestinationOutput) SetStreamArn(v string) *UpdateKinesisStreamingDestinationOutput {
- s.StreamArn = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateKinesisStreamingDestinationOutput) SetTableName(v string) *UpdateKinesisStreamingDestinationOutput {
- s.TableName = &v
- return s
-}
-
-// SetUpdateKinesisStreamingConfiguration sets the UpdateKinesisStreamingConfiguration field's value.
-func (s *UpdateKinesisStreamingDestinationOutput) SetUpdateKinesisStreamingConfiguration(v *UpdateKinesisStreamingConfiguration) *UpdateKinesisStreamingDestinationOutput {
- s.UpdateKinesisStreamingConfiguration = v
- return s
-}
-
-// Represents a replica to be modified.
-type UpdateReplicationGroupMemberAction struct {
- _ struct{} `type:"structure"`
-
- // Replica-specific global secondary index settings.
- GlobalSecondaryIndexes []*ReplicaGlobalSecondaryIndex `min:"1" type:"list"`
-
- // The KMS key of the replica that should be used for KMS encryption. To specify
- // a key, use its key ID, Amazon Resource Name (ARN), alias name, or alias ARN.
- // Note that you should only provide this parameter if the key is different
- // from the default DynamoDB KMS key alias/aws/dynamodb.
- KMSMasterKeyId *string `type:"string"`
-
- // Overrides the maximum on-demand throughput for the replica table.
- OnDemandThroughputOverride *OnDemandThroughputOverride `type:"structure"`
-
- // Replica-specific provisioned throughput. If not specified, uses the source
- // table's provisioned throughput settings.
- ProvisionedThroughputOverride *ProvisionedThroughputOverride `type:"structure"`
-
- // The Region where the replica exists.
- //
- // RegionName is a required field
- RegionName *string `type:"string" required:"true"`
-
- // Replica-specific table class. If not specified, uses the source table's table
- // class.
- TableClassOverride *string `type:"string" enum:"TableClass"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateReplicationGroupMemberAction) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateReplicationGroupMemberAction) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateReplicationGroupMemberAction) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateReplicationGroupMemberAction"}
- if s.GlobalSecondaryIndexes != nil && len(s.GlobalSecondaryIndexes) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexes", 1))
- }
- if s.RegionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RegionName"))
- }
- if s.GlobalSecondaryIndexes != nil {
- for i, v := range s.GlobalSecondaryIndexes {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexes", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvisionedThroughputOverride != nil {
- if err := s.ProvisionedThroughputOverride.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughputOverride", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value.
-func (s *UpdateReplicationGroupMemberAction) SetGlobalSecondaryIndexes(v []*ReplicaGlobalSecondaryIndex) *UpdateReplicationGroupMemberAction {
- s.GlobalSecondaryIndexes = v
- return s
-}
-
-// SetKMSMasterKeyId sets the KMSMasterKeyId field's value.
-func (s *UpdateReplicationGroupMemberAction) SetKMSMasterKeyId(v string) *UpdateReplicationGroupMemberAction {
- s.KMSMasterKeyId = &v
- return s
-}
-
-// SetOnDemandThroughputOverride sets the OnDemandThroughputOverride field's value.
-func (s *UpdateReplicationGroupMemberAction) SetOnDemandThroughputOverride(v *OnDemandThroughputOverride) *UpdateReplicationGroupMemberAction {
- s.OnDemandThroughputOverride = v
- return s
-}
-
-// SetProvisionedThroughputOverride sets the ProvisionedThroughputOverride field's value.
-func (s *UpdateReplicationGroupMemberAction) SetProvisionedThroughputOverride(v *ProvisionedThroughputOverride) *UpdateReplicationGroupMemberAction {
- s.ProvisionedThroughputOverride = v
- return s
-}
-
-// SetRegionName sets the RegionName field's value.
-func (s *UpdateReplicationGroupMemberAction) SetRegionName(v string) *UpdateReplicationGroupMemberAction {
- s.RegionName = &v
- return s
-}
-
-// SetTableClassOverride sets the TableClassOverride field's value.
-func (s *UpdateReplicationGroupMemberAction) SetTableClassOverride(v string) *UpdateReplicationGroupMemberAction {
- s.TableClassOverride = &v
- return s
-}
-
-// Represents the input of an UpdateTable operation.
-type UpdateTableInput struct {
- _ struct{} `type:"structure"`
-
- // An array of attributes that describe the key schema for the table and indexes.
- // If you are adding a new global secondary index to the table, AttributeDefinitions
- // must include the key element(s) of the new index.
- AttributeDefinitions []*AttributeDefinition `type:"list"`
-
- // Controls how you are charged for read and write throughput and how you manage
- // capacity. When switching from pay-per-request to provisioned capacity, initial
- // provisioned capacity values must be set. The initial provisioned capacity
- // values are estimated based on the consumed read and write capacity of your
- // table and global secondary indexes over the past 30 minutes.
- //
- // * PROVISIONED - We recommend using PROVISIONED for predictable workloads.
- // PROVISIONED sets the billing mode to Provisioned capacity mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/provisioned-capacity-mode.html).
- //
- // * PAY_PER_REQUEST - We recommend using PAY_PER_REQUEST for unpredictable
- // workloads. PAY_PER_REQUEST sets the billing mode to On-demand capacity
- // mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/on-demand-capacity-mode.html).
- BillingMode *string `type:"string" enum:"BillingMode"`
-
- // Indicates whether deletion protection is to be enabled (true) or disabled
- // (false) on the table.
- DeletionProtectionEnabled *bool `type:"boolean"`
-
- // An array of one or more global secondary indexes for the table. For each
- // index in the array, you can request one action:
- //
- // * Create - add a new global secondary index to the table.
- //
- // * Update - modify the provisioned throughput settings of an existing global
- // secondary index.
- //
- // * Delete - remove a global secondary index from the table.
- //
- // You can create or delete only one global secondary index per UpdateTable
- // operation.
- //
- // For more information, see Managing Global Secondary Indexes (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html)
- // in the Amazon DynamoDB Developer Guide.
- GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"`
-
- // Updates the maximum number of read and write units for the specified table
- // in on-demand capacity mode. If you use this parameter, you must specify MaxReadRequestUnits,
- // MaxWriteRequestUnits, or both.
- OnDemandThroughput *OnDemandThroughput `type:"structure"`
-
- // The new provisioned throughput settings for the specified table or index.
- ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
-
- // A list of replica update actions (create, delete, or update) for the table.
- //
- // For global tables, this property only applies to global tables using Version
- // 2019.11.21 (Current version).
- ReplicaUpdates []*ReplicationGroupUpdate `min:"1" type:"list"`
-
- // The new server-side encryption settings for the specified table.
- SSESpecification *SSESpecification `type:"structure"`
-
- // Represents the DynamoDB Streams configuration for the table.
- //
- // You receive a ValidationException if you try to enable a stream on a table
- // that already has a stream, or if you try to disable a stream on a table that
- // doesn't have a stream.
- StreamSpecification *StreamSpecification `type:"structure"`
-
- // The table class of the table to be updated. Valid values are STANDARD and
- // STANDARD_INFREQUENT_ACCESS.
- TableClass *string `type:"string" enum:"TableClass"`
-
- // The name of the table to be updated. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTableInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTableInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateTableInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateTableInput"}
- if s.ReplicaUpdates != nil && len(s.ReplicaUpdates) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ReplicaUpdates", 1))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
- if s.AttributeDefinitions != nil {
- for i, v := range s.AttributeDefinitions {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "AttributeDefinitions", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.GlobalSecondaryIndexUpdates != nil {
- for i, v := range s.GlobalSecondaryIndexUpdates {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvisionedThroughput != nil {
- if err := s.ProvisionedThroughput.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedThroughput", err.(request.ErrInvalidParams))
- }
- }
- if s.ReplicaUpdates != nil {
- for i, v := range s.ReplicaUpdates {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.StreamSpecification != nil {
- if err := s.StreamSpecification.Validate(); err != nil {
- invalidParams.AddNested("StreamSpecification", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAttributeDefinitions sets the AttributeDefinitions field's value.
-func (s *UpdateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *UpdateTableInput {
- s.AttributeDefinitions = v
- return s
-}
-
-// SetBillingMode sets the BillingMode field's value.
-func (s *UpdateTableInput) SetBillingMode(v string) *UpdateTableInput {
- s.BillingMode = &v
- return s
-}
-
-// SetDeletionProtectionEnabled sets the DeletionProtectionEnabled field's value.
-func (s *UpdateTableInput) SetDeletionProtectionEnabled(v bool) *UpdateTableInput {
- s.DeletionProtectionEnabled = &v
- return s
-}
-
-// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value.
-func (s *UpdateTableInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexUpdate) *UpdateTableInput {
- s.GlobalSecondaryIndexUpdates = v
- return s
-}
-
-// SetOnDemandThroughput sets the OnDemandThroughput field's value.
-func (s *UpdateTableInput) SetOnDemandThroughput(v *OnDemandThroughput) *UpdateTableInput {
- s.OnDemandThroughput = v
- return s
-}
-
-// SetProvisionedThroughput sets the ProvisionedThroughput field's value.
-func (s *UpdateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *UpdateTableInput {
- s.ProvisionedThroughput = v
- return s
-}
-
-// SetReplicaUpdates sets the ReplicaUpdates field's value.
-func (s *UpdateTableInput) SetReplicaUpdates(v []*ReplicationGroupUpdate) *UpdateTableInput {
- s.ReplicaUpdates = v
- return s
-}
-
-// SetSSESpecification sets the SSESpecification field's value.
-func (s *UpdateTableInput) SetSSESpecification(v *SSESpecification) *UpdateTableInput {
- s.SSESpecification = v
- return s
-}
-
-// SetStreamSpecification sets the StreamSpecification field's value.
-func (s *UpdateTableInput) SetStreamSpecification(v *StreamSpecification) *UpdateTableInput {
- s.StreamSpecification = v
- return s
-}
-
-// SetTableClass sets the TableClass field's value.
-func (s *UpdateTableInput) SetTableClass(v string) *UpdateTableInput {
- s.TableClass = &v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateTableInput) SetTableName(v string) *UpdateTableInput {
- s.TableName = &v
- return s
-}
-
-// Represents the output of an UpdateTable operation.
-type UpdateTableOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the properties of the table.
- TableDescription *TableDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTableOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTableOutput) GoString() string {
- return s.String()
-}
-
-// SetTableDescription sets the TableDescription field's value.
-func (s *UpdateTableOutput) SetTableDescription(v *TableDescription) *UpdateTableOutput {
- s.TableDescription = v
- return s
-}
-
-type UpdateTableReplicaAutoScalingInput struct {
- _ struct{} `type:"structure"`
-
- // Represents the auto scaling settings of the global secondary indexes of the
- // replica to be updated.
- GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexAutoScalingUpdate `min:"1" type:"list"`
-
- // Represents the auto scaling settings to be modified for a global table or
- // global secondary index.
- ProvisionedWriteCapacityAutoScalingUpdate *AutoScalingSettingsUpdate `type:"structure"`
-
- // Represents the auto scaling settings of replicas of the table that will be
- // modified.
- ReplicaUpdates []*ReplicaAutoScalingUpdate `min:"1" type:"list"`
-
- // The name of the global table to be updated. You can also provide the Amazon
- // Resource Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTableReplicaAutoScalingInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTableReplicaAutoScalingInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateTableReplicaAutoScalingInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateTableReplicaAutoScalingInput"}
- if s.GlobalSecondaryIndexUpdates != nil && len(s.GlobalSecondaryIndexUpdates) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("GlobalSecondaryIndexUpdates", 1))
- }
- if s.ReplicaUpdates != nil && len(s.ReplicaUpdates) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("ReplicaUpdates", 1))
- }
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
- if s.GlobalSecondaryIndexUpdates != nil {
- for i, v := range s.GlobalSecondaryIndexUpdates {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "GlobalSecondaryIndexUpdates", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvisionedWriteCapacityAutoScalingUpdate != nil {
- if err := s.ProvisionedWriteCapacityAutoScalingUpdate.Validate(); err != nil {
- invalidParams.AddNested("ProvisionedWriteCapacityAutoScalingUpdate", err.(request.ErrInvalidParams))
- }
- }
- if s.ReplicaUpdates != nil {
- for i, v := range s.ReplicaUpdates {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ReplicaUpdates", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetGlobalSecondaryIndexUpdates sets the GlobalSecondaryIndexUpdates field's value.
-func (s *UpdateTableReplicaAutoScalingInput) SetGlobalSecondaryIndexUpdates(v []*GlobalSecondaryIndexAutoScalingUpdate) *UpdateTableReplicaAutoScalingInput {
- s.GlobalSecondaryIndexUpdates = v
- return s
-}
-
-// SetProvisionedWriteCapacityAutoScalingUpdate sets the ProvisionedWriteCapacityAutoScalingUpdate field's value.
-func (s *UpdateTableReplicaAutoScalingInput) SetProvisionedWriteCapacityAutoScalingUpdate(v *AutoScalingSettingsUpdate) *UpdateTableReplicaAutoScalingInput {
- s.ProvisionedWriteCapacityAutoScalingUpdate = v
- return s
-}
-
-// SetReplicaUpdates sets the ReplicaUpdates field's value.
-func (s *UpdateTableReplicaAutoScalingInput) SetReplicaUpdates(v []*ReplicaAutoScalingUpdate) *UpdateTableReplicaAutoScalingInput {
- s.ReplicaUpdates = v
- return s
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateTableReplicaAutoScalingInput) SetTableName(v string) *UpdateTableReplicaAutoScalingInput {
- s.TableName = &v
- return s
-}
-
-type UpdateTableReplicaAutoScalingOutput struct {
- _ struct{} `type:"structure"`
-
- // Returns information about the auto scaling settings of a table with replicas.
- TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTableReplicaAutoScalingOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTableReplicaAutoScalingOutput) GoString() string {
- return s.String()
-}
-
-// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value.
-func (s *UpdateTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *UpdateTableReplicaAutoScalingOutput {
- s.TableAutoScalingDescription = v
- return s
-}
-
-// Represents the input of an UpdateTimeToLive operation.
-type UpdateTimeToLiveInput struct {
- _ struct{} `type:"structure"`
-
- // The name of the table to be configured. You can also provide the Amazon Resource
- // Name (ARN) of the table in this parameter.
- //
- // TableName is a required field
- TableName *string `min:"1" type:"string" required:"true"`
-
- // Represents the settings used to enable or disable Time to Live for the specified
- // table.
- //
- // TimeToLiveSpecification is a required field
- TimeToLiveSpecification *TimeToLiveSpecification `type:"structure" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTimeToLiveInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTimeToLiveInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *UpdateTimeToLiveInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "UpdateTimeToLiveInput"}
- if s.TableName == nil {
- invalidParams.Add(request.NewErrParamRequired("TableName"))
- }
- if s.TableName != nil && len(*s.TableName) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("TableName", 1))
- }
- if s.TimeToLiveSpecification == nil {
- invalidParams.Add(request.NewErrParamRequired("TimeToLiveSpecification"))
- }
- if s.TimeToLiveSpecification != nil {
- if err := s.TimeToLiveSpecification.Validate(); err != nil {
- invalidParams.AddNested("TimeToLiveSpecification", err.(request.ErrInvalidParams))
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetTableName sets the TableName field's value.
-func (s *UpdateTimeToLiveInput) SetTableName(v string) *UpdateTimeToLiveInput {
- s.TableName = &v
- return s
-}
-
-// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value.
-func (s *UpdateTimeToLiveInput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveInput {
- s.TimeToLiveSpecification = v
- return s
-}
-
-type UpdateTimeToLiveOutput struct {
- _ struct{} `type:"structure"`
-
- // Represents the output of an UpdateTimeToLive operation.
- TimeToLiveSpecification *TimeToLiveSpecification `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTimeToLiveOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UpdateTimeToLiveOutput) GoString() string {
- return s.String()
-}
-
-// SetTimeToLiveSpecification sets the TimeToLiveSpecification field's value.
-func (s *UpdateTimeToLiveOutput) SetTimeToLiveSpecification(v *TimeToLiveSpecification) *UpdateTimeToLiveOutput {
- s.TimeToLiveSpecification = v
- return s
-}
-
-// Represents an operation to perform - either DeleteItem or PutItem. You can
-// only request one of these operations, not both, in a single WriteRequest.
-// If you do need to perform both of these operations, you need to provide two
-// separate WriteRequest objects.
-type WriteRequest struct {
- _ struct{} `type:"structure"`
-
- // A request to perform a DeleteItem operation.
- DeleteRequest *DeleteRequest `type:"structure"`
-
- // A request to perform a PutItem operation.
- PutRequest *PutRequest `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s WriteRequest) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s WriteRequest) GoString() string {
- return s.String()
-}
-
-// SetDeleteRequest sets the DeleteRequest field's value.
-func (s *WriteRequest) SetDeleteRequest(v *DeleteRequest) *WriteRequest {
- s.DeleteRequest = v
- return s
-}
-
-// SetPutRequest sets the PutRequest field's value.
-func (s *WriteRequest) SetPutRequest(v *PutRequest) *WriteRequest {
- s.PutRequest = v
- return s
-}
-
-const (
- // ApproximateCreationDateTimePrecisionMillisecond is a ApproximateCreationDateTimePrecision enum value
- ApproximateCreationDateTimePrecisionMillisecond = "MILLISECOND"
-
- // ApproximateCreationDateTimePrecisionMicrosecond is a ApproximateCreationDateTimePrecision enum value
- ApproximateCreationDateTimePrecisionMicrosecond = "MICROSECOND"
-)
-
-// ApproximateCreationDateTimePrecision_Values returns all elements of the ApproximateCreationDateTimePrecision enum
-func ApproximateCreationDateTimePrecision_Values() []string {
- return []string{
- ApproximateCreationDateTimePrecisionMillisecond,
- ApproximateCreationDateTimePrecisionMicrosecond,
- }
-}
-
-const (
- // AttributeActionAdd is a AttributeAction enum value
- AttributeActionAdd = "ADD"
-
- // AttributeActionPut is a AttributeAction enum value
- AttributeActionPut = "PUT"
-
- // AttributeActionDelete is a AttributeAction enum value
- AttributeActionDelete = "DELETE"
-)
-
-// AttributeAction_Values returns all elements of the AttributeAction enum
-func AttributeAction_Values() []string {
- return []string{
- AttributeActionAdd,
- AttributeActionPut,
- AttributeActionDelete,
- }
-}
-
-const (
- // BackupStatusCreating is a BackupStatus enum value
- BackupStatusCreating = "CREATING"
-
- // BackupStatusDeleted is a BackupStatus enum value
- BackupStatusDeleted = "DELETED"
-
- // BackupStatusAvailable is a BackupStatus enum value
- BackupStatusAvailable = "AVAILABLE"
-)
-
-// BackupStatus_Values returns all elements of the BackupStatus enum
-func BackupStatus_Values() []string {
- return []string{
- BackupStatusCreating,
- BackupStatusDeleted,
- BackupStatusAvailable,
- }
-}
-
-const (
- // BackupTypeUser is a BackupType enum value
- BackupTypeUser = "USER"
-
- // BackupTypeSystem is a BackupType enum value
- BackupTypeSystem = "SYSTEM"
-
- // BackupTypeAwsBackup is a BackupType enum value
- BackupTypeAwsBackup = "AWS_BACKUP"
-)
-
-// BackupType_Values returns all elements of the BackupType enum
-func BackupType_Values() []string {
- return []string{
- BackupTypeUser,
- BackupTypeSystem,
- BackupTypeAwsBackup,
- }
-}
-
-const (
- // BackupTypeFilterUser is a BackupTypeFilter enum value
- BackupTypeFilterUser = "USER"
-
- // BackupTypeFilterSystem is a BackupTypeFilter enum value
- BackupTypeFilterSystem = "SYSTEM"
-
- // BackupTypeFilterAwsBackup is a BackupTypeFilter enum value
- BackupTypeFilterAwsBackup = "AWS_BACKUP"
-
- // BackupTypeFilterAll is a BackupTypeFilter enum value
- BackupTypeFilterAll = "ALL"
-)
-
-// BackupTypeFilter_Values returns all elements of the BackupTypeFilter enum
-func BackupTypeFilter_Values() []string {
- return []string{
- BackupTypeFilterUser,
- BackupTypeFilterSystem,
- BackupTypeFilterAwsBackup,
- BackupTypeFilterAll,
- }
-}
-
-const (
- // BatchStatementErrorCodeEnumConditionalCheckFailed is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumConditionalCheckFailed = "ConditionalCheckFailed"
-
- // BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded = "ItemCollectionSizeLimitExceeded"
-
- // BatchStatementErrorCodeEnumRequestLimitExceeded is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumRequestLimitExceeded = "RequestLimitExceeded"
-
- // BatchStatementErrorCodeEnumValidationError is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumValidationError = "ValidationError"
-
- // BatchStatementErrorCodeEnumProvisionedThroughputExceeded is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumProvisionedThroughputExceeded = "ProvisionedThroughputExceeded"
-
- // BatchStatementErrorCodeEnumTransactionConflict is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumTransactionConflict = "TransactionConflict"
-
- // BatchStatementErrorCodeEnumThrottlingError is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumThrottlingError = "ThrottlingError"
-
- // BatchStatementErrorCodeEnumInternalServerError is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumInternalServerError = "InternalServerError"
-
- // BatchStatementErrorCodeEnumResourceNotFound is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumResourceNotFound = "ResourceNotFound"
-
- // BatchStatementErrorCodeEnumAccessDenied is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumAccessDenied = "AccessDenied"
-
- // BatchStatementErrorCodeEnumDuplicateItem is a BatchStatementErrorCodeEnum enum value
- BatchStatementErrorCodeEnumDuplicateItem = "DuplicateItem"
-)
-
-// BatchStatementErrorCodeEnum_Values returns all elements of the BatchStatementErrorCodeEnum enum
-func BatchStatementErrorCodeEnum_Values() []string {
- return []string{
- BatchStatementErrorCodeEnumConditionalCheckFailed,
- BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded,
- BatchStatementErrorCodeEnumRequestLimitExceeded,
- BatchStatementErrorCodeEnumValidationError,
- BatchStatementErrorCodeEnumProvisionedThroughputExceeded,
- BatchStatementErrorCodeEnumTransactionConflict,
- BatchStatementErrorCodeEnumThrottlingError,
- BatchStatementErrorCodeEnumInternalServerError,
- BatchStatementErrorCodeEnumResourceNotFound,
- BatchStatementErrorCodeEnumAccessDenied,
- BatchStatementErrorCodeEnumDuplicateItem,
- }
-}
-
-const (
- // BillingModeProvisioned is a BillingMode enum value
- BillingModeProvisioned = "PROVISIONED"
-
- // BillingModePayPerRequest is a BillingMode enum value
- BillingModePayPerRequest = "PAY_PER_REQUEST"
-)
-
-// BillingMode_Values returns all elements of the BillingMode enum
-func BillingMode_Values() []string {
- return []string{
- BillingModeProvisioned,
- BillingModePayPerRequest,
- }
-}
-
-const (
- // ComparisonOperatorEq is a ComparisonOperator enum value
- ComparisonOperatorEq = "EQ"
-
- // ComparisonOperatorNe is a ComparisonOperator enum value
- ComparisonOperatorNe = "NE"
-
- // ComparisonOperatorIn is a ComparisonOperator enum value
- ComparisonOperatorIn = "IN"
-
- // ComparisonOperatorLe is a ComparisonOperator enum value
- ComparisonOperatorLe = "LE"
-
- // ComparisonOperatorLt is a ComparisonOperator enum value
- ComparisonOperatorLt = "LT"
-
- // ComparisonOperatorGe is a ComparisonOperator enum value
- ComparisonOperatorGe = "GE"
-
- // ComparisonOperatorGt is a ComparisonOperator enum value
- ComparisonOperatorGt = "GT"
-
- // ComparisonOperatorBetween is a ComparisonOperator enum value
- ComparisonOperatorBetween = "BETWEEN"
-
- // ComparisonOperatorNotNull is a ComparisonOperator enum value
- ComparisonOperatorNotNull = "NOT_NULL"
-
- // ComparisonOperatorNull is a ComparisonOperator enum value
- ComparisonOperatorNull = "NULL"
-
- // ComparisonOperatorContains is a ComparisonOperator enum value
- ComparisonOperatorContains = "CONTAINS"
-
- // ComparisonOperatorNotContains is a ComparisonOperator enum value
- ComparisonOperatorNotContains = "NOT_CONTAINS"
-
- // ComparisonOperatorBeginsWith is a ComparisonOperator enum value
- ComparisonOperatorBeginsWith = "BEGINS_WITH"
-)
-
-// ComparisonOperator_Values returns all elements of the ComparisonOperator enum
-func ComparisonOperator_Values() []string {
- return []string{
- ComparisonOperatorEq,
- ComparisonOperatorNe,
- ComparisonOperatorIn,
- ComparisonOperatorLe,
- ComparisonOperatorLt,
- ComparisonOperatorGe,
- ComparisonOperatorGt,
- ComparisonOperatorBetween,
- ComparisonOperatorNotNull,
- ComparisonOperatorNull,
- ComparisonOperatorContains,
- ComparisonOperatorNotContains,
- ComparisonOperatorBeginsWith,
- }
-}
-
-const (
- // ConditionalOperatorAnd is a ConditionalOperator enum value
- ConditionalOperatorAnd = "AND"
-
- // ConditionalOperatorOr is a ConditionalOperator enum value
- ConditionalOperatorOr = "OR"
-)
-
-// ConditionalOperator_Values returns all elements of the ConditionalOperator enum
-func ConditionalOperator_Values() []string {
- return []string{
- ConditionalOperatorAnd,
- ConditionalOperatorOr,
- }
-}
-
-const (
- // ContinuousBackupsStatusEnabled is a ContinuousBackupsStatus enum value
- ContinuousBackupsStatusEnabled = "ENABLED"
-
- // ContinuousBackupsStatusDisabled is a ContinuousBackupsStatus enum value
- ContinuousBackupsStatusDisabled = "DISABLED"
-)
-
-// ContinuousBackupsStatus_Values returns all elements of the ContinuousBackupsStatus enum
-func ContinuousBackupsStatus_Values() []string {
- return []string{
- ContinuousBackupsStatusEnabled,
- ContinuousBackupsStatusDisabled,
- }
-}
-
-const (
- // ContributorInsightsActionEnable is a ContributorInsightsAction enum value
- ContributorInsightsActionEnable = "ENABLE"
-
- // ContributorInsightsActionDisable is a ContributorInsightsAction enum value
- ContributorInsightsActionDisable = "DISABLE"
-)
-
-// ContributorInsightsAction_Values returns all elements of the ContributorInsightsAction enum
-func ContributorInsightsAction_Values() []string {
- return []string{
- ContributorInsightsActionEnable,
- ContributorInsightsActionDisable,
- }
-}
-
-const (
- // ContributorInsightsStatusEnabling is a ContributorInsightsStatus enum value
- ContributorInsightsStatusEnabling = "ENABLING"
-
- // ContributorInsightsStatusEnabled is a ContributorInsightsStatus enum value
- ContributorInsightsStatusEnabled = "ENABLED"
-
- // ContributorInsightsStatusDisabling is a ContributorInsightsStatus enum value
- ContributorInsightsStatusDisabling = "DISABLING"
-
- // ContributorInsightsStatusDisabled is a ContributorInsightsStatus enum value
- ContributorInsightsStatusDisabled = "DISABLED"
-
- // ContributorInsightsStatusFailed is a ContributorInsightsStatus enum value
- ContributorInsightsStatusFailed = "FAILED"
-)
-
-// ContributorInsightsStatus_Values returns all elements of the ContributorInsightsStatus enum
-func ContributorInsightsStatus_Values() []string {
- return []string{
- ContributorInsightsStatusEnabling,
- ContributorInsightsStatusEnabled,
- ContributorInsightsStatusDisabling,
- ContributorInsightsStatusDisabled,
- ContributorInsightsStatusFailed,
- }
-}
-
-const (
- // DestinationStatusEnabling is a DestinationStatus enum value
- DestinationStatusEnabling = "ENABLING"
-
- // DestinationStatusActive is a DestinationStatus enum value
- DestinationStatusActive = "ACTIVE"
-
- // DestinationStatusDisabling is a DestinationStatus enum value
- DestinationStatusDisabling = "DISABLING"
-
- // DestinationStatusDisabled is a DestinationStatus enum value
- DestinationStatusDisabled = "DISABLED"
-
- // DestinationStatusEnableFailed is a DestinationStatus enum value
- DestinationStatusEnableFailed = "ENABLE_FAILED"
-
- // DestinationStatusUpdating is a DestinationStatus enum value
- DestinationStatusUpdating = "UPDATING"
-)
-
-// DestinationStatus_Values returns all elements of the DestinationStatus enum
-func DestinationStatus_Values() []string {
- return []string{
- DestinationStatusEnabling,
- DestinationStatusActive,
- DestinationStatusDisabling,
- DestinationStatusDisabled,
- DestinationStatusEnableFailed,
- DestinationStatusUpdating,
- }
-}
-
-const (
- // ExportFormatDynamodbJson is a ExportFormat enum value
- ExportFormatDynamodbJson = "DYNAMODB_JSON"
-
- // ExportFormatIon is a ExportFormat enum value
- ExportFormatIon = "ION"
-)
-
-// ExportFormat_Values returns all elements of the ExportFormat enum
-func ExportFormat_Values() []string {
- return []string{
- ExportFormatDynamodbJson,
- ExportFormatIon,
- }
-}
-
-const (
- // ExportStatusInProgress is a ExportStatus enum value
- ExportStatusInProgress = "IN_PROGRESS"
-
- // ExportStatusCompleted is a ExportStatus enum value
- ExportStatusCompleted = "COMPLETED"
-
- // ExportStatusFailed is a ExportStatus enum value
- ExportStatusFailed = "FAILED"
-)
-
-// ExportStatus_Values returns all elements of the ExportStatus enum
-func ExportStatus_Values() []string {
- return []string{
- ExportStatusInProgress,
- ExportStatusCompleted,
- ExportStatusFailed,
- }
-}
-
-const (
- // ExportTypeFullExport is a ExportType enum value
- ExportTypeFullExport = "FULL_EXPORT"
-
- // ExportTypeIncrementalExport is a ExportType enum value
- ExportTypeIncrementalExport = "INCREMENTAL_EXPORT"
-)
-
-// ExportType_Values returns all elements of the ExportType enum
-func ExportType_Values() []string {
- return []string{
- ExportTypeFullExport,
- ExportTypeIncrementalExport,
- }
-}
-
-const (
- // ExportViewTypeNewImage is a ExportViewType enum value
- ExportViewTypeNewImage = "NEW_IMAGE"
-
- // ExportViewTypeNewAndOldImages is a ExportViewType enum value
- ExportViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES"
-)
-
-// ExportViewType_Values returns all elements of the ExportViewType enum
-func ExportViewType_Values() []string {
- return []string{
- ExportViewTypeNewImage,
- ExportViewTypeNewAndOldImages,
- }
-}
-
-const (
- // GlobalTableStatusCreating is a GlobalTableStatus enum value
- GlobalTableStatusCreating = "CREATING"
-
- // GlobalTableStatusActive is a GlobalTableStatus enum value
- GlobalTableStatusActive = "ACTIVE"
-
- // GlobalTableStatusDeleting is a GlobalTableStatus enum value
- GlobalTableStatusDeleting = "DELETING"
-
- // GlobalTableStatusUpdating is a GlobalTableStatus enum value
- GlobalTableStatusUpdating = "UPDATING"
-)
-
-// GlobalTableStatus_Values returns all elements of the GlobalTableStatus enum
-func GlobalTableStatus_Values() []string {
- return []string{
- GlobalTableStatusCreating,
- GlobalTableStatusActive,
- GlobalTableStatusDeleting,
- GlobalTableStatusUpdating,
- }
-}
-
-const (
- // ImportStatusInProgress is a ImportStatus enum value
- ImportStatusInProgress = "IN_PROGRESS"
-
- // ImportStatusCompleted is a ImportStatus enum value
- ImportStatusCompleted = "COMPLETED"
-
- // ImportStatusCancelling is a ImportStatus enum value
- ImportStatusCancelling = "CANCELLING"
-
- // ImportStatusCancelled is a ImportStatus enum value
- ImportStatusCancelled = "CANCELLED"
-
- // ImportStatusFailed is a ImportStatus enum value
- ImportStatusFailed = "FAILED"
-)
-
-// ImportStatus_Values returns all elements of the ImportStatus enum
-func ImportStatus_Values() []string {
- return []string{
- ImportStatusInProgress,
- ImportStatusCompleted,
- ImportStatusCancelling,
- ImportStatusCancelled,
- ImportStatusFailed,
- }
-}
-
-const (
- // IndexStatusCreating is a IndexStatus enum value
- IndexStatusCreating = "CREATING"
-
- // IndexStatusUpdating is a IndexStatus enum value
- IndexStatusUpdating = "UPDATING"
-
- // IndexStatusDeleting is a IndexStatus enum value
- IndexStatusDeleting = "DELETING"
-
- // IndexStatusActive is a IndexStatus enum value
- IndexStatusActive = "ACTIVE"
-)
-
-// IndexStatus_Values returns all elements of the IndexStatus enum
-func IndexStatus_Values() []string {
- return []string{
- IndexStatusCreating,
- IndexStatusUpdating,
- IndexStatusDeleting,
- IndexStatusActive,
- }
-}
-
-const (
- // InputCompressionTypeGzip is a InputCompressionType enum value
- InputCompressionTypeGzip = "GZIP"
-
- // InputCompressionTypeZstd is a InputCompressionType enum value
- InputCompressionTypeZstd = "ZSTD"
-
- // InputCompressionTypeNone is a InputCompressionType enum value
- InputCompressionTypeNone = "NONE"
-)
-
-// InputCompressionType_Values returns all elements of the InputCompressionType enum
-func InputCompressionType_Values() []string {
- return []string{
- InputCompressionTypeGzip,
- InputCompressionTypeZstd,
- InputCompressionTypeNone,
- }
-}
-
-const (
- // InputFormatDynamodbJson is a InputFormat enum value
- InputFormatDynamodbJson = "DYNAMODB_JSON"
-
- // InputFormatIon is a InputFormat enum value
- InputFormatIon = "ION"
-
- // InputFormatCsv is a InputFormat enum value
- InputFormatCsv = "CSV"
-)
-
-// InputFormat_Values returns all elements of the InputFormat enum
-func InputFormat_Values() []string {
- return []string{
- InputFormatDynamodbJson,
- InputFormatIon,
- InputFormatCsv,
- }
-}
-
-const (
- // KeyTypeHash is a KeyType enum value
- KeyTypeHash = "HASH"
-
- // KeyTypeRange is a KeyType enum value
- KeyTypeRange = "RANGE"
-)
-
-// KeyType_Values returns all elements of the KeyType enum
-func KeyType_Values() []string {
- return []string{
- KeyTypeHash,
- KeyTypeRange,
- }
-}
-
-const (
- // PointInTimeRecoveryStatusEnabled is a PointInTimeRecoveryStatus enum value
- PointInTimeRecoveryStatusEnabled = "ENABLED"
-
- // PointInTimeRecoveryStatusDisabled is a PointInTimeRecoveryStatus enum value
- PointInTimeRecoveryStatusDisabled = "DISABLED"
-)
-
-// PointInTimeRecoveryStatus_Values returns all elements of the PointInTimeRecoveryStatus enum
-func PointInTimeRecoveryStatus_Values() []string {
- return []string{
- PointInTimeRecoveryStatusEnabled,
- PointInTimeRecoveryStatusDisabled,
- }
-}
-
-const (
- // ProjectionTypeAll is a ProjectionType enum value
- ProjectionTypeAll = "ALL"
-
- // ProjectionTypeKeysOnly is a ProjectionType enum value
- ProjectionTypeKeysOnly = "KEYS_ONLY"
-
- // ProjectionTypeInclude is a ProjectionType enum value
- ProjectionTypeInclude = "INCLUDE"
-)
-
-// ProjectionType_Values returns all elements of the ProjectionType enum
-func ProjectionType_Values() []string {
- return []string{
- ProjectionTypeAll,
- ProjectionTypeKeysOnly,
- ProjectionTypeInclude,
- }
-}
-
-const (
- // ReplicaStatusCreating is a ReplicaStatus enum value
- ReplicaStatusCreating = "CREATING"
-
- // ReplicaStatusCreationFailed is a ReplicaStatus enum value
- ReplicaStatusCreationFailed = "CREATION_FAILED"
-
- // ReplicaStatusUpdating is a ReplicaStatus enum value
- ReplicaStatusUpdating = "UPDATING"
-
- // ReplicaStatusDeleting is a ReplicaStatus enum value
- ReplicaStatusDeleting = "DELETING"
-
- // ReplicaStatusActive is a ReplicaStatus enum value
- ReplicaStatusActive = "ACTIVE"
-
- // ReplicaStatusRegionDisabled is a ReplicaStatus enum value
- ReplicaStatusRegionDisabled = "REGION_DISABLED"
-
- // ReplicaStatusInaccessibleEncryptionCredentials is a ReplicaStatus enum value
- ReplicaStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS"
-)
-
-// ReplicaStatus_Values returns all elements of the ReplicaStatus enum
-func ReplicaStatus_Values() []string {
- return []string{
- ReplicaStatusCreating,
- ReplicaStatusCreationFailed,
- ReplicaStatusUpdating,
- ReplicaStatusDeleting,
- ReplicaStatusActive,
- ReplicaStatusRegionDisabled,
- ReplicaStatusInaccessibleEncryptionCredentials,
- }
-}
-
-// Determines the level of detail about either provisioned or on-demand throughput
-// consumption that is returned in the response:
-//
-// - INDEXES - The response includes the aggregate ConsumedCapacity for the
-// operation, together with ConsumedCapacity for each table and secondary
-// index that was accessed. Note that some operations, such as GetItem and
-// BatchGetItem, do not access any indexes at all. In these cases, specifying
-// INDEXES will only return ConsumedCapacity information for table(s).
-//
-// - TOTAL - The response includes only the aggregate ConsumedCapacity for
-// the operation.
-//
-// - NONE - No ConsumedCapacity details are included in the response.
-const (
- // ReturnConsumedCapacityIndexes is a ReturnConsumedCapacity enum value
- ReturnConsumedCapacityIndexes = "INDEXES"
-
- // ReturnConsumedCapacityTotal is a ReturnConsumedCapacity enum value
- ReturnConsumedCapacityTotal = "TOTAL"
-
- // ReturnConsumedCapacityNone is a ReturnConsumedCapacity enum value
- ReturnConsumedCapacityNone = "NONE"
-)
-
-// ReturnConsumedCapacity_Values returns all elements of the ReturnConsumedCapacity enum
-func ReturnConsumedCapacity_Values() []string {
- return []string{
- ReturnConsumedCapacityIndexes,
- ReturnConsumedCapacityTotal,
- ReturnConsumedCapacityNone,
- }
-}
-
-const (
- // ReturnItemCollectionMetricsSize is a ReturnItemCollectionMetrics enum value
- ReturnItemCollectionMetricsSize = "SIZE"
-
- // ReturnItemCollectionMetricsNone is a ReturnItemCollectionMetrics enum value
- ReturnItemCollectionMetricsNone = "NONE"
-)
-
-// ReturnItemCollectionMetrics_Values returns all elements of the ReturnItemCollectionMetrics enum
-func ReturnItemCollectionMetrics_Values() []string {
- return []string{
- ReturnItemCollectionMetricsSize,
- ReturnItemCollectionMetricsNone,
- }
-}
-
-const (
- // ReturnValueNone is a ReturnValue enum value
- ReturnValueNone = "NONE"
-
- // ReturnValueAllOld is a ReturnValue enum value
- ReturnValueAllOld = "ALL_OLD"
-
- // ReturnValueUpdatedOld is a ReturnValue enum value
- ReturnValueUpdatedOld = "UPDATED_OLD"
-
- // ReturnValueAllNew is a ReturnValue enum value
- ReturnValueAllNew = "ALL_NEW"
-
- // ReturnValueUpdatedNew is a ReturnValue enum value
- ReturnValueUpdatedNew = "UPDATED_NEW"
-)
-
-// ReturnValue_Values returns all elements of the ReturnValue enum
-func ReturnValue_Values() []string {
- return []string{
- ReturnValueNone,
- ReturnValueAllOld,
- ReturnValueUpdatedOld,
- ReturnValueAllNew,
- ReturnValueUpdatedNew,
- }
-}
-
-const (
- // ReturnValuesOnConditionCheckFailureAllOld is a ReturnValuesOnConditionCheckFailure enum value
- ReturnValuesOnConditionCheckFailureAllOld = "ALL_OLD"
-
- // ReturnValuesOnConditionCheckFailureNone is a ReturnValuesOnConditionCheckFailure enum value
- ReturnValuesOnConditionCheckFailureNone = "NONE"
-)
-
-// ReturnValuesOnConditionCheckFailure_Values returns all elements of the ReturnValuesOnConditionCheckFailure enum
-func ReturnValuesOnConditionCheckFailure_Values() []string {
- return []string{
- ReturnValuesOnConditionCheckFailureAllOld,
- ReturnValuesOnConditionCheckFailureNone,
- }
-}
-
-const (
- // S3SseAlgorithmAes256 is a S3SseAlgorithm enum value
- S3SseAlgorithmAes256 = "AES256"
-
- // S3SseAlgorithmKms is a S3SseAlgorithm enum value
- S3SseAlgorithmKms = "KMS"
-)
-
-// S3SseAlgorithm_Values returns all elements of the S3SseAlgorithm enum
-func S3SseAlgorithm_Values() []string {
- return []string{
- S3SseAlgorithmAes256,
- S3SseAlgorithmKms,
- }
-}
-
-const (
- // SSEStatusEnabling is a SSEStatus enum value
- SSEStatusEnabling = "ENABLING"
-
- // SSEStatusEnabled is a SSEStatus enum value
- SSEStatusEnabled = "ENABLED"
-
- // SSEStatusDisabling is a SSEStatus enum value
- SSEStatusDisabling = "DISABLING"
-
- // SSEStatusDisabled is a SSEStatus enum value
- SSEStatusDisabled = "DISABLED"
-
- // SSEStatusUpdating is a SSEStatus enum value
- SSEStatusUpdating = "UPDATING"
-)
-
-// SSEStatus_Values returns all elements of the SSEStatus enum
-func SSEStatus_Values() []string {
- return []string{
- SSEStatusEnabling,
- SSEStatusEnabled,
- SSEStatusDisabling,
- SSEStatusDisabled,
- SSEStatusUpdating,
- }
-}
-
-const (
- // SSETypeAes256 is a SSEType enum value
- SSETypeAes256 = "AES256"
-
- // SSETypeKms is a SSEType enum value
- SSETypeKms = "KMS"
-)
-
-// SSEType_Values returns all elements of the SSEType enum
-func SSEType_Values() []string {
- return []string{
- SSETypeAes256,
- SSETypeKms,
- }
-}
-
-const (
- // ScalarAttributeTypeS is a ScalarAttributeType enum value
- ScalarAttributeTypeS = "S"
-
- // ScalarAttributeTypeN is a ScalarAttributeType enum value
- ScalarAttributeTypeN = "N"
-
- // ScalarAttributeTypeB is a ScalarAttributeType enum value
- ScalarAttributeTypeB = "B"
-)
-
-// ScalarAttributeType_Values returns all elements of the ScalarAttributeType enum
-func ScalarAttributeType_Values() []string {
- return []string{
- ScalarAttributeTypeS,
- ScalarAttributeTypeN,
- ScalarAttributeTypeB,
- }
-}
-
-const (
- // SelectAllAttributes is a Select enum value
- SelectAllAttributes = "ALL_ATTRIBUTES"
-
- // SelectAllProjectedAttributes is a Select enum value
- SelectAllProjectedAttributes = "ALL_PROJECTED_ATTRIBUTES"
-
- // SelectSpecificAttributes is a Select enum value
- SelectSpecificAttributes = "SPECIFIC_ATTRIBUTES"
-
- // SelectCount is a Select enum value
- SelectCount = "COUNT"
-)
-
-// Select_Values returns all elements of the Select enum
-func Select_Values() []string {
- return []string{
- SelectAllAttributes,
- SelectAllProjectedAttributes,
- SelectSpecificAttributes,
- SelectCount,
- }
-}
-
-const (
- // StreamViewTypeNewImage is a StreamViewType enum value
- StreamViewTypeNewImage = "NEW_IMAGE"
-
- // StreamViewTypeOldImage is a StreamViewType enum value
- StreamViewTypeOldImage = "OLD_IMAGE"
-
- // StreamViewTypeNewAndOldImages is a StreamViewType enum value
- StreamViewTypeNewAndOldImages = "NEW_AND_OLD_IMAGES"
-
- // StreamViewTypeKeysOnly is a StreamViewType enum value
- StreamViewTypeKeysOnly = "KEYS_ONLY"
-)
-
-// StreamViewType_Values returns all elements of the StreamViewType enum
-func StreamViewType_Values() []string {
- return []string{
- StreamViewTypeNewImage,
- StreamViewTypeOldImage,
- StreamViewTypeNewAndOldImages,
- StreamViewTypeKeysOnly,
- }
-}
-
-const (
- // TableClassStandard is a TableClass enum value
- TableClassStandard = "STANDARD"
-
- // TableClassStandardInfrequentAccess is a TableClass enum value
- TableClassStandardInfrequentAccess = "STANDARD_INFREQUENT_ACCESS"
-)
-
-// TableClass_Values returns all elements of the TableClass enum
-func TableClass_Values() []string {
- return []string{
- TableClassStandard,
- TableClassStandardInfrequentAccess,
- }
-}
-
-const (
- // TableStatusCreating is a TableStatus enum value
- TableStatusCreating = "CREATING"
-
- // TableStatusUpdating is a TableStatus enum value
- TableStatusUpdating = "UPDATING"
-
- // TableStatusDeleting is a TableStatus enum value
- TableStatusDeleting = "DELETING"
-
- // TableStatusActive is a TableStatus enum value
- TableStatusActive = "ACTIVE"
-
- // TableStatusInaccessibleEncryptionCredentials is a TableStatus enum value
- TableStatusInaccessibleEncryptionCredentials = "INACCESSIBLE_ENCRYPTION_CREDENTIALS"
-
- // TableStatusArchiving is a TableStatus enum value
- TableStatusArchiving = "ARCHIVING"
-
- // TableStatusArchived is a TableStatus enum value
- TableStatusArchived = "ARCHIVED"
-)
-
-// TableStatus_Values returns all elements of the TableStatus enum
-func TableStatus_Values() []string {
- return []string{
- TableStatusCreating,
- TableStatusUpdating,
- TableStatusDeleting,
- TableStatusActive,
- TableStatusInaccessibleEncryptionCredentials,
- TableStatusArchiving,
- TableStatusArchived,
- }
-}
-
-const (
- // TimeToLiveStatusEnabling is a TimeToLiveStatus enum value
- TimeToLiveStatusEnabling = "ENABLING"
-
- // TimeToLiveStatusDisabling is a TimeToLiveStatus enum value
- TimeToLiveStatusDisabling = "DISABLING"
-
- // TimeToLiveStatusEnabled is a TimeToLiveStatus enum value
- TimeToLiveStatusEnabled = "ENABLED"
-
- // TimeToLiveStatusDisabled is a TimeToLiveStatus enum value
- TimeToLiveStatusDisabled = "DISABLED"
-)
-
-// TimeToLiveStatus_Values returns all elements of the TimeToLiveStatus enum
-func TimeToLiveStatus_Values() []string {
- return []string{
- TimeToLiveStatusEnabling,
- TimeToLiveStatusDisabling,
- TimeToLiveStatusEnabled,
- TimeToLiveStatusDisabled,
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go
deleted file mode 100644
index c019e63df..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/customizations.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package dynamodb
-
-import (
- "bytes"
- "hash/crc32"
- "io"
- "io/ioutil"
- "strconv"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-func init() {
- initClient = func(c *client.Client) {
- if c.Config.Retryer == nil {
- // Only override the retryer with a custom one if the config
- // does not already contain a retryer
- setCustomRetryer(c)
- }
-
- c.Handlers.Build.PushBack(disableCompression)
- c.Handlers.Unmarshal.PushFront(validateCRC32)
- }
-}
-
-func setCustomRetryer(c *client.Client) {
- maxRetries := aws.IntValue(c.Config.MaxRetries)
- if c.Config.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
- maxRetries = 10
- }
-
- c.Retryer = client.DefaultRetryer{
- NumMaxRetries: maxRetries,
- MinRetryDelay: 50 * time.Millisecond,
- }
-}
-
-func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) {
- if length < 0 {
- length = 0
- }
- buf := bytes.NewBuffer(make([]byte, 0, length))
-
- if _, err = buf.ReadFrom(b); err != nil {
- return nil, err
- }
- if err = b.Close(); err != nil {
- return nil, err
- }
- return buf, nil
-}
-
-func disableCompression(r *request.Request) {
- r.HTTPRequest.Header.Set("Accept-Encoding", "identity")
-}
-
-func validateCRC32(r *request.Request) {
- if r.Error != nil {
- return // already have an error, no need to verify CRC
- }
-
- // Checksum validation is off, skip
- if aws.BoolValue(r.Config.DisableComputeChecksums) {
- return
- }
-
- // Try to get CRC from response
- header := r.HTTPResponse.Header.Get("X-Amz-Crc32")
- if header == "" {
- return // No header, skip
- }
-
- expected, err := strconv.ParseUint(header, 10, 32)
- if err != nil {
- return // Could not determine CRC value, skip
- }
-
- buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength)
- if err != nil { // failed to read the response body, skip
- return
- }
-
- // Reset body for subsequent reads
- r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader(buf.Bytes()))
-
- // Compute the CRC checksum
- crc := crc32.ChecksumIEEE(buf.Bytes())
-
- if crc != uint32(expected) {
- // CRC does not match, set a retryable error
- r.Retryable = aws.Bool(true)
- r.Error = awserr.New("CRC32CheckFailed", "CRC32 integrity check failed", nil)
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go
deleted file mode 100644
index ab12b274f..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package dynamodb provides the client and types for making API
-// requests to Amazon DynamoDB.
-//
-// Amazon DynamoDB is a fully managed NoSQL database service that provides fast
-// and predictable performance with seamless scalability. DynamoDB lets you
-// offload the administrative burdens of operating and scaling a distributed
-// database, so that you don't have to worry about hardware provisioning, setup
-// and configuration, replication, software patching, or cluster scaling.
-//
-// With DynamoDB, you can create database tables that can store and retrieve
-// any amount of data, and serve any level of request traffic. You can scale
-// up or scale down your tables' throughput capacity without downtime or performance
-// degradation, and use the Amazon Web Services Management Console to monitor
-// resource utilization and performance metrics.
-//
-// DynamoDB automatically spreads the data and traffic for your tables over
-// a sufficient number of servers to handle your throughput and storage requirements,
-// while maintaining consistent and fast performance. All of your data is stored
-// on solid state disks (SSDs) and automatically replicated across multiple
-// Availability Zones in an Amazon Web Services Region, providing built-in high
-// availability and data durability.
-//
-// See https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10 for more information on this service.
-//
-// See dynamodb package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/
-//
-// # Using the Client
-//
-// To contact Amazon DynamoDB with the SDK use the New function to create
-// a new service client. With that client you can make API requests to the service.
-// These clients are safe to use concurrently.
-//
-// See the SDK's documentation for more information on how to use the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/
-//
-// See aws.Config documentation for more information on configuring SDK clients.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
-//
-// See the Amazon DynamoDB client DynamoDB for more
-// information on creating client for this service.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New
-package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go
deleted file mode 100644
index 0cca7e4b9..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/doc_custom.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-AttributeValue Marshaling and Unmarshaling Helpers
-
-Utility helpers to marshal and unmarshal AttributeValue to and
-from Go types can be found in the dynamodbattribute sub package. This package
-provides specialized functions for the common ways of working with
-AttributeValues. Such as map[string]*AttributeValue, []*AttributeValue, and
-directly with *AttributeValue. This is helpful for marshaling Go types for API
-operations such as PutItem, and unmarshaling Query and Scan APIs' responses.
-
-See the dynamodbattribute package documentation for more information.
-https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/dynamodbattribute/
-
-# Expression Builders
-
-The expression package provides utility types and functions to build DynamoDB
-expression for type safe construction of API ExpressionAttributeNames, and
-ExpressionAttribute Values.
-
-The package represents the various DynamoDB Expressions as structs named
-accordingly. For example, ConditionBuilder represents a DynamoDB Condition
-Expression, an UpdateBuilder represents a DynamoDB Update Expression, and so on.
-
-See the expression package documentation for more information.
-https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/expression/
-*/
-package dynamodb
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
deleted file mode 100644
index e63411248..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package dynamodbiface provides an interface to enable mocking the Amazon DynamoDB service client
-// for testing your code.
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters.
-package dynamodbiface
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/service/dynamodb"
-)
-
-// DynamoDBAPI provides an interface to enable mocking the
-// dynamodb.DynamoDB service client's API operation,
-// paginators, and waiters. This make unit testing your code that calls out
-// to the SDK's service client's calls easier.
-//
-// The best way to use this interface is so the SDK's service client's calls
-// can be stubbed out for unit testing your code with the SDK without needing
-// to inject custom request handlers into the SDK's request pipeline.
-//
-// // myFunc uses an SDK service client to make a request to
-// // Amazon DynamoDB.
-// func myFunc(svc dynamodbiface.DynamoDBAPI) bool {
-// // Make svc.BatchExecuteStatement request
-// }
-//
-// func main() {
-// sess := session.New()
-// svc := dynamodb.New(sess)
-//
-// myFunc(svc)
-// }
-//
-// In your _test.go file:
-//
-// // Define a mock struct to be used in your unit tests of myFunc.
-// type mockDynamoDBClient struct {
-// dynamodbiface.DynamoDBAPI
-// }
-// func (m *mockDynamoDBClient) BatchExecuteStatement(input *dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) {
-// // mock response/functionality
-// }
-//
-// func TestMyFunc(t *testing.T) {
-// // Setup Test
-// mockSvc := &mockDynamoDBClient{}
-//
-// myfunc(mockSvc)
-//
-// // Verify myFunc's functionality
-// }
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters. Its suggested to use the pattern above for testing, or using
-// tooling to generate mocks to satisfy the interfaces.
-type DynamoDBAPI interface {
- BatchExecuteStatement(*dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error)
- BatchExecuteStatementWithContext(aws.Context, *dynamodb.BatchExecuteStatementInput, ...request.Option) (*dynamodb.BatchExecuteStatementOutput, error)
- BatchExecuteStatementRequest(*dynamodb.BatchExecuteStatementInput) (*request.Request, *dynamodb.BatchExecuteStatementOutput)
-
- BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error)
- BatchGetItemWithContext(aws.Context, *dynamodb.BatchGetItemInput, ...request.Option) (*dynamodb.BatchGetItemOutput, error)
- BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput)
-
- BatchGetItemPages(*dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool) error
- BatchGetItemPagesWithContext(aws.Context, *dynamodb.BatchGetItemInput, func(*dynamodb.BatchGetItemOutput, bool) bool, ...request.Option) error
-
- BatchWriteItem(*dynamodb.BatchWriteItemInput) (*dynamodb.BatchWriteItemOutput, error)
- BatchWriteItemWithContext(aws.Context, *dynamodb.BatchWriteItemInput, ...request.Option) (*dynamodb.BatchWriteItemOutput, error)
- BatchWriteItemRequest(*dynamodb.BatchWriteItemInput) (*request.Request, *dynamodb.BatchWriteItemOutput)
-
- CreateBackup(*dynamodb.CreateBackupInput) (*dynamodb.CreateBackupOutput, error)
- CreateBackupWithContext(aws.Context, *dynamodb.CreateBackupInput, ...request.Option) (*dynamodb.CreateBackupOutput, error)
- CreateBackupRequest(*dynamodb.CreateBackupInput) (*request.Request, *dynamodb.CreateBackupOutput)
-
- CreateGlobalTable(*dynamodb.CreateGlobalTableInput) (*dynamodb.CreateGlobalTableOutput, error)
- CreateGlobalTableWithContext(aws.Context, *dynamodb.CreateGlobalTableInput, ...request.Option) (*dynamodb.CreateGlobalTableOutput, error)
- CreateGlobalTableRequest(*dynamodb.CreateGlobalTableInput) (*request.Request, *dynamodb.CreateGlobalTableOutput)
-
- CreateTable(*dynamodb.CreateTableInput) (*dynamodb.CreateTableOutput, error)
- CreateTableWithContext(aws.Context, *dynamodb.CreateTableInput, ...request.Option) (*dynamodb.CreateTableOutput, error)
- CreateTableRequest(*dynamodb.CreateTableInput) (*request.Request, *dynamodb.CreateTableOutput)
-
- DeleteBackup(*dynamodb.DeleteBackupInput) (*dynamodb.DeleteBackupOutput, error)
- DeleteBackupWithContext(aws.Context, *dynamodb.DeleteBackupInput, ...request.Option) (*dynamodb.DeleteBackupOutput, error)
- DeleteBackupRequest(*dynamodb.DeleteBackupInput) (*request.Request, *dynamodb.DeleteBackupOutput)
-
- DeleteItem(*dynamodb.DeleteItemInput) (*dynamodb.DeleteItemOutput, error)
- DeleteItemWithContext(aws.Context, *dynamodb.DeleteItemInput, ...request.Option) (*dynamodb.DeleteItemOutput, error)
- DeleteItemRequest(*dynamodb.DeleteItemInput) (*request.Request, *dynamodb.DeleteItemOutput)
-
- DeleteResourcePolicy(*dynamodb.DeleteResourcePolicyInput) (*dynamodb.DeleteResourcePolicyOutput, error)
- DeleteResourcePolicyWithContext(aws.Context, *dynamodb.DeleteResourcePolicyInput, ...request.Option) (*dynamodb.DeleteResourcePolicyOutput, error)
- DeleteResourcePolicyRequest(*dynamodb.DeleteResourcePolicyInput) (*request.Request, *dynamodb.DeleteResourcePolicyOutput)
-
- DeleteTable(*dynamodb.DeleteTableInput) (*dynamodb.DeleteTableOutput, error)
- DeleteTableWithContext(aws.Context, *dynamodb.DeleteTableInput, ...request.Option) (*dynamodb.DeleteTableOutput, error)
- DeleteTableRequest(*dynamodb.DeleteTableInput) (*request.Request, *dynamodb.DeleteTableOutput)
-
- DescribeBackup(*dynamodb.DescribeBackupInput) (*dynamodb.DescribeBackupOutput, error)
- DescribeBackupWithContext(aws.Context, *dynamodb.DescribeBackupInput, ...request.Option) (*dynamodb.DescribeBackupOutput, error)
- DescribeBackupRequest(*dynamodb.DescribeBackupInput) (*request.Request, *dynamodb.DescribeBackupOutput)
-
- DescribeContinuousBackups(*dynamodb.DescribeContinuousBackupsInput) (*dynamodb.DescribeContinuousBackupsOutput, error)
- DescribeContinuousBackupsWithContext(aws.Context, *dynamodb.DescribeContinuousBackupsInput, ...request.Option) (*dynamodb.DescribeContinuousBackupsOutput, error)
- DescribeContinuousBackupsRequest(*dynamodb.DescribeContinuousBackupsInput) (*request.Request, *dynamodb.DescribeContinuousBackupsOutput)
-
- DescribeContributorInsights(*dynamodb.DescribeContributorInsightsInput) (*dynamodb.DescribeContributorInsightsOutput, error)
- DescribeContributorInsightsWithContext(aws.Context, *dynamodb.DescribeContributorInsightsInput, ...request.Option) (*dynamodb.DescribeContributorInsightsOutput, error)
- DescribeContributorInsightsRequest(*dynamodb.DescribeContributorInsightsInput) (*request.Request, *dynamodb.DescribeContributorInsightsOutput)
-
- DescribeEndpoints(*dynamodb.DescribeEndpointsInput) (*dynamodb.DescribeEndpointsOutput, error)
- DescribeEndpointsWithContext(aws.Context, *dynamodb.DescribeEndpointsInput, ...request.Option) (*dynamodb.DescribeEndpointsOutput, error)
- DescribeEndpointsRequest(*dynamodb.DescribeEndpointsInput) (*request.Request, *dynamodb.DescribeEndpointsOutput)
-
- DescribeExport(*dynamodb.DescribeExportInput) (*dynamodb.DescribeExportOutput, error)
- DescribeExportWithContext(aws.Context, *dynamodb.DescribeExportInput, ...request.Option) (*dynamodb.DescribeExportOutput, error)
- DescribeExportRequest(*dynamodb.DescribeExportInput) (*request.Request, *dynamodb.DescribeExportOutput)
-
- DescribeGlobalTable(*dynamodb.DescribeGlobalTableInput) (*dynamodb.DescribeGlobalTableOutput, error)
- DescribeGlobalTableWithContext(aws.Context, *dynamodb.DescribeGlobalTableInput, ...request.Option) (*dynamodb.DescribeGlobalTableOutput, error)
- DescribeGlobalTableRequest(*dynamodb.DescribeGlobalTableInput) (*request.Request, *dynamodb.DescribeGlobalTableOutput)
-
- DescribeGlobalTableSettings(*dynamodb.DescribeGlobalTableSettingsInput) (*dynamodb.DescribeGlobalTableSettingsOutput, error)
- DescribeGlobalTableSettingsWithContext(aws.Context, *dynamodb.DescribeGlobalTableSettingsInput, ...request.Option) (*dynamodb.DescribeGlobalTableSettingsOutput, error)
- DescribeGlobalTableSettingsRequest(*dynamodb.DescribeGlobalTableSettingsInput) (*request.Request, *dynamodb.DescribeGlobalTableSettingsOutput)
-
- DescribeImport(*dynamodb.DescribeImportInput) (*dynamodb.DescribeImportOutput, error)
- DescribeImportWithContext(aws.Context, *dynamodb.DescribeImportInput, ...request.Option) (*dynamodb.DescribeImportOutput, error)
- DescribeImportRequest(*dynamodb.DescribeImportInput) (*request.Request, *dynamodb.DescribeImportOutput)
-
- DescribeKinesisStreamingDestination(*dynamodb.DescribeKinesisStreamingDestinationInput) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error)
- DescribeKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DescribeKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error)
- DescribeKinesisStreamingDestinationRequest(*dynamodb.DescribeKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DescribeKinesisStreamingDestinationOutput)
-
- DescribeLimits(*dynamodb.DescribeLimitsInput) (*dynamodb.DescribeLimitsOutput, error)
- DescribeLimitsWithContext(aws.Context, *dynamodb.DescribeLimitsInput, ...request.Option) (*dynamodb.DescribeLimitsOutput, error)
- DescribeLimitsRequest(*dynamodb.DescribeLimitsInput) (*request.Request, *dynamodb.DescribeLimitsOutput)
-
- DescribeTable(*dynamodb.DescribeTableInput) (*dynamodb.DescribeTableOutput, error)
- DescribeTableWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.Option) (*dynamodb.DescribeTableOutput, error)
- DescribeTableRequest(*dynamodb.DescribeTableInput) (*request.Request, *dynamodb.DescribeTableOutput)
-
- DescribeTableReplicaAutoScaling(*dynamodb.DescribeTableReplicaAutoScalingInput) (*dynamodb.DescribeTableReplicaAutoScalingOutput, error)
- DescribeTableReplicaAutoScalingWithContext(aws.Context, *dynamodb.DescribeTableReplicaAutoScalingInput, ...request.Option) (*dynamodb.DescribeTableReplicaAutoScalingOutput, error)
- DescribeTableReplicaAutoScalingRequest(*dynamodb.DescribeTableReplicaAutoScalingInput) (*request.Request, *dynamodb.DescribeTableReplicaAutoScalingOutput)
-
- DescribeTimeToLive(*dynamodb.DescribeTimeToLiveInput) (*dynamodb.DescribeTimeToLiveOutput, error)
- DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error)
- DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput)
-
- DisableKinesisStreamingDestination(*dynamodb.DisableKinesisStreamingDestinationInput) (*dynamodb.DisableKinesisStreamingDestinationOutput, error)
- DisableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DisableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DisableKinesisStreamingDestinationOutput, error)
- DisableKinesisStreamingDestinationRequest(*dynamodb.DisableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DisableKinesisStreamingDestinationOutput)
-
- EnableKinesisStreamingDestination(*dynamodb.EnableKinesisStreamingDestinationInput) (*dynamodb.EnableKinesisStreamingDestinationOutput, error)
- EnableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.EnableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.EnableKinesisStreamingDestinationOutput, error)
- EnableKinesisStreamingDestinationRequest(*dynamodb.EnableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.EnableKinesisStreamingDestinationOutput)
-
- ExecuteStatement(*dynamodb.ExecuteStatementInput) (*dynamodb.ExecuteStatementOutput, error)
- ExecuteStatementWithContext(aws.Context, *dynamodb.ExecuteStatementInput, ...request.Option) (*dynamodb.ExecuteStatementOutput, error)
- ExecuteStatementRequest(*dynamodb.ExecuteStatementInput) (*request.Request, *dynamodb.ExecuteStatementOutput)
-
- ExecuteTransaction(*dynamodb.ExecuteTransactionInput) (*dynamodb.ExecuteTransactionOutput, error)
- ExecuteTransactionWithContext(aws.Context, *dynamodb.ExecuteTransactionInput, ...request.Option) (*dynamodb.ExecuteTransactionOutput, error)
- ExecuteTransactionRequest(*dynamodb.ExecuteTransactionInput) (*request.Request, *dynamodb.ExecuteTransactionOutput)
-
- ExportTableToPointInTime(*dynamodb.ExportTableToPointInTimeInput) (*dynamodb.ExportTableToPointInTimeOutput, error)
- ExportTableToPointInTimeWithContext(aws.Context, *dynamodb.ExportTableToPointInTimeInput, ...request.Option) (*dynamodb.ExportTableToPointInTimeOutput, error)
- ExportTableToPointInTimeRequest(*dynamodb.ExportTableToPointInTimeInput) (*request.Request, *dynamodb.ExportTableToPointInTimeOutput)
-
- GetItem(*dynamodb.GetItemInput) (*dynamodb.GetItemOutput, error)
- GetItemWithContext(aws.Context, *dynamodb.GetItemInput, ...request.Option) (*dynamodb.GetItemOutput, error)
- GetItemRequest(*dynamodb.GetItemInput) (*request.Request, *dynamodb.GetItemOutput)
-
- GetResourcePolicy(*dynamodb.GetResourcePolicyInput) (*dynamodb.GetResourcePolicyOutput, error)
- GetResourcePolicyWithContext(aws.Context, *dynamodb.GetResourcePolicyInput, ...request.Option) (*dynamodb.GetResourcePolicyOutput, error)
- GetResourcePolicyRequest(*dynamodb.GetResourcePolicyInput) (*request.Request, *dynamodb.GetResourcePolicyOutput)
-
- ImportTable(*dynamodb.ImportTableInput) (*dynamodb.ImportTableOutput, error)
- ImportTableWithContext(aws.Context, *dynamodb.ImportTableInput, ...request.Option) (*dynamodb.ImportTableOutput, error)
- ImportTableRequest(*dynamodb.ImportTableInput) (*request.Request, *dynamodb.ImportTableOutput)
-
- ListBackups(*dynamodb.ListBackupsInput) (*dynamodb.ListBackupsOutput, error)
- ListBackupsWithContext(aws.Context, *dynamodb.ListBackupsInput, ...request.Option) (*dynamodb.ListBackupsOutput, error)
- ListBackupsRequest(*dynamodb.ListBackupsInput) (*request.Request, *dynamodb.ListBackupsOutput)
-
- ListContributorInsights(*dynamodb.ListContributorInsightsInput) (*dynamodb.ListContributorInsightsOutput, error)
- ListContributorInsightsWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, ...request.Option) (*dynamodb.ListContributorInsightsOutput, error)
- ListContributorInsightsRequest(*dynamodb.ListContributorInsightsInput) (*request.Request, *dynamodb.ListContributorInsightsOutput)
-
- ListContributorInsightsPages(*dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool) error
- ListContributorInsightsPagesWithContext(aws.Context, *dynamodb.ListContributorInsightsInput, func(*dynamodb.ListContributorInsightsOutput, bool) bool, ...request.Option) error
-
- ListExports(*dynamodb.ListExportsInput) (*dynamodb.ListExportsOutput, error)
- ListExportsWithContext(aws.Context, *dynamodb.ListExportsInput, ...request.Option) (*dynamodb.ListExportsOutput, error)
- ListExportsRequest(*dynamodb.ListExportsInput) (*request.Request, *dynamodb.ListExportsOutput)
-
- ListExportsPages(*dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool) error
- ListExportsPagesWithContext(aws.Context, *dynamodb.ListExportsInput, func(*dynamodb.ListExportsOutput, bool) bool, ...request.Option) error
-
- ListGlobalTables(*dynamodb.ListGlobalTablesInput) (*dynamodb.ListGlobalTablesOutput, error)
- ListGlobalTablesWithContext(aws.Context, *dynamodb.ListGlobalTablesInput, ...request.Option) (*dynamodb.ListGlobalTablesOutput, error)
- ListGlobalTablesRequest(*dynamodb.ListGlobalTablesInput) (*request.Request, *dynamodb.ListGlobalTablesOutput)
-
- ListImports(*dynamodb.ListImportsInput) (*dynamodb.ListImportsOutput, error)
- ListImportsWithContext(aws.Context, *dynamodb.ListImportsInput, ...request.Option) (*dynamodb.ListImportsOutput, error)
- ListImportsRequest(*dynamodb.ListImportsInput) (*request.Request, *dynamodb.ListImportsOutput)
-
- ListImportsPages(*dynamodb.ListImportsInput, func(*dynamodb.ListImportsOutput, bool) bool) error
- ListImportsPagesWithContext(aws.Context, *dynamodb.ListImportsInput, func(*dynamodb.ListImportsOutput, bool) bool, ...request.Option) error
-
- ListTables(*dynamodb.ListTablesInput) (*dynamodb.ListTablesOutput, error)
- ListTablesWithContext(aws.Context, *dynamodb.ListTablesInput, ...request.Option) (*dynamodb.ListTablesOutput, error)
- ListTablesRequest(*dynamodb.ListTablesInput) (*request.Request, *dynamodb.ListTablesOutput)
-
- ListTablesPages(*dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool) error
- ListTablesPagesWithContext(aws.Context, *dynamodb.ListTablesInput, func(*dynamodb.ListTablesOutput, bool) bool, ...request.Option) error
-
- ListTagsOfResource(*dynamodb.ListTagsOfResourceInput) (*dynamodb.ListTagsOfResourceOutput, error)
- ListTagsOfResourceWithContext(aws.Context, *dynamodb.ListTagsOfResourceInput, ...request.Option) (*dynamodb.ListTagsOfResourceOutput, error)
- ListTagsOfResourceRequest(*dynamodb.ListTagsOfResourceInput) (*request.Request, *dynamodb.ListTagsOfResourceOutput)
-
- PutItem(*dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error)
- PutItemWithContext(aws.Context, *dynamodb.PutItemInput, ...request.Option) (*dynamodb.PutItemOutput, error)
- PutItemRequest(*dynamodb.PutItemInput) (*request.Request, *dynamodb.PutItemOutput)
-
- PutResourcePolicy(*dynamodb.PutResourcePolicyInput) (*dynamodb.PutResourcePolicyOutput, error)
- PutResourcePolicyWithContext(aws.Context, *dynamodb.PutResourcePolicyInput, ...request.Option) (*dynamodb.PutResourcePolicyOutput, error)
- PutResourcePolicyRequest(*dynamodb.PutResourcePolicyInput) (*request.Request, *dynamodb.PutResourcePolicyOutput)
-
- Query(*dynamodb.QueryInput) (*dynamodb.QueryOutput, error)
- QueryWithContext(aws.Context, *dynamodb.QueryInput, ...request.Option) (*dynamodb.QueryOutput, error)
- QueryRequest(*dynamodb.QueryInput) (*request.Request, *dynamodb.QueryOutput)
-
- QueryPages(*dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool) error
- QueryPagesWithContext(aws.Context, *dynamodb.QueryInput, func(*dynamodb.QueryOutput, bool) bool, ...request.Option) error
-
- RestoreTableFromBackup(*dynamodb.RestoreTableFromBackupInput) (*dynamodb.RestoreTableFromBackupOutput, error)
- RestoreTableFromBackupWithContext(aws.Context, *dynamodb.RestoreTableFromBackupInput, ...request.Option) (*dynamodb.RestoreTableFromBackupOutput, error)
- RestoreTableFromBackupRequest(*dynamodb.RestoreTableFromBackupInput) (*request.Request, *dynamodb.RestoreTableFromBackupOutput)
-
- RestoreTableToPointInTime(*dynamodb.RestoreTableToPointInTimeInput) (*dynamodb.RestoreTableToPointInTimeOutput, error)
- RestoreTableToPointInTimeWithContext(aws.Context, *dynamodb.RestoreTableToPointInTimeInput, ...request.Option) (*dynamodb.RestoreTableToPointInTimeOutput, error)
- RestoreTableToPointInTimeRequest(*dynamodb.RestoreTableToPointInTimeInput) (*request.Request, *dynamodb.RestoreTableToPointInTimeOutput)
-
- Scan(*dynamodb.ScanInput) (*dynamodb.ScanOutput, error)
- ScanWithContext(aws.Context, *dynamodb.ScanInput, ...request.Option) (*dynamodb.ScanOutput, error)
- ScanRequest(*dynamodb.ScanInput) (*request.Request, *dynamodb.ScanOutput)
-
- ScanPages(*dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool) error
- ScanPagesWithContext(aws.Context, *dynamodb.ScanInput, func(*dynamodb.ScanOutput, bool) bool, ...request.Option) error
-
- TagResource(*dynamodb.TagResourceInput) (*dynamodb.TagResourceOutput, error)
- TagResourceWithContext(aws.Context, *dynamodb.TagResourceInput, ...request.Option) (*dynamodb.TagResourceOutput, error)
- TagResourceRequest(*dynamodb.TagResourceInput) (*request.Request, *dynamodb.TagResourceOutput)
-
- TransactGetItems(*dynamodb.TransactGetItemsInput) (*dynamodb.TransactGetItemsOutput, error)
- TransactGetItemsWithContext(aws.Context, *dynamodb.TransactGetItemsInput, ...request.Option) (*dynamodb.TransactGetItemsOutput, error)
- TransactGetItemsRequest(*dynamodb.TransactGetItemsInput) (*request.Request, *dynamodb.TransactGetItemsOutput)
-
- TransactWriteItems(*dynamodb.TransactWriteItemsInput) (*dynamodb.TransactWriteItemsOutput, error)
- TransactWriteItemsWithContext(aws.Context, *dynamodb.TransactWriteItemsInput, ...request.Option) (*dynamodb.TransactWriteItemsOutput, error)
- TransactWriteItemsRequest(*dynamodb.TransactWriteItemsInput) (*request.Request, *dynamodb.TransactWriteItemsOutput)
-
- UntagResource(*dynamodb.UntagResourceInput) (*dynamodb.UntagResourceOutput, error)
- UntagResourceWithContext(aws.Context, *dynamodb.UntagResourceInput, ...request.Option) (*dynamodb.UntagResourceOutput, error)
- UntagResourceRequest(*dynamodb.UntagResourceInput) (*request.Request, *dynamodb.UntagResourceOutput)
-
- UpdateContinuousBackups(*dynamodb.UpdateContinuousBackupsInput) (*dynamodb.UpdateContinuousBackupsOutput, error)
- UpdateContinuousBackupsWithContext(aws.Context, *dynamodb.UpdateContinuousBackupsInput, ...request.Option) (*dynamodb.UpdateContinuousBackupsOutput, error)
- UpdateContinuousBackupsRequest(*dynamodb.UpdateContinuousBackupsInput) (*request.Request, *dynamodb.UpdateContinuousBackupsOutput)
-
- UpdateContributorInsights(*dynamodb.UpdateContributorInsightsInput) (*dynamodb.UpdateContributorInsightsOutput, error)
- UpdateContributorInsightsWithContext(aws.Context, *dynamodb.UpdateContributorInsightsInput, ...request.Option) (*dynamodb.UpdateContributorInsightsOutput, error)
- UpdateContributorInsightsRequest(*dynamodb.UpdateContributorInsightsInput) (*request.Request, *dynamodb.UpdateContributorInsightsOutput)
-
- UpdateGlobalTable(*dynamodb.UpdateGlobalTableInput) (*dynamodb.UpdateGlobalTableOutput, error)
- UpdateGlobalTableWithContext(aws.Context, *dynamodb.UpdateGlobalTableInput, ...request.Option) (*dynamodb.UpdateGlobalTableOutput, error)
- UpdateGlobalTableRequest(*dynamodb.UpdateGlobalTableInput) (*request.Request, *dynamodb.UpdateGlobalTableOutput)
-
- UpdateGlobalTableSettings(*dynamodb.UpdateGlobalTableSettingsInput) (*dynamodb.UpdateGlobalTableSettingsOutput, error)
- UpdateGlobalTableSettingsWithContext(aws.Context, *dynamodb.UpdateGlobalTableSettingsInput, ...request.Option) (*dynamodb.UpdateGlobalTableSettingsOutput, error)
- UpdateGlobalTableSettingsRequest(*dynamodb.UpdateGlobalTableSettingsInput) (*request.Request, *dynamodb.UpdateGlobalTableSettingsOutput)
-
- UpdateItem(*dynamodb.UpdateItemInput) (*dynamodb.UpdateItemOutput, error)
- UpdateItemWithContext(aws.Context, *dynamodb.UpdateItemInput, ...request.Option) (*dynamodb.UpdateItemOutput, error)
- UpdateItemRequest(*dynamodb.UpdateItemInput) (*request.Request, *dynamodb.UpdateItemOutput)
-
- UpdateKinesisStreamingDestination(*dynamodb.UpdateKinesisStreamingDestinationInput) (*dynamodb.UpdateKinesisStreamingDestinationOutput, error)
- UpdateKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.UpdateKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.UpdateKinesisStreamingDestinationOutput, error)
- UpdateKinesisStreamingDestinationRequest(*dynamodb.UpdateKinesisStreamingDestinationInput) (*request.Request, *dynamodb.UpdateKinesisStreamingDestinationOutput)
-
- UpdateTable(*dynamodb.UpdateTableInput) (*dynamodb.UpdateTableOutput, error)
- UpdateTableWithContext(aws.Context, *dynamodb.UpdateTableInput, ...request.Option) (*dynamodb.UpdateTableOutput, error)
- UpdateTableRequest(*dynamodb.UpdateTableInput) (*request.Request, *dynamodb.UpdateTableOutput)
-
- UpdateTableReplicaAutoScaling(*dynamodb.UpdateTableReplicaAutoScalingInput) (*dynamodb.UpdateTableReplicaAutoScalingOutput, error)
- UpdateTableReplicaAutoScalingWithContext(aws.Context, *dynamodb.UpdateTableReplicaAutoScalingInput, ...request.Option) (*dynamodb.UpdateTableReplicaAutoScalingOutput, error)
- UpdateTableReplicaAutoScalingRequest(*dynamodb.UpdateTableReplicaAutoScalingInput) (*request.Request, *dynamodb.UpdateTableReplicaAutoScalingOutput)
-
- UpdateTimeToLive(*dynamodb.UpdateTimeToLiveInput) (*dynamodb.UpdateTimeToLiveOutput, error)
- UpdateTimeToLiveWithContext(aws.Context, *dynamodb.UpdateTimeToLiveInput, ...request.Option) (*dynamodb.UpdateTimeToLiveOutput, error)
- UpdateTimeToLiveRequest(*dynamodb.UpdateTimeToLiveInput) (*request.Request, *dynamodb.UpdateTimeToLiveOutput)
-
- WaitUntilTableExists(*dynamodb.DescribeTableInput) error
- WaitUntilTableExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error
-
- WaitUntilTableNotExists(*dynamodb.DescribeTableInput) error
- WaitUntilTableNotExistsWithContext(aws.Context, *dynamodb.DescribeTableInput, ...request.WaiterOption) error
-}
-
-var _ DynamoDBAPI = (*dynamodb.DynamoDB)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go
deleted file mode 100644
index 2ef2cab53..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go
+++ /dev/null
@@ -1,408 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package dynamodb
-
-import (
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
-
- // ErrCodeBackupInUseException for service response error code
- // "BackupInUseException".
- //
- // There is another ongoing conflicting backup control plane operation on the
- // table. The backup is either being created, deleted or restored to a table.
- ErrCodeBackupInUseException = "BackupInUseException"
-
- // ErrCodeBackupNotFoundException for service response error code
- // "BackupNotFoundException".
- //
- // Backup not found for the given BackupARN.
- ErrCodeBackupNotFoundException = "BackupNotFoundException"
-
- // ErrCodeConditionalCheckFailedException for service response error code
- // "ConditionalCheckFailedException".
- //
- // A condition specified in the operation could not be evaluated.
- ErrCodeConditionalCheckFailedException = "ConditionalCheckFailedException"
-
- // ErrCodeContinuousBackupsUnavailableException for service response error code
- // "ContinuousBackupsUnavailableException".
- //
- // Backups have not yet been enabled for this table.
- ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException"
-
- // ErrCodeDuplicateItemException for service response error code
- // "DuplicateItemException".
- //
- // There was an attempt to insert an item with the same primary key as an item
- // that already exists in the DynamoDB table.
- ErrCodeDuplicateItemException = "DuplicateItemException"
-
- // ErrCodeExportConflictException for service response error code
- // "ExportConflictException".
- //
- // There was a conflict when writing to the specified S3 bucket.
- ErrCodeExportConflictException = "ExportConflictException"
-
- // ErrCodeExportNotFoundException for service response error code
- // "ExportNotFoundException".
- //
- // The specified export was not found.
- ErrCodeExportNotFoundException = "ExportNotFoundException"
-
- // ErrCodeGlobalTableAlreadyExistsException for service response error code
- // "GlobalTableAlreadyExistsException".
- //
- // The specified global table already exists.
- ErrCodeGlobalTableAlreadyExistsException = "GlobalTableAlreadyExistsException"
-
- // ErrCodeGlobalTableNotFoundException for service response error code
- // "GlobalTableNotFoundException".
- //
- // The specified global table does not exist.
- ErrCodeGlobalTableNotFoundException = "GlobalTableNotFoundException"
-
- // ErrCodeIdempotentParameterMismatchException for service response error code
- // "IdempotentParameterMismatchException".
- //
- // DynamoDB rejected the request because you retried a request with a different
- // payload but with an idempotent token that was already used.
- ErrCodeIdempotentParameterMismatchException = "IdempotentParameterMismatchException"
-
- // ErrCodeImportConflictException for service response error code
- // "ImportConflictException".
- //
- // There was a conflict when importing from the specified S3 source. This can
- // occur when the current import conflicts with a previous import request that
- // had the same client token.
- ErrCodeImportConflictException = "ImportConflictException"
-
- // ErrCodeImportNotFoundException for service response error code
- // "ImportNotFoundException".
- //
- // The specified import was not found.
- ErrCodeImportNotFoundException = "ImportNotFoundException"
-
- // ErrCodeIndexNotFoundException for service response error code
- // "IndexNotFoundException".
- //
- // The operation tried to access a nonexistent index.
- ErrCodeIndexNotFoundException = "IndexNotFoundException"
-
- // ErrCodeInternalServerError for service response error code
- // "InternalServerError".
- //
- // An error occurred on the server side.
- ErrCodeInternalServerError = "InternalServerError"
-
- // ErrCodeInvalidExportTimeException for service response error code
- // "InvalidExportTimeException".
- //
- // The specified ExportTime is outside of the point in time recovery window.
- ErrCodeInvalidExportTimeException = "InvalidExportTimeException"
-
- // ErrCodeInvalidRestoreTimeException for service response error code
- // "InvalidRestoreTimeException".
- //
- // An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
- // and LatestRestorableDateTime.
- ErrCodeInvalidRestoreTimeException = "InvalidRestoreTimeException"
-
- // ErrCodeItemCollectionSizeLimitExceededException for service response error code
- // "ItemCollectionSizeLimitExceededException".
- //
- // An item collection is too large. This exception is only returned for tables
- // that have one or more local secondary indexes.
- ErrCodeItemCollectionSizeLimitExceededException = "ItemCollectionSizeLimitExceededException"
-
- // ErrCodeLimitExceededException for service response error code
- // "LimitExceededException".
- //
- // There is no limit to the number of daily on-demand backups that can be taken.
- //
- // For most purposes, up to 500 simultaneous table operations are allowed per
- // account. These operations include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive,
- // RestoreTableFromBackup, and RestoreTableToPointInTime.
- //
- // When you are creating a table with one or more secondary indexes, you can
- // have up to 250 such requests running at a time. However, if the table or
- // index specifications are complex, then DynamoDB might temporarily reduce
- // the number of concurrent operations.
- //
- // When importing into DynamoDB, up to 50 simultaneous import table operations
- // are allowed per account.
- //
- // There is a soft account quota of 2,500 tables.
- //
- // GetRecords was called with a value of more than 1000 for the limit request
- // parameter.
- //
- // More than 2 processes are reading from the same streams shard at the same
- // time. Exceeding this limit may result in request throttling.
- ErrCodeLimitExceededException = "LimitExceededException"
-
- // ErrCodePointInTimeRecoveryUnavailableException for service response error code
- // "PointInTimeRecoveryUnavailableException".
- //
- // Point in time recovery has not yet been enabled for this source table.
- ErrCodePointInTimeRecoveryUnavailableException = "PointInTimeRecoveryUnavailableException"
-
- // ErrCodePolicyNotFoundException for service response error code
- // "PolicyNotFoundException".
- //
- // The operation tried to access a nonexistent resource-based policy.
- //
- // If you specified an ExpectedRevisionId, it's possible that a policy is present
- // for the resource but its revision ID didn't match the expected value.
- ErrCodePolicyNotFoundException = "PolicyNotFoundException"
-
- // ErrCodeProvisionedThroughputExceededException for service response error code
- // "ProvisionedThroughputExceededException".
- //
- // Your request rate is too high. The Amazon Web Services SDKs for DynamoDB
- // automatically retry requests that receive this exception. Your request is
- // eventually successful, unless your retry queue is too large to finish. Reduce
- // the frequency of requests and use exponential backoff. For more information,
- // go to Error Retries and Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
- // in the Amazon DynamoDB Developer Guide.
- ErrCodeProvisionedThroughputExceededException = "ProvisionedThroughputExceededException"
-
- // ErrCodeReplicaAlreadyExistsException for service response error code
- // "ReplicaAlreadyExistsException".
- //
- // The specified replica is already part of the global table.
- ErrCodeReplicaAlreadyExistsException = "ReplicaAlreadyExistsException"
-
- // ErrCodeReplicaNotFoundException for service response error code
- // "ReplicaNotFoundException".
- //
- // The specified replica is no longer part of the global table.
- ErrCodeReplicaNotFoundException = "ReplicaNotFoundException"
-
- // ErrCodeRequestLimitExceeded for service response error code
- // "RequestLimitExceeded".
- //
- // Throughput exceeds the current throughput quota for your account. Please
- // contact Amazon Web Services Support (https://aws.amazon.com/support) to request
- // a quota increase.
- ErrCodeRequestLimitExceeded = "RequestLimitExceeded"
-
- // ErrCodeResourceInUseException for service response error code
- // "ResourceInUseException".
- //
- // The operation conflicts with the resource's availability. For example, you
- // attempted to recreate an existing table, or tried to delete a table currently
- // in the CREATING state.
- ErrCodeResourceInUseException = "ResourceInUseException"
-
- // ErrCodeResourceNotFoundException for service response error code
- // "ResourceNotFoundException".
- //
- // The operation tried to access a nonexistent table or index. The resource
- // might not be specified correctly, or its status might not be ACTIVE.
- ErrCodeResourceNotFoundException = "ResourceNotFoundException"
-
- // ErrCodeTableAlreadyExistsException for service response error code
- // "TableAlreadyExistsException".
- //
- // A target table with the specified name already exists.
- ErrCodeTableAlreadyExistsException = "TableAlreadyExistsException"
-
- // ErrCodeTableInUseException for service response error code
- // "TableInUseException".
- //
- // A target table with the specified name is either being created or deleted.
- ErrCodeTableInUseException = "TableInUseException"
-
- // ErrCodeTableNotFoundException for service response error code
- // "TableNotFoundException".
- //
- // A source table with the name TableName does not currently exist within the
- // subscriber's account or the subscriber is operating in the wrong Amazon Web
- // Services Region.
- ErrCodeTableNotFoundException = "TableNotFoundException"
-
- // ErrCodeTransactionCanceledException for service response error code
- // "TransactionCanceledException".
- //
- // The entire transaction request was canceled.
- //
- // DynamoDB cancels a TransactWriteItems request under the following circumstances:
- //
- // * A condition in one of the condition expressions is not met.
- //
- // * A table in the TransactWriteItems request is in a different account
- // or region.
- //
- // * More than one action in the TransactWriteItems operation targets the
- // same item.
- //
- // * There is insufficient provisioned capacity for the transaction to be
- // completed.
- //
- // * An item size becomes too large (larger than 400 KB), or a local secondary
- // index (LSI) becomes too large, or a similar validation error occurs because
- // of changes made by the transaction.
- //
- // * There is a user error, such as an invalid data format.
- //
- // * There is an ongoing TransactWriteItems operation that conflicts with
- // a concurrent TransactWriteItems request. In this case the TransactWriteItems
- // operation fails with a TransactionCanceledException.
- //
- // DynamoDB cancels a TransactGetItems request under the following circumstances:
- //
- // * There is an ongoing TransactGetItems operation that conflicts with a
- // concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
- // In this case the TransactGetItems operation fails with a TransactionCanceledException.
- //
- // * A table in the TransactGetItems request is in a different account or
- // region.
- //
- // * There is insufficient provisioned capacity for the transaction to be
- // completed.
- //
- // * There is a user error, such as an invalid data format.
- //
- // If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
- // property. This property is not set for other languages. Transaction cancellation
- // reasons are ordered in the order of requested items, if an item has no error
- // it will have None code and Null message.
- //
- // Cancellation reason codes and possible error messages:
- //
- // * No Errors: Code: None Message: null
- //
- // * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
- // conditional request failed.
- //
- // * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
- // Message: Collection size exceeded.
- //
- // * Transaction Conflict: Code: TransactionConflict Message: Transaction
- // is ongoing for the item.
- //
- // * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
- // Messages: The level of configured provisioned throughput for the table
- // was exceeded. Consider increasing your provisioning level with the UpdateTable
- // API. This Message is received when provisioned throughput is exceeded
- // is on a provisioned DynamoDB table. The level of configured provisioned
- // throughput for one or more global secondary indexes of the table was exceeded.
- // Consider increasing your provisioning level for the under-provisioned
- // global secondary indexes with the UpdateTable API. This message is returned
- // when provisioned throughput is exceeded is on a provisioned GSI.
- //
- // * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
- // the current capacity of your table or index. DynamoDB is automatically
- // scaling your table or index so please try again shortly. If exceptions
- // persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
- // This message is returned when writes get throttled on an On-Demand table
- // as DynamoDB is automatically scaling the table. Throughput exceeds the
- // current capacity for one or more global secondary indexes. DynamoDB is
- // automatically scaling your index so please try again shortly. This message
- // is returned when writes get throttled on an On-Demand GSI as DynamoDB
- // is automatically scaling the GSI.
- //
- // * Validation Error: Code: ValidationError Messages: One or more parameter
- // values were invalid. The update expression attempted to update the secondary
- // index key beyond allowed size limits. The update expression attempted
- // to update the secondary index key to unsupported type. An operand in the
- // update expression has an incorrect data type. Item size to update has
- // exceeded the maximum allowed size. Number overflow. Attempting to store
- // a number with magnitude larger than supported range. Type mismatch for
- // attribute to update. Nesting Levels have exceeded supported limits. The
- // document path provided in the update expression is invalid for update.
- // The provided expression refers to an attribute that does not exist in
- // the item.
- ErrCodeTransactionCanceledException = "TransactionCanceledException"
-
- // ErrCodeTransactionConflictException for service response error code
- // "TransactionConflictException".
- //
- // Operation was rejected because there is an ongoing transaction for the item.
- ErrCodeTransactionConflictException = "TransactionConflictException"
-
- // ErrCodeTransactionInProgressException for service response error code
- // "TransactionInProgressException".
- //
- // The transaction with the given request token is already in progress.
- //
- // Recommended Settings
- //
- // This is a general recommendation for handling the TransactionInProgressException.
- // These settings help ensure that the client retries will trigger completion
- // of the ongoing TransactWriteItems request.
- //
- // * Set clientExecutionTimeout to a value that allows at least one retry
- // to be processed after 5 seconds have elapsed since the first attempt for
- // the TransactWriteItems operation.
- //
- // * Set socketTimeout to a value a little lower than the requestTimeout
- // setting.
- //
- // * requestTimeout should be set based on the time taken for the individual
- // retries of a single HTTP request for your use case, but setting it to
- // 1 second or higher should work well to reduce chances of retries and TransactionInProgressException
- // errors.
- //
- // * Use exponential backoff when retrying and tune backoff if needed.
- //
- // Assuming default retry policy (https://github.com/aws/aws-sdk-java/blob/fd409dee8ae23fb8953e0bb4dbde65536a7e0514/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L97),
- // example timeout settings based on the guidelines above are as follows:
- //
- // Example timeline:
- //
- // * 0-1000 first attempt
- //
- // * 1000-1500 first sleep/delay (default retry policy uses 500 ms as base
- // delay for 4xx errors)
- //
- // * 1500-2500 second attempt
- //
- // * 2500-3500 second sleep/delay (500 * 2, exponential backoff)
- //
- // * 3500-4500 third attempt
- //
- // * 4500-6500 third sleep/delay (500 * 2^2)
- //
- // * 6500-7500 fourth attempt (this can trigger inline recovery since 5 seconds
- // have elapsed since the first attempt reached TC)
- ErrCodeTransactionInProgressException = "TransactionInProgressException"
-)
-
-var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
- "BackupInUseException": newErrorBackupInUseException,
- "BackupNotFoundException": newErrorBackupNotFoundException,
- "ConditionalCheckFailedException": newErrorConditionalCheckFailedException,
- "ContinuousBackupsUnavailableException": newErrorContinuousBackupsUnavailableException,
- "DuplicateItemException": newErrorDuplicateItemException,
- "ExportConflictException": newErrorExportConflictException,
- "ExportNotFoundException": newErrorExportNotFoundException,
- "GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException,
- "GlobalTableNotFoundException": newErrorGlobalTableNotFoundException,
- "IdempotentParameterMismatchException": newErrorIdempotentParameterMismatchException,
- "ImportConflictException": newErrorImportConflictException,
- "ImportNotFoundException": newErrorImportNotFoundException,
- "IndexNotFoundException": newErrorIndexNotFoundException,
- "InternalServerError": newErrorInternalServerError,
- "InvalidExportTimeException": newErrorInvalidExportTimeException,
- "InvalidRestoreTimeException": newErrorInvalidRestoreTimeException,
- "ItemCollectionSizeLimitExceededException": newErrorItemCollectionSizeLimitExceededException,
- "LimitExceededException": newErrorLimitExceededException,
- "PointInTimeRecoveryUnavailableException": newErrorPointInTimeRecoveryUnavailableException,
- "PolicyNotFoundException": newErrorPolicyNotFoundException,
- "ProvisionedThroughputExceededException": newErrorProvisionedThroughputExceededException,
- "ReplicaAlreadyExistsException": newErrorReplicaAlreadyExistsException,
- "ReplicaNotFoundException": newErrorReplicaNotFoundException,
- "RequestLimitExceeded": newErrorRequestLimitExceeded,
- "ResourceInUseException": newErrorResourceInUseException,
- "ResourceNotFoundException": newErrorResourceNotFoundException,
- "TableAlreadyExistsException": newErrorTableAlreadyExistsException,
- "TableInUseException": newErrorTableInUseException,
- "TableNotFoundException": newErrorTableNotFoundException,
- "TransactionCanceledException": newErrorTransactionCanceledException,
- "TransactionConflictException": newErrorTransactionConflictException,
- "TransactionInProgressException": newErrorTransactionInProgressException,
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go
deleted file mode 100644
index ce0ed7446..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package dynamodb
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/crr"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
-)
-
-// DynamoDB provides the API operation methods for making requests to
-// Amazon DynamoDB. See this package's package overview docs
-// for details on the service.
-//
-// DynamoDB methods are safe to use concurrently. It is not safe to
-// modify mutate any of the struct's properties though.
-type DynamoDB struct {
- *client.Client
- endpointCache *crr.EndpointCache
-}
-
-// Used for custom client initialization logic
-var initClient func(*client.Client)
-
-// Used for custom request initialization logic
-var initRequest func(*request.Request)
-
-// Service information constants
-const (
- ServiceName = "dynamodb" // Name of service.
- EndpointsID = ServiceName // ID to lookup a service endpoint with.
- ServiceID = "DynamoDB" // ServiceID is a unique identifier of a specific service.
-)
-
-// New creates a new instance of the DynamoDB client with a session.
-// If additional configuration is needed for the client instance use the optional
-// aws.Config parameter to add your extra config.
-//
-// Example:
-//
-// mySession := session.Must(session.NewSession())
-//
-// // Create a DynamoDB client from just a session.
-// svc := dynamodb.New(mySession)
-//
-// // Create a DynamoDB client with additional configuration
-// svc := dynamodb.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *DynamoDB {
- c := p.ClientConfig(EndpointsID, cfgs...)
- if c.SigningNameDerived || len(c.SigningName) == 0 {
- c.SigningName = EndpointsID
- // No Fallback
- }
- return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
-}
-
-// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *DynamoDB {
- svc := &DynamoDB{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: ServiceName,
- ServiceID: ServiceID,
- SigningName: signingName,
- SigningRegion: signingRegion,
- PartitionID: partitionID,
- Endpoint: endpoint,
- APIVersion: "2012-08-10",
- ResolvedRegion: resolvedRegion,
- JSONVersion: "1.0",
- TargetPrefix: "DynamoDB_20120810",
- },
- handlers,
- ),
- }
- svc.endpointCache = crr.NewEndpointCache(10)
-
- // Handlers
- svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
- svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
- svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
- svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
- svc.Handlers.UnmarshalError.PushBackNamed(
- protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
- )
-
- // Run custom client initialization if present
- if initClient != nil {
- initClient(svc.Client)
- }
-
- return svc
-}
-
-// newRequest creates a new request for a DynamoDB operation and runs any
-// custom request initialization.
-func (c *DynamoDB) newRequest(op *request.Operation, params, data interface{}) *request.Request {
- req := c.NewRequest(op, params, data)
-
- // Run custom request initialization if present
- if initRequest != nil {
- initRequest(req)
- }
-
- return req
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go
deleted file mode 100644
index ae515f7de..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/waiters.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package dynamodb
-
-import (
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// WaitUntilTableExists uses the DynamoDB API operation
-// DescribeTable to wait for a condition to be met before returning.
-// If the condition is not met within the max attempt window, an error will
-// be returned.
-func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error {
- return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input)
-}
-
-// WaitUntilTableExistsWithContext is an extended version of WaitUntilTableExists.
-// With the support for passing in a context and options to configure the
-// Waiter and the underlying request options.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error {
- w := request.Waiter{
- Name: "WaitUntilTableExists",
- MaxAttempts: 25,
- Delay: request.ConstantWaiterDelay(20 * time.Second),
- Acceptors: []request.WaiterAcceptor{
- {
- State: request.SuccessWaiterState,
- Matcher: request.PathWaiterMatch, Argument: "Table.TableStatus",
- Expected: "ACTIVE",
- },
- {
- State: request.RetryWaiterState,
- Matcher: request.ErrorWaiterMatch,
- Expected: "ResourceNotFoundException",
- },
- },
- Logger: c.Config.Logger,
- NewRequest: func(opts []request.Option) (*request.Request, error) {
- var inCpy *DescribeTableInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.DescribeTableRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
- w.ApplyOptions(opts...)
-
- return w.WaitWithContext(ctx)
-}
-
-// WaitUntilTableNotExists uses the DynamoDB API operation
-// DescribeTable to wait for a condition to be met before returning.
-// If the condition is not met within the max attempt window, an error will
-// be returned.
-func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error {
- return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input)
-}
-
-// WaitUntilTableNotExistsWithContext is an extended version of WaitUntilTableNotExists.
-// With the support for passing in a context and options to configure the
-// Waiter and the underlying request options.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *DynamoDB) WaitUntilTableNotExistsWithContext(ctx aws.Context, input *DescribeTableInput, opts ...request.WaiterOption) error {
- w := request.Waiter{
- Name: "WaitUntilTableNotExists",
- MaxAttempts: 25,
- Delay: request.ConstantWaiterDelay(20 * time.Second),
- Acceptors: []request.WaiterAcceptor{
- {
- State: request.SuccessWaiterState,
- Matcher: request.ErrorWaiterMatch,
- Expected: "ResourceNotFoundException",
- },
- },
- Logger: c.Config.Logger,
- NewRequest: func(opts []request.Option) (*request.Request, error) {
- var inCpy *DescribeTableInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.DescribeTableRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
- w.ApplyOptions(opts...)
-
- return w.WaitWithContext(ctx)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go b/vendor/github.com/aws/aws-sdk-go/service/sso/api.go
deleted file mode 100644
index b8f590f71..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/api.go
+++ /dev/null
@@ -1,1367 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sso
-
-import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/restjson"
-)
-
-const opGetRoleCredentials = "GetRoleCredentials"
-
-// GetRoleCredentialsRequest generates a "aws/request.Request" representing the
-// client's request for the GetRoleCredentials operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetRoleCredentials for more information on using the GetRoleCredentials
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetRoleCredentialsRequest method.
-// req, resp := client.GetRoleCredentialsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
-func (c *SSO) GetRoleCredentialsRequest(input *GetRoleCredentialsInput) (req *request.Request, output *GetRoleCredentialsOutput) {
- op := &request.Operation{
- Name: opGetRoleCredentials,
- HTTPMethod: "GET",
- HTTPPath: "/federation/credentials",
- }
-
- if input == nil {
- input = &GetRoleCredentialsInput{}
- }
-
- output = &GetRoleCredentialsOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- return
-}
-
-// GetRoleCredentials API operation for AWS Single Sign-On.
-//
-// Returns the STS short-term credentials for a given role name that is assigned
-// to the user.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Single Sign-On's
-// API operation GetRoleCredentials for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidRequestException
-// Indicates that a problem occurred with the input to the request. For example,
-// a required parameter might be missing or out of range.
-//
-// - UnauthorizedException
-// Indicates that the request is not authorized. This can happen due to an invalid
-// access token in the request.
-//
-// - TooManyRequestsException
-// Indicates that the request is being made too frequently and is more than
-// what the server can handle.
-//
-// - ResourceNotFoundException
-// The specified resource doesn't exist.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/GetRoleCredentials
-func (c *SSO) GetRoleCredentials(input *GetRoleCredentialsInput) (*GetRoleCredentialsOutput, error) {
- req, out := c.GetRoleCredentialsRequest(input)
- return out, req.Send()
-}
-
-// GetRoleCredentialsWithContext is the same as GetRoleCredentials with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetRoleCredentials for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSO) GetRoleCredentialsWithContext(ctx aws.Context, input *GetRoleCredentialsInput, opts ...request.Option) (*GetRoleCredentialsOutput, error) {
- req, out := c.GetRoleCredentialsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opListAccountRoles = "ListAccountRoles"
-
-// ListAccountRolesRequest generates a "aws/request.Request" representing the
-// client's request for the ListAccountRoles operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListAccountRoles for more information on using the ListAccountRoles
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListAccountRolesRequest method.
-// req, resp := client.ListAccountRolesRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
-func (c *SSO) ListAccountRolesRequest(input *ListAccountRolesInput) (req *request.Request, output *ListAccountRolesOutput) {
- op := &request.Operation{
- Name: opListAccountRoles,
- HTTPMethod: "GET",
- HTTPPath: "/assignment/roles",
- Paginator: &request.Paginator{
- InputTokens: []string{"nextToken"},
- OutputTokens: []string{"nextToken"},
- LimitToken: "maxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &ListAccountRolesInput{}
- }
-
- output = &ListAccountRolesOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- return
-}
-
-// ListAccountRoles API operation for AWS Single Sign-On.
-//
-// Lists all roles that are assigned to the user for a given AWS account.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Single Sign-On's
-// API operation ListAccountRoles for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidRequestException
-// Indicates that a problem occurred with the input to the request. For example,
-// a required parameter might be missing or out of range.
-//
-// - UnauthorizedException
-// Indicates that the request is not authorized. This can happen due to an invalid
-// access token in the request.
-//
-// - TooManyRequestsException
-// Indicates that the request is being made too frequently and is more than
-// what the server can handle.
-//
-// - ResourceNotFoundException
-// The specified resource doesn't exist.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccountRoles
-func (c *SSO) ListAccountRoles(input *ListAccountRolesInput) (*ListAccountRolesOutput, error) {
- req, out := c.ListAccountRolesRequest(input)
- return out, req.Send()
-}
-
-// ListAccountRolesWithContext is the same as ListAccountRoles with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListAccountRoles for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSO) ListAccountRolesWithContext(ctx aws.Context, input *ListAccountRolesInput, opts ...request.Option) (*ListAccountRolesOutput, error) {
- req, out := c.ListAccountRolesRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ListAccountRolesPages iterates over the pages of a ListAccountRoles operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListAccountRoles method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a ListAccountRoles operation.
-// pageNum := 0
-// err := client.ListAccountRolesPages(params,
-// func(page *sso.ListAccountRolesOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *SSO) ListAccountRolesPages(input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool) error {
- return c.ListAccountRolesPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListAccountRolesPagesWithContext same as ListAccountRolesPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSO) ListAccountRolesPagesWithContext(ctx aws.Context, input *ListAccountRolesInput, fn func(*ListAccountRolesOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *ListAccountRolesInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.ListAccountRolesRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*ListAccountRolesOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opListAccounts = "ListAccounts"
-
-// ListAccountsRequest generates a "aws/request.Request" representing the
-// client's request for the ListAccounts operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See ListAccounts for more information on using the ListAccounts
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the ListAccountsRequest method.
-// req, resp := client.ListAccountsRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
-func (c *SSO) ListAccountsRequest(input *ListAccountsInput) (req *request.Request, output *ListAccountsOutput) {
- op := &request.Operation{
- Name: opListAccounts,
- HTTPMethod: "GET",
- HTTPPath: "/assignment/accounts",
- Paginator: &request.Paginator{
- InputTokens: []string{"nextToken"},
- OutputTokens: []string{"nextToken"},
- LimitToken: "maxResults",
- TruncationToken: "",
- },
- }
-
- if input == nil {
- input = &ListAccountsInput{}
- }
-
- output = &ListAccountsOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- return
-}
-
-// ListAccounts API operation for AWS Single Sign-On.
-//
-// Lists all AWS accounts assigned to the user. These AWS accounts are assigned
-// by the administrator of the account. For more information, see Assign User
-// Access (https://docs.aws.amazon.com/singlesignon/latest/userguide/useraccess.html#assignusers)
-// in the IAM Identity Center User Guide. This operation returns a paginated
-// response.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Single Sign-On's
-// API operation ListAccounts for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidRequestException
-// Indicates that a problem occurred with the input to the request. For example,
-// a required parameter might be missing or out of range.
-//
-// - UnauthorizedException
-// Indicates that the request is not authorized. This can happen due to an invalid
-// access token in the request.
-//
-// - TooManyRequestsException
-// Indicates that the request is being made too frequently and is more than
-// what the server can handle.
-//
-// - ResourceNotFoundException
-// The specified resource doesn't exist.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/ListAccounts
-func (c *SSO) ListAccounts(input *ListAccountsInput) (*ListAccountsOutput, error) {
- req, out := c.ListAccountsRequest(input)
- return out, req.Send()
-}
-
-// ListAccountsWithContext is the same as ListAccounts with the addition of
-// the ability to pass a context and additional request options.
-//
-// See ListAccounts for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSO) ListAccountsWithContext(ctx aws.Context, input *ListAccountsInput, opts ...request.Option) (*ListAccountsOutput, error) {
- req, out := c.ListAccountsRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// ListAccountsPages iterates over the pages of a ListAccounts operation,
-// calling the "fn" function with the response data for each page. To stop
-// iterating, return false from the fn function.
-//
-// See ListAccounts method for more information on how to use this operation.
-//
-// Note: This operation can generate multiple requests to a service.
-//
-// // Example iterating over at most 3 pages of a ListAccounts operation.
-// pageNum := 0
-// err := client.ListAccountsPages(params,
-// func(page *sso.ListAccountsOutput, lastPage bool) bool {
-// pageNum++
-// fmt.Println(page)
-// return pageNum <= 3
-// })
-func (c *SSO) ListAccountsPages(input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool) error {
- return c.ListAccountsPagesWithContext(aws.BackgroundContext(), input, fn)
-}
-
-// ListAccountsPagesWithContext same as ListAccountsPages except
-// it takes a Context and allows setting request options on the pages.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSO) ListAccountsPagesWithContext(ctx aws.Context, input *ListAccountsInput, fn func(*ListAccountsOutput, bool) bool, opts ...request.Option) error {
- p := request.Pagination{
- NewRequest: func() (*request.Request, error) {
- var inCpy *ListAccountsInput
- if input != nil {
- tmp := *input
- inCpy = &tmp
- }
- req, _ := c.ListAccountsRequest(inCpy)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return req, nil
- },
- }
-
- for p.Next() {
- if !fn(p.Page().(*ListAccountsOutput), !p.HasNextPage()) {
- break
- }
- }
-
- return p.Err()
-}
-
-const opLogout = "Logout"
-
-// LogoutRequest generates a "aws/request.Request" representing the
-// client's request for the Logout operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See Logout for more information on using the Logout
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the LogoutRequest method.
-// req, resp := client.LogoutRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
-func (c *SSO) LogoutRequest(input *LogoutInput) (req *request.Request, output *LogoutOutput) {
- op := &request.Operation{
- Name: opLogout,
- HTTPMethod: "POST",
- HTTPPath: "/logout",
- }
-
- if input == nil {
- input = &LogoutInput{}
- }
-
- output = &LogoutOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- req.Handlers.Unmarshal.Swap(restjson.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
- return
-}
-
-// Logout API operation for AWS Single Sign-On.
-//
-// Removes the locally stored SSO tokens from the client-side cache and sends
-// an API call to the IAM Identity Center service to invalidate the corresponding
-// server-side IAM Identity Center sign in session.
-//
-// If a user uses IAM Identity Center to access the AWS CLI, the user’s IAM
-// Identity Center sign in session is used to obtain an IAM session, as specified
-// in the corresponding IAM Identity Center permission set. More specifically,
-// IAM Identity Center assumes an IAM role in the target account on behalf of
-// the user, and the corresponding temporary AWS credentials are returned to
-// the client.
-//
-// After user logout, any existing IAM role sessions that were created by using
-// IAM Identity Center permission sets continue based on the duration configured
-// in the permission set. For more information, see User authentications (https://docs.aws.amazon.com/singlesignon/latest/userguide/authconcept.html)
-// in the IAM Identity Center User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Single Sign-On's
-// API operation Logout for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidRequestException
-// Indicates that a problem occurred with the input to the request. For example,
-// a required parameter might be missing or out of range.
-//
-// - UnauthorizedException
-// Indicates that the request is not authorized. This can happen due to an invalid
-// access token in the request.
-//
-// - TooManyRequestsException
-// Indicates that the request is being made too frequently and is more than
-// what the server can handle.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10/Logout
-func (c *SSO) Logout(input *LogoutInput) (*LogoutOutput, error) {
- req, out := c.LogoutRequest(input)
- return out, req.Send()
-}
-
-// LogoutWithContext is the same as Logout with the addition of
-// the ability to pass a context and additional request options.
-//
-// See Logout for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSO) LogoutWithContext(ctx aws.Context, input *LogoutInput, opts ...request.Option) (*LogoutOutput, error) {
- req, out := c.LogoutRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// Provides information about your AWS account.
-type AccountInfo struct {
- _ struct{} `type:"structure"`
-
- // The identifier of the AWS account that is assigned to the user.
- AccountId *string `locationName:"accountId" type:"string"`
-
- // The display name of the AWS account that is assigned to the user.
- AccountName *string `locationName:"accountName" type:"string"`
-
- // The email address of the AWS account that is assigned to the user.
- EmailAddress *string `locationName:"emailAddress" min:"1" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AccountInfo) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AccountInfo) GoString() string {
- return s.String()
-}
-
-// SetAccountId sets the AccountId field's value.
-func (s *AccountInfo) SetAccountId(v string) *AccountInfo {
- s.AccountId = &v
- return s
-}
-
-// SetAccountName sets the AccountName field's value.
-func (s *AccountInfo) SetAccountName(v string) *AccountInfo {
- s.AccountName = &v
- return s
-}
-
-// SetEmailAddress sets the EmailAddress field's value.
-func (s *AccountInfo) SetEmailAddress(v string) *AccountInfo {
- s.EmailAddress = &v
- return s
-}
-
-type GetRoleCredentialsInput struct {
- _ struct{} `type:"structure" nopayload:"true"`
-
- // The token issued by the CreateToken API call. For more information, see CreateToken
- // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
- //
- // AccessToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by GetRoleCredentialsInput's
- // String and GoString methods.
- //
- // AccessToken is a required field
- AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
-
- // The identifier for the AWS account that is assigned to the user.
- //
- // AccountId is a required field
- AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
-
- // The friendly name of the role that is assigned to the user.
- //
- // RoleName is a required field
- RoleName *string `location:"querystring" locationName:"role_name" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRoleCredentialsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRoleCredentialsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetRoleCredentialsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetRoleCredentialsInput"}
- if s.AccessToken == nil {
- invalidParams.Add(request.NewErrParamRequired("AccessToken"))
- }
- if s.AccountId == nil {
- invalidParams.Add(request.NewErrParamRequired("AccountId"))
- }
- if s.RoleName == nil {
- invalidParams.Add(request.NewErrParamRequired("RoleName"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAccessToken sets the AccessToken field's value.
-func (s *GetRoleCredentialsInput) SetAccessToken(v string) *GetRoleCredentialsInput {
- s.AccessToken = &v
- return s
-}
-
-// SetAccountId sets the AccountId field's value.
-func (s *GetRoleCredentialsInput) SetAccountId(v string) *GetRoleCredentialsInput {
- s.AccountId = &v
- return s
-}
-
-// SetRoleName sets the RoleName field's value.
-func (s *GetRoleCredentialsInput) SetRoleName(v string) *GetRoleCredentialsInput {
- s.RoleName = &v
- return s
-}
-
-type GetRoleCredentialsOutput struct {
- _ struct{} `type:"structure"`
-
- // The credentials for the role that is assigned to the user.
- RoleCredentials *RoleCredentials `locationName:"roleCredentials" type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRoleCredentialsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetRoleCredentialsOutput) GoString() string {
- return s.String()
-}
-
-// SetRoleCredentials sets the RoleCredentials field's value.
-func (s *GetRoleCredentialsOutput) SetRoleCredentials(v *RoleCredentials) *GetRoleCredentialsOutput {
- s.RoleCredentials = v
- return s
-}
-
-// Indicates that a problem occurred with the input to the request. For example,
-// a required parameter might be missing or out of range.
-type InvalidRequestException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRequestException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRequestException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidRequestException(v protocol.ResponseMetadata) error {
- return &InvalidRequestException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidRequestException) Code() string {
- return "InvalidRequestException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidRequestException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidRequestException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidRequestException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidRequestException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidRequestException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type ListAccountRolesInput struct {
- _ struct{} `type:"structure" nopayload:"true"`
-
- // The token issued by the CreateToken API call. For more information, see CreateToken
- // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
- //
- // AccessToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by ListAccountRolesInput's
- // String and GoString methods.
- //
- // AccessToken is a required field
- AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
-
- // The identifier for the AWS account that is assigned to the user.
- //
- // AccountId is a required field
- AccountId *string `location:"querystring" locationName:"account_id" type:"string" required:"true"`
-
- // The number of items that clients can request per page.
- MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"`
-
- // The page token from the previous response output when you request subsequent
- // pages.
- NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListAccountRolesInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListAccountRolesInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListAccountRolesInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListAccountRolesInput"}
- if s.AccessToken == nil {
- invalidParams.Add(request.NewErrParamRequired("AccessToken"))
- }
- if s.AccountId == nil {
- invalidParams.Add(request.NewErrParamRequired("AccountId"))
- }
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAccessToken sets the AccessToken field's value.
-func (s *ListAccountRolesInput) SetAccessToken(v string) *ListAccountRolesInput {
- s.AccessToken = &v
- return s
-}
-
-// SetAccountId sets the AccountId field's value.
-func (s *ListAccountRolesInput) SetAccountId(v string) *ListAccountRolesInput {
- s.AccountId = &v
- return s
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *ListAccountRolesInput) SetMaxResults(v int64) *ListAccountRolesInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListAccountRolesInput) SetNextToken(v string) *ListAccountRolesInput {
- s.NextToken = &v
- return s
-}
-
-type ListAccountRolesOutput struct {
- _ struct{} `type:"structure"`
-
- // The page token client that is used to retrieve the list of accounts.
- NextToken *string `locationName:"nextToken" type:"string"`
-
- // A paginated response with the list of roles and the next token if more results
- // are available.
- RoleList []*RoleInfo `locationName:"roleList" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListAccountRolesOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListAccountRolesOutput) GoString() string {
- return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListAccountRolesOutput) SetNextToken(v string) *ListAccountRolesOutput {
- s.NextToken = &v
- return s
-}
-
-// SetRoleList sets the RoleList field's value.
-func (s *ListAccountRolesOutput) SetRoleList(v []*RoleInfo) *ListAccountRolesOutput {
- s.RoleList = v
- return s
-}
-
-type ListAccountsInput struct {
- _ struct{} `type:"structure" nopayload:"true"`
-
- // The token issued by the CreateToken API call. For more information, see CreateToken
- // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
- //
- // AccessToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by ListAccountsInput's
- // String and GoString methods.
- //
- // AccessToken is a required field
- AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
-
- // This is the number of items clients can request per page.
- MaxResults *int64 `location:"querystring" locationName:"max_result" min:"1" type:"integer"`
-
- // (Optional) When requesting subsequent pages, this is the page token from
- // the previous response output.
- NextToken *string `location:"querystring" locationName:"next_token" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListAccountsInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListAccountsInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ListAccountsInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ListAccountsInput"}
- if s.AccessToken == nil {
- invalidParams.Add(request.NewErrParamRequired("AccessToken"))
- }
- if s.MaxResults != nil && *s.MaxResults < 1 {
- invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAccessToken sets the AccessToken field's value.
-func (s *ListAccountsInput) SetAccessToken(v string) *ListAccountsInput {
- s.AccessToken = &v
- return s
-}
-
-// SetMaxResults sets the MaxResults field's value.
-func (s *ListAccountsInput) SetMaxResults(v int64) *ListAccountsInput {
- s.MaxResults = &v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListAccountsInput) SetNextToken(v string) *ListAccountsInput {
- s.NextToken = &v
- return s
-}
-
-type ListAccountsOutput struct {
- _ struct{} `type:"structure"`
-
- // A paginated response with the list of account information and the next token
- // if more results are available.
- AccountList []*AccountInfo `locationName:"accountList" type:"list"`
-
- // The page token client that is used to retrieve the list of accounts.
- NextToken *string `locationName:"nextToken" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListAccountsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ListAccountsOutput) GoString() string {
- return s.String()
-}
-
-// SetAccountList sets the AccountList field's value.
-func (s *ListAccountsOutput) SetAccountList(v []*AccountInfo) *ListAccountsOutput {
- s.AccountList = v
- return s
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *ListAccountsOutput) SetNextToken(v string) *ListAccountsOutput {
- s.NextToken = &v
- return s
-}
-
-type LogoutInput struct {
- _ struct{} `type:"structure" nopayload:"true"`
-
- // The token issued by the CreateToken API call. For more information, see CreateToken
- // (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/API_CreateToken.html)
- // in the IAM Identity Center OIDC API Reference Guide.
- //
- // AccessToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by LogoutInput's
- // String and GoString methods.
- //
- // AccessToken is a required field
- AccessToken *string `location:"header" locationName:"x-amz-sso_bearer_token" type:"string" required:"true" sensitive:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LogoutInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LogoutInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *LogoutInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "LogoutInput"}
- if s.AccessToken == nil {
- invalidParams.Add(request.NewErrParamRequired("AccessToken"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAccessToken sets the AccessToken field's value.
-func (s *LogoutInput) SetAccessToken(v string) *LogoutInput {
- s.AccessToken = &v
- return s
-}
-
-type LogoutOutput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LogoutOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s LogoutOutput) GoString() string {
- return s.String()
-}
-
-// The specified resource doesn't exist.
-type ResourceNotFoundException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ResourceNotFoundException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ResourceNotFoundException) GoString() string {
- return s.String()
-}
-
-func newErrorResourceNotFoundException(v protocol.ResponseMetadata) error {
- return &ResourceNotFoundException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ResourceNotFoundException) Code() string {
- return "ResourceNotFoundException"
-}
-
-// Message returns the exception's message.
-func (s *ResourceNotFoundException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ResourceNotFoundException) OrigErr() error {
- return nil
-}
-
-func (s *ResourceNotFoundException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ResourceNotFoundException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ResourceNotFoundException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Provides information about the role credentials that are assigned to the
-// user.
-type RoleCredentials struct {
- _ struct{} `type:"structure"`
-
- // The identifier used for the temporary security credentials. For more information,
- // see Using Temporary Security Credentials to Request Access to AWS Resources
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
- AccessKeyId *string `locationName:"accessKeyId" type:"string"`
-
- // The date on which temporary security credentials expire.
- Expiration *int64 `locationName:"expiration" type:"long"`
-
- // The key that is used to sign the request. For more information, see Using
- // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
- //
- // SecretAccessKey is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by RoleCredentials's
- // String and GoString methods.
- SecretAccessKey *string `locationName:"secretAccessKey" type:"string" sensitive:"true"`
-
- // The token used for temporary credentials. For more information, see Using
- // Temporary Security Credentials to Request Access to AWS Resources (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_use-resources.html)
- // in the AWS IAM User Guide.
- //
- // SessionToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by RoleCredentials's
- // String and GoString methods.
- SessionToken *string `locationName:"sessionToken" type:"string" sensitive:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RoleCredentials) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RoleCredentials) GoString() string {
- return s.String()
-}
-
-// SetAccessKeyId sets the AccessKeyId field's value.
-func (s *RoleCredentials) SetAccessKeyId(v string) *RoleCredentials {
- s.AccessKeyId = &v
- return s
-}
-
-// SetExpiration sets the Expiration field's value.
-func (s *RoleCredentials) SetExpiration(v int64) *RoleCredentials {
- s.Expiration = &v
- return s
-}
-
-// SetSecretAccessKey sets the SecretAccessKey field's value.
-func (s *RoleCredentials) SetSecretAccessKey(v string) *RoleCredentials {
- s.SecretAccessKey = &v
- return s
-}
-
-// SetSessionToken sets the SessionToken field's value.
-func (s *RoleCredentials) SetSessionToken(v string) *RoleCredentials {
- s.SessionToken = &v
- return s
-}
-
-// Provides information about the role that is assigned to the user.
-type RoleInfo struct {
- _ struct{} `type:"structure"`
-
- // The identifier of the AWS account assigned to the user.
- AccountId *string `locationName:"accountId" type:"string"`
-
- // The friendly name of the role that is assigned to the user.
- RoleName *string `locationName:"roleName" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RoleInfo) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RoleInfo) GoString() string {
- return s.String()
-}
-
-// SetAccountId sets the AccountId field's value.
-func (s *RoleInfo) SetAccountId(v string) *RoleInfo {
- s.AccountId = &v
- return s
-}
-
-// SetRoleName sets the RoleName field's value.
-func (s *RoleInfo) SetRoleName(v string) *RoleInfo {
- s.RoleName = &v
- return s
-}
-
-// Indicates that the request is being made too frequently and is more than
-// what the server can handle.
-type TooManyRequestsException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TooManyRequestsException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s TooManyRequestsException) GoString() string {
- return s.String()
-}
-
-func newErrorTooManyRequestsException(v protocol.ResponseMetadata) error {
- return &TooManyRequestsException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *TooManyRequestsException) Code() string {
- return "TooManyRequestsException"
-}
-
-// Message returns the exception's message.
-func (s *TooManyRequestsException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *TooManyRequestsException) OrigErr() error {
- return nil
-}
-
-func (s *TooManyRequestsException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *TooManyRequestsException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *TooManyRequestsException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that the request is not authorized. This can happen due to an invalid
-// access token in the request.
-type UnauthorizedException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnauthorizedException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnauthorizedException) GoString() string {
- return s.String()
-}
-
-func newErrorUnauthorizedException(v protocol.ResponseMetadata) error {
- return &UnauthorizedException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *UnauthorizedException) Code() string {
- return "UnauthorizedException"
-}
-
-// Message returns the exception's message.
-func (s *UnauthorizedException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *UnauthorizedException) OrigErr() error {
- return nil
-}
-
-func (s *UnauthorizedException) Error() string {
- return fmt.Sprintf("%s: %s", s.Code(), s.Message())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *UnauthorizedException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *UnauthorizedException) RequestID() string {
- return s.RespMetadata.RequestID
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
deleted file mode 100644
index 15e61a322..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package sso provides the client and types for making API
-// requests to AWS Single Sign-On.
-//
-// AWS IAM Identity Center (successor to AWS Single Sign-On) Portal is a web
-// service that makes it easy for you to assign user access to IAM Identity
-// Center resources such as the AWS access portal. Users can get AWS account
-// applications and roles assigned to them and get federated into the application.
-//
-// Although AWS Single Sign-On was renamed, the sso and identitystore API namespaces
-// will continue to retain their original name for backward compatibility purposes.
-// For more information, see IAM Identity Center rename (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html#renamed).
-//
-// This reference guide describes the IAM Identity Center Portal operations
-// that you can call programatically and includes detailed information on data
-// types and errors.
-//
-// AWS provides SDKs that consist of libraries and sample code for various programming
-// languages and platforms, such as Java, Ruby, .Net, iOS, or Android. The SDKs
-// provide a convenient way to create programmatic access to IAM Identity Center
-// and other AWS services. For more information about the AWS SDKs, including
-// how to download and install them, see Tools for Amazon Web Services (http://aws.amazon.com/tools/).
-//
-// See https://docs.aws.amazon.com/goto/WebAPI/sso-2019-06-10 for more information on this service.
-//
-// See sso package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/
-//
-// # Using the Client
-//
-// To contact AWS Single Sign-On with the SDK use the New function to create
-// a new service client. With that client you can make API requests to the service.
-// These clients are safe to use concurrently.
-//
-// See the SDK's documentation for more information on how to use the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/
-//
-// See aws.Config documentation for more information on configuring SDK clients.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
-//
-// See the AWS Single Sign-On client SSO for more
-// information on creating client for this service.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sso/#New
-package sso
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
deleted file mode 100644
index 77a6792e3..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/errors.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sso
-
-import (
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
-
- // ErrCodeInvalidRequestException for service response error code
- // "InvalidRequestException".
- //
- // Indicates that a problem occurred with the input to the request. For example,
- // a required parameter might be missing or out of range.
- ErrCodeInvalidRequestException = "InvalidRequestException"
-
- // ErrCodeResourceNotFoundException for service response error code
- // "ResourceNotFoundException".
- //
- // The specified resource doesn't exist.
- ErrCodeResourceNotFoundException = "ResourceNotFoundException"
-
- // ErrCodeTooManyRequestsException for service response error code
- // "TooManyRequestsException".
- //
- // Indicates that the request is being made too frequently and is more than
- // what the server can handle.
- ErrCodeTooManyRequestsException = "TooManyRequestsException"
-
- // ErrCodeUnauthorizedException for service response error code
- // "UnauthorizedException".
- //
- // Indicates that the request is not authorized. This can happen due to an invalid
- // access token in the request.
- ErrCodeUnauthorizedException = "UnauthorizedException"
-)
-
-var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
- "InvalidRequestException": newErrorInvalidRequestException,
- "ResourceNotFoundException": newErrorResourceNotFoundException,
- "TooManyRequestsException": newErrorTooManyRequestsException,
- "UnauthorizedException": newErrorUnauthorizedException,
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go b/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
deleted file mode 100644
index 7094cfe41..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/service.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sso
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/restjson"
-)
-
-// SSO provides the API operation methods for making requests to
-// AWS Single Sign-On. See this package's package overview docs
-// for details on the service.
-//
-// SSO methods are safe to use concurrently. It is not safe to
-// modify mutate any of the struct's properties though.
-type SSO struct {
- *client.Client
-}
-
-// Used for custom client initialization logic
-var initClient func(*client.Client)
-
-// Used for custom request initialization logic
-var initRequest func(*request.Request)
-
-// Service information constants
-const (
- ServiceName = "SSO" // Name of service.
- EndpointsID = "portal.sso" // ID to lookup a service endpoint with.
- ServiceID = "SSO" // ServiceID is a unique identifier of a specific service.
-)
-
-// New creates a new instance of the SSO client with a session.
-// If additional configuration is needed for the client instance use the optional
-// aws.Config parameter to add your extra config.
-//
-// Example:
-//
-// mySession := session.Must(session.NewSession())
-//
-// // Create a SSO client from just a session.
-// svc := sso.New(mySession)
-//
-// // Create a SSO client with additional configuration
-// svc := sso.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSO {
- c := p.ClientConfig(EndpointsID, cfgs...)
- if c.SigningNameDerived || len(c.SigningName) == 0 {
- c.SigningName = "awsssoportal"
- }
- return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
-}
-
-// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSO {
- svc := &SSO{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: ServiceName,
- ServiceID: ServiceID,
- SigningName: signingName,
- SigningRegion: signingRegion,
- PartitionID: partitionID,
- Endpoint: endpoint,
- APIVersion: "2019-06-10",
- ResolvedRegion: resolvedRegion,
- },
- handlers,
- ),
- }
-
- // Handlers
- svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
- svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
- svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
- svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
- svc.Handlers.UnmarshalError.PushBackNamed(
- protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
- )
-
- // Run custom client initialization if present
- if initClient != nil {
- initClient(svc.Client)
- }
-
- return svc
-}
-
-// newRequest creates a new request for a SSO operation and runs any
-// custom request initialization.
-func (c *SSO) newRequest(op *request.Operation, params, data interface{}) *request.Request {
- req := c.NewRequest(op, params, data)
-
- // Run custom request initialization if present
- if initRequest != nil {
- initRequest(req)
- }
-
- return req
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
deleted file mode 100644
index 818cab7cd..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sso/ssoiface/interface.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package ssoiface provides an interface to enable mocking the AWS Single Sign-On service client
-// for testing your code.
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters.
-package ssoiface
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/service/sso"
-)
-
-// SSOAPI provides an interface to enable mocking the
-// sso.SSO service client's API operation,
-// paginators, and waiters. This make unit testing your code that calls out
-// to the SDK's service client's calls easier.
-//
-// The best way to use this interface is so the SDK's service client's calls
-// can be stubbed out for unit testing your code with the SDK without needing
-// to inject custom request handlers into the SDK's request pipeline.
-//
-// // myFunc uses an SDK service client to make a request to
-// // AWS Single Sign-On.
-// func myFunc(svc ssoiface.SSOAPI) bool {
-// // Make svc.GetRoleCredentials request
-// }
-//
-// func main() {
-// sess := session.New()
-// svc := sso.New(sess)
-//
-// myFunc(svc)
-// }
-//
-// In your _test.go file:
-//
-// // Define a mock struct to be used in your unit tests of myFunc.
-// type mockSSOClient struct {
-// ssoiface.SSOAPI
-// }
-// func (m *mockSSOClient) GetRoleCredentials(input *sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error) {
-// // mock response/functionality
-// }
-//
-// func TestMyFunc(t *testing.T) {
-// // Setup Test
-// mockSvc := &mockSSOClient{}
-//
-// myfunc(mockSvc)
-//
-// // Verify myFunc's functionality
-// }
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters. Its suggested to use the pattern above for testing, or using
-// tooling to generate mocks to satisfy the interfaces.
-type SSOAPI interface {
- GetRoleCredentials(*sso.GetRoleCredentialsInput) (*sso.GetRoleCredentialsOutput, error)
- GetRoleCredentialsWithContext(aws.Context, *sso.GetRoleCredentialsInput, ...request.Option) (*sso.GetRoleCredentialsOutput, error)
- GetRoleCredentialsRequest(*sso.GetRoleCredentialsInput) (*request.Request, *sso.GetRoleCredentialsOutput)
-
- ListAccountRoles(*sso.ListAccountRolesInput) (*sso.ListAccountRolesOutput, error)
- ListAccountRolesWithContext(aws.Context, *sso.ListAccountRolesInput, ...request.Option) (*sso.ListAccountRolesOutput, error)
- ListAccountRolesRequest(*sso.ListAccountRolesInput) (*request.Request, *sso.ListAccountRolesOutput)
-
- ListAccountRolesPages(*sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool) error
- ListAccountRolesPagesWithContext(aws.Context, *sso.ListAccountRolesInput, func(*sso.ListAccountRolesOutput, bool) bool, ...request.Option) error
-
- ListAccounts(*sso.ListAccountsInput) (*sso.ListAccountsOutput, error)
- ListAccountsWithContext(aws.Context, *sso.ListAccountsInput, ...request.Option) (*sso.ListAccountsOutput, error)
- ListAccountsRequest(*sso.ListAccountsInput) (*request.Request, *sso.ListAccountsOutput)
-
- ListAccountsPages(*sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool) error
- ListAccountsPagesWithContext(aws.Context, *sso.ListAccountsInput, func(*sso.ListAccountsOutput, bool) bool, ...request.Option) error
-
- Logout(*sso.LogoutInput) (*sso.LogoutOutput, error)
- LogoutWithContext(aws.Context, *sso.LogoutInput, ...request.Option) (*sso.LogoutOutput, error)
- LogoutRequest(*sso.LogoutInput) (*request.Request, *sso.LogoutOutput)
-}
-
-var _ SSOAPI = (*sso.SSO)(nil)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
deleted file mode 100644
index 827bd5194..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/api.go
+++ /dev/null
@@ -1,2406 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package ssooidc
-
-import (
- "fmt"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const opCreateToken = "CreateToken"
-
-// CreateTokenRequest generates a "aws/request.Request" representing the
-// client's request for the CreateToken operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See CreateToken for more information on using the CreateToken
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the CreateTokenRequest method.
-// req, resp := client.CreateTokenRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken
-func (c *SSOOIDC) CreateTokenRequest(input *CreateTokenInput) (req *request.Request, output *CreateTokenOutput) {
- op := &request.Operation{
- Name: opCreateToken,
- HTTPMethod: "POST",
- HTTPPath: "/token",
- }
-
- if input == nil {
- input = &CreateTokenInput{}
- }
-
- output = &CreateTokenOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- return
-}
-
-// CreateToken API operation for AWS SSO OIDC.
-//
-// Creates and returns access and refresh tokens for clients that are authenticated
-// using client secrets. The access token can be used to fetch short-term credentials
-// for the assigned AWS accounts or to access application APIs using bearer
-// authentication.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS SSO OIDC's
-// API operation CreateToken for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidRequestException
-// Indicates that something is wrong with the input to the request. For example,
-// a required parameter might be missing or out of range.
-//
-// - InvalidClientException
-// Indicates that the clientId or clientSecret in the request is invalid. For
-// example, this can occur when a client sends an incorrect clientId or an expired
-// clientSecret.
-//
-// - InvalidGrantException
-// Indicates that a request contains an invalid grant. This can occur if a client
-// makes a CreateToken request with an invalid grant type.
-//
-// - UnauthorizedClientException
-// Indicates that the client is not currently authorized to make the request.
-// This can happen when a clientId is not issued for a public client.
-//
-// - UnsupportedGrantTypeException
-// Indicates that the grant type in the request is not supported by the service.
-//
-// - InvalidScopeException
-// Indicates that the scope provided in the request is invalid.
-//
-// - AuthorizationPendingException
-// Indicates that a request to authorize a client with an access user session
-// token is pending.
-//
-// - SlowDownException
-// Indicates that the client is making the request too frequently and is more
-// than the service can handle.
-//
-// - AccessDeniedException
-// You do not have sufficient access to perform this action.
-//
-// - ExpiredTokenException
-// Indicates that the token issued by the service is expired and is no longer
-// valid.
-//
-// - InternalServerException
-// Indicates that an error from the service occurred while trying to process
-// a request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateToken
-func (c *SSOOIDC) CreateToken(input *CreateTokenInput) (*CreateTokenOutput, error) {
- req, out := c.CreateTokenRequest(input)
- return out, req.Send()
-}
-
-// CreateTokenWithContext is the same as CreateToken with the addition of
-// the ability to pass a context and additional request options.
-//
-// See CreateToken for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSOOIDC) CreateTokenWithContext(ctx aws.Context, input *CreateTokenInput, opts ...request.Option) (*CreateTokenOutput, error) {
- req, out := c.CreateTokenRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opCreateTokenWithIAM = "CreateTokenWithIAM"
-
-// CreateTokenWithIAMRequest generates a "aws/request.Request" representing the
-// client's request for the CreateTokenWithIAM operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See CreateTokenWithIAM for more information on using the CreateTokenWithIAM
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the CreateTokenWithIAMRequest method.
-// req, resp := client.CreateTokenWithIAMRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
-func (c *SSOOIDC) CreateTokenWithIAMRequest(input *CreateTokenWithIAMInput) (req *request.Request, output *CreateTokenWithIAMOutput) {
- op := &request.Operation{
- Name: opCreateTokenWithIAM,
- HTTPMethod: "POST",
- HTTPPath: "/token?aws_iam=t",
- }
-
- if input == nil {
- input = &CreateTokenWithIAMInput{}
- }
-
- output = &CreateTokenWithIAMOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// CreateTokenWithIAM API operation for AWS SSO OIDC.
-//
-// Creates and returns access and refresh tokens for clients and applications
-// that are authenticated using IAM entities. The access token can be used to
-// fetch short-term credentials for the assigned Amazon Web Services accounts
-// or to access application APIs using bearer authentication.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS SSO OIDC's
-// API operation CreateTokenWithIAM for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidRequestException
-// Indicates that something is wrong with the input to the request. For example,
-// a required parameter might be missing or out of range.
-//
-// - InvalidClientException
-// Indicates that the clientId or clientSecret in the request is invalid. For
-// example, this can occur when a client sends an incorrect clientId or an expired
-// clientSecret.
-//
-// - InvalidGrantException
-// Indicates that a request contains an invalid grant. This can occur if a client
-// makes a CreateToken request with an invalid grant type.
-//
-// - UnauthorizedClientException
-// Indicates that the client is not currently authorized to make the request.
-// This can happen when a clientId is not issued for a public client.
-//
-// - UnsupportedGrantTypeException
-// Indicates that the grant type in the request is not supported by the service.
-//
-// - InvalidScopeException
-// Indicates that the scope provided in the request is invalid.
-//
-// - AuthorizationPendingException
-// Indicates that a request to authorize a client with an access user session
-// token is pending.
-//
-// - SlowDownException
-// Indicates that the client is making the request too frequently and is more
-// than the service can handle.
-//
-// - AccessDeniedException
-// You do not have sufficient access to perform this action.
-//
-// - ExpiredTokenException
-// Indicates that the token issued by the service is expired and is no longer
-// valid.
-//
-// - InternalServerException
-// Indicates that an error from the service occurred while trying to process
-// a request.
-//
-// - InvalidRequestRegionException
-// Indicates that a token provided as input to the request was issued by and
-// is only usable by calling IAM Identity Center endpoints in another region.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/CreateTokenWithIAM
-func (c *SSOOIDC) CreateTokenWithIAM(input *CreateTokenWithIAMInput) (*CreateTokenWithIAMOutput, error) {
- req, out := c.CreateTokenWithIAMRequest(input)
- return out, req.Send()
-}
-
-// CreateTokenWithIAMWithContext is the same as CreateTokenWithIAM with the addition of
-// the ability to pass a context and additional request options.
-//
-// See CreateTokenWithIAM for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSOOIDC) CreateTokenWithIAMWithContext(ctx aws.Context, input *CreateTokenWithIAMInput, opts ...request.Option) (*CreateTokenWithIAMOutput, error) {
- req, out := c.CreateTokenWithIAMRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opRegisterClient = "RegisterClient"
-
-// RegisterClientRequest generates a "aws/request.Request" representing the
-// client's request for the RegisterClient operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See RegisterClient for more information on using the RegisterClient
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the RegisterClientRequest method.
-// req, resp := client.RegisterClientRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient
-func (c *SSOOIDC) RegisterClientRequest(input *RegisterClientInput) (req *request.Request, output *RegisterClientOutput) {
- op := &request.Operation{
- Name: opRegisterClient,
- HTTPMethod: "POST",
- HTTPPath: "/client/register",
- }
-
- if input == nil {
- input = &RegisterClientInput{}
- }
-
- output = &RegisterClientOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- return
-}
-
-// RegisterClient API operation for AWS SSO OIDC.
-//
-// Registers a client with IAM Identity Center. This allows clients to initiate
-// device authorization. The output should be persisted for reuse through many
-// authentication requests.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS SSO OIDC's
-// API operation RegisterClient for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidRequestException
-// Indicates that something is wrong with the input to the request. For example,
-// a required parameter might be missing or out of range.
-//
-// - InvalidScopeException
-// Indicates that the scope provided in the request is invalid.
-//
-// - InvalidClientMetadataException
-// Indicates that the client information sent in the request during registration
-// is invalid.
-//
-// - InternalServerException
-// Indicates that an error from the service occurred while trying to process
-// a request.
-//
-// - InvalidRedirectUriException
-// Indicates that one or more redirect URI in the request is not supported for
-// this operation.
-//
-// - UnsupportedGrantTypeException
-// Indicates that the grant type in the request is not supported by the service.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/RegisterClient
-func (c *SSOOIDC) RegisterClient(input *RegisterClientInput) (*RegisterClientOutput, error) {
- req, out := c.RegisterClientRequest(input)
- return out, req.Send()
-}
-
-// RegisterClientWithContext is the same as RegisterClient with the addition of
-// the ability to pass a context and additional request options.
-//
-// See RegisterClient for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSOOIDC) RegisterClientWithContext(ctx aws.Context, input *RegisterClientInput, opts ...request.Option) (*RegisterClientOutput, error) {
- req, out := c.RegisterClientRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opStartDeviceAuthorization = "StartDeviceAuthorization"
-
-// StartDeviceAuthorizationRequest generates a "aws/request.Request" representing the
-// client's request for the StartDeviceAuthorization operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See StartDeviceAuthorization for more information on using the StartDeviceAuthorization
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the StartDeviceAuthorizationRequest method.
-// req, resp := client.StartDeviceAuthorizationRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization
-func (c *SSOOIDC) StartDeviceAuthorizationRequest(input *StartDeviceAuthorizationInput) (req *request.Request, output *StartDeviceAuthorizationOutput) {
- op := &request.Operation{
- Name: opStartDeviceAuthorization,
- HTTPMethod: "POST",
- HTTPPath: "/device_authorization",
- }
-
- if input == nil {
- input = &StartDeviceAuthorizationInput{}
- }
-
- output = &StartDeviceAuthorizationOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- return
-}
-
-// StartDeviceAuthorization API operation for AWS SSO OIDC.
-//
-// Initiates device authorization by requesting a pair of verification codes
-// from the authorization service.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS SSO OIDC's
-// API operation StartDeviceAuthorization for usage and error information.
-//
-// Returned Error Types:
-//
-// - InvalidRequestException
-// Indicates that something is wrong with the input to the request. For example,
-// a required parameter might be missing or out of range.
-//
-// - InvalidClientException
-// Indicates that the clientId or clientSecret in the request is invalid. For
-// example, this can occur when a client sends an incorrect clientId or an expired
-// clientSecret.
-//
-// - UnauthorizedClientException
-// Indicates that the client is not currently authorized to make the request.
-// This can happen when a clientId is not issued for a public client.
-//
-// - SlowDownException
-// Indicates that the client is making the request too frequently and is more
-// than the service can handle.
-//
-// - InternalServerException
-// Indicates that an error from the service occurred while trying to process
-// a request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10/StartDeviceAuthorization
-func (c *SSOOIDC) StartDeviceAuthorization(input *StartDeviceAuthorizationInput) (*StartDeviceAuthorizationOutput, error) {
- req, out := c.StartDeviceAuthorizationRequest(input)
- return out, req.Send()
-}
-
-// StartDeviceAuthorizationWithContext is the same as StartDeviceAuthorization with the addition of
-// the ability to pass a context and additional request options.
-//
-// See StartDeviceAuthorization for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *SSOOIDC) StartDeviceAuthorizationWithContext(ctx aws.Context, input *StartDeviceAuthorizationInput, opts ...request.Option) (*StartDeviceAuthorizationOutput, error) {
- req, out := c.StartDeviceAuthorizationRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-// You do not have sufficient access to perform this action.
-type AccessDeniedException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be access_denied.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AccessDeniedException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AccessDeniedException) GoString() string {
- return s.String()
-}
-
-func newErrorAccessDeniedException(v protocol.ResponseMetadata) error {
- return &AccessDeniedException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *AccessDeniedException) Code() string {
- return "AccessDeniedException"
-}
-
-// Message returns the exception's message.
-func (s *AccessDeniedException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *AccessDeniedException) OrigErr() error {
- return nil
-}
-
-func (s *AccessDeniedException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *AccessDeniedException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *AccessDeniedException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that a request to authorize a client with an access user session
-// token is pending.
-type AuthorizationPendingException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be authorization_pending.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AuthorizationPendingException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AuthorizationPendingException) GoString() string {
- return s.String()
-}
-
-func newErrorAuthorizationPendingException(v protocol.ResponseMetadata) error {
- return &AuthorizationPendingException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *AuthorizationPendingException) Code() string {
- return "AuthorizationPendingException"
-}
-
-// Message returns the exception's message.
-func (s *AuthorizationPendingException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *AuthorizationPendingException) OrigErr() error {
- return nil
-}
-
-func (s *AuthorizationPendingException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *AuthorizationPendingException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *AuthorizationPendingException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type CreateTokenInput struct {
- _ struct{} `type:"structure"`
-
- // The unique identifier string for the client or application. This value comes
- // from the result of the RegisterClient API.
- //
- // ClientId is a required field
- ClientId *string `locationName:"clientId" type:"string" required:"true"`
-
- // A secret string generated for the client. This value should come from the
- // persisted result of the RegisterClient API.
- //
- // ClientSecret is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenInput's
- // String and GoString methods.
- //
- // ClientSecret is a required field
- ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`
-
- // Used only when calling this API for the Authorization Code grant type. The
- // short-term code is used to identify this authorization request. This grant
- // type is currently unsupported for the CreateToken API.
- Code *string `locationName:"code" type:"string"`
-
- // Used only when calling this API for the Authorization Code grant type. This
- // value is generated by the client and presented to validate the original code
- // challenge value the client passed at authorization time.
- //
- // CodeVerifier is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenInput's
- // String and GoString methods.
- CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
-
- // Used only when calling this API for the Device Code grant type. This short-term
- // code is used to identify this authorization request. This comes from the
- // result of the StartDeviceAuthorization API.
- DeviceCode *string `locationName:"deviceCode" type:"string"`
-
- // Supports the following OAuth grant types: Device Code and Refresh Token.
- // Specify either of the following values, depending on the grant type that
- // you want:
- //
- // * Device Code - urn:ietf:params:oauth:grant-type:device_code
- //
- // * Refresh Token - refresh_token
- //
- // For information about how to obtain the device code, see the StartDeviceAuthorization
- // topic.
- //
- // GrantType is a required field
- GrantType *string `locationName:"grantType" type:"string" required:"true"`
-
- // Used only when calling this API for the Authorization Code grant type. This
- // value specifies the location of the client or application that has registered
- // to receive the authorization code.
- RedirectUri *string `locationName:"redirectUri" type:"string"`
-
- // Used only when calling this API for the Refresh Token grant type. This token
- // is used to refresh short-term tokens, such as the access token, that might
- // expire.
- //
- // For more information about the features and limitations of the current IAM
- // Identity Center OIDC implementation, see Considerations for Using this Guide
- // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
- //
- // RefreshToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenInput's
- // String and GoString methods.
- RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
-
- // The list of scopes for which authorization is requested. The access token
- // that is issued is limited to the scopes that are granted. If this value is
- // not specified, IAM Identity Center authorizes all scopes that are configured
- // for the client during the call to RegisterClient.
- Scope []*string `locationName:"scope" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTokenInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTokenInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateTokenInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateTokenInput"}
- if s.ClientId == nil {
- invalidParams.Add(request.NewErrParamRequired("ClientId"))
- }
- if s.ClientSecret == nil {
- invalidParams.Add(request.NewErrParamRequired("ClientSecret"))
- }
- if s.GrantType == nil {
- invalidParams.Add(request.NewErrParamRequired("GrantType"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetClientId sets the ClientId field's value.
-func (s *CreateTokenInput) SetClientId(v string) *CreateTokenInput {
- s.ClientId = &v
- return s
-}
-
-// SetClientSecret sets the ClientSecret field's value.
-func (s *CreateTokenInput) SetClientSecret(v string) *CreateTokenInput {
- s.ClientSecret = &v
- return s
-}
-
-// SetCode sets the Code field's value.
-func (s *CreateTokenInput) SetCode(v string) *CreateTokenInput {
- s.Code = &v
- return s
-}
-
-// SetCodeVerifier sets the CodeVerifier field's value.
-func (s *CreateTokenInput) SetCodeVerifier(v string) *CreateTokenInput {
- s.CodeVerifier = &v
- return s
-}
-
-// SetDeviceCode sets the DeviceCode field's value.
-func (s *CreateTokenInput) SetDeviceCode(v string) *CreateTokenInput {
- s.DeviceCode = &v
- return s
-}
-
-// SetGrantType sets the GrantType field's value.
-func (s *CreateTokenInput) SetGrantType(v string) *CreateTokenInput {
- s.GrantType = &v
- return s
-}
-
-// SetRedirectUri sets the RedirectUri field's value.
-func (s *CreateTokenInput) SetRedirectUri(v string) *CreateTokenInput {
- s.RedirectUri = &v
- return s
-}
-
-// SetRefreshToken sets the RefreshToken field's value.
-func (s *CreateTokenInput) SetRefreshToken(v string) *CreateTokenInput {
- s.RefreshToken = &v
- return s
-}
-
-// SetScope sets the Scope field's value.
-func (s *CreateTokenInput) SetScope(v []*string) *CreateTokenInput {
- s.Scope = v
- return s
-}
-
-type CreateTokenOutput struct {
- _ struct{} `type:"structure"`
-
- // A bearer token to access Amazon Web Services accounts and applications assigned
- // to a user.
- //
- // AccessToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenOutput's
- // String and GoString methods.
- AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"`
-
- // Indicates the time in seconds when an access token will expire.
- ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
-
- // The idToken is not implemented or supported. For more information about the
- // features and limitations of the current IAM Identity Center OIDC implementation,
- // see Considerations for Using this Guide in the IAM Identity Center OIDC API
- // Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
- //
- // A JSON Web Token (JWT) that identifies who is associated with the issued
- // access token.
- //
- // IdToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenOutput's
- // String and GoString methods.
- IdToken *string `locationName:"idToken" type:"string" sensitive:"true"`
-
- // A token that, if present, can be used to refresh a previously issued access
- // token that might have expired.
- //
- // For more information about the features and limitations of the current IAM
- // Identity Center OIDC implementation, see Considerations for Using this Guide
- // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
- //
- // RefreshToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenOutput's
- // String and GoString methods.
- RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
-
- // Used to notify the client that the returned token is an access token. The
- // supported token type is Bearer.
- TokenType *string `locationName:"tokenType" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTokenOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTokenOutput) GoString() string {
- return s.String()
-}
-
-// SetAccessToken sets the AccessToken field's value.
-func (s *CreateTokenOutput) SetAccessToken(v string) *CreateTokenOutput {
- s.AccessToken = &v
- return s
-}
-
-// SetExpiresIn sets the ExpiresIn field's value.
-func (s *CreateTokenOutput) SetExpiresIn(v int64) *CreateTokenOutput {
- s.ExpiresIn = &v
- return s
-}
-
-// SetIdToken sets the IdToken field's value.
-func (s *CreateTokenOutput) SetIdToken(v string) *CreateTokenOutput {
- s.IdToken = &v
- return s
-}
-
-// SetRefreshToken sets the RefreshToken field's value.
-func (s *CreateTokenOutput) SetRefreshToken(v string) *CreateTokenOutput {
- s.RefreshToken = &v
- return s
-}
-
-// SetTokenType sets the TokenType field's value.
-func (s *CreateTokenOutput) SetTokenType(v string) *CreateTokenOutput {
- s.TokenType = &v
- return s
-}
-
-type CreateTokenWithIAMInput struct {
- _ struct{} `type:"structure"`
-
- // Used only when calling this API for the JWT Bearer grant type. This value
- // specifies the JSON Web Token (JWT) issued by a trusted token issuer. To authorize
- // a trusted token issuer, configure the JWT Bearer GrantOptions for the application.
- //
- // Assertion is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
- // String and GoString methods.
- Assertion *string `locationName:"assertion" type:"string" sensitive:"true"`
-
- // The unique identifier string for the client or application. This value is
- // an application ARN that has OAuth grants configured.
- //
- // ClientId is a required field
- ClientId *string `locationName:"clientId" type:"string" required:"true"`
-
- // Used only when calling this API for the Authorization Code grant type. This
- // short-term code is used to identify this authorization request. The code
- // is obtained through a redirect from IAM Identity Center to a redirect URI
- // persisted in the Authorization Code GrantOptions for the application.
- Code *string `locationName:"code" type:"string"`
-
- // Used only when calling this API for the Authorization Code grant type. This
- // value is generated by the client and presented to validate the original code
- // challenge value the client passed at authorization time.
- //
- // CodeVerifier is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
- // String and GoString methods.
- CodeVerifier *string `locationName:"codeVerifier" type:"string" sensitive:"true"`
-
- // Supports the following OAuth grant types: Authorization Code, Refresh Token,
- // JWT Bearer, and Token Exchange. Specify one of the following values, depending
- // on the grant type that you want:
- //
- // * Authorization Code - authorization_code
- //
- // * Refresh Token - refresh_token
- //
- // * JWT Bearer - urn:ietf:params:oauth:grant-type:jwt-bearer
- //
- // * Token Exchange - urn:ietf:params:oauth:grant-type:token-exchange
- //
- // GrantType is a required field
- GrantType *string `locationName:"grantType" type:"string" required:"true"`
-
- // Used only when calling this API for the Authorization Code grant type. This
- // value specifies the location of the client or application that has registered
- // to receive the authorization code.
- RedirectUri *string `locationName:"redirectUri" type:"string"`
-
- // Used only when calling this API for the Refresh Token grant type. This token
- // is used to refresh short-term tokens, such as the access token, that might
- // expire.
- //
- // For more information about the features and limitations of the current IAM
- // Identity Center OIDC implementation, see Considerations for Using this Guide
- // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
- //
- // RefreshToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
- // String and GoString methods.
- RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
-
- // Used only when calling this API for the Token Exchange grant type. This value
- // specifies the type of token that the requester can receive. The following
- // values are supported:
- //
- // * Access Token - urn:ietf:params:oauth:token-type:access_token
- //
- // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
- RequestedTokenType *string `locationName:"requestedTokenType" type:"string"`
-
- // The list of scopes for which authorization is requested. The access token
- // that is issued is limited to the scopes that are granted. If the value is
- // not specified, IAM Identity Center authorizes all scopes configured for the
- // application, including the following default scopes: openid, aws, sts:identity_context.
- Scope []*string `locationName:"scope" type:"list"`
-
- // Used only when calling this API for the Token Exchange grant type. This value
- // specifies the subject of the exchange. The value of the subject token must
- // be an access token issued by IAM Identity Center to a different client or
- // application. The access token must have authorized scopes that indicate the
- // requested application as a target audience.
- //
- // SubjectToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenWithIAMInput's
- // String and GoString methods.
- SubjectToken *string `locationName:"subjectToken" type:"string" sensitive:"true"`
-
- // Used only when calling this API for the Token Exchange grant type. This value
- // specifies the type of token that is passed as the subject of the exchange.
- // The following value is supported:
- //
- // * Access Token - urn:ietf:params:oauth:token-type:access_token
- SubjectTokenType *string `locationName:"subjectTokenType" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTokenWithIAMInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTokenWithIAMInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *CreateTokenWithIAMInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "CreateTokenWithIAMInput"}
- if s.ClientId == nil {
- invalidParams.Add(request.NewErrParamRequired("ClientId"))
- }
- if s.GrantType == nil {
- invalidParams.Add(request.NewErrParamRequired("GrantType"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAssertion sets the Assertion field's value.
-func (s *CreateTokenWithIAMInput) SetAssertion(v string) *CreateTokenWithIAMInput {
- s.Assertion = &v
- return s
-}
-
-// SetClientId sets the ClientId field's value.
-func (s *CreateTokenWithIAMInput) SetClientId(v string) *CreateTokenWithIAMInput {
- s.ClientId = &v
- return s
-}
-
-// SetCode sets the Code field's value.
-func (s *CreateTokenWithIAMInput) SetCode(v string) *CreateTokenWithIAMInput {
- s.Code = &v
- return s
-}
-
-// SetCodeVerifier sets the CodeVerifier field's value.
-func (s *CreateTokenWithIAMInput) SetCodeVerifier(v string) *CreateTokenWithIAMInput {
- s.CodeVerifier = &v
- return s
-}
-
-// SetGrantType sets the GrantType field's value.
-func (s *CreateTokenWithIAMInput) SetGrantType(v string) *CreateTokenWithIAMInput {
- s.GrantType = &v
- return s
-}
-
-// SetRedirectUri sets the RedirectUri field's value.
-func (s *CreateTokenWithIAMInput) SetRedirectUri(v string) *CreateTokenWithIAMInput {
- s.RedirectUri = &v
- return s
-}
-
-// SetRefreshToken sets the RefreshToken field's value.
-func (s *CreateTokenWithIAMInput) SetRefreshToken(v string) *CreateTokenWithIAMInput {
- s.RefreshToken = &v
- return s
-}
-
-// SetRequestedTokenType sets the RequestedTokenType field's value.
-func (s *CreateTokenWithIAMInput) SetRequestedTokenType(v string) *CreateTokenWithIAMInput {
- s.RequestedTokenType = &v
- return s
-}
-
-// SetScope sets the Scope field's value.
-func (s *CreateTokenWithIAMInput) SetScope(v []*string) *CreateTokenWithIAMInput {
- s.Scope = v
- return s
-}
-
-// SetSubjectToken sets the SubjectToken field's value.
-func (s *CreateTokenWithIAMInput) SetSubjectToken(v string) *CreateTokenWithIAMInput {
- s.SubjectToken = &v
- return s
-}
-
-// SetSubjectTokenType sets the SubjectTokenType field's value.
-func (s *CreateTokenWithIAMInput) SetSubjectTokenType(v string) *CreateTokenWithIAMInput {
- s.SubjectTokenType = &v
- return s
-}
-
-type CreateTokenWithIAMOutput struct {
- _ struct{} `type:"structure"`
-
- // A bearer token to access Amazon Web Services accounts and applications assigned
- // to a user.
- //
- // AccessToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
- // String and GoString methods.
- AccessToken *string `locationName:"accessToken" type:"string" sensitive:"true"`
-
- // Indicates the time in seconds when an access token will expire.
- ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
-
- // A JSON Web Token (JWT) that identifies the user associated with the issued
- // access token.
- //
- // IdToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
- // String and GoString methods.
- IdToken *string `locationName:"idToken" type:"string" sensitive:"true"`
-
- // Indicates the type of tokens that are issued by IAM Identity Center. The
- // following values are supported:
- //
- // * Access Token - urn:ietf:params:oauth:token-type:access_token
- //
- // * Refresh Token - urn:ietf:params:oauth:token-type:refresh_token
- IssuedTokenType *string `locationName:"issuedTokenType" type:"string"`
-
- // A token that, if present, can be used to refresh a previously issued access
- // token that might have expired.
- //
- // For more information about the features and limitations of the current IAM
- // Identity Center OIDC implementation, see Considerations for Using this Guide
- // in the IAM Identity Center OIDC API Reference (https://docs.aws.amazon.com/singlesignon/latest/OIDCAPIReference/Welcome.html).
- //
- // RefreshToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by CreateTokenWithIAMOutput's
- // String and GoString methods.
- RefreshToken *string `locationName:"refreshToken" type:"string" sensitive:"true"`
-
- // The list of scopes for which authorization is granted. The access token that
- // is issued is limited to the scopes that are granted.
- Scope []*string `locationName:"scope" type:"list"`
-
- // Used to notify the requester that the returned token is an access token.
- // The supported token type is Bearer.
- TokenType *string `locationName:"tokenType" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTokenWithIAMOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s CreateTokenWithIAMOutput) GoString() string {
- return s.String()
-}
-
-// SetAccessToken sets the AccessToken field's value.
-func (s *CreateTokenWithIAMOutput) SetAccessToken(v string) *CreateTokenWithIAMOutput {
- s.AccessToken = &v
- return s
-}
-
-// SetExpiresIn sets the ExpiresIn field's value.
-func (s *CreateTokenWithIAMOutput) SetExpiresIn(v int64) *CreateTokenWithIAMOutput {
- s.ExpiresIn = &v
- return s
-}
-
-// SetIdToken sets the IdToken field's value.
-func (s *CreateTokenWithIAMOutput) SetIdToken(v string) *CreateTokenWithIAMOutput {
- s.IdToken = &v
- return s
-}
-
-// SetIssuedTokenType sets the IssuedTokenType field's value.
-func (s *CreateTokenWithIAMOutput) SetIssuedTokenType(v string) *CreateTokenWithIAMOutput {
- s.IssuedTokenType = &v
- return s
-}
-
-// SetRefreshToken sets the RefreshToken field's value.
-func (s *CreateTokenWithIAMOutput) SetRefreshToken(v string) *CreateTokenWithIAMOutput {
- s.RefreshToken = &v
- return s
-}
-
-// SetScope sets the Scope field's value.
-func (s *CreateTokenWithIAMOutput) SetScope(v []*string) *CreateTokenWithIAMOutput {
- s.Scope = v
- return s
-}
-
-// SetTokenType sets the TokenType field's value.
-func (s *CreateTokenWithIAMOutput) SetTokenType(v string) *CreateTokenWithIAMOutput {
- s.TokenType = &v
- return s
-}
-
-// Indicates that the token issued by the service is expired and is no longer
-// valid.
-type ExpiredTokenException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be expired_token.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExpiredTokenException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ExpiredTokenException) GoString() string {
- return s.String()
-}
-
-func newErrorExpiredTokenException(v protocol.ResponseMetadata) error {
- return &ExpiredTokenException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *ExpiredTokenException) Code() string {
- return "ExpiredTokenException"
-}
-
-// Message returns the exception's message.
-func (s *ExpiredTokenException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *ExpiredTokenException) OrigErr() error {
- return nil
-}
-
-func (s *ExpiredTokenException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *ExpiredTokenException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *ExpiredTokenException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that an error from the service occurred while trying to process
-// a request.
-type InternalServerException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be server_error.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InternalServerException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InternalServerException) GoString() string {
- return s.String()
-}
-
-func newErrorInternalServerException(v protocol.ResponseMetadata) error {
- return &InternalServerException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InternalServerException) Code() string {
- return "InternalServerException"
-}
-
-// Message returns the exception's message.
-func (s *InternalServerException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InternalServerException) OrigErr() error {
- return nil
-}
-
-func (s *InternalServerException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InternalServerException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InternalServerException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that the clientId or clientSecret in the request is invalid. For
-// example, this can occur when a client sends an incorrect clientId or an expired
-// clientSecret.
-type InvalidClientException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be invalid_client.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidClientException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidClientException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidClientException(v protocol.ResponseMetadata) error {
- return &InvalidClientException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidClientException) Code() string {
- return "InvalidClientException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidClientException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidClientException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidClientException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidClientException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidClientException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that the client information sent in the request during registration
-// is invalid.
-type InvalidClientMetadataException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be invalid_client_metadata.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidClientMetadataException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidClientMetadataException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidClientMetadataException(v protocol.ResponseMetadata) error {
- return &InvalidClientMetadataException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidClientMetadataException) Code() string {
- return "InvalidClientMetadataException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidClientMetadataException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidClientMetadataException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidClientMetadataException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidClientMetadataException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidClientMetadataException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that a request contains an invalid grant. This can occur if a client
-// makes a CreateToken request with an invalid grant type.
-type InvalidGrantException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be invalid_grant.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidGrantException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidGrantException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidGrantException(v protocol.ResponseMetadata) error {
- return &InvalidGrantException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidGrantException) Code() string {
- return "InvalidGrantException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidGrantException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidGrantException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidGrantException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidGrantException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidGrantException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that one or more redirect URI in the request is not supported for
-// this operation.
-type InvalidRedirectUriException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be invalid_redirect_uri.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRedirectUriException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRedirectUriException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidRedirectUriException(v protocol.ResponseMetadata) error {
- return &InvalidRedirectUriException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidRedirectUriException) Code() string {
- return "InvalidRedirectUriException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidRedirectUriException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidRedirectUriException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidRedirectUriException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidRedirectUriException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidRedirectUriException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that something is wrong with the input to the request. For example,
-// a required parameter might be missing or out of range.
-type InvalidRequestException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be invalid_request.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRequestException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRequestException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidRequestException(v protocol.ResponseMetadata) error {
- return &InvalidRequestException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidRequestException) Code() string {
- return "InvalidRequestException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidRequestException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidRequestException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidRequestException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidRequestException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidRequestException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that a token provided as input to the request was issued by and
-// is only usable by calling IAM Identity Center endpoints in another region.
-type InvalidRequestRegionException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Indicates the IAM Identity Center endpoint which the requester may call with
- // this token.
- Endpoint *string `locationName:"endpoint" type:"string"`
-
- // Single error code. For this exception the value will be invalid_request.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-
- // Indicates the region which the requester may call with this token.
- Region *string `locationName:"region" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRequestRegionException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidRequestRegionException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidRequestRegionException(v protocol.ResponseMetadata) error {
- return &InvalidRequestRegionException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidRequestRegionException) Code() string {
- return "InvalidRequestRegionException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidRequestRegionException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidRequestRegionException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidRequestRegionException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidRequestRegionException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidRequestRegionException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that the scope provided in the request is invalid.
-type InvalidScopeException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be invalid_scope.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidScopeException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s InvalidScopeException) GoString() string {
- return s.String()
-}
-
-func newErrorInvalidScopeException(v protocol.ResponseMetadata) error {
- return &InvalidScopeException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *InvalidScopeException) Code() string {
- return "InvalidScopeException"
-}
-
-// Message returns the exception's message.
-func (s *InvalidScopeException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *InvalidScopeException) OrigErr() error {
- return nil
-}
-
-func (s *InvalidScopeException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *InvalidScopeException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *InvalidScopeException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type RegisterClientInput struct {
- _ struct{} `type:"structure"`
-
- // The friendly name of the client.
- //
- // ClientName is a required field
- ClientName *string `locationName:"clientName" type:"string" required:"true"`
-
- // The type of client. The service supports only public as a client type. Anything
- // other than public will be rejected by the service.
- //
- // ClientType is a required field
- ClientType *string `locationName:"clientType" type:"string" required:"true"`
-
- // This IAM Identity Center application ARN is used to define administrator-managed
- // configuration for public client access to resources. At authorization, the
- // scopes, grants, and redirect URI available to this client will be restricted
- // by this application resource.
- EntitledApplicationArn *string `locationName:"entitledApplicationArn" type:"string"`
-
- // The list of OAuth 2.0 grant types that are defined by the client. This list
- // is used to restrict the token granting flows available to the client.
- GrantTypes []*string `locationName:"grantTypes" type:"list"`
-
- // The IAM Identity Center Issuer URL associated with an instance of IAM Identity
- // Center. This value is needed for user access to resources through the client.
- IssuerUrl *string `locationName:"issuerUrl" type:"string"`
-
- // The list of redirect URI that are defined by the client. At completion of
- // authorization, this list is used to restrict what locations the user agent
- // can be redirected back to.
- RedirectUris []*string `locationName:"redirectUris" type:"list"`
-
- // The list of scopes that are defined by the client. Upon authorization, this
- // list is used to restrict permissions when granting an access token.
- Scopes []*string `locationName:"scopes" type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegisterClientInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegisterClientInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *RegisterClientInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "RegisterClientInput"}
- if s.ClientName == nil {
- invalidParams.Add(request.NewErrParamRequired("ClientName"))
- }
- if s.ClientType == nil {
- invalidParams.Add(request.NewErrParamRequired("ClientType"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetClientName sets the ClientName field's value.
-func (s *RegisterClientInput) SetClientName(v string) *RegisterClientInput {
- s.ClientName = &v
- return s
-}
-
-// SetClientType sets the ClientType field's value.
-func (s *RegisterClientInput) SetClientType(v string) *RegisterClientInput {
- s.ClientType = &v
- return s
-}
-
-// SetEntitledApplicationArn sets the EntitledApplicationArn field's value.
-func (s *RegisterClientInput) SetEntitledApplicationArn(v string) *RegisterClientInput {
- s.EntitledApplicationArn = &v
- return s
-}
-
-// SetGrantTypes sets the GrantTypes field's value.
-func (s *RegisterClientInput) SetGrantTypes(v []*string) *RegisterClientInput {
- s.GrantTypes = v
- return s
-}
-
-// SetIssuerUrl sets the IssuerUrl field's value.
-func (s *RegisterClientInput) SetIssuerUrl(v string) *RegisterClientInput {
- s.IssuerUrl = &v
- return s
-}
-
-// SetRedirectUris sets the RedirectUris field's value.
-func (s *RegisterClientInput) SetRedirectUris(v []*string) *RegisterClientInput {
- s.RedirectUris = v
- return s
-}
-
-// SetScopes sets the Scopes field's value.
-func (s *RegisterClientInput) SetScopes(v []*string) *RegisterClientInput {
- s.Scopes = v
- return s
-}
-
-type RegisterClientOutput struct {
- _ struct{} `type:"structure"`
-
- // An endpoint that the client can use to request authorization.
- AuthorizationEndpoint *string `locationName:"authorizationEndpoint" type:"string"`
-
- // The unique identifier string for each client. This client uses this identifier
- // to get authenticated by the service in subsequent calls.
- ClientId *string `locationName:"clientId" type:"string"`
-
- // Indicates the time at which the clientId and clientSecret were issued.
- ClientIdIssuedAt *int64 `locationName:"clientIdIssuedAt" type:"long"`
-
- // A secret string generated for the client. The client will use this string
- // to get authenticated by the service in subsequent calls.
- //
- // ClientSecret is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by RegisterClientOutput's
- // String and GoString methods.
- ClientSecret *string `locationName:"clientSecret" type:"string" sensitive:"true"`
-
- // Indicates the time at which the clientId and clientSecret will become invalid.
- ClientSecretExpiresAt *int64 `locationName:"clientSecretExpiresAt" type:"long"`
-
- // An endpoint that the client can use to create tokens.
- TokenEndpoint *string `locationName:"tokenEndpoint" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegisterClientOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s RegisterClientOutput) GoString() string {
- return s.String()
-}
-
-// SetAuthorizationEndpoint sets the AuthorizationEndpoint field's value.
-func (s *RegisterClientOutput) SetAuthorizationEndpoint(v string) *RegisterClientOutput {
- s.AuthorizationEndpoint = &v
- return s
-}
-
-// SetClientId sets the ClientId field's value.
-func (s *RegisterClientOutput) SetClientId(v string) *RegisterClientOutput {
- s.ClientId = &v
- return s
-}
-
-// SetClientIdIssuedAt sets the ClientIdIssuedAt field's value.
-func (s *RegisterClientOutput) SetClientIdIssuedAt(v int64) *RegisterClientOutput {
- s.ClientIdIssuedAt = &v
- return s
-}
-
-// SetClientSecret sets the ClientSecret field's value.
-func (s *RegisterClientOutput) SetClientSecret(v string) *RegisterClientOutput {
- s.ClientSecret = &v
- return s
-}
-
-// SetClientSecretExpiresAt sets the ClientSecretExpiresAt field's value.
-func (s *RegisterClientOutput) SetClientSecretExpiresAt(v int64) *RegisterClientOutput {
- s.ClientSecretExpiresAt = &v
- return s
-}
-
-// SetTokenEndpoint sets the TokenEndpoint field's value.
-func (s *RegisterClientOutput) SetTokenEndpoint(v string) *RegisterClientOutput {
- s.TokenEndpoint = &v
- return s
-}
-
-// Indicates that the client is making the request too frequently and is more
-// than the service can handle.
-type SlowDownException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be slow_down.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SlowDownException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s SlowDownException) GoString() string {
- return s.String()
-}
-
-func newErrorSlowDownException(v protocol.ResponseMetadata) error {
- return &SlowDownException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *SlowDownException) Code() string {
- return "SlowDownException"
-}
-
-// Message returns the exception's message.
-func (s *SlowDownException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *SlowDownException) OrigErr() error {
- return nil
-}
-
-func (s *SlowDownException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *SlowDownException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *SlowDownException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-type StartDeviceAuthorizationInput struct {
- _ struct{} `type:"structure"`
-
- // The unique identifier string for the client that is registered with IAM Identity
- // Center. This value should come from the persisted result of the RegisterClient
- // API operation.
- //
- // ClientId is a required field
- ClientId *string `locationName:"clientId" type:"string" required:"true"`
-
- // A secret string that is generated for the client. This value should come
- // from the persisted result of the RegisterClient API operation.
- //
- // ClientSecret is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by StartDeviceAuthorizationInput's
- // String and GoString methods.
- //
- // ClientSecret is a required field
- ClientSecret *string `locationName:"clientSecret" type:"string" required:"true" sensitive:"true"`
-
- // The URL for the Amazon Web Services access portal. For more information,
- // see Using the Amazon Web Services access portal (https://docs.aws.amazon.com/singlesignon/latest/userguide/using-the-portal.html)
- // in the IAM Identity Center User Guide.
- //
- // StartUrl is a required field
- StartUrl *string `locationName:"startUrl" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartDeviceAuthorizationInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartDeviceAuthorizationInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *StartDeviceAuthorizationInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "StartDeviceAuthorizationInput"}
- if s.ClientId == nil {
- invalidParams.Add(request.NewErrParamRequired("ClientId"))
- }
- if s.ClientSecret == nil {
- invalidParams.Add(request.NewErrParamRequired("ClientSecret"))
- }
- if s.StartUrl == nil {
- invalidParams.Add(request.NewErrParamRequired("StartUrl"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetClientId sets the ClientId field's value.
-func (s *StartDeviceAuthorizationInput) SetClientId(v string) *StartDeviceAuthorizationInput {
- s.ClientId = &v
- return s
-}
-
-// SetClientSecret sets the ClientSecret field's value.
-func (s *StartDeviceAuthorizationInput) SetClientSecret(v string) *StartDeviceAuthorizationInput {
- s.ClientSecret = &v
- return s
-}
-
-// SetStartUrl sets the StartUrl field's value.
-func (s *StartDeviceAuthorizationInput) SetStartUrl(v string) *StartDeviceAuthorizationInput {
- s.StartUrl = &v
- return s
-}
-
-type StartDeviceAuthorizationOutput struct {
- _ struct{} `type:"structure"`
-
- // The short-lived code that is used by the device when polling for a session
- // token.
- DeviceCode *string `locationName:"deviceCode" type:"string"`
-
- // Indicates the number of seconds in which the verification code will become
- // invalid.
- ExpiresIn *int64 `locationName:"expiresIn" type:"integer"`
-
- // Indicates the number of seconds the client must wait between attempts when
- // polling for a session.
- Interval *int64 `locationName:"interval" type:"integer"`
-
- // A one-time user verification code. This is needed to authorize an in-use
- // device.
- UserCode *string `locationName:"userCode" type:"string"`
-
- // The URI of the verification page that takes the userCode to authorize the
- // device.
- VerificationUri *string `locationName:"verificationUri" type:"string"`
-
- // An alternate URL that the client can use to automatically launch a browser.
- // This process skips the manual step in which the user visits the verification
- // page and enters their code.
- VerificationUriComplete *string `locationName:"verificationUriComplete" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartDeviceAuthorizationOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s StartDeviceAuthorizationOutput) GoString() string {
- return s.String()
-}
-
-// SetDeviceCode sets the DeviceCode field's value.
-func (s *StartDeviceAuthorizationOutput) SetDeviceCode(v string) *StartDeviceAuthorizationOutput {
- s.DeviceCode = &v
- return s
-}
-
-// SetExpiresIn sets the ExpiresIn field's value.
-func (s *StartDeviceAuthorizationOutput) SetExpiresIn(v int64) *StartDeviceAuthorizationOutput {
- s.ExpiresIn = &v
- return s
-}
-
-// SetInterval sets the Interval field's value.
-func (s *StartDeviceAuthorizationOutput) SetInterval(v int64) *StartDeviceAuthorizationOutput {
- s.Interval = &v
- return s
-}
-
-// SetUserCode sets the UserCode field's value.
-func (s *StartDeviceAuthorizationOutput) SetUserCode(v string) *StartDeviceAuthorizationOutput {
- s.UserCode = &v
- return s
-}
-
-// SetVerificationUri sets the VerificationUri field's value.
-func (s *StartDeviceAuthorizationOutput) SetVerificationUri(v string) *StartDeviceAuthorizationOutput {
- s.VerificationUri = &v
- return s
-}
-
-// SetVerificationUriComplete sets the VerificationUriComplete field's value.
-func (s *StartDeviceAuthorizationOutput) SetVerificationUriComplete(v string) *StartDeviceAuthorizationOutput {
- s.VerificationUriComplete = &v
- return s
-}
-
-// Indicates that the client is not currently authorized to make the request.
-// This can happen when a clientId is not issued for a public client.
-type UnauthorizedClientException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be unauthorized_client.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnauthorizedClientException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnauthorizedClientException) GoString() string {
- return s.String()
-}
-
-func newErrorUnauthorizedClientException(v protocol.ResponseMetadata) error {
- return &UnauthorizedClientException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *UnauthorizedClientException) Code() string {
- return "UnauthorizedClientException"
-}
-
-// Message returns the exception's message.
-func (s *UnauthorizedClientException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *UnauthorizedClientException) OrigErr() error {
- return nil
-}
-
-func (s *UnauthorizedClientException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *UnauthorizedClientException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *UnauthorizedClientException) RequestID() string {
- return s.RespMetadata.RequestID
-}
-
-// Indicates that the grant type in the request is not supported by the service.
-type UnsupportedGrantTypeException struct {
- _ struct{} `type:"structure"`
- RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
-
- // Single error code. For this exception the value will be unsupported_grant_type.
- Error_ *string `locationName:"error" type:"string"`
-
- // Human-readable text providing additional information, used to assist the
- // client developer in understanding the error that occurred.
- Error_description *string `locationName:"error_description" type:"string"`
-
- Message_ *string `locationName:"message" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnsupportedGrantTypeException) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s UnsupportedGrantTypeException) GoString() string {
- return s.String()
-}
-
-func newErrorUnsupportedGrantTypeException(v protocol.ResponseMetadata) error {
- return &UnsupportedGrantTypeException{
- RespMetadata: v,
- }
-}
-
-// Code returns the exception type name.
-func (s *UnsupportedGrantTypeException) Code() string {
- return "UnsupportedGrantTypeException"
-}
-
-// Message returns the exception's message.
-func (s *UnsupportedGrantTypeException) Message() string {
- if s.Message_ != nil {
- return *s.Message_
- }
- return ""
-}
-
-// OrigErr always returns nil, satisfies awserr.Error interface.
-func (s *UnsupportedGrantTypeException) OrigErr() error {
- return nil
-}
-
-func (s *UnsupportedGrantTypeException) Error() string {
- return fmt.Sprintf("%s: %s\n%s", s.Code(), s.Message(), s.String())
-}
-
-// Status code returns the HTTP status code for the request's response error.
-func (s *UnsupportedGrantTypeException) StatusCode() int {
- return s.RespMetadata.StatusCode
-}
-
-// RequestID returns the service's response RequestID for request.
-func (s *UnsupportedGrantTypeException) RequestID() string {
- return s.RespMetadata.RequestID
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
deleted file mode 100644
index 083568c61..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/doc.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package ssooidc provides the client and types for making API
-// requests to AWS SSO OIDC.
-//
-// IAM Identity Center OpenID Connect (OIDC) is a web service that enables a
-// client (such as CLI or a native application) to register with IAM Identity
-// Center. The service also enables the client to fetch the user’s access
-// token upon successful authentication and authorization with IAM Identity
-// Center.
-//
-// IAM Identity Center uses the sso and identitystore API namespaces.
-//
-// # Considerations for Using This Guide
-//
-// Before you begin using this guide, we recommend that you first review the
-// following important information about how the IAM Identity Center OIDC service
-// works.
-//
-// - The IAM Identity Center OIDC service currently implements only the portions
-// of the OAuth 2.0 Device Authorization Grant standard (https://tools.ietf.org/html/rfc8628
-// (https://tools.ietf.org/html/rfc8628)) that are necessary to enable single
-// sign-on authentication with the CLI.
-//
-// - With older versions of the CLI, the service only emits OIDC access tokens,
-// so to obtain a new token, users must explicitly re-authenticate. To access
-// the OIDC flow that supports token refresh and doesn’t require re-authentication,
-// update to the latest CLI version (1.27.10 for CLI V1 and 2.9.0 for CLI
-// V2) with support for OIDC token refresh and configurable IAM Identity
-// Center session durations. For more information, see Configure Amazon Web
-// Services access portal session duration (https://docs.aws.amazon.com/singlesignon/latest/userguide/configure-user-session.html).
-//
-// - The access tokens provided by this service grant access to all Amazon
-// Web Services account entitlements assigned to an IAM Identity Center user,
-// not just a particular application.
-//
-// - The documentation in this guide does not describe the mechanism to convert
-// the access token into Amazon Web Services Auth (“sigv4”) credentials
-// for use with IAM-protected Amazon Web Services service endpoints. For
-// more information, see GetRoleCredentials (https://docs.aws.amazon.com/singlesignon/latest/PortalAPIReference/API_GetRoleCredentials.html)
-// in the IAM Identity Center Portal API Reference Guide.
-//
-// For general information about IAM Identity Center, see What is IAM Identity
-// Center? (https://docs.aws.amazon.com/singlesignon/latest/userguide/what-is.html)
-// in the IAM Identity Center User Guide.
-//
-// See https://docs.aws.amazon.com/goto/WebAPI/sso-oidc-2019-06-10 for more information on this service.
-//
-// See ssooidc package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/
-//
-// # Using the Client
-//
-// To contact AWS SSO OIDC with the SDK use the New function to create
-// a new service client. With that client you can make API requests to the service.
-// These clients are safe to use concurrently.
-//
-// See the SDK's documentation for more information on how to use the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/
-//
-// See aws.Config documentation for more information on configuring SDK clients.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
-//
-// See the AWS SSO OIDC client SSOOIDC for more
-// information on creating client for this service.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/ssooidc/#New
-package ssooidc
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
deleted file mode 100644
index cadf4584d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/errors.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package ssooidc
-
-import (
- "github.com/aws/aws-sdk-go/private/protocol"
-)
-
-const (
-
- // ErrCodeAccessDeniedException for service response error code
- // "AccessDeniedException".
- //
- // You do not have sufficient access to perform this action.
- ErrCodeAccessDeniedException = "AccessDeniedException"
-
- // ErrCodeAuthorizationPendingException for service response error code
- // "AuthorizationPendingException".
- //
- // Indicates that a request to authorize a client with an access user session
- // token is pending.
- ErrCodeAuthorizationPendingException = "AuthorizationPendingException"
-
- // ErrCodeExpiredTokenException for service response error code
- // "ExpiredTokenException".
- //
- // Indicates that the token issued by the service is expired and is no longer
- // valid.
- ErrCodeExpiredTokenException = "ExpiredTokenException"
-
- // ErrCodeInternalServerException for service response error code
- // "InternalServerException".
- //
- // Indicates that an error from the service occurred while trying to process
- // a request.
- ErrCodeInternalServerException = "InternalServerException"
-
- // ErrCodeInvalidClientException for service response error code
- // "InvalidClientException".
- //
- // Indicates that the clientId or clientSecret in the request is invalid. For
- // example, this can occur when a client sends an incorrect clientId or an expired
- // clientSecret.
- ErrCodeInvalidClientException = "InvalidClientException"
-
- // ErrCodeInvalidClientMetadataException for service response error code
- // "InvalidClientMetadataException".
- //
- // Indicates that the client information sent in the request during registration
- // is invalid.
- ErrCodeInvalidClientMetadataException = "InvalidClientMetadataException"
-
- // ErrCodeInvalidGrantException for service response error code
- // "InvalidGrantException".
- //
- // Indicates that a request contains an invalid grant. This can occur if a client
- // makes a CreateToken request with an invalid grant type.
- ErrCodeInvalidGrantException = "InvalidGrantException"
-
- // ErrCodeInvalidRedirectUriException for service response error code
- // "InvalidRedirectUriException".
- //
- // Indicates that one or more redirect URI in the request is not supported for
- // this operation.
- ErrCodeInvalidRedirectUriException = "InvalidRedirectUriException"
-
- // ErrCodeInvalidRequestException for service response error code
- // "InvalidRequestException".
- //
- // Indicates that something is wrong with the input to the request. For example,
- // a required parameter might be missing or out of range.
- ErrCodeInvalidRequestException = "InvalidRequestException"
-
- // ErrCodeInvalidRequestRegionException for service response error code
- // "InvalidRequestRegionException".
- //
- // Indicates that a token provided as input to the request was issued by and
- // is only usable by calling IAM Identity Center endpoints in another region.
- ErrCodeInvalidRequestRegionException = "InvalidRequestRegionException"
-
- // ErrCodeInvalidScopeException for service response error code
- // "InvalidScopeException".
- //
- // Indicates that the scope provided in the request is invalid.
- ErrCodeInvalidScopeException = "InvalidScopeException"
-
- // ErrCodeSlowDownException for service response error code
- // "SlowDownException".
- //
- // Indicates that the client is making the request too frequently and is more
- // than the service can handle.
- ErrCodeSlowDownException = "SlowDownException"
-
- // ErrCodeUnauthorizedClientException for service response error code
- // "UnauthorizedClientException".
- //
- // Indicates that the client is not currently authorized to make the request.
- // This can happen when a clientId is not issued for a public client.
- ErrCodeUnauthorizedClientException = "UnauthorizedClientException"
-
- // ErrCodeUnsupportedGrantTypeException for service response error code
- // "UnsupportedGrantTypeException".
- //
- // Indicates that the grant type in the request is not supported by the service.
- ErrCodeUnsupportedGrantTypeException = "UnsupportedGrantTypeException"
-)
-
-var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
- "AccessDeniedException": newErrorAccessDeniedException,
- "AuthorizationPendingException": newErrorAuthorizationPendingException,
- "ExpiredTokenException": newErrorExpiredTokenException,
- "InternalServerException": newErrorInternalServerException,
- "InvalidClientException": newErrorInvalidClientException,
- "InvalidClientMetadataException": newErrorInvalidClientMetadataException,
- "InvalidGrantException": newErrorInvalidGrantException,
- "InvalidRedirectUriException": newErrorInvalidRedirectUriException,
- "InvalidRequestException": newErrorInvalidRequestException,
- "InvalidRequestRegionException": newErrorInvalidRequestRegionException,
- "InvalidScopeException": newErrorInvalidScopeException,
- "SlowDownException": newErrorSlowDownException,
- "UnauthorizedClientException": newErrorUnauthorizedClientException,
- "UnsupportedGrantTypeException": newErrorUnsupportedGrantTypeException,
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go b/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
deleted file mode 100644
index 782bae369..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/ssooidc/service.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package ssooidc
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/private/protocol"
- "github.com/aws/aws-sdk-go/private/protocol/restjson"
-)
-
-// SSOOIDC provides the API operation methods for making requests to
-// AWS SSO OIDC. See this package's package overview docs
-// for details on the service.
-//
-// SSOOIDC methods are safe to use concurrently. It is not safe to
-// modify mutate any of the struct's properties though.
-type SSOOIDC struct {
- *client.Client
-}
-
-// Used for custom client initialization logic
-var initClient func(*client.Client)
-
-// Used for custom request initialization logic
-var initRequest func(*request.Request)
-
-// Service information constants
-const (
- ServiceName = "SSO OIDC" // Name of service.
- EndpointsID = "oidc" // ID to lookup a service endpoint with.
- ServiceID = "SSO OIDC" // ServiceID is a unique identifier of a specific service.
-)
-
-// New creates a new instance of the SSOOIDC client with a session.
-// If additional configuration is needed for the client instance use the optional
-// aws.Config parameter to add your extra config.
-//
-// Example:
-//
-// mySession := session.Must(session.NewSession())
-//
-// // Create a SSOOIDC client from just a session.
-// svc := ssooidc.New(mySession)
-//
-// // Create a SSOOIDC client with additional configuration
-// svc := ssooidc.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *SSOOIDC {
- c := p.ClientConfig(EndpointsID, cfgs...)
- if c.SigningNameDerived || len(c.SigningName) == 0 {
- c.SigningName = "sso-oauth"
- }
- return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
-}
-
-// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *SSOOIDC {
- svc := &SSOOIDC{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: ServiceName,
- ServiceID: ServiceID,
- SigningName: signingName,
- SigningRegion: signingRegion,
- PartitionID: partitionID,
- Endpoint: endpoint,
- APIVersion: "2019-06-10",
- ResolvedRegion: resolvedRegion,
- },
- handlers,
- ),
- }
-
- // Handlers
- svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
- svc.Handlers.Build.PushBackNamed(restjson.BuildHandler)
- svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler)
- svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler)
- svc.Handlers.UnmarshalError.PushBackNamed(
- protocol.NewUnmarshalErrorHandler(restjson.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
- )
-
- // Run custom client initialization if present
- if initClient != nil {
- initClient(svc.Client)
- }
-
- return svc
-}
-
-// newRequest creates a new request for a SSOOIDC operation and runs any
-// custom request initialization.
-func (c *SSOOIDC) newRequest(op *request.Operation, params, data interface{}) *request.Request {
- req := c.NewRequest(op, params, data)
-
- // Run custom request initialization if present
- if initRequest != nil {
- initRequest(req)
- }
-
- return req
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
deleted file mode 100644
index 2c395f5f6..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ /dev/null
@@ -1,3553 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sts
-
-import (
- "fmt"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/credentials"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-const opAssumeRole = "AssumeRole"
-
-// AssumeRoleRequest generates a "aws/request.Request" representing the
-// client's request for the AssumeRole operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See AssumeRole for more information on using the AssumeRole
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the AssumeRoleRequest method.
-// req, resp := client.AssumeRoleRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
-func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) {
- op := &request.Operation{
- Name: opAssumeRole,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &AssumeRoleInput{}
- }
-
- output = &AssumeRoleOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// AssumeRole API operation for AWS Security Token Service.
-//
-// Returns a set of temporary security credentials that you can use to access
-// Amazon Web Services resources. These temporary credentials consist of an
-// access key ID, a secret access key, and a security token. Typically, you
-// use AssumeRole within your account or for cross-account access. For a comparison
-// of AssumeRole with other API operations that produce temporary credentials,
-// see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// # Permissions
-//
-// The temporary security credentials created by AssumeRole can be used to make
-// API calls to any Amazon Web Services service with the following exception:
-// You cannot call the Amazon Web Services STS GetFederationToken or GetSessionToken
-// API operations.
-//
-// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policy Amazon
-// Resource Names (ARNs) to use as managed session policies. The plaintext that
-// you use for both inline and managed session policies can't exceed 2,048 characters.
-// Passing policies to this operation returns new temporary credentials. The
-// resulting session's permissions are the intersection of the role's identity-based
-// policy and the session policies. You can use the role's temporary credentials
-// in subsequent Amazon Web Services API calls to access resources in the account
-// that owns the role. You cannot use session policies to grant more permissions
-// than those allowed by the identity-based policy of the role that is being
-// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide.
-//
-// When you create a role, you create two policies: a role trust policy that
-// specifies who can assume the role, and a permissions policy that specifies
-// what can be done with the role. You specify the trusted principal that is
-// allowed to assume the role in the role trust policy.
-//
-// To assume a role from a different account, your Amazon Web Services account
-// must be trusted by the role. The trust relationship is defined in the role's
-// trust policy when the role is created. That trust policy states which accounts
-// are allowed to delegate that access to users in the account.
-//
-// A user who wants to access a role in a different account must also have permissions
-// that are delegated from the account administrator. The administrator must
-// attach a policy that allows the user to call AssumeRole for the ARN of the
-// role in the other account.
-//
-// To allow a user to assume a role in the same account, you can do either of
-// the following:
-//
-// - Attach a policy to the user that allows the user to call AssumeRole
-// (as long as the role's trust policy trusts the account).
-//
-// - Add the user as a principal directly in the role's trust policy.
-//
-// You can do either because the role’s trust policy acts as an IAM resource-based
-// policy. When a resource-based policy grants access to a principal in the
-// same account, no additional identity-based policy is required. For more information
-// about trust policies and resource-based policies, see IAM Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
-// in the IAM User Guide.
-//
-// # Tags
-//
-// (Optional) You can pass tag key-value pairs to your session. These tags are
-// called session tags. For more information about session tags, see Passing
-// Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// An administrator must grant you the permissions necessary to pass session
-// tags. The administrator can also create granular permissions to allow you
-// to pass only specific session tags. For more information, see Tutorial: Using
-// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide.
-//
-// You can set the session tags as transitive. Transitive tags persist during
-// role chaining. For more information, see Chaining Roles with Session Tags
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide.
-//
-// # Using MFA with AssumeRole
-//
-// (Optional) You can include multi-factor authentication (MFA) information
-// when you call AssumeRole. This is useful for cross-account scenarios to ensure
-// that the user that assumes the role has been authenticated with an Amazon
-// Web Services MFA device. In that scenario, the trust policy of the role being
-// assumed includes a condition that tests for MFA authentication. If the caller
-// does not include valid MFA information, the request to assume the role is
-// denied. The condition in a trust policy that tests for MFA authentication
-// might look like the following example.
-//
-// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}}
-//
-// For more information, see Configuring MFA-Protected API Access (https://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html)
-// in the IAM User Guide guide.
-//
-// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode
-// parameters. The SerialNumber value identifies the user's hardware or virtual
-// MFA device. The TokenCode is the time-based one-time password (TOTP) that
-// the MFA device produces.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation AssumeRole for usage and error information.
-//
-// Returned Error Codes:
-//
-// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
-// The request was rejected because the policy document was malformed. The error
-// message describes the specific error.
-//
-// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An Amazon Web Services conversion
-// compresses the session policy document, session policy ARNs, and session
-// tags into a packed binary format that has a separate limit. The error message
-// indicates by percentage how close the policies and tags are to the upper
-// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You could receive this error even though you meet other defined session policy
-// and session tag limits. For more information, see IAM and STS Entity Character
-// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// - ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// - ErrCodeExpiredTokenException "ExpiredTokenException"
-// The web identity token that was passed is expired or is not valid. Get a
-// new identity token from the identity provider and then retry the request.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
-func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
- req, out := c.AssumeRoleRequest(input)
- return out, req.Send()
-}
-
-// AssumeRoleWithContext is the same as AssumeRole with the addition of
-// the ability to pass a context and additional request options.
-//
-// See AssumeRole for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
- req, out := c.AssumeRoleRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
-
-// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
-// client's request for the AssumeRoleWithSAML operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the AssumeRoleWithSAMLRequest method.
-// req, resp := client.AssumeRoleWithSAMLRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
-func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) {
- op := &request.Operation{
- Name: opAssumeRoleWithSAML,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &AssumeRoleWithSAMLInput{}
- }
-
- output = &AssumeRoleWithSAMLOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- return
-}
-
-// AssumeRoleWithSAML API operation for AWS Security Token Service.
-//
-// Returns a set of temporary security credentials for users who have been authenticated
-// via a SAML authentication response. This operation provides a mechanism for
-// tying an enterprise identity store or directory to role-based Amazon Web
-// Services access without user-specific credentials or configuration. For a
-// comparison of AssumeRoleWithSAML with the other API operations that produce
-// temporary credentials, see Requesting Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// The temporary security credentials returned by this operation consist of
-// an access key ID, a secret access key, and a security token. Applications
-// can use these temporary security credentials to sign calls to Amazon Web
-// Services services.
-//
-// # Session Duration
-//
-// By default, the temporary security credentials created by AssumeRoleWithSAML
-// last for one hour. However, you can use the optional DurationSeconds parameter
-// to specify the duration of your session. Your role session lasts for the
-// duration that you specify, or until the time specified in the SAML authentication
-// response's SessionNotOnOrAfter value, whichever is shorter. You can provide
-// a DurationSeconds value from 900 seconds (15 minutes) up to the maximum session
-// duration setting for the role. This setting can have a value from 1 hour
-// to 12 hours. To learn how to view the maximum value for your role, see View
-// the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-// in the IAM User Guide. The maximum session duration limit applies when you
-// use the AssumeRole* API operations or the assume-role* CLI commands. However
-// the limit does not apply when you use those operations to create a console
-// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
-// in the IAM User Guide.
-//
-// Role chaining (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-role-chaining)
-// limits your CLI or Amazon Web Services API role session to a maximum of one
-// hour. When you use the AssumeRole API operation to assume a role, you can
-// specify the duration of your role session with the DurationSeconds parameter.
-// You can specify a parameter value of up to 43200 seconds (12 hours), depending
-// on the maximum session duration setting for your role. However, if you assume
-// a role using role chaining and provide a DurationSeconds parameter value
-// greater than one hour, the operation fails.
-//
-// # Permissions
-//
-// The temporary security credentials created by AssumeRoleWithSAML can be used
-// to make API calls to any Amazon Web Services service with the following exception:
-// you cannot call the STS GetFederationToken or GetSessionToken API operations.
-//
-// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policy Amazon
-// Resource Names (ARNs) to use as managed session policies. The plaintext that
-// you use for both inline and managed session policies can't exceed 2,048 characters.
-// Passing policies to this operation returns new temporary credentials. The
-// resulting session's permissions are the intersection of the role's identity-based
-// policy and the session policies. You can use the role's temporary credentials
-// in subsequent Amazon Web Services API calls to access resources in the account
-// that owns the role. You cannot use session policies to grant more permissions
-// than those allowed by the identity-based policy of the role that is being
-// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide.
-//
-// Calling AssumeRoleWithSAML does not require the use of Amazon Web Services
-// security credentials. The identity of the caller is validated by using keys
-// in the metadata document that is uploaded for the SAML provider entity for
-// your identity provider.
-//
-// Calling AssumeRoleWithSAML can result in an entry in your CloudTrail logs.
-// The entry includes the value in the NameID element of the SAML assertion.
-// We recommend that you use a NameIDType that is not associated with any personally
-// identifiable information (PII). For example, you could instead use the persistent
-// identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent).
-//
-// # Tags
-//
-// (Optional) You can configure your IdP to pass attributes into your SAML assertion
-// as session tags. Each session tag consists of a key name and an associated
-// value. For more information about session tags, see Passing Session Tags
-// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You can pass up to 50 session tags. The plaintext session tag keys can’t
-// exceed 128 characters and the values can’t exceed 256 characters. For these
-// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// An Amazon Web Services conversion compresses the passed inline session policy,
-// managed policy ARNs, and session tags into a packed binary format that has
-// a separate limit. Your request can fail for this limit even if your plaintext
-// meets the other requirements. The PackedPolicySize response element indicates
-// by percentage how close the policies and tags for your request are to the
-// upper size limit.
-//
-// You can pass a session tag with the same key as a tag that is attached to
-// the role. When you do, session tags override the role's tags with the same
-// key.
-//
-// An administrator must grant you the permissions necessary to pass session
-// tags. The administrator can also create granular permissions to allow you
-// to pass only specific session tags. For more information, see Tutorial: Using
-// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide.
-//
-// You can set the session tags as transitive. Transitive tags persist during
-// role chaining. For more information, see Chaining Roles with Session Tags
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide.
-//
-// # SAML Configuration
-//
-// Before your application can call AssumeRoleWithSAML, you must configure your
-// SAML identity provider (IdP) to issue the claims required by Amazon Web Services.
-// Additionally, you must use Identity and Access Management (IAM) to create
-// a SAML provider entity in your Amazon Web Services account that represents
-// your identity provider. You must also create an IAM role that specifies this
-// SAML provider in its trust policy.
-//
-// For more information, see the following resources:
-//
-// - About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html)
-// in the IAM User Guide.
-//
-// - Creating SAML Identity Providers (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html)
-// in the IAM User Guide.
-//
-// - Configuring a Relying Party and Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html)
-// in the IAM User Guide.
-//
-// - Creating a Role for SAML 2.0 Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html)
-// in the IAM User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation AssumeRoleWithSAML for usage and error information.
-//
-// Returned Error Codes:
-//
-// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
-// The request was rejected because the policy document was malformed. The error
-// message describes the specific error.
-//
-// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An Amazon Web Services conversion
-// compresses the session policy document, session policy ARNs, and session
-// tags into a packed binary format that has a separate limit. The error message
-// indicates by percentage how close the policies and tags are to the upper
-// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You could receive this error even though you meet other defined session policy
-// and session tag limits. For more information, see IAM and STS Entity Character
-// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
-// The identity provider (IdP) reported that authentication failed. This might
-// be because the claim is invalid.
-//
-// If this error is returned for the AssumeRoleWithWebIdentity operation, it
-// can also mean that the claim has expired or has been explicitly revoked.
-//
-// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
-// The web identity token that was passed could not be validated by Amazon Web
-// Services. Get a new identity token from the identity provider and then retry
-// the request.
-//
-// - ErrCodeExpiredTokenException "ExpiredTokenException"
-// The web identity token that was passed is expired or is not valid. Get a
-// new identity token from the identity provider and then retry the request.
-//
-// - ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
-func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
- req, out := c.AssumeRoleWithSAMLRequest(input)
- return out, req.Send()
-}
-
-// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
-// the ability to pass a context and additional request options.
-//
-// See AssumeRoleWithSAML for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
- req, out := c.AssumeRoleWithSAMLRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
-
-// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
-// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
-// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
-func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) {
- op := &request.Operation{
- Name: opAssumeRoleWithWebIdentity,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &AssumeRoleWithWebIdentityInput{}
- }
-
- output = &AssumeRoleWithWebIdentityOutput{}
- req = c.newRequest(op, input, output)
- req.Config.Credentials = credentials.AnonymousCredentials
- return
-}
-
-// AssumeRoleWithWebIdentity API operation for AWS Security Token Service.
-//
-// Returns a set of temporary security credentials for users who have been authenticated
-// in a mobile or web application with a web identity provider. Example providers
-// include the OAuth 2.0 providers Login with Amazon and Facebook, or any OpenID
-// Connect-compatible identity provider such as Google or Amazon Cognito federated
-// identities (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html).
-//
-// For mobile applications, we recommend that you use Amazon Cognito. You can
-// use Amazon Cognito with the Amazon Web Services SDK for iOS Developer Guide
-// (http://aws.amazon.com/sdkforios/) and the Amazon Web Services SDK for Android
-// Developer Guide (http://aws.amazon.com/sdkforandroid/) to uniquely identify
-// a user. You can also supply the user with a consistent identity throughout
-// the lifetime of an application.
-//
-// To learn more about Amazon Cognito, see Amazon Cognito identity pools (https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-identity.html)
-// in Amazon Cognito Developer Guide.
-//
-// Calling AssumeRoleWithWebIdentity does not require the use of Amazon Web
-// Services security credentials. Therefore, you can distribute an application
-// (for example, on mobile devices) that requests temporary security credentials
-// without including long-term Amazon Web Services credentials in the application.
-// You also don't need to deploy server-based proxy services that use long-term
-// Amazon Web Services credentials. Instead, the identity of the caller is validated
-// by using a token from the web identity provider. For a comparison of AssumeRoleWithWebIdentity
-// with the other API operations that produce temporary credentials, see Requesting
-// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// The temporary security credentials returned by this API consist of an access
-// key ID, a secret access key, and a security token. Applications can use these
-// temporary security credentials to sign calls to Amazon Web Services service
-// API operations.
-//
-// # Session Duration
-//
-// By default, the temporary security credentials created by AssumeRoleWithWebIdentity
-// last for one hour. However, you can use the optional DurationSeconds parameter
-// to specify the duration of your session. You can provide a value from 900
-// seconds (15 minutes) up to the maximum session duration setting for the role.
-// This setting can have a value from 1 hour to 12 hours. To learn how to view
-// the maximum value for your role, see View the Maximum Session Duration Setting
-// for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
-// in the IAM User Guide. The maximum session duration limit applies when you
-// use the AssumeRole* API operations or the assume-role* CLI commands. However
-// the limit does not apply when you use those operations to create a console
-// URL. For more information, see Using IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html)
-// in the IAM User Guide.
-//
-// # Permissions
-//
-// The temporary security credentials created by AssumeRoleWithWebIdentity can
-// be used to make API calls to any Amazon Web Services service with the following
-// exception: you cannot call the STS GetFederationToken or GetSessionToken
-// API operations.
-//
-// (Optional) You can pass inline or managed session policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policy Amazon
-// Resource Names (ARNs) to use as managed session policies. The plaintext that
-// you use for both inline and managed session policies can't exceed 2,048 characters.
-// Passing policies to this operation returns new temporary credentials. The
-// resulting session's permissions are the intersection of the role's identity-based
-// policy and the session policies. You can use the role's temporary credentials
-// in subsequent Amazon Web Services API calls to access resources in the account
-// that owns the role. You cannot use session policies to grant more permissions
-// than those allowed by the identity-based policy of the role that is being
-// assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide.
-//
-// # Tags
-//
-// (Optional) You can configure your IdP to pass attributes into your web identity
-// token as session tags. Each session tag consists of a key name and an associated
-// value. For more information about session tags, see Passing Session Tags
-// in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You can pass up to 50 session tags. The plaintext session tag keys can’t
-// exceed 128 characters and the values can’t exceed 256 characters. For these
-// and additional limits, see IAM and STS Character Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// An Amazon Web Services conversion compresses the passed inline session policy,
-// managed policy ARNs, and session tags into a packed binary format that has
-// a separate limit. Your request can fail for this limit even if your plaintext
-// meets the other requirements. The PackedPolicySize response element indicates
-// by percentage how close the policies and tags for your request are to the
-// upper size limit.
-//
-// You can pass a session tag with the same key as a tag that is attached to
-// the role. When you do, the session tag overrides the role tag with the same
-// key.
-//
-// An administrator must grant you the permissions necessary to pass session
-// tags. The administrator can also create granular permissions to allow you
-// to pass only specific session tags. For more information, see Tutorial: Using
-// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide.
-//
-// You can set the session tags as transitive. Transitive tags persist during
-// role chaining. For more information, see Chaining Roles with Session Tags
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
-// in the IAM User Guide.
-//
-// # Identities
-//
-// Before your application can call AssumeRoleWithWebIdentity, you must have
-// an identity token from a supported identity provider and create a role that
-// the application can assume. The role that your application assumes must trust
-// the identity provider that is associated with the identity token. In other
-// words, the identity provider must be specified in the role's trust policy.
-//
-// Calling AssumeRoleWithWebIdentity can result in an entry in your CloudTrail
-// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims)
-// of the provided web identity token. We recommend that you avoid using any
-// personally identifiable information (PII) in this field. For example, you
-// could instead use a GUID or a pairwise identifier, as suggested in the OIDC
-// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes).
-//
-// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity
-// API, see the following resources:
-//
-// - Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
-// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
-//
-// - Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
-// Walk through the process of authenticating through Login with Amazon,
-// Facebook, or Google, getting temporary security credentials, and then
-// using those credentials to make a request to Amazon Web Services.
-//
-// - Amazon Web Services SDK for iOS Developer Guide (http://aws.amazon.com/sdkforios/)
-// and Amazon Web Services SDK for Android Developer Guide (http://aws.amazon.com/sdkforandroid/).
-// These toolkits contain sample apps that show how to invoke the identity
-// providers. The toolkits then show how to use the information from these
-// providers to get and use temporary security credentials.
-//
-// - Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/web-identity-federation-with-mobile-applications).
-// This article discusses web identity federation and shows an example of
-// how to use web identity federation to get access to content in Amazon
-// S3.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation AssumeRoleWithWebIdentity for usage and error information.
-//
-// Returned Error Codes:
-//
-// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
-// The request was rejected because the policy document was malformed. The error
-// message describes the specific error.
-//
-// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An Amazon Web Services conversion
-// compresses the session policy document, session policy ARNs, and session
-// tags into a packed binary format that has a separate limit. The error message
-// indicates by percentage how close the policies and tags are to the upper
-// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You could receive this error even though you meet other defined session policy
-// and session tag limits. For more information, see IAM and STS Entity Character
-// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// - ErrCodeIDPRejectedClaimException "IDPRejectedClaim"
-// The identity provider (IdP) reported that authentication failed. This might
-// be because the claim is invalid.
-//
-// If this error is returned for the AssumeRoleWithWebIdentity operation, it
-// can also mean that the claim has expired or has been explicitly revoked.
-//
-// - ErrCodeIDPCommunicationErrorException "IDPCommunicationError"
-// The request could not be fulfilled because the identity provider (IDP) that
-// was asked to verify the incoming identity token could not be reached. This
-// is often a transient error caused by network conditions. Retry the request
-// a limited number of times so that you don't exceed the request rate. If the
-// error persists, the identity provider might be down or not responding.
-//
-// - ErrCodeInvalidIdentityTokenException "InvalidIdentityToken"
-// The web identity token that was passed could not be validated by Amazon Web
-// Services. Get a new identity token from the identity provider and then retry
-// the request.
-//
-// - ErrCodeExpiredTokenException "ExpiredTokenException"
-// The web identity token that was passed is expired or is not valid. Get a
-// new identity token from the identity provider and then retry the request.
-//
-// - ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
-func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
- req, out := c.AssumeRoleWithWebIdentityRequest(input)
- return out, req.Send()
-}
-
-// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
-// the ability to pass a context and additional request options.
-//
-// See AssumeRoleWithWebIdentity for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
- req, out := c.AssumeRoleWithWebIdentityRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
-
-// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
-// client's request for the DecodeAuthorizationMessage operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the DecodeAuthorizationMessageRequest method.
-// req, resp := client.DecodeAuthorizationMessageRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
-func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) {
- op := &request.Operation{
- Name: opDecodeAuthorizationMessage,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &DecodeAuthorizationMessageInput{}
- }
-
- output = &DecodeAuthorizationMessageOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// DecodeAuthorizationMessage API operation for AWS Security Token Service.
-//
-// Decodes additional information about the authorization status of a request
-// from an encoded message returned in response to an Amazon Web Services request.
-//
-// For example, if a user is not authorized to perform an operation that he
-// or she has requested, the request returns a Client.UnauthorizedOperation
-// response (an HTTP 403 response). Some Amazon Web Services operations additionally
-// return an encoded message that can provide details about this authorization
-// failure.
-//
-// Only certain Amazon Web Services operations return an encoded authorization
-// message. The documentation for an individual operation indicates whether
-// that operation returns an encoded message in addition to returning an HTTP
-// code.
-//
-// The message is encoded because the details of the authorization status can
-// contain privileged information that the user who requested the operation
-// should not see. To decode an authorization status message, a user must be
-// granted permissions through an IAM policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html)
-// to request the DecodeAuthorizationMessage (sts:DecodeAuthorizationMessage)
-// action.
-//
-// The decoded message includes the following type of information:
-//
-// - Whether the request was denied due to an explicit deny or due to the
-// absence of an explicit allow. For more information, see Determining Whether
-// a Request is Allowed or Denied (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow)
-// in the IAM User Guide.
-//
-// - The principal who made the request.
-//
-// - The requested action.
-//
-// - The requested resource.
-//
-// - The values of condition keys in the context of the user's request.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation DecodeAuthorizationMessage for usage and error information.
-//
-// Returned Error Codes:
-// - ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException"
-// The error returned if the message passed to DecodeAuthorizationMessage was
-// invalid. This can happen if the token contains invalid characters, such as
-// linebreaks.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
-func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
- req, out := c.DecodeAuthorizationMessageRequest(input)
- return out, req.Send()
-}
-
-// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
-// the ability to pass a context and additional request options.
-//
-// See DecodeAuthorizationMessage for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
- req, out := c.DecodeAuthorizationMessageRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetAccessKeyInfo = "GetAccessKeyInfo"
-
-// GetAccessKeyInfoRequest generates a "aws/request.Request" representing the
-// client's request for the GetAccessKeyInfo operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetAccessKeyInfo for more information on using the GetAccessKeyInfo
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetAccessKeyInfoRequest method.
-// req, resp := client.GetAccessKeyInfoRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
-func (c *STS) GetAccessKeyInfoRequest(input *GetAccessKeyInfoInput) (req *request.Request, output *GetAccessKeyInfoOutput) {
- op := &request.Operation{
- Name: opGetAccessKeyInfo,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetAccessKeyInfoInput{}
- }
-
- output = &GetAccessKeyInfoOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetAccessKeyInfo API operation for AWS Security Token Service.
-//
-// Returns the account identifier for the specified access key ID.
-//
-// Access keys consist of two parts: an access key ID (for example, AKIAIOSFODNN7EXAMPLE)
-// and a secret access key (for example, wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY).
-// For more information about access keys, see Managing Access Keys for IAM
-// Users (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_access-keys.html)
-// in the IAM User Guide.
-//
-// When you pass an access key ID to this operation, it returns the ID of the
-// Amazon Web Services account to which the keys belong. Access key IDs beginning
-// with AKIA are long-term credentials for an IAM user or the Amazon Web Services
-// account root user. Access key IDs beginning with ASIA are temporary credentials
-// that are created using STS operations. If the account in the response belongs
-// to you, you can sign in as the root user and review your root user access
-// keys. Then, you can pull a credentials report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html)
-// to learn which IAM user owns the keys. To learn who requested the temporary
-// credentials for an ASIA access key, view the STS events in your CloudTrail
-// logs (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html)
-// in the IAM User Guide.
-//
-// This operation does not indicate the state of the access key. The key might
-// be active, inactive, or deleted. Active keys might not have permissions to
-// perform an operation. Providing a deleted access key might return an error
-// that the key doesn't exist.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation GetAccessKeyInfo for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetAccessKeyInfo
-func (c *STS) GetAccessKeyInfo(input *GetAccessKeyInfoInput) (*GetAccessKeyInfoOutput, error) {
- req, out := c.GetAccessKeyInfoRequest(input)
- return out, req.Send()
-}
-
-// GetAccessKeyInfoWithContext is the same as GetAccessKeyInfo with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetAccessKeyInfo for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) GetAccessKeyInfoWithContext(ctx aws.Context, input *GetAccessKeyInfoInput, opts ...request.Option) (*GetAccessKeyInfoOutput, error) {
- req, out := c.GetAccessKeyInfoRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetCallerIdentity = "GetCallerIdentity"
-
-// GetCallerIdentityRequest generates a "aws/request.Request" representing the
-// client's request for the GetCallerIdentity operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetCallerIdentity for more information on using the GetCallerIdentity
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetCallerIdentityRequest method.
-// req, resp := client.GetCallerIdentityRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
-func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) {
- op := &request.Operation{
- Name: opGetCallerIdentity,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetCallerIdentityInput{}
- }
-
- output = &GetCallerIdentityOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetCallerIdentity API operation for AWS Security Token Service.
-//
-// Returns details about the IAM user or role whose credentials are used to
-// call the operation.
-//
-// No permissions are required to perform this operation. If an administrator
-// attaches a policy to your identity that explicitly denies access to the sts:GetCallerIdentity
-// action, you can still perform this operation. Permissions are not required
-// because the same information is returned when access is denied. To view an
-// example response, see I Am Not Authorized to Perform: iam:DeleteVirtualMFADevice
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_access-denied-delete-mfa)
-// in the IAM User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation GetCallerIdentity for usage and error information.
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
-func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
- req, out := c.GetCallerIdentityRequest(input)
- return out, req.Send()
-}
-
-// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetCallerIdentity for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
- req, out := c.GetCallerIdentityRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetFederationToken = "GetFederationToken"
-
-// GetFederationTokenRequest generates a "aws/request.Request" representing the
-// client's request for the GetFederationToken operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetFederationToken for more information on using the GetFederationToken
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetFederationTokenRequest method.
-// req, resp := client.GetFederationTokenRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
-func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
- op := &request.Operation{
- Name: opGetFederationToken,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetFederationTokenInput{}
- }
-
- output = &GetFederationTokenOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetFederationToken API operation for AWS Security Token Service.
-//
-// Returns a set of temporary security credentials (consisting of an access
-// key ID, a secret access key, and a security token) for a user. A typical
-// use is in a proxy application that gets temporary security credentials on
-// behalf of distributed applications inside a corporate network.
-//
-// You must call the GetFederationToken operation using the long-term security
-// credentials of an IAM user. As a result, this call is appropriate in contexts
-// where those credentials can be safeguarded, usually in a server-based application.
-// For a comparison of GetFederationToken with the other API operations that
-// produce temporary credentials, see Requesting Temporary Security Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// Although it is possible to call GetFederationToken using the security credentials
-// of an Amazon Web Services account root user rather than an IAM user that
-// you create for the purpose of a proxy application, we do not recommend it.
-// For more information, see Safeguard your root user credentials and don't
-// use them for everyday tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
-// in the IAM User Guide.
-//
-// You can create a mobile-based or browser-based app that can authenticate
-// users using a web identity provider like Login with Amazon, Facebook, Google,
-// or an OpenID Connect-compatible identity provider. In this case, we recommend
-// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
-// For more information, see Federation Through a Web-based Identity Provider
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
-// in the IAM User Guide.
-//
-// # Session duration
-//
-// The temporary credentials are valid for the specified duration, from 900
-// seconds (15 minutes) up to a maximum of 129,600 seconds (36 hours). The default
-// session duration is 43,200 seconds (12 hours). Temporary credentials obtained
-// by using the root user credentials have a maximum duration of 3,600 seconds
-// (1 hour).
-//
-// # Permissions
-//
-// You can use the temporary credentials created by GetFederationToken in any
-// Amazon Web Services service with the following exceptions:
-//
-// - You cannot call any IAM operations using the CLI or the Amazon Web Services
-// API. This limitation does not apply to console sessions.
-//
-// - You cannot call any STS operations except GetCallerIdentity.
-//
-// You can use temporary credentials for single sign-on (SSO) to the console.
-//
-// You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// to this operation. You can pass a single JSON policy document to use as an
-// inline session policy. You can also specify up to 10 managed policy Amazon
-// Resource Names (ARNs) to use as managed session policies. The plaintext that
-// you use for both inline and managed session policies can't exceed 2,048 characters.
-//
-// Though the session policy parameters are optional, if you do not pass a policy,
-// then the resulting federated user session has no permissions. When you pass
-// session policies, the session permissions are the intersection of the IAM
-// user policies and the session policies that you pass. This gives you a way
-// to further restrict the permissions for a federated user. You cannot use
-// session policies to grant more permissions than those that are defined in
-// the permissions policy of the IAM user. For more information, see Session
-// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
-// in the IAM User Guide. For information about using GetFederationToken to
-// create temporary security credentials, see GetFederationToken—Federation
-// Through a Custom Identity Broker (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken).
-//
-// You can use the credentials to access a resource that has a resource-based
-// policy. If that policy specifically references the federated user session
-// in the Principal element of the policy, the session has the permissions allowed
-// by the policy. These permissions are granted in addition to the permissions
-// granted by the session policies.
-//
-// # Tags
-//
-// (Optional) You can pass tag key-value pairs to your session. These are called
-// session tags. For more information about session tags, see Passing Session
-// Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You can create a mobile-based or browser-based app that can authenticate
-// users using a web identity provider like Login with Amazon, Facebook, Google,
-// or an OpenID Connect-compatible identity provider. In this case, we recommend
-// that you use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
-// For more information, see Federation Through a Web-based Identity Provider
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity)
-// in the IAM User Guide.
-//
-// An administrator must grant you the permissions necessary to pass session
-// tags. The administrator can also create granular permissions to allow you
-// to pass only specific session tags. For more information, see Tutorial: Using
-// Tags for Attribute-Based Access Control (https://docs.aws.amazon.com/IAM/latest/UserGuide/tutorial_attribute-based-access-control.html)
-// in the IAM User Guide.
-//
-// Tag key–value pairs are not case sensitive, but case is preserved. This
-// means that you cannot have separate Department and department tag keys. Assume
-// that the user that you are federating has the Department=Marketing tag and
-// you pass the department=engineering session tag. Department and department
-// are not saved as separate tags, and the session tag passed in the request
-// takes precedence over the user tag.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation GetFederationToken for usage and error information.
-//
-// Returned Error Codes:
-//
-// - ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument"
-// The request was rejected because the policy document was malformed. The error
-// message describes the specific error.
-//
-// - ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge"
-// The request was rejected because the total packed size of the session policies
-// and session tags combined was too large. An Amazon Web Services conversion
-// compresses the session policy document, session policy ARNs, and session
-// tags into a packed binary format that has a separate limit. The error message
-// indicates by percentage how close the policies and tags are to the upper
-// size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-//
-// You could receive this error even though you meet other defined session policy
-// and session tag limits. For more information, see IAM and STS Entity Character
-// Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
-// in the IAM User Guide.
-//
-// - ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
-func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
- req, out := c.GetFederationTokenRequest(input)
- return out, req.Send()
-}
-
-// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetFederationToken for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
- req, out := c.GetFederationTokenRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-const opGetSessionToken = "GetSessionToken"
-
-// GetSessionTokenRequest generates a "aws/request.Request" representing the
-// client's request for the GetSessionToken operation. The "output" return
-// value will be populated with the request's response once the request completes
-// successfully.
-//
-// Use "Send" method on the returned Request to send the API call to the service.
-// the "output" return value is not valid until after Send returns without error.
-//
-// See GetSessionToken for more information on using the GetSessionToken
-// API call, and error handling.
-//
-// This method is useful when you want to inject custom logic or configuration
-// into the SDK's request lifecycle. Such as custom headers, or retry logic.
-//
-// // Example sending a request using the GetSessionTokenRequest method.
-// req, resp := client.GetSessionTokenRequest(params)
-//
-// err := req.Send()
-// if err == nil { // resp is now filled
-// fmt.Println(resp)
-// }
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
-func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) {
- op := &request.Operation{
- Name: opGetSessionToken,
- HTTPMethod: "POST",
- HTTPPath: "/",
- }
-
- if input == nil {
- input = &GetSessionTokenInput{}
- }
-
- output = &GetSessionTokenOutput{}
- req = c.newRequest(op, input, output)
- return
-}
-
-// GetSessionToken API operation for AWS Security Token Service.
-//
-// Returns a set of temporary credentials for an Amazon Web Services account
-// or IAM user. The credentials consist of an access key ID, a secret access
-// key, and a security token. Typically, you use GetSessionToken if you want
-// to use MFA to protect programmatic calls to specific Amazon Web Services
-// API operations like Amazon EC2 StopInstances.
-//
-// MFA-enabled IAM users must call GetSessionToken and submit an MFA code that
-// is associated with their MFA device. Using the temporary security credentials
-// that the call returns, IAM users can then make programmatic calls to API
-// operations that require MFA authentication. An incorrect MFA code causes
-// the API to return an access denied error. For a comparison of GetSessionToken
-// with the other API operations that produce temporary credentials, see Requesting
-// Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
-// and Comparing the Amazon Web Services STS API operations (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
-// in the IAM User Guide.
-//
-// No permissions are required for users to perform this operation. The purpose
-// of the sts:GetSessionToken operation is to authenticate the user using MFA.
-// You cannot use policies to control authentication operations. For more information,
-// see Permissions for GetSessionToken (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getsessiontoken.html)
-// in the IAM User Guide.
-//
-// # Session Duration
-//
-// The GetSessionToken operation must be called by using the long-term Amazon
-// Web Services security credentials of an IAM user. Credentials that are created
-// by IAM users are valid for the duration that you specify. This duration can
-// range from 900 seconds (15 minutes) up to a maximum of 129,600 seconds (36
-// hours), with a default of 43,200 seconds (12 hours). Credentials based on
-// account credentials can range from 900 seconds (15 minutes) up to 3,600 seconds
-// (1 hour), with a default of 1 hour.
-//
-// # Permissions
-//
-// The temporary security credentials created by GetSessionToken can be used
-// to make API calls to any Amazon Web Services service with the following exceptions:
-//
-// - You cannot call any IAM API operations unless MFA authentication information
-// is included in the request.
-//
-// - You cannot call any STS API except AssumeRole or GetCallerIdentity.
-//
-// The credentials that GetSessionToken returns are based on permissions associated
-// with the IAM user whose credentials were used to call the operation. The
-// temporary credentials have the same permissions as the IAM user.
-//
-// Although it is possible to call GetSessionToken using the security credentials
-// of an Amazon Web Services account root user rather than an IAM user, we do
-// not recommend it. If GetSessionToken is called using root user credentials,
-// the temporary credentials have root user permissions. For more information,
-// see Safeguard your root user credentials and don't use them for everyday
-// tasks (https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#lock-away-credentials)
-// in the IAM User Guide
-//
-// For more information about using GetSessionToken to create temporary credentials,
-// see Temporary Credentials for Users in Untrusted Environments (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
-// in the IAM User Guide.
-//
-// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
-// with awserr.Error's Code and Message methods to get detailed information about
-// the error.
-//
-// See the AWS API reference guide for AWS Security Token Service's
-// API operation GetSessionToken for usage and error information.
-//
-// Returned Error Codes:
-// - ErrCodeRegionDisabledException "RegionDisabledException"
-// STS is not activated in the requested region for the account that is being
-// asked to generate credentials. The account administrator must use the IAM
-// console to activate STS in that region. For more information, see Activating
-// and Deactivating Amazon Web Services STS in an Amazon Web Services Region
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
-func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
- req, out := c.GetSessionTokenRequest(input)
- return out, req.Send()
-}
-
-// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
-// the ability to pass a context and additional request options.
-//
-// See GetSessionToken for details on how to use this API operation.
-//
-// The context must be non-nil and will be used for request cancellation. If
-// the context is nil a panic will occur. In the future the SDK may create
-// sub-contexts for http.Requests. See https://golang.org/pkg/context/
-// for more information on using Contexts.
-func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
- req, out := c.GetSessionTokenRequest(input)
- req.SetContext(ctx)
- req.ApplyOptions(opts...)
- return out, req.Send()
-}
-
-type AssumeRoleInput struct {
- _ struct{} `type:"structure"`
-
- // The duration, in seconds, of the role session. The value specified can range
- // from 900 seconds (15 minutes) up to the maximum session duration set for
- // the role. The maximum session duration setting can have a value from 1 hour
- // to 12 hours. If you specify a value higher than this setting or the administrator
- // setting (whichever is lower), the operation fails. For example, if you specify
- // a session duration of 12 hours, but your administrator set the maximum session
- // duration to 6 hours, your operation fails.
- //
- // Role chaining limits your Amazon Web Services CLI or Amazon Web Services
- // API role session to a maximum of one hour. When you use the AssumeRole API
- // operation to assume a role, you can specify the duration of your role session
- // with the DurationSeconds parameter. You can specify a parameter value of
- // up to 43200 seconds (12 hours), depending on the maximum session duration
- // setting for your role. However, if you assume a role using role chaining
- // and provide a DurationSeconds parameter value greater than one hour, the
- // operation fails. To learn how to view the maximum value for your role, see
- // View the Maximum Session Duration Setting for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide.
- //
- // By default, the value is set to 3600 seconds.
- //
- // The DurationSeconds parameter is separate from the duration of a console
- // session that you might request using the returned credentials. The request
- // to the federation endpoint for a console sign-in token takes a SessionDuration
- // parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
- DurationSeconds *int64 `min:"900" type:"integer"`
-
- // A unique identifier that might be required when you assume a role in another
- // account. If the administrator of the account to which the role belongs provided
- // you with an external ID, then provide that value in the ExternalId parameter.
- // This value can be any string, such as a passphrase or account number. A cross-account
- // role is usually set up to trust everyone in an account. Therefore, the administrator
- // of the trusting account might send an external ID to the administrator of
- // the trusted account. That way, only someone with the ID can assume the role,
- // rather than everyone in the account. For more information about the external
- // ID, see How to Use an External ID When Granting Access to Your Amazon Web
- // Services Resources to a Third Party (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
- // in the IAM User Guide.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@:/-
- ExternalId *string `min:"2" type:"string"`
-
- // An IAM policy in JSON format that you want to use as an inline session policy.
- //
- // This parameter is optional. Passing policies to this operation returns new
- // temporary credentials. The resulting session's permissions are the intersection
- // of the role's identity-based policy and the session policies. You can use
- // the role's temporary credentials in subsequent Amazon Web Services API calls
- // to access resources in the account that owns the role. You cannot use session
- // policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- //
- // The plaintext that you use for both inline and managed session policies can't
- // exceed 2,048 characters. The JSON policy characters can be any ASCII character
- // from the space character to the end of the valid character list (\u0020 through
- // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
- // return (\u000D) characters.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- Policy *string `min:"1" type:"string"`
-
- // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
- // to use as managed session policies. The policies must exist in the same account
- // as the role.
- //
- // This parameter is optional. You can provide up to 10 managed policy ARNs.
- // However, the plaintext that you use for both inline and managed session policies
- // can't exceed 2,048 characters. For more information about ARNs, see Amazon
- // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- //
- // Passing policies to this operation returns new temporary credentials. The
- // resulting session's permissions are the intersection of the role's identity-based
- // policy and the session policies. You can use the role's temporary credentials
- // in subsequent Amazon Web Services API calls to access resources in the account
- // that owns the role. You cannot use session policies to grant more permissions
- // than those allowed by the identity-based policy of the role that is being
- // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- PolicyArns []*PolicyDescriptorType `type:"list"`
-
- // A list of previously acquired trusted context assertions in the format of
- // a JSON array. The trusted context assertion is signed and encrypted by Amazon
- // Web Services STS.
- //
- // The following is an example of a ProvidedContext value that includes a single
- // trusted context assertion and the ARN of the context provider from which
- // the trusted context assertion was generated.
- //
- // [{"ProviderArn":"arn:aws:iam::aws:contextProvider/IdentityCenter","ContextAssertion":"trusted-context-assertion"}]
- ProvidedContexts []*ProvidedContext `type:"list"`
-
- // The Amazon Resource Name (ARN) of the role to assume.
- //
- // RoleArn is a required field
- RoleArn *string `min:"20" type:"string" required:"true"`
-
- // An identifier for the assumed role session.
- //
- // Use the role session name to uniquely identify a session when the same role
- // is assumed by different principals or for different reasons. In cross-account
- // scenarios, the role session name is visible to, and can be logged by the
- // account that owns the role. The role session name is also used in the ARN
- // of the assumed role principal. This means that subsequent cross-account API
- // requests that use the temporary security credentials will expose the role
- // session name to the external account in their CloudTrail logs.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-
- //
- // RoleSessionName is a required field
- RoleSessionName *string `min:"2" type:"string" required:"true"`
-
- // The identification number of the MFA device that is associated with the user
- // who is making the AssumeRole call. Specify this value if the trust policy
- // of the role being assumed includes a condition that requires MFA authentication.
- // The value is either the serial number for a hardware device (such as GAHT12345678)
- // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-
- SerialNumber *string `min:"9" type:"string"`
-
- // The source identity specified by the principal that is calling the AssumeRole
- // operation.
- //
- // You can require users to specify a source identity when they assume a role.
- // You do this by using the sts:SourceIdentity condition key in a role trust
- // policy. You can use source identity information in CloudTrail logs to determine
- // who took actions with a role. You can use the aws:SourceIdentity condition
- // key to further control access to Amazon Web Services resources based on the
- // value of source identity. For more information about using source identity,
- // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-. You cannot
- // use a value that begins with the text aws:. This prefix is reserved for Amazon
- // Web Services internal use.
- SourceIdentity *string `min:"2" type:"string"`
-
- // A list of session tags that you want to pass. Each session tag consists of
- // a key name and an associated value. For more information about session tags,
- // see Tagging Amazon Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
- // in the IAM User Guide.
- //
- // This parameter is optional. You can pass up to 50 session tags. The plaintext
- // session tag keys can’t exceed 128 characters, and the values can’t exceed
- // 256 characters. For these and additional limits, see IAM and STS Character
- // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- //
- // You can pass a session tag with the same key as a tag that is already attached
- // to the role. When you do, session tags override a role tag with the same
- // key.
- //
- // Tag key–value pairs are not case sensitive, but case is preserved. This
- // means that you cannot have separate Department and department tag keys. Assume
- // that the role has the Department=Marketing tag and you pass the department=engineering
- // session tag. Department and department are not saved as separate tags, and
- // the session tag passed in the request takes precedence over the role tag.
- //
- // Additionally, if you used temporary credentials to perform this operation,
- // the new session inherits any transitive session tags from the calling session.
- // If you pass a session tag with the same key as an inherited tag, the operation
- // fails. To view the inherited tags for a session, see the CloudTrail logs.
- // For more information, see Viewing Session Tags in CloudTrail (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_ctlogs)
- // in the IAM User Guide.
- Tags []*Tag `type:"list"`
-
- // The value provided by the MFA device, if the trust policy of the role being
- // assumed requires MFA. (In other words, if the policy includes a condition
- // that tests for MFA). If the role being assumed requires MFA and if the TokenCode
- // value is missing or expired, the AssumeRole call returns an "access denied"
- // error.
- //
- // The format for this parameter, as described by its regex pattern, is a sequence
- // of six numeric digits.
- TokenCode *string `min:"6" type:"string"`
-
- // A list of keys for session tags that you want to set as transitive. If you
- // set a tag key as transitive, the corresponding key and value passes to subsequent
- // sessions in a role chain. For more information, see Chaining Roles with Session
- // Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html#id_session-tags_role-chaining)
- // in the IAM User Guide.
- //
- // This parameter is optional. When you set session tags as transitive, the
- // session policy and session tags packed binary limit is not affected.
- //
- // If you choose not to specify a transitive tag key, then no tags are passed
- // from this session to any subsequent sessions.
- TransitiveTagKeys []*string `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AssumeRoleInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"}
- if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
- invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
- }
- if s.ExternalId != nil && len(*s.ExternalId) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2))
- }
- if s.Policy != nil && len(*s.Policy) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
- }
- if s.RoleArn == nil {
- invalidParams.Add(request.NewErrParamRequired("RoleArn"))
- }
- if s.RoleArn != nil && len(*s.RoleArn) < 20 {
- invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
- }
- if s.RoleSessionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
- }
- if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
- }
- if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
- invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
- }
- if s.SourceIdentity != nil && len(*s.SourceIdentity) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("SourceIdentity", 2))
- }
- if s.TokenCode != nil && len(*s.TokenCode) < 6 {
- invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
- }
- if s.PolicyArns != nil {
- for i, v := range s.PolicyArns {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.ProvidedContexts != nil {
- for i, v := range s.ProvidedContexts {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ProvidedContexts", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.Tags != nil {
- for i, v := range s.Tags {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDurationSeconds sets the DurationSeconds field's value.
-func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput {
- s.DurationSeconds = &v
- return s
-}
-
-// SetExternalId sets the ExternalId field's value.
-func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput {
- s.ExternalId = &v
- return s
-}
-
-// SetPolicy sets the Policy field's value.
-func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput {
- s.Policy = &v
- return s
-}
-
-// SetPolicyArns sets the PolicyArns field's value.
-func (s *AssumeRoleInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleInput {
- s.PolicyArns = v
- return s
-}
-
-// SetProvidedContexts sets the ProvidedContexts field's value.
-func (s *AssumeRoleInput) SetProvidedContexts(v []*ProvidedContext) *AssumeRoleInput {
- s.ProvidedContexts = v
- return s
-}
-
-// SetRoleArn sets the RoleArn field's value.
-func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput {
- s.RoleArn = &v
- return s
-}
-
-// SetRoleSessionName sets the RoleSessionName field's value.
-func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput {
- s.RoleSessionName = &v
- return s
-}
-
-// SetSerialNumber sets the SerialNumber field's value.
-func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput {
- s.SerialNumber = &v
- return s
-}
-
-// SetSourceIdentity sets the SourceIdentity field's value.
-func (s *AssumeRoleInput) SetSourceIdentity(v string) *AssumeRoleInput {
- s.SourceIdentity = &v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *AssumeRoleInput) SetTags(v []*Tag) *AssumeRoleInput {
- s.Tags = v
- return s
-}
-
-// SetTokenCode sets the TokenCode field's value.
-func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput {
- s.TokenCode = &v
- return s
-}
-
-// SetTransitiveTagKeys sets the TransitiveTagKeys field's value.
-func (s *AssumeRoleInput) SetTransitiveTagKeys(v []*string) *AssumeRoleInput {
- s.TransitiveTagKeys = v
- return s
-}
-
-// Contains the response to a successful AssumeRole request, including temporary
-// Amazon Web Services credentials that can be used to make Amazon Web Services
-// requests.
-type AssumeRoleOutput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
- // that you can use to refer to the resulting temporary security credentials.
- // For example, you can reference these credentials as a principal in a resource-based
- // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
- // that you specified when you called AssumeRole.
- AssumedRoleUser *AssumedRoleUser `type:"structure"`
-
- // The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token.
- //
- // The size of the security token that STS API operations return is not fixed.
- // We strongly recommend that you make no assumptions about the maximum size.
- Credentials *Credentials `type:"structure"`
-
- // A percentage value that indicates the packed size of the session policies
- // and session tags combined passed in the request. The request fails if the
- // packed size is greater than 100 percent, which means the policies and tags
- // exceeded the allowed space.
- PackedPolicySize *int64 `type:"integer"`
-
- // The source identity specified by the principal that is calling the AssumeRole
- // operation.
- //
- // You can require users to specify a source identity when they assume a role.
- // You do this by using the sts:SourceIdentity condition key in a role trust
- // policy. You can use source identity information in CloudTrail logs to determine
- // who took actions with a role. You can use the aws:SourceIdentity condition
- // key to further control access to Amazon Web Services resources based on the
- // value of source identity. For more information about using source identity,
- // see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-
- SourceIdentity *string `min:"2" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleOutput) GoString() string {
- return s.String()
-}
-
-// SetAssumedRoleUser sets the AssumedRoleUser field's value.
-func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput {
- s.AssumedRoleUser = v
- return s
-}
-
-// SetCredentials sets the Credentials field's value.
-func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput {
- s.Credentials = v
- return s
-}
-
-// SetPackedPolicySize sets the PackedPolicySize field's value.
-func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput {
- s.PackedPolicySize = &v
- return s
-}
-
-// SetSourceIdentity sets the SourceIdentity field's value.
-func (s *AssumeRoleOutput) SetSourceIdentity(v string) *AssumeRoleOutput {
- s.SourceIdentity = &v
- return s
-}
-
-type AssumeRoleWithSAMLInput struct {
- _ struct{} `type:"structure"`
-
- // The duration, in seconds, of the role session. Your role session lasts for
- // the duration that you specify for the DurationSeconds parameter, or until
- // the time specified in the SAML authentication response's SessionNotOnOrAfter
- // value, whichever is shorter. You can provide a DurationSeconds value from
- // 900 seconds (15 minutes) up to the maximum session duration setting for the
- // role. This setting can have a value from 1 hour to 12 hours. If you specify
- // a value higher than this setting, the operation fails. For example, if you
- // specify a session duration of 12 hours, but your administrator set the maximum
- // session duration to 6 hours, your operation fails. To learn how to view the
- // maximum value for your role, see View the Maximum Session Duration Setting
- // for a Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide.
- //
- // By default, the value is set to 3600 seconds.
- //
- // The DurationSeconds parameter is separate from the duration of a console
- // session that you might request using the returned credentials. The request
- // to the federation endpoint for a console sign-in token takes a SessionDuration
- // parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
- DurationSeconds *int64 `min:"900" type:"integer"`
-
- // An IAM policy in JSON format that you want to use as an inline session policy.
- //
- // This parameter is optional. Passing policies to this operation returns new
- // temporary credentials. The resulting session's permissions are the intersection
- // of the role's identity-based policy and the session policies. You can use
- // the role's temporary credentials in subsequent Amazon Web Services API calls
- // to access resources in the account that owns the role. You cannot use session
- // policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- //
- // The plaintext that you use for both inline and managed session policies can't
- // exceed 2,048 characters. The JSON policy characters can be any ASCII character
- // from the space character to the end of the valid character list (\u0020 through
- // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
- // return (\u000D) characters.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- Policy *string `min:"1" type:"string"`
-
- // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
- // to use as managed session policies. The policies must exist in the same account
- // as the role.
- //
- // This parameter is optional. You can provide up to 10 managed policy ARNs.
- // However, the plaintext that you use for both inline and managed session policies
- // can't exceed 2,048 characters. For more information about ARNs, see Amazon
- // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- //
- // Passing policies to this operation returns new temporary credentials. The
- // resulting session's permissions are the intersection of the role's identity-based
- // policy and the session policies. You can use the role's temporary credentials
- // in subsequent Amazon Web Services API calls to access resources in the account
- // that owns the role. You cannot use session policies to grant more permissions
- // than those allowed by the identity-based policy of the role that is being
- // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- PolicyArns []*PolicyDescriptorType `type:"list"`
-
- // The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
- // the IdP.
- //
- // PrincipalArn is a required field
- PrincipalArn *string `min:"20" type:"string" required:"true"`
-
- // The Amazon Resource Name (ARN) of the role that the caller is assuming.
- //
- // RoleArn is a required field
- RoleArn *string `min:"20" type:"string" required:"true"`
-
- // The base64 encoded SAML authentication response provided by the IdP.
- //
- // For more information, see Configuring a Relying Party and Adding Claims (https://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
- // in the IAM User Guide.
- //
- // SAMLAssertion is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by AssumeRoleWithSAMLInput's
- // String and GoString methods.
- //
- // SAMLAssertion is a required field
- SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleWithSAMLInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleWithSAMLInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AssumeRoleWithSAMLInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
- if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
- invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
- }
- if s.Policy != nil && len(*s.Policy) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
- }
- if s.PrincipalArn == nil {
- invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
- }
- if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
- invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
- }
- if s.RoleArn == nil {
- invalidParams.Add(request.NewErrParamRequired("RoleArn"))
- }
- if s.RoleArn != nil && len(*s.RoleArn) < 20 {
- invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
- }
- if s.SAMLAssertion == nil {
- invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
- }
- if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
- invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
- }
- if s.PolicyArns != nil {
- for i, v := range s.PolicyArns {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDurationSeconds sets the DurationSeconds field's value.
-func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
- s.DurationSeconds = &v
- return s
-}
-
-// SetPolicy sets the Policy field's value.
-func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput {
- s.Policy = &v
- return s
-}
-
-// SetPolicyArns sets the PolicyArns field's value.
-func (s *AssumeRoleWithSAMLInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithSAMLInput {
- s.PolicyArns = v
- return s
-}
-
-// SetPrincipalArn sets the PrincipalArn field's value.
-func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput {
- s.PrincipalArn = &v
- return s
-}
-
-// SetRoleArn sets the RoleArn field's value.
-func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput {
- s.RoleArn = &v
- return s
-}
-
-// SetSAMLAssertion sets the SAMLAssertion field's value.
-func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput {
- s.SAMLAssertion = &v
- return s
-}
-
-// Contains the response to a successful AssumeRoleWithSAML request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon
-// Web Services requests.
-type AssumeRoleWithSAMLOutput struct {
- _ struct{} `type:"structure"`
-
- // The identifiers for the temporary security credentials that the operation
- // returns.
- AssumedRoleUser *AssumedRoleUser `type:"structure"`
-
- // The value of the Recipient attribute of the SubjectConfirmationData element
- // of the SAML assertion.
- Audience *string `type:"string"`
-
- // The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token.
- //
- // The size of the security token that STS API operations return is not fixed.
- // We strongly recommend that you make no assumptions about the maximum size.
- Credentials *Credentials `type:"structure"`
-
- // The value of the Issuer element of the SAML assertion.
- Issuer *string `type:"string"`
-
- // A hash value based on the concatenation of the following:
- //
- // * The Issuer response value.
- //
- // * The Amazon Web Services account ID.
- //
- // * The friendly name (the last part of the ARN) of the SAML provider in
- // IAM.
- //
- // The combination of NameQualifier and Subject can be used to uniquely identify
- // a user.
- //
- // The following pseudocode shows how the hash value is calculated:
- //
- // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP"
- // ) )
- NameQualifier *string `type:"string"`
-
- // A percentage value that indicates the packed size of the session policies
- // and session tags combined passed in the request. The request fails if the
- // packed size is greater than 100 percent, which means the policies and tags
- // exceeded the allowed space.
- PackedPolicySize *int64 `type:"integer"`
-
- // The value in the SourceIdentity attribute in the SAML assertion.
- //
- // You can require users to set a source identity value when they assume a role.
- // You do this by using the sts:SourceIdentity condition key in a role trust
- // policy. That way, actions that are taken with the role are associated with
- // that user. After the source identity is set, the value cannot be changed.
- // It is present in the request for all actions that are taken by the role and
- // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
- // sessions. You can configure your SAML identity provider to use an attribute
- // associated with your users, like user name or email, as the source identity
- // when calling AssumeRoleWithSAML. You do this by adding an attribute to the
- // SAML assertion. For more information about using source identity, see Monitor
- // and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-
- SourceIdentity *string `min:"2" type:"string"`
-
- // The value of the NameID element in the Subject element of the SAML assertion.
- Subject *string `type:"string"`
-
- // The format of the name ID, as defined by the Format attribute in the NameID
- // element of the SAML assertion. Typical examples of the format are transient
- // or persistent.
- //
- // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format,
- // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient
- // is returned as transient. If the format includes any other prefix, the format
- // is returned with no modifications.
- SubjectType *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleWithSAMLOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleWithSAMLOutput) GoString() string {
- return s.String()
-}
-
-// SetAssumedRoleUser sets the AssumedRoleUser field's value.
-func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput {
- s.AssumedRoleUser = v
- return s
-}
-
-// SetAudience sets the Audience field's value.
-func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput {
- s.Audience = &v
- return s
-}
-
-// SetCredentials sets the Credentials field's value.
-func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput {
- s.Credentials = v
- return s
-}
-
-// SetIssuer sets the Issuer field's value.
-func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput {
- s.Issuer = &v
- return s
-}
-
-// SetNameQualifier sets the NameQualifier field's value.
-func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput {
- s.NameQualifier = &v
- return s
-}
-
-// SetPackedPolicySize sets the PackedPolicySize field's value.
-func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput {
- s.PackedPolicySize = &v
- return s
-}
-
-// SetSourceIdentity sets the SourceIdentity field's value.
-func (s *AssumeRoleWithSAMLOutput) SetSourceIdentity(v string) *AssumeRoleWithSAMLOutput {
- s.SourceIdentity = &v
- return s
-}
-
-// SetSubject sets the Subject field's value.
-func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput {
- s.Subject = &v
- return s
-}
-
-// SetSubjectType sets the SubjectType field's value.
-func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput {
- s.SubjectType = &v
- return s
-}
-
-type AssumeRoleWithWebIdentityInput struct {
- _ struct{} `type:"structure"`
-
- // The duration, in seconds, of the role session. The value can range from 900
- // seconds (15 minutes) up to the maximum session duration setting for the role.
- // This setting can have a value from 1 hour to 12 hours. If you specify a value
- // higher than this setting, the operation fails. For example, if you specify
- // a session duration of 12 hours, but your administrator set the maximum session
- // duration to 6 hours, your operation fails. To learn how to view the maximum
- // value for your role, see View the Maximum Session Duration Setting for a
- // Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html#id_roles_use_view-role-max-session)
- // in the IAM User Guide.
- //
- // By default, the value is set to 3600 seconds.
- //
- // The DurationSeconds parameter is separate from the duration of a console
- // session that you might request using the returned credentials. The request
- // to the federation endpoint for a console sign-in token takes a SessionDuration
- // parameter that specifies the maximum length of the console session. For more
- // information, see Creating a URL that Enables Federated Users to Access the
- // Amazon Web Services Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
- // in the IAM User Guide.
- DurationSeconds *int64 `min:"900" type:"integer"`
-
- // An IAM policy in JSON format that you want to use as an inline session policy.
- //
- // This parameter is optional. Passing policies to this operation returns new
- // temporary credentials. The resulting session's permissions are the intersection
- // of the role's identity-based policy and the session policies. You can use
- // the role's temporary credentials in subsequent Amazon Web Services API calls
- // to access resources in the account that owns the role. You cannot use session
- // policies to grant more permissions than those allowed by the identity-based
- // policy of the role that is being assumed. For more information, see Session
- // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- //
- // The plaintext that you use for both inline and managed session policies can't
- // exceed 2,048 characters. The JSON policy characters can be any ASCII character
- // from the space character to the end of the valid character list (\u0020 through
- // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
- // return (\u000D) characters.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- Policy *string `min:"1" type:"string"`
-
- // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
- // to use as managed session policies. The policies must exist in the same account
- // as the role.
- //
- // This parameter is optional. You can provide up to 10 managed policy ARNs.
- // However, the plaintext that you use for both inline and managed session policies
- // can't exceed 2,048 characters. For more information about ARNs, see Amazon
- // Resource Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- //
- // Passing policies to this operation returns new temporary credentials. The
- // resulting session's permissions are the intersection of the role's identity-based
- // policy and the session policies. You can use the role's temporary credentials
- // in subsequent Amazon Web Services API calls to access resources in the account
- // that owns the role. You cannot use session policies to grant more permissions
- // than those allowed by the identity-based policy of the role that is being
- // assumed. For more information, see Session Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- PolicyArns []*PolicyDescriptorType `type:"list"`
-
- // The fully qualified host component of the domain name of the OAuth 2.0 identity
- // provider. Do not specify this value for an OpenID Connect identity provider.
- //
- // Currently www.amazon.com and graph.facebook.com are the only supported identity
- // providers for OAuth 2.0 access tokens. Do not include URL schemes and port
- // numbers.
- //
- // Do not specify this value for OpenID Connect ID tokens.
- ProviderId *string `min:"4" type:"string"`
-
- // The Amazon Resource Name (ARN) of the role that the caller is assuming.
- //
- // RoleArn is a required field
- RoleArn *string `min:"20" type:"string" required:"true"`
-
- // An identifier for the assumed role session. Typically, you pass the name
- // or identifier that is associated with the user who is using your application.
- // That way, the temporary security credentials that your application will use
- // are associated with that user. This session name is included as part of the
- // ARN and assumed role ID in the AssumedRoleUser response element.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-
- //
- // RoleSessionName is a required field
- RoleSessionName *string `min:"2" type:"string" required:"true"`
-
- // The OAuth 2.0 access token or OpenID Connect ID token that is provided by
- // the identity provider. Your application must get this token by authenticating
- // the user who is using your application with a web identity provider before
- // the application makes an AssumeRoleWithWebIdentity call. Only tokens with
- // RSA algorithms (RS256) are supported.
- //
- // WebIdentityToken is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by AssumeRoleWithWebIdentityInput's
- // String and GoString methods.
- //
- // WebIdentityToken is a required field
- WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleWithWebIdentityInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleWithWebIdentityInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *AssumeRoleWithWebIdentityInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"}
- if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
- invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
- }
- if s.Policy != nil && len(*s.Policy) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
- }
- if s.ProviderId != nil && len(*s.ProviderId) < 4 {
- invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4))
- }
- if s.RoleArn == nil {
- invalidParams.Add(request.NewErrParamRequired("RoleArn"))
- }
- if s.RoleArn != nil && len(*s.RoleArn) < 20 {
- invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
- }
- if s.RoleSessionName == nil {
- invalidParams.Add(request.NewErrParamRequired("RoleSessionName"))
- }
- if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2))
- }
- if s.WebIdentityToken == nil {
- invalidParams.Add(request.NewErrParamRequired("WebIdentityToken"))
- }
- if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 {
- invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4))
- }
- if s.PolicyArns != nil {
- for i, v := range s.PolicyArns {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDurationSeconds sets the DurationSeconds field's value.
-func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput {
- s.DurationSeconds = &v
- return s
-}
-
-// SetPolicy sets the Policy field's value.
-func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput {
- s.Policy = &v
- return s
-}
-
-// SetPolicyArns sets the PolicyArns field's value.
-func (s *AssumeRoleWithWebIdentityInput) SetPolicyArns(v []*PolicyDescriptorType) *AssumeRoleWithWebIdentityInput {
- s.PolicyArns = v
- return s
-}
-
-// SetProviderId sets the ProviderId field's value.
-func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput {
- s.ProviderId = &v
- return s
-}
-
-// SetRoleArn sets the RoleArn field's value.
-func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput {
- s.RoleArn = &v
- return s
-}
-
-// SetRoleSessionName sets the RoleSessionName field's value.
-func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput {
- s.RoleSessionName = &v
- return s
-}
-
-// SetWebIdentityToken sets the WebIdentityToken field's value.
-func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput {
- s.WebIdentityToken = &v
- return s
-}
-
-// Contains the response to a successful AssumeRoleWithWebIdentity request,
-// including temporary Amazon Web Services credentials that can be used to make
-// Amazon Web Services requests.
-type AssumeRoleWithWebIdentityOutput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers
- // that you can use to refer to the resulting temporary security credentials.
- // For example, you can reference these credentials as a principal in a resource-based
- // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName
- // that you specified when you called AssumeRole.
- AssumedRoleUser *AssumedRoleUser `type:"structure"`
-
- // The intended audience (also known as client ID) of the web identity token.
- // This is traditionally the client identifier issued to the application that
- // requested the web identity token.
- Audience *string `type:"string"`
-
- // The temporary security credentials, which include an access key ID, a secret
- // access key, and a security token.
- //
- // The size of the security token that STS API operations return is not fixed.
- // We strongly recommend that you make no assumptions about the maximum size.
- Credentials *Credentials `type:"structure"`
-
- // A percentage value that indicates the packed size of the session policies
- // and session tags combined passed in the request. The request fails if the
- // packed size is greater than 100 percent, which means the policies and tags
- // exceeded the allowed space.
- PackedPolicySize *int64 `type:"integer"`
-
- // The issuing authority of the web identity token presented. For OpenID Connect
- // ID tokens, this contains the value of the iss field. For OAuth 2.0 access
- // tokens, this contains the value of the ProviderId parameter that was passed
- // in the AssumeRoleWithWebIdentity request.
- Provider *string `type:"string"`
-
- // The value of the source identity that is returned in the JSON web token (JWT)
- // from the identity provider.
- //
- // You can require users to set a source identity value when they assume a role.
- // You do this by using the sts:SourceIdentity condition key in a role trust
- // policy. That way, actions that are taken with the role are associated with
- // that user. After the source identity is set, the value cannot be changed.
- // It is present in the request for all actions that are taken by the role and
- // persists across chained role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts#iam-term-role-chaining)
- // sessions. You can configure your identity provider to use an attribute associated
- // with your users, like user name or email, as the source identity when calling
- // AssumeRoleWithWebIdentity. You do this by adding a claim to the JSON web
- // token. To learn more about OIDC tokens and claims, see Using Tokens with
- // User Pools (https://docs.aws.amazon.com/cognito/latest/developerguide/amazon-cognito-user-pools-using-tokens-with-identity-providers.html)
- // in the Amazon Cognito Developer Guide. For more information about using source
- // identity, see Monitor and control actions taken with assumed roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_monitor.html)
- // in the IAM User Guide.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-
- SourceIdentity *string `min:"2" type:"string"`
-
- // The unique user identifier that is returned by the identity provider. This
- // identifier is associated with the WebIdentityToken that was submitted with
- // the AssumeRoleWithWebIdentity call. The identifier is typically unique to
- // the user and the application that acquired the WebIdentityToken (pairwise
- // identifier). For OpenID Connect ID tokens, this field contains the value
- // returned by the identity provider as the token's sub (Subject) claim.
- SubjectFromWebIdentityToken *string `min:"6" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleWithWebIdentityOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumeRoleWithWebIdentityOutput) GoString() string {
- return s.String()
-}
-
-// SetAssumedRoleUser sets the AssumedRoleUser field's value.
-func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput {
- s.AssumedRoleUser = v
- return s
-}
-
-// SetAudience sets the Audience field's value.
-func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput {
- s.Audience = &v
- return s
-}
-
-// SetCredentials sets the Credentials field's value.
-func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput {
- s.Credentials = v
- return s
-}
-
-// SetPackedPolicySize sets the PackedPolicySize field's value.
-func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput {
- s.PackedPolicySize = &v
- return s
-}
-
-// SetProvider sets the Provider field's value.
-func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput {
- s.Provider = &v
- return s
-}
-
-// SetSourceIdentity sets the SourceIdentity field's value.
-func (s *AssumeRoleWithWebIdentityOutput) SetSourceIdentity(v string) *AssumeRoleWithWebIdentityOutput {
- s.SourceIdentity = &v
- return s
-}
-
-// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value.
-func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput {
- s.SubjectFromWebIdentityToken = &v
- return s
-}
-
-// The identifiers for the temporary security credentials that the operation
-// returns.
-type AssumedRoleUser struct {
- _ struct{} `type:"structure"`
-
- // The ARN of the temporary security credentials that are returned from the
- // AssumeRole action. For more information about ARNs and how to use them in
- // policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
- // in the IAM User Guide.
- //
- // Arn is a required field
- Arn *string `min:"20" type:"string" required:"true"`
-
- // A unique identifier that contains the role ID and the role session name of
- // the role that is being assumed. The role ID is generated by Amazon Web Services
- // when the role is created.
- //
- // AssumedRoleId is a required field
- AssumedRoleId *string `min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumedRoleUser) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s AssumedRoleUser) GoString() string {
- return s.String()
-}
-
-// SetArn sets the Arn field's value.
-func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser {
- s.Arn = &v
- return s
-}
-
-// SetAssumedRoleId sets the AssumedRoleId field's value.
-func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser {
- s.AssumedRoleId = &v
- return s
-}
-
-// Amazon Web Services credentials for API authentication.
-type Credentials struct {
- _ struct{} `type:"structure"`
-
- // The access key ID that identifies the temporary security credentials.
- //
- // AccessKeyId is a required field
- AccessKeyId *string `min:"16" type:"string" required:"true"`
-
- // The date on which the current credentials expire.
- //
- // Expiration is a required field
- Expiration *time.Time `type:"timestamp" required:"true"`
-
- // The secret access key that can be used to sign requests.
- //
- // SecretAccessKey is a sensitive parameter and its value will be
- // replaced with "sensitive" in string returned by Credentials's
- // String and GoString methods.
- //
- // SecretAccessKey is a required field
- SecretAccessKey *string `type:"string" required:"true" sensitive:"true"`
-
- // The token that users must pass to the service API to use the temporary credentials.
- //
- // SessionToken is a required field
- SessionToken *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Credentials) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Credentials) GoString() string {
- return s.String()
-}
-
-// SetAccessKeyId sets the AccessKeyId field's value.
-func (s *Credentials) SetAccessKeyId(v string) *Credentials {
- s.AccessKeyId = &v
- return s
-}
-
-// SetExpiration sets the Expiration field's value.
-func (s *Credentials) SetExpiration(v time.Time) *Credentials {
- s.Expiration = &v
- return s
-}
-
-// SetSecretAccessKey sets the SecretAccessKey field's value.
-func (s *Credentials) SetSecretAccessKey(v string) *Credentials {
- s.SecretAccessKey = &v
- return s
-}
-
-// SetSessionToken sets the SessionToken field's value.
-func (s *Credentials) SetSessionToken(v string) *Credentials {
- s.SessionToken = &v
- return s
-}
-
-type DecodeAuthorizationMessageInput struct {
- _ struct{} `type:"structure"`
-
- // The encoded message that was returned with the response.
- //
- // EncodedMessage is a required field
- EncodedMessage *string `min:"1" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DecodeAuthorizationMessageInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DecodeAuthorizationMessageInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DecodeAuthorizationMessageInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"}
- if s.EncodedMessage == nil {
- invalidParams.Add(request.NewErrParamRequired("EncodedMessage"))
- }
- if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetEncodedMessage sets the EncodedMessage field's value.
-func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput {
- s.EncodedMessage = &v
- return s
-}
-
-// A document that contains additional information about the authorization status
-// of a request from an encoded message that is returned in response to an Amazon
-// Web Services request.
-type DecodeAuthorizationMessageOutput struct {
- _ struct{} `type:"structure"`
-
- // The API returns a response with the decoded message.
- DecodedMessage *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DecodeAuthorizationMessageOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s DecodeAuthorizationMessageOutput) GoString() string {
- return s.String()
-}
-
-// SetDecodedMessage sets the DecodedMessage field's value.
-func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput {
- s.DecodedMessage = &v
- return s
-}
-
-// Identifiers for the federated user that is associated with the credentials.
-type FederatedUser struct {
- _ struct{} `type:"structure"`
-
- // The ARN that specifies the federated user that is associated with the credentials.
- // For more information about ARNs and how to use them in policies, see IAM
- // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html)
- // in the IAM User Guide.
- //
- // Arn is a required field
- Arn *string `min:"20" type:"string" required:"true"`
-
- // The string that identifies the federated user associated with the credentials,
- // similar to the unique ID of an IAM user.
- //
- // FederatedUserId is a required field
- FederatedUserId *string `min:"2" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s FederatedUser) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s FederatedUser) GoString() string {
- return s.String()
-}
-
-// SetArn sets the Arn field's value.
-func (s *FederatedUser) SetArn(v string) *FederatedUser {
- s.Arn = &v
- return s
-}
-
-// SetFederatedUserId sets the FederatedUserId field's value.
-func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser {
- s.FederatedUserId = &v
- return s
-}
-
-type GetAccessKeyInfoInput struct {
- _ struct{} `type:"structure"`
-
- // The identifier of an access key.
- //
- // This parameter allows (through its regex pattern) a string of characters
- // that can consist of any upper- or lowercase letter or digit.
- //
- // AccessKeyId is a required field
- AccessKeyId *string `min:"16" type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetAccessKeyInfoInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetAccessKeyInfoInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetAccessKeyInfoInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetAccessKeyInfoInput"}
- if s.AccessKeyId == nil {
- invalidParams.Add(request.NewErrParamRequired("AccessKeyId"))
- }
- if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 {
- invalidParams.Add(request.NewErrParamMinLen("AccessKeyId", 16))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetAccessKeyId sets the AccessKeyId field's value.
-func (s *GetAccessKeyInfoInput) SetAccessKeyId(v string) *GetAccessKeyInfoInput {
- s.AccessKeyId = &v
- return s
-}
-
-type GetAccessKeyInfoOutput struct {
- _ struct{} `type:"structure"`
-
- // The number used to identify the Amazon Web Services account.
- Account *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetAccessKeyInfoOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetAccessKeyInfoOutput) GoString() string {
- return s.String()
-}
-
-// SetAccount sets the Account field's value.
-func (s *GetAccessKeyInfoOutput) SetAccount(v string) *GetAccessKeyInfoOutput {
- s.Account = &v
- return s
-}
-
-type GetCallerIdentityInput struct {
- _ struct{} `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetCallerIdentityInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetCallerIdentityInput) GoString() string {
- return s.String()
-}
-
-// Contains the response to a successful GetCallerIdentity request, including
-// information about the entity making the request.
-type GetCallerIdentityOutput struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Web Services account ID number of the account that owns or contains
- // the calling entity.
- Account *string `type:"string"`
-
- // The Amazon Web Services ARN associated with the calling entity.
- Arn *string `min:"20" type:"string"`
-
- // The unique identifier of the calling entity. The exact value depends on the
- // type of entity that is making the call. The values returned are those listed
- // in the aws:userid column in the Principal table (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable)
- // found on the Policy Variables reference page in the IAM User Guide.
- UserId *string `type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetCallerIdentityOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetCallerIdentityOutput) GoString() string {
- return s.String()
-}
-
-// SetAccount sets the Account field's value.
-func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput {
- s.Account = &v
- return s
-}
-
-// SetArn sets the Arn field's value.
-func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput {
- s.Arn = &v
- return s
-}
-
-// SetUserId sets the UserId field's value.
-func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput {
- s.UserId = &v
- return s
-}
-
-type GetFederationTokenInput struct {
- _ struct{} `type:"structure"`
-
- // The duration, in seconds, that the session should last. Acceptable durations
- // for federation sessions range from 900 seconds (15 minutes) to 129,600 seconds
- // (36 hours), with 43,200 seconds (12 hours) as the default. Sessions obtained
- // using root user credentials are restricted to a maximum of 3,600 seconds
- // (one hour). If the specified duration is longer than one hour, the session
- // obtained by using root user credentials defaults to one hour.
- DurationSeconds *int64 `min:"900" type:"integer"`
-
- // The name of the federated user. The name is used as an identifier for the
- // temporary security credentials (such as Bob). For example, you can reference
- // the federated user name in a resource-based policy, such as in an Amazon
- // S3 bucket policy.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-
- //
- // Name is a required field
- Name *string `min:"2" type:"string" required:"true"`
-
- // An IAM policy in JSON format that you want to use as an inline session policy.
- //
- // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // to this operation. You can pass a single JSON policy document to use as an
- // inline session policy. You can also specify up to 10 managed policy Amazon
- // Resource Names (ARNs) to use as managed session policies.
- //
- // This parameter is optional. However, if you do not pass any session policies,
- // then the resulting federated user session has no permissions.
- //
- // When you pass session policies, the session permissions are the intersection
- // of the IAM user policies and the session policies that you pass. This gives
- // you a way to further restrict the permissions for a federated user. You cannot
- // use session policies to grant more permissions than those that are defined
- // in the permissions policy of the IAM user. For more information, see Session
- // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- //
- // The resulting credentials can be used to access a resource that has a resource-based
- // policy. If that policy specifically references the federated user session
- // in the Principal element of the policy, the session has the permissions allowed
- // by the policy. These permissions are granted in addition to the permissions
- // that are granted by the session policies.
- //
- // The plaintext that you use for both inline and managed session policies can't
- // exceed 2,048 characters. The JSON policy characters can be any ASCII character
- // from the space character to the end of the valid character list (\u0020 through
- // \u00FF). It can also include the tab (\u0009), linefeed (\u000A), and carriage
- // return (\u000D) characters.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- Policy *string `min:"1" type:"string"`
-
- // The Amazon Resource Names (ARNs) of the IAM managed policies that you want
- // to use as a managed session policy. The policies must exist in the same account
- // as the IAM user that is requesting federated access.
- //
- // You must pass an inline or managed session policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // to this operation. You can pass a single JSON policy document to use as an
- // inline session policy. You can also specify up to 10 managed policy Amazon
- // Resource Names (ARNs) to use as managed session policies. The plaintext that
- // you use for both inline and managed session policies can't exceed 2,048 characters.
- // You can provide up to 10 managed policy ARNs. For more information about
- // ARNs, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces
- // (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference.
- //
- // This parameter is optional. However, if you do not pass any session policies,
- // then the resulting federated user session has no permissions.
- //
- // When you pass session policies, the session permissions are the intersection
- // of the IAM user policies and the session policies that you pass. This gives
- // you a way to further restrict the permissions for a federated user. You cannot
- // use session policies to grant more permissions than those that are defined
- // in the permissions policy of the IAM user. For more information, see Session
- // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies.html#policies_session)
- // in the IAM User Guide.
- //
- // The resulting credentials can be used to access a resource that has a resource-based
- // policy. If that policy specifically references the federated user session
- // in the Principal element of the policy, the session has the permissions allowed
- // by the policy. These permissions are granted in addition to the permissions
- // that are granted by the session policies.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- PolicyArns []*PolicyDescriptorType `type:"list"`
-
- // A list of session tags. Each session tag consists of a key name and an associated
- // value. For more information about session tags, see Passing Session Tags
- // in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
- // in the IAM User Guide.
- //
- // This parameter is optional. You can pass up to 50 session tags. The plaintext
- // session tag keys can’t exceed 128 characters and the values can’t exceed
- // 256 characters. For these and additional limits, see IAM and STS Character
- // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
- //
- // An Amazon Web Services conversion compresses the passed inline session policy,
- // managed policy ARNs, and session tags into a packed binary format that has
- // a separate limit. Your request can fail for this limit even if your plaintext
- // meets the other requirements. The PackedPolicySize response element indicates
- // by percentage how close the policies and tags for your request are to the
- // upper size limit.
- //
- // You can pass a session tag with the same key as a tag that is already attached
- // to the user you are federating. When you do, session tags override a user
- // tag with the same key.
- //
- // Tag key–value pairs are not case sensitive, but case is preserved. This
- // means that you cannot have separate Department and department tag keys. Assume
- // that the role has the Department=Marketing tag and you pass the department=engineering
- // session tag. Department and department are not saved as separate tags, and
- // the session tag passed in the request takes precedence over the role tag.
- Tags []*Tag `type:"list"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetFederationTokenInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetFederationTokenInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetFederationTokenInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"}
- if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
- invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
- }
- if s.Name == nil {
- invalidParams.Add(request.NewErrParamRequired("Name"))
- }
- if s.Name != nil && len(*s.Name) < 2 {
- invalidParams.Add(request.NewErrParamMinLen("Name", 2))
- }
- if s.Policy != nil && len(*s.Policy) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
- }
- if s.PolicyArns != nil {
- for i, v := range s.PolicyArns {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "PolicyArns", i), err.(request.ErrInvalidParams))
- }
- }
- }
- if s.Tags != nil {
- for i, v := range s.Tags {
- if v == nil {
- continue
- }
- if err := v.Validate(); err != nil {
- invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams))
- }
- }
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDurationSeconds sets the DurationSeconds field's value.
-func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput {
- s.DurationSeconds = &v
- return s
-}
-
-// SetName sets the Name field's value.
-func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput {
- s.Name = &v
- return s
-}
-
-// SetPolicy sets the Policy field's value.
-func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput {
- s.Policy = &v
- return s
-}
-
-// SetPolicyArns sets the PolicyArns field's value.
-func (s *GetFederationTokenInput) SetPolicyArns(v []*PolicyDescriptorType) *GetFederationTokenInput {
- s.PolicyArns = v
- return s
-}
-
-// SetTags sets the Tags field's value.
-func (s *GetFederationTokenInput) SetTags(v []*Tag) *GetFederationTokenInput {
- s.Tags = v
- return s
-}
-
-// Contains the response to a successful GetFederationToken request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon
-// Web Services requests.
-type GetFederationTokenOutput struct {
- _ struct{} `type:"structure"`
-
- // The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token.
- //
- // The size of the security token that STS API operations return is not fixed.
- // We strongly recommend that you make no assumptions about the maximum size.
- Credentials *Credentials `type:"structure"`
-
- // Identifiers for the federated user associated with the credentials (such
- // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You
- // can use the federated user's ARN in your resource-based policies, such as
- // an Amazon S3 bucket policy.
- FederatedUser *FederatedUser `type:"structure"`
-
- // A percentage value that indicates the packed size of the session policies
- // and session tags combined passed in the request. The request fails if the
- // packed size is greater than 100 percent, which means the policies and tags
- // exceeded the allowed space.
- PackedPolicySize *int64 `type:"integer"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetFederationTokenOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetFederationTokenOutput) GoString() string {
- return s.String()
-}
-
-// SetCredentials sets the Credentials field's value.
-func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput {
- s.Credentials = v
- return s
-}
-
-// SetFederatedUser sets the FederatedUser field's value.
-func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput {
- s.FederatedUser = v
- return s
-}
-
-// SetPackedPolicySize sets the PackedPolicySize field's value.
-func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput {
- s.PackedPolicySize = &v
- return s
-}
-
-type GetSessionTokenInput struct {
- _ struct{} `type:"structure"`
-
- // The duration, in seconds, that the credentials should remain valid. Acceptable
- // durations for IAM user sessions range from 900 seconds (15 minutes) to 129,600
- // seconds (36 hours), with 43,200 seconds (12 hours) as the default. Sessions
- // for Amazon Web Services account owners are restricted to a maximum of 3,600
- // seconds (one hour). If the duration is longer than one hour, the session
- // for Amazon Web Services account owners defaults to one hour.
- DurationSeconds *int64 `min:"900" type:"integer"`
-
- // The identification number of the MFA device that is associated with the IAM
- // user who is making the GetSessionToken call. Specify this value if the IAM
- // user has a policy that requires MFA authentication. The value is either the
- // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource
- // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user).
- // You can find the device for an IAM user by going to the Amazon Web Services
- // Management Console and viewing the user's security credentials.
- //
- // The regex used to validate this parameter is a string of characters consisting
- // of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@:/-
- SerialNumber *string `min:"9" type:"string"`
-
- // The value provided by the MFA device, if MFA is required. If any policy requires
- // the IAM user to submit an MFA code, specify this value. If MFA authentication
- // is required, the user must provide a code when requesting a set of temporary
- // security credentials. A user who fails to provide the code receives an "access
- // denied" response when requesting resources that require MFA authentication.
- //
- // The format for this parameter, as described by its regex pattern, is a sequence
- // of six numeric digits.
- TokenCode *string `min:"6" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetSessionTokenInput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetSessionTokenInput) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *GetSessionTokenInput) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
- if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
- invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
- }
- if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
- invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
- }
- if s.TokenCode != nil && len(*s.TokenCode) < 6 {
- invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetDurationSeconds sets the DurationSeconds field's value.
-func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
- s.DurationSeconds = &v
- return s
-}
-
-// SetSerialNumber sets the SerialNumber field's value.
-func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
- s.SerialNumber = &v
- return s
-}
-
-// SetTokenCode sets the TokenCode field's value.
-func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
- s.TokenCode = &v
- return s
-}
-
-// Contains the response to a successful GetSessionToken request, including
-// temporary Amazon Web Services credentials that can be used to make Amazon
-// Web Services requests.
-type GetSessionTokenOutput struct {
- _ struct{} `type:"structure"`
-
- // The temporary security credentials, which include an access key ID, a secret
- // access key, and a security (or session) token.
- //
- // The size of the security token that STS API operations return is not fixed.
- // We strongly recommend that you make no assumptions about the maximum size.
- Credentials *Credentials `type:"structure"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetSessionTokenOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s GetSessionTokenOutput) GoString() string {
- return s.String()
-}
-
-// SetCredentials sets the Credentials field's value.
-func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput {
- s.Credentials = v
- return s
-}
-
-// A reference to the IAM managed policy that is passed as a session policy
-// for a role session or a federated user session.
-type PolicyDescriptorType struct {
- _ struct{} `type:"structure"`
-
- // The Amazon Resource Name (ARN) of the IAM managed policy to use as a session
- // policy for the role. For more information about ARNs, see Amazon Resource
- // Names (ARNs) and Amazon Web Services Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // in the Amazon Web Services General Reference.
- Arn *string `locationName:"arn" min:"20" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PolicyDescriptorType) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s PolicyDescriptorType) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *PolicyDescriptorType) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "PolicyDescriptorType"}
- if s.Arn != nil && len(*s.Arn) < 20 {
- invalidParams.Add(request.NewErrParamMinLen("Arn", 20))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetArn sets the Arn field's value.
-func (s *PolicyDescriptorType) SetArn(v string) *PolicyDescriptorType {
- s.Arn = &v
- return s
-}
-
-// Contains information about the provided context. This includes the signed
-// and encrypted trusted context assertion and the context provider ARN from
-// which the trusted context assertion was generated.
-type ProvidedContext struct {
- _ struct{} `type:"structure"`
-
- // The signed and encrypted trusted context assertion generated by the context
- // provider. The trusted context assertion is signed and encrypted by Amazon
- // Web Services STS.
- ContextAssertion *string `min:"4" type:"string"`
-
- // The context provider ARN from which the trusted context assertion was generated.
- ProviderArn *string `min:"20" type:"string"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvidedContext) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s ProvidedContext) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *ProvidedContext) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "ProvidedContext"}
- if s.ContextAssertion != nil && len(*s.ContextAssertion) < 4 {
- invalidParams.Add(request.NewErrParamMinLen("ContextAssertion", 4))
- }
- if s.ProviderArn != nil && len(*s.ProviderArn) < 20 {
- invalidParams.Add(request.NewErrParamMinLen("ProviderArn", 20))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetContextAssertion sets the ContextAssertion field's value.
-func (s *ProvidedContext) SetContextAssertion(v string) *ProvidedContext {
- s.ContextAssertion = &v
- return s
-}
-
-// SetProviderArn sets the ProviderArn field's value.
-func (s *ProvidedContext) SetProviderArn(v string) *ProvidedContext {
- s.ProviderArn = &v
- return s
-}
-
-// You can pass custom key-value pair attributes when you assume a role or federate
-// a user. These are called session tags. You can then use the session tags
-// to control access to resources. For more information, see Tagging Amazon
-// Web Services STS Sessions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
-// in the IAM User Guide.
-type Tag struct {
- _ struct{} `type:"structure"`
-
- // The key for a session tag.
- //
- // You can pass up to 50 session tags. The plain text session tag keys can’t
- // exceed 128 characters. For these and additional limits, see IAM and STS Character
- // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
- //
- // Key is a required field
- Key *string `min:"1" type:"string" required:"true"`
-
- // The value for a session tag.
- //
- // You can pass up to 50 session tags. The plain text session tag values can’t
- // exceed 256 characters. For these and additional limits, see IAM and STS Character
- // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
- //
- // Value is a required field
- Value *string `type:"string" required:"true"`
-}
-
-// String returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Tag) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation.
-//
-// API parameter values that are decorated as "sensitive" in the API will not
-// be included in the string output. The member name will be present, but the
-// value will be replaced with "sensitive".
-func (s Tag) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *Tag) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "Tag"}
- if s.Key == nil {
- invalidParams.Add(request.NewErrParamRequired("Key"))
- }
- if s.Key != nil && len(*s.Key) < 1 {
- invalidParams.Add(request.NewErrParamMinLen("Key", 1))
- }
- if s.Value == nil {
- invalidParams.Add(request.NewErrParamRequired("Value"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetKey sets the Key field's value.
-func (s *Tag) SetKey(v string) *Tag {
- s.Key = &v
- return s
-}
-
-// SetValue sets the Value field's value.
-func (s *Tag) SetValue(v string) *Tag {
- s.Value = &v
- return s
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
deleted file mode 100644
index d5307fcaa..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package sts
-
-import "github.com/aws/aws-sdk-go/aws/request"
-
-func init() {
- initRequest = customizeRequest
-}
-
-func customizeRequest(r *request.Request) {
- r.RetryErrorCodes = append(r.RetryErrorCodes, ErrCodeIDPCommunicationErrorException)
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
deleted file mode 100644
index ea1d9eb0c..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package sts provides the client and types for making API
-// requests to AWS Security Token Service.
-//
-// Security Token Service (STS) enables you to request temporary, limited-privilege
-// credentials for users. This guide provides descriptions of the STS API. For
-// more information about using this service, see Temporary Security Credentials
-// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
-//
-// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
-//
-// See sts package documentation for more information.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
-//
-// # Using the Client
-//
-// To contact AWS Security Token Service with the SDK use the New function to create
-// a new service client. With that client you can make API requests to the service.
-// These clients are safe to use concurrently.
-//
-// See the SDK's documentation for more information on how to use the SDK.
-// https://docs.aws.amazon.com/sdk-for-go/api/
-//
-// See aws.Config documentation for more information on configuring SDK clients.
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
-//
-// See the AWS Security Token Service client STS for more
-// information on creating client for this service.
-// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
-package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
deleted file mode 100644
index b680bbd5d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sts
-
-const (
-
- // ErrCodeExpiredTokenException for service response error code
- // "ExpiredTokenException".
- //
- // The web identity token that was passed is expired or is not valid. Get a
- // new identity token from the identity provider and then retry the request.
- ErrCodeExpiredTokenException = "ExpiredTokenException"
-
- // ErrCodeIDPCommunicationErrorException for service response error code
- // "IDPCommunicationError".
- //
- // The request could not be fulfilled because the identity provider (IDP) that
- // was asked to verify the incoming identity token could not be reached. This
- // is often a transient error caused by network conditions. Retry the request
- // a limited number of times so that you don't exceed the request rate. If the
- // error persists, the identity provider might be down or not responding.
- ErrCodeIDPCommunicationErrorException = "IDPCommunicationError"
-
- // ErrCodeIDPRejectedClaimException for service response error code
- // "IDPRejectedClaim".
- //
- // The identity provider (IdP) reported that authentication failed. This might
- // be because the claim is invalid.
- //
- // If this error is returned for the AssumeRoleWithWebIdentity operation, it
- // can also mean that the claim has expired or has been explicitly revoked.
- ErrCodeIDPRejectedClaimException = "IDPRejectedClaim"
-
- // ErrCodeInvalidAuthorizationMessageException for service response error code
- // "InvalidAuthorizationMessageException".
- //
- // The error returned if the message passed to DecodeAuthorizationMessage was
- // invalid. This can happen if the token contains invalid characters, such as
- // linebreaks.
- ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException"
-
- // ErrCodeInvalidIdentityTokenException for service response error code
- // "InvalidIdentityToken".
- //
- // The web identity token that was passed could not be validated by Amazon Web
- // Services. Get a new identity token from the identity provider and then retry
- // the request.
- ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken"
-
- // ErrCodeMalformedPolicyDocumentException for service response error code
- // "MalformedPolicyDocument".
- //
- // The request was rejected because the policy document was malformed. The error
- // message describes the specific error.
- ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument"
-
- // ErrCodePackedPolicyTooLargeException for service response error code
- // "PackedPolicyTooLarge".
- //
- // The request was rejected because the total packed size of the session policies
- // and session tags combined was too large. An Amazon Web Services conversion
- // compresses the session policy document, session policy ARNs, and session
- // tags into a packed binary format that has a separate limit. The error message
- // indicates by percentage how close the policies and tags are to the upper
- // size limit. For more information, see Passing Session Tags in STS (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_session-tags.html)
- // in the IAM User Guide.
- //
- // You could receive this error even though you meet other defined session policy
- // and session tag limits. For more information, see IAM and STS Entity Character
- // Limits (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html#reference_iam-limits-entity-length)
- // in the IAM User Guide.
- ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge"
-
- // ErrCodeRegionDisabledException for service response error code
- // "RegionDisabledException".
- //
- // STS is not activated in the requested region for the account that is being
- // asked to generate credentials. The account administrator must use the IAM
- // console to activate STS in that region. For more information, see Activating
- // and Deactivating Amazon Web Services STS in an Amazon Web Services Region
- // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
- // in the IAM User Guide.
- ErrCodeRegionDisabledException = "RegionDisabledException"
-)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
deleted file mode 100644
index 12327d053..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-package sts
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/client"
- "github.com/aws/aws-sdk-go/aws/client/metadata"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/aws/signer/v4"
- "github.com/aws/aws-sdk-go/private/protocol/query"
-)
-
-// STS provides the API operation methods for making requests to
-// AWS Security Token Service. See this package's package overview docs
-// for details on the service.
-//
-// STS methods are safe to use concurrently. It is not safe to
-// modify mutate any of the struct's properties though.
-type STS struct {
- *client.Client
-}
-
-// Used for custom client initialization logic
-var initClient func(*client.Client)
-
-// Used for custom request initialization logic
-var initRequest func(*request.Request)
-
-// Service information constants
-const (
- ServiceName = "sts" // Name of service.
- EndpointsID = ServiceName // ID to lookup a service endpoint with.
- ServiceID = "STS" // ServiceID is a unique identifier of a specific service.
-)
-
-// New creates a new instance of the STS client with a session.
-// If additional configuration is needed for the client instance use the optional
-// aws.Config parameter to add your extra config.
-//
-// Example:
-//
-// mySession := session.Must(session.NewSession())
-//
-// // Create a STS client from just a session.
-// svc := sts.New(mySession)
-//
-// // Create a STS client with additional configuration
-// svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
-func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
- c := p.ClientConfig(EndpointsID, cfgs...)
- if c.SigningNameDerived || len(c.SigningName) == 0 {
- c.SigningName = EndpointsID
- // No Fallback
- }
- return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName, c.ResolvedRegion)
-}
-
-// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName, resolvedRegion string) *STS {
- svc := &STS{
- Client: client.New(
- cfg,
- metadata.ClientInfo{
- ServiceName: ServiceName,
- ServiceID: ServiceID,
- SigningName: signingName,
- SigningRegion: signingRegion,
- PartitionID: partitionID,
- Endpoint: endpoint,
- APIVersion: "2011-06-15",
- ResolvedRegion: resolvedRegion,
- },
- handlers,
- ),
- }
-
- // Handlers
- svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
- svc.Handlers.Build.PushBackNamed(query.BuildHandler)
- svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
- svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
- svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
-
- // Run custom client initialization if present
- if initClient != nil {
- initClient(svc.Client)
- }
-
- return svc
-}
-
-// newRequest creates a new request for a STS operation and runs any
-// custom request initialization.
-func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request {
- req := c.NewRequest(op, params, data)
-
- // Run custom request initialization if present
- if initRequest != nil {
- initRequest(req)
- }
-
- return req
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
deleted file mode 100644
index bf06b2e7d..000000000
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/stsiface/interface.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-
-// Package stsiface provides an interface to enable mocking the AWS Security Token Service service client
-// for testing your code.
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters.
-package stsiface
-
-import (
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/request"
- "github.com/aws/aws-sdk-go/service/sts"
-)
-
-// STSAPI provides an interface to enable mocking the
-// sts.STS service client's API operation,
-// paginators, and waiters. This make unit testing your code that calls out
-// to the SDK's service client's calls easier.
-//
-// The best way to use this interface is so the SDK's service client's calls
-// can be stubbed out for unit testing your code with the SDK without needing
-// to inject custom request handlers into the SDK's request pipeline.
-//
-// // myFunc uses an SDK service client to make a request to
-// // AWS Security Token Service.
-// func myFunc(svc stsiface.STSAPI) bool {
-// // Make svc.AssumeRole request
-// }
-//
-// func main() {
-// sess := session.New()
-// svc := sts.New(sess)
-//
-// myFunc(svc)
-// }
-//
-// In your _test.go file:
-//
-// // Define a mock struct to be used in your unit tests of myFunc.
-// type mockSTSClient struct {
-// stsiface.STSAPI
-// }
-// func (m *mockSTSClient) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) {
-// // mock response/functionality
-// }
-//
-// func TestMyFunc(t *testing.T) {
-// // Setup Test
-// mockSvc := &mockSTSClient{}
-//
-// myfunc(mockSvc)
-//
-// // Verify myFunc's functionality
-// }
-//
-// It is important to note that this interface will have breaking changes
-// when the service model is updated and adds new API operations, paginators,
-// and waiters. Its suggested to use the pattern above for testing, or using
-// tooling to generate mocks to satisfy the interfaces.
-type STSAPI interface {
- AssumeRole(*sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error)
- AssumeRoleWithContext(aws.Context, *sts.AssumeRoleInput, ...request.Option) (*sts.AssumeRoleOutput, error)
- AssumeRoleRequest(*sts.AssumeRoleInput) (*request.Request, *sts.AssumeRoleOutput)
-
- AssumeRoleWithSAML(*sts.AssumeRoleWithSAMLInput) (*sts.AssumeRoleWithSAMLOutput, error)
- AssumeRoleWithSAMLWithContext(aws.Context, *sts.AssumeRoleWithSAMLInput, ...request.Option) (*sts.AssumeRoleWithSAMLOutput, error)
- AssumeRoleWithSAMLRequest(*sts.AssumeRoleWithSAMLInput) (*request.Request, *sts.AssumeRoleWithSAMLOutput)
-
- AssumeRoleWithWebIdentity(*sts.AssumeRoleWithWebIdentityInput) (*sts.AssumeRoleWithWebIdentityOutput, error)
- AssumeRoleWithWebIdentityWithContext(aws.Context, *sts.AssumeRoleWithWebIdentityInput, ...request.Option) (*sts.AssumeRoleWithWebIdentityOutput, error)
- AssumeRoleWithWebIdentityRequest(*sts.AssumeRoleWithWebIdentityInput) (*request.Request, *sts.AssumeRoleWithWebIdentityOutput)
-
- DecodeAuthorizationMessage(*sts.DecodeAuthorizationMessageInput) (*sts.DecodeAuthorizationMessageOutput, error)
- DecodeAuthorizationMessageWithContext(aws.Context, *sts.DecodeAuthorizationMessageInput, ...request.Option) (*sts.DecodeAuthorizationMessageOutput, error)
- DecodeAuthorizationMessageRequest(*sts.DecodeAuthorizationMessageInput) (*request.Request, *sts.DecodeAuthorizationMessageOutput)
-
- GetAccessKeyInfo(*sts.GetAccessKeyInfoInput) (*sts.GetAccessKeyInfoOutput, error)
- GetAccessKeyInfoWithContext(aws.Context, *sts.GetAccessKeyInfoInput, ...request.Option) (*sts.GetAccessKeyInfoOutput, error)
- GetAccessKeyInfoRequest(*sts.GetAccessKeyInfoInput) (*request.Request, *sts.GetAccessKeyInfoOutput)
-
- GetCallerIdentity(*sts.GetCallerIdentityInput) (*sts.GetCallerIdentityOutput, error)
- GetCallerIdentityWithContext(aws.Context, *sts.GetCallerIdentityInput, ...request.Option) (*sts.GetCallerIdentityOutput, error)
- GetCallerIdentityRequest(*sts.GetCallerIdentityInput) (*request.Request, *sts.GetCallerIdentityOutput)
-
- GetFederationToken(*sts.GetFederationTokenInput) (*sts.GetFederationTokenOutput, error)
- GetFederationTokenWithContext(aws.Context, *sts.GetFederationTokenInput, ...request.Option) (*sts.GetFederationTokenOutput, error)
- GetFederationTokenRequest(*sts.GetFederationTokenInput) (*request.Request, *sts.GetFederationTokenOutput)
-
- GetSessionToken(*sts.GetSessionTokenInput) (*sts.GetSessionTokenOutput, error)
- GetSessionTokenWithContext(aws.Context, *sts.GetSessionTokenInput, ...request.Option) (*sts.GetSessionTokenOutput, error)
- GetSessionTokenRequest(*sts.GetSessionTokenInput) (*request.Request, *sts.GetSessionTokenOutput)
-}
-
-var _ STSAPI = (*sts.STS)(nil)
diff --git a/vendor/github.com/aws/smithy-go/.gitignore b/vendor/github.com/aws/smithy-go/.gitignore
index c01141aa4..2518b3491 100644
--- a/vendor/github.com/aws/smithy-go/.gitignore
+++ b/vendor/github.com/aws/smithy-go/.gitignore
@@ -20,3 +20,10 @@ target/
build/
*/out/
*/*/out/
+
+# VS Code
+bin/
+.vscode/
+
+# make
+c.out
diff --git a/vendor/github.com/aws/smithy-go/CHANGELOG.md b/vendor/github.com/aws/smithy-go/CHANGELOG.md
index a608e2b63..8b6ab2950 100644
--- a/vendor/github.com/aws/smithy-go/CHANGELOG.md
+++ b/vendor/github.com/aws/smithy-go/CHANGELOG.md
@@ -1,3 +1,176 @@
+# Release (2025-08-27)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.23.0
+ * **Feature**: Sort map keys in JSON Document types.
+
+# Release (2025-07-24)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.5
+ * **Feature**: Add HTTP interceptors.
+
+# Release (2025-06-16)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.4
+ * **Bug Fix**: Fix CBOR serd empty check for string and enum fields
+ * **Bug Fix**: Fix HTTP metrics data race.
+ * **Bug Fix**: Replace usages of deprecated ioutil package.
+
+# Release (2025-02-17)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.3
+ * **Dependency Update**: Bump minimum Go version to 1.22 per our language support policy.
+
+# Release (2025-01-21)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.2
+ * **Bug Fix**: Fix HTTP metrics data race.
+ * **Bug Fix**: Replace usages of deprecated ioutil package.
+
+# Release (2024-11-15)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.1
+ * **Bug Fix**: Fix failure to replace URI path segments when their names overlap.
+
+# Release (2024-10-03)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.22.0
+ * **Feature**: Add HTTP client metrics.
+
+# Release (2024-09-25)
+
+## Module Highlights
+* `github.com/aws/smithy-go/aws-http-auth`: [v1.0.0](aws-http-auth/CHANGELOG.md#v100-2024-09-25)
+ * **Release**: Initial release of module aws-http-auth, which implements generically consumable SigV4 and SigV4a request signing.
+
+# Release (2024-09-19)
+
+## General Highlights
+* **Dependency Update**: Updated to the latest SDK module versions
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.21.0
+ * **Feature**: Add tracing and metrics APIs, and builtin instrumentation for both, in generated clients.
+* `github.com/aws/smithy-go/metrics/smithyotelmetrics`: [v1.0.0](metrics/smithyotelmetrics/CHANGELOG.md#v100-2024-09-19)
+ * **Release**: Initial release of `smithyotelmetrics` module, which is used to adapt an OpenTelemetry SDK meter provider to be used with Smithy clients.
+* `github.com/aws/smithy-go/tracing/smithyoteltracing`: [v1.0.0](tracing/smithyoteltracing/CHANGELOG.md#v100-2024-09-19)
+ * **Release**: Initial release of `smithyoteltracing` module, which is used to adapt an OpenTelemetry SDK tracer provider to be used with Smithy clients.
+
+# Release (2024-08-14)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.4
+ * **Dependency Update**: Bump minimum Go version to 1.21.
+
+# Release (2024-06-27)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.3
+ * **Bug Fix**: Fix encoding/cbor test overflow on x86.
+
+# Release (2024-03-29)
+
+* No change notes available for this release.
+
+# Release (2024-02-21)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.1
+ * **Bug Fix**: Remove runtime dependency on go-cmp.
+
+# Release (2024-02-13)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.20.0
+ * **Feature**: Add codegen definition for sigv4a trait.
+ * **Feature**: Bump minimum Go version to 1.20 per our language support policy.
+
+# Release (2023-12-07)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.19.0
+ * **Feature**: Support modeled request compression.
+
+# Release (2023-11-30)
+
+* No change notes available for this release.
+
+# Release (2023-11-29)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.18.0
+ * **Feature**: Expose Options() method on generated service clients.
+
+# Release (2023-11-15)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.17.0
+ * **Feature**: Support identity/auth components of client reference architecture.
+
+# Release (2023-10-31)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.16.0
+ * **Feature**: **LANG**: Bump minimum go version to 1.19.
+
+# Release (2023-10-06)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.15.0
+ * **Feature**: Add `http.WithHeaderComment` middleware.
+
+# Release (2023-08-18)
+
+* No change notes available for this release.
+
+# Release (2023-08-07)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.14.1
+ * **Bug Fix**: Prevent duplicated error returns in EndpointResolverV2 default implementation.
+
+# Release (2023-07-31)
+
+## General Highlights
+* **Feature**: Adds support for smithy-modeled endpoint resolution.
+
+# Release (2022-12-02)
+
+* No change notes available for this release.
+
+# Release (2022-10-24)
+
+## Module Highlights
+* `github.com/aws/smithy-go`: v1.13.4
+ * **Bug Fix**: fixed document type checking for encoding nested types
+
# Release (2022-09-14)
* No change notes available for this release.
diff --git a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md
index c4b6a1c50..1f8d01ff6 100644
--- a/vendor/github.com/aws/smithy-go/CONTRIBUTING.md
+++ b/vendor/github.com/aws/smithy-go/CONTRIBUTING.md
@@ -39,6 +39,37 @@ To send us a pull request, please:
GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+### Changelog Documents
+
+(You can SKIP this step if you are only changing the code generator, and not the runtime).
+
+When submitting a pull request please include a changelog file on a folder named `.changelog`.
+These are used to generate the content `CHANGELOG.md` and Release Notes. The format of the file is as follows:
+
+```
+{
+ "id": "12345678-1234-1234-1234-123456789012"
+ "type": "bugfix"
+ "collapse": true
+ "description": "Fix improper use of printf-style functions.",
+ "modules": [
+ "."
+ ]
+}
+```
+
+* id: a UUID. This should also be used for the name of the file, so if your id is `12345678-1234-1234-1234-123456789012` the file should be named `12345678-1234-1234-1234-123456789012.json/`
+* type: one of the following:
+ * bugfix: Fixing an existing bug
+ * Feature: Adding a new feature to an existing service
+ * Release: Releasing a new module
+ * Dependency: Updating dependencies
+ * Announcement: Making an announcement, like deprecation of a module
+* collapse: whether this change should appear separately on the release notes on every module listed on `modules` (`"collapse": false`), or if it should show up as a single entry (`"collapse": true`)
+ * For the smithy-go repository this should always be `false`
+* description: Description of this change. Most of the times is the same as the title of the PR
+* modules: which Go modules does this change impact. The root module is expressed as "."
+
## Finding contributions to work on
Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
diff --git a/vendor/github.com/aws/smithy-go/Makefile b/vendor/github.com/aws/smithy-go/Makefile
index b8c657435..34b17ab2f 100644
--- a/vendor/github.com/aws/smithy-go/Makefile
+++ b/vendor/github.com/aws/smithy-go/Makefile
@@ -14,6 +14,9 @@ REPOTOOLS_CMD_CHANGELOG = ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_TAG_RELEASE = ${REPOTOOLS_MODULE}/cmd/tagrelease@${REPOTOOLS_VERSION}
REPOTOOLS_CMD_MODULE_VERSION = ${REPOTOOLS_MODULE}/cmd/moduleversion@${REPOTOOLS_VERSION}
+UNIT_TEST_TAGS=
+BUILD_TAGS=
+
ifneq ($(PRE_RELEASE_VERSION),)
REPOTOOLS_CMD_CALCULATE_RELEASE_ADDITIONAL_ARGS += -preview=${PRE_RELEASE_VERSION}
endif
@@ -27,6 +30,58 @@ smithy-build:
smithy-clean:
cd codegen && ./gradlew clean
+GRADLE_RETRIES := 3
+GRADLE_SLEEP := 2
+
+# We're making a call to ./gradlew to trigger downloading Gradle and
+# starting the daemon. Any call works, so using `./gradlew help`
+ensure-gradle-up:
+ @cd codegen && for i in $(shell seq 1 $(GRADLE_RETRIES)); do \
+ echo "Checking if Gradle daemon is up, attempt $$i..."; \
+ if ./gradlew help; then \
+ echo "Gradle daemon is up!"; \
+ exit 0; \
+ fi; \
+ echo "Failed to start Gradle, retrying in $(GRADLE_SLEEP) seconds..."; \
+ sleep $(GRADLE_SLEEP); \
+ done; \
+ echo "Failed to start Gradle after $(GRADLE_RETRIES) attempts."; \
+ exit 1
+
+##################
+# Linting/Verify #
+##################
+.PHONY: verify vet cover
+
+verify: vet
+
+vet:
+ go vet ${BUILD_TAGS} --all ./...
+
+cover:
+ go test ${BUILD_TAGS} -coverprofile c.out ./...
+ @cover=`go tool cover -func c.out | grep '^total:' | awk '{ print $$3+0 }'`; \
+ echo "total (statements): $$cover%";
+
+################
+# Unit Testing #
+################
+.PHONY: unit unit-race unit-test unit-race-test
+
+unit: verify
+ go test ${BUILD_TAGS} ${RUN_NONE} ./... && \
+ go test -timeout=1m ${UNIT_TEST_TAGS} ./...
+
+unit-race: verify
+ go test ${BUILD_TAGS} ${RUN_NONE} ./... && \
+ go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./...
+
+unit-test: verify
+ go test -timeout=1m ${UNIT_TEST_TAGS} ./...
+
+unit-race-test: verify
+ go test -timeout=1m ${UNIT_TEST_TAGS} -race -cpu=4 ./...
+
#####################
# Release Process #
#####################
@@ -59,5 +114,12 @@ module-version:
##############
.PHONY: install-changelog
+external-changelog:
+ mkdir -p .changelog
+ cp changelog-template.json .changelog/00000000-0000-0000-0000-000000000000.json
+ @echo "Generate a new UUID and update the file at .changelog/00000000-0000-0000-0000-000000000000.json"
+ @echo "Make sure to rename the file with your new id, like .changelog/12345678-1234-1234-1234-123456789012.json"
+ @echo "See CONTRIBUTING.md 'Changelog Documents' and an example at https://github.com/aws/smithy-go/pull/543/files"
+
install-changelog:
go install ${REPOTOOLS_MODULE}/cmd/changelog@${REPOTOOLS_VERSION}
diff --git a/vendor/github.com/aws/smithy-go/README.md b/vendor/github.com/aws/smithy-go/README.md
index 789b37889..77a74ae0c 100644
--- a/vendor/github.com/aws/smithy-go/README.md
+++ b/vendor/github.com/aws/smithy-go/README.md
@@ -1,11 +1,99 @@
-## Smithy Go
+# Smithy Go
[](https://github.com/aws/smithy-go/actions/workflows/go.yml)[](https://github.com/aws/smithy-go/actions/workflows/codegen.yml)
-Smithy code generators for Go.
+[Smithy](https://smithy.io/) code generators for Go and the accompanying smithy-go runtime.
+
+The smithy-go runtime requires a minimum version of Go 1.22.
**WARNING: All interfaces are subject to change.**
+## :no_entry_sign: DO NOT use the code generators in this repository
+
+**The code generators in this repository do not generate working clients at
+this time.**
+
+In order to generate a usable smithy client you must provide a [protocol definition](https://github.com/aws/smithy-go/blob/main/codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/integration/ProtocolGenerator.java),
+such as [AWS restJson1](https://smithy.io/2.0/aws/protocols/aws-restjson1-protocol.html),
+in order to generate transport mechanisms and serialization/deserialization
+code ("serde") accordingly.
+
+The code generator does not currently support any protocols out of the box.
+Support for all [AWS protocols](https://smithy.io/2.0/aws/protocols/index.html)
+exists in [aws-sdk-go-v2](https://github.com/aws/aws-sdk-go-v2). We are
+tracking the movement of those out of the SDK into smithy-go in
+[#458](https://github.com/aws/smithy-go/issues/458), but there's currently no
+timeline for doing so.
+
+## Plugins
+
+This repository implements the following Smithy build plugins:
+
+| ID | GAV prefix | Description |
+|----|------------|-------------|
+| `go-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go client code generation for Smithy models. |
+| `go-server-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go server code generation for Smithy models. |
+| `go-shape-codegen` | `software.amazon.smithy.go:smithy-go-codegen` | Implements Go shape code generation (types only) for Smithy models. |
+
+**NOTE: Build plugins are not currently published to mavenCentral. You must publish to mavenLocal to make the build plugins visible to the Smithy CLI. The artifact version is currently fixed at 0.1.0.**
+
+## `go-codegen`
+
+### Configuration
+
+[`GoSettings`](codegen/smithy-go-codegen/src/main/java/software/amazon/smithy/go/codegen/GoSettings.java)
+contains all of the settings enabled from `smithy-build.json` and helper
+methods and types. The up-to-date list of top-level properties enabled for
+`go-client-codegen` can be found in `GoSettings::from()`.
+
+| Setting | Type | Required | Description |
+|-----------------|---------|----------|-----------------------------------------------------------------------------------------------------------------------------|
+| `service` | string | yes | The Shape ID of the service for which to generate the client. |
+| `module` | string | yes | Name of the module in `generated.json` (and `go.mod` if `generateGoMod` is enabled) and `doc.go`. |
+| `generateGoMod` | boolean | | Whether to generate a default `go.mod` file. The default value is `false`. |
+| `goDirective` | string | | [Go directive](https://go.dev/ref/mod#go-mod-file-go) of the module. The default value is the minimum supported Go version. |
+
+### Supported protocols
+
+| Protocol | Notes |
+|----------|-------|
+| [`smithy.protocols#rpcv2Cbor`](https://smithy.io/2.0/additional-specs/protocols/smithy-rpc-v2.html) | Event streaming not yet implemented. |
+
+### Example
+
+This example applies the `go-codegen` build plugin to the Smithy quickstart
+example created from `smithy init`:
+
+```json
+{
+ "version": "1.0",
+ "sources": [
+ "models"
+ ],
+ "maven": {
+ "dependencies": [
+ "software.amazon.smithy.go:smithy-go-codegen:0.1.0"
+ ]
+ },
+ "plugins": {
+ "go-codegen": {
+ "service": "example.weather#Weather",
+ "module": "github.com/example/weather",
+ "generateGoMod": true,
+ "goDirective": "1.22"
+ }
+ }
+}
+```
+
+## `go-server-codegen`
+
+This plugin is a work-in-progress and is currently undocumented.
+
+## `go-shape-codegen`
+
+This plugin is a work-in-progress and is currently undocumented.
+
## License
This project is licensed under the Apache-2.0 License.
diff --git a/vendor/github.com/aws/smithy-go/auth/auth.go b/vendor/github.com/aws/smithy-go/auth/auth.go
new file mode 100644
index 000000000..5bdb70c9a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/auth.go
@@ -0,0 +1,3 @@
+// Package auth defines protocol-agnostic authentication types for smithy
+// clients.
+package auth
diff --git a/vendor/github.com/aws/smithy-go/auth/identity.go b/vendor/github.com/aws/smithy-go/auth/identity.go
new file mode 100644
index 000000000..ba8cf70d4
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/identity.go
@@ -0,0 +1,47 @@
+package auth
+
+import (
+ "context"
+ "time"
+
+ "github.com/aws/smithy-go"
+)
+
+// Identity contains information that identifies who the user making the
+// request is.
+type Identity interface {
+ Expiration() time.Time
+}
+
+// IdentityResolver defines the interface through which an Identity is
+// retrieved.
+type IdentityResolver interface {
+ GetIdentity(context.Context, smithy.Properties) (Identity, error)
+}
+
+// IdentityResolverOptions defines the interface through which an entity can be
+// queried to retrieve an IdentityResolver for a given auth scheme.
+type IdentityResolverOptions interface {
+ GetIdentityResolver(schemeID string) IdentityResolver
+}
+
+// AnonymousIdentity is a sentinel to indicate no identity.
+type AnonymousIdentity struct{}
+
+var _ Identity = (*AnonymousIdentity)(nil)
+
+// Expiration returns the zero value for time, as anonymous identity never
+// expires.
+func (*AnonymousIdentity) Expiration() time.Time {
+ return time.Time{}
+}
+
+// AnonymousIdentityResolver returns AnonymousIdentity.
+type AnonymousIdentityResolver struct{}
+
+var _ IdentityResolver = (*AnonymousIdentityResolver)(nil)
+
+// GetIdentity returns AnonymousIdentity.
+func (*AnonymousIdentityResolver) GetIdentity(_ context.Context, _ smithy.Properties) (Identity, error) {
+ return &AnonymousIdentity{}, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/auth/option.go b/vendor/github.com/aws/smithy-go/auth/option.go
new file mode 100644
index 000000000..d5dabff04
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/option.go
@@ -0,0 +1,25 @@
+package auth
+
+import "github.com/aws/smithy-go"
+
+type (
+ authOptionsKey struct{}
+)
+
+// Option represents a possible authentication method for an operation.
+type Option struct {
+ SchemeID string
+ IdentityProperties smithy.Properties
+ SignerProperties smithy.Properties
+}
+
+// GetAuthOptions gets auth Options from Properties.
+func GetAuthOptions(p *smithy.Properties) ([]*Option, bool) {
+ v, ok := p.Get(authOptionsKey{}).([]*Option)
+ return v, ok
+}
+
+// SetAuthOptions sets auth Options on Properties.
+func SetAuthOptions(p *smithy.Properties, options []*Option) {
+ p.Set(authOptionsKey{}, options)
+}
diff --git a/vendor/github.com/aws/smithy-go/auth/scheme_id.go b/vendor/github.com/aws/smithy-go/auth/scheme_id.go
new file mode 100644
index 000000000..fb6a57c64
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/auth/scheme_id.go
@@ -0,0 +1,20 @@
+package auth
+
+// Anonymous
+const (
+ SchemeIDAnonymous = "smithy.api#noAuth"
+)
+
+// HTTP auth schemes
+const (
+ SchemeIDHTTPBasic = "smithy.api#httpBasicAuth"
+ SchemeIDHTTPDigest = "smithy.api#httpDigestAuth"
+ SchemeIDHTTPBearer = "smithy.api#httpBearerAuth"
+ SchemeIDHTTPAPIKey = "smithy.api#httpApiKeyAuth"
+)
+
+// AWS auth schemes
+const (
+ SchemeIDSigV4 = "aws.auth#sigv4"
+ SchemeIDSigV4A = "aws.auth#sigv4a"
+)
diff --git a/vendor/github.com/aws/smithy-go/changelog-template.json b/vendor/github.com/aws/smithy-go/changelog-template.json
new file mode 100644
index 000000000..d36e2b3e1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/changelog-template.json
@@ -0,0 +1,9 @@
+{
+ "id": "00000000-0000-0000-0000-000000000000",
+ "type": "feature|bugfix|dependency",
+ "description": "Description of your changes",
+ "collapse": false,
+ "modules": [
+ "."
+ ]
+}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
index 96abd073a..543e7cf03 100644
--- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/encode.go
@@ -26,10 +26,17 @@ type Encoder struct {
header http.Header
}
-// NewEncoder creates a new encoder from the passed in request. All query and
+// NewEncoder creates a new encoder from the passed in request. It assumes that
+// the raw path contains no valuable information at this point, so it passes
+// path in as both the path and the raw path for subsequent transformations.
+func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+ return NewEncoderWithRawPath(path, path, query, headers)
+}
+
+// NewEncoderWithRawPath creates a new encoder from the passed in request. All query and
// header values will be added on top of the request's existing values. Overwriting
// duplicate values.
-func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
+func NewEncoderWithRawPath(path, rawPath, query string, headers http.Header) (*Encoder, error) {
parseQuery, err := url.ParseQuery(query)
if err != nil {
return nil, fmt.Errorf("failed to parse query string: %w", err)
@@ -37,7 +44,7 @@ func NewEncoder(path, query string, headers http.Header) (*Encoder, error) {
e := &Encoder{
path: []byte(path),
- rawPath: []byte(path),
+ rawPath: []byte(rawPath),
query: parseQuery,
header: headers.Clone(),
}
diff --git a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
index e78926c9a..9ae308540 100644
--- a/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
+++ b/vendor/github.com/aws/smithy-go/encoding/httpbinding/path_replace.go
@@ -22,33 +22,33 @@ func bufCap(b []byte, n int) []byte {
// replacePathElement replaces a single element in the path []byte.
// Escape is used to control whether the value will be escaped using Amazon path escape style.
func replacePathElement(path, fieldBuf []byte, key, val string, escape bool) ([]byte, []byte, error) {
- fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] }
+ // search for "{}". If not found, search for the greedy version "{+}". If none are found, return error
+ fieldBuf = bufCap(fieldBuf, len(key)+2) // { }
fieldBuf = append(fieldBuf, uriTokenStart)
fieldBuf = append(fieldBuf, key...)
+ fieldBuf = append(fieldBuf, uriTokenStop)
start := bytes.Index(path, fieldBuf)
- end := start + len(fieldBuf)
- if start < 0 || len(path[end:]) == 0 {
- // TODO what to do about error?
- return path, fieldBuf, fmt.Errorf("invalid path index, start=%d,end=%d. %s", start, end, path)
- }
-
encodeSep := true
- if path[end] == uriTokenSkip {
- // '+' token means do not escape slashes
+ if start < 0 {
+ fieldBuf = bufCap(fieldBuf, len(key)+3) // { [+] }
+ fieldBuf = append(fieldBuf, uriTokenStart)
+ fieldBuf = append(fieldBuf, key...)
+ fieldBuf = append(fieldBuf, uriTokenSkip)
+ fieldBuf = append(fieldBuf, uriTokenStop)
+
+ start = bytes.Index(path, fieldBuf)
+ if start < 0 {
+ return path, fieldBuf, fmt.Errorf("invalid path index, start=%d. %s", start, path)
+ }
encodeSep = false
- end++
}
+ end := start + len(fieldBuf)
if escape {
val = EscapePath(val, encodeSep)
}
- if path[end] != uriTokenStop {
- return path, fieldBuf, fmt.Errorf("invalid path element, does not contain token stop, %s", path)
- }
- end++
-
fieldBuf = bufCap(fieldBuf, len(val))
fieldBuf = append(fieldBuf, val...)
diff --git a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
index d6e1e41e1..f9200093e 100644
--- a/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
+++ b/vendor/github.com/aws/smithy-go/encoding/xml/doc.go
@@ -2,7 +2,7 @@
Package xml holds the XMl encoder utility. This utility is written in accordance to our design to delegate to
shape serializer function in which a xml.Value will be passed around.
-Resources followed: https://awslabs.github.io/smithy/1.0/spec/core/xml-traits.html#
+Resources followed: https://smithy.io/2.0/spec/protocol-traits.html#xml-bindings
Member Element
diff --git a/vendor/github.com/aws/smithy-go/endpoints/endpoint.go b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go
new file mode 100644
index 000000000..f778272be
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/endpoint.go
@@ -0,0 +1,23 @@
+package transport
+
+import (
+ "net/http"
+ "net/url"
+
+ "github.com/aws/smithy-go"
+)
+
+// Endpoint is the endpoint object returned by Endpoint resolution V2
+type Endpoint struct {
+ // The complete URL minimally specifying the scheme and host.
+ // May optionally specify the port and base path component.
+ URI url.URL
+
+ // An optional set of headers to be sent using transport layer headers.
+ Headers http.Header
+
+ // A grab-bag property map of endpoint attributes. The
+ // values present here are subject to change, or to being added/removed at any
+ // time.
+ Properties smithy.Properties
+}
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go
new file mode 100644
index 000000000..e24e190dc
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/doc.go
@@ -0,0 +1,4 @@
+// Package rulesfn provides endpoint rule functions for evaluating endpoint
+// resolution rules.
+
+package rulesfn
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go
new file mode 100644
index 000000000..5cf4a7b02
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/strings.go
@@ -0,0 +1,25 @@
+package rulesfn
+
+// SubString returns the substring of the input provided. If the start or stop
+// indexes are not valid for the input, or the input contains non-ASCII
+// characters, nil will be returned.
+func SubString(input string, start, stop int, reverse bool) *string {
+ if start < 0 || stop < 1 || start >= stop || len(input) < stop {
+ return nil
+ }
+
+ for _, r := range input {
+ if r > 127 {
+ return nil
+ }
+ }
+
+ if !reverse {
+ v := input[start:stop]
+ return &v
+ }
+
+ rStart := len(input) - stop
+ rStop := len(input) - start
+ return SubString(input, rStart, rStop, false)
+}
diff --git a/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go
new file mode 100644
index 000000000..0c1154127
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/endpoints/private/rulesfn/uri.go
@@ -0,0 +1,130 @@
+package rulesfn
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+)
+
+// IsValidHostLabel returns if the input is a single valid [RFC 1123] host
+// label. If allowSubDomains is true, will allow validation to include nested
+// host labels. Returns false if the input (or any of its labels) is not a
+// valid host label.
+//
+// [RFC 1123]: https://www.ietf.org/rfc/rfc1123.txt
+func IsValidHostLabel(input string, allowSubDomains bool) bool {
+ var labels []string
+ if allowSubDomains {
+ labels = strings.Split(input, ".")
+ } else {
+ labels = []string{input}
+ }
+
+ for _, label := range labels {
+ if !smithyhttp.ValidHostLabel(label) {
+ return false
+ }
+ }
+
+ return true
+}
+
+// ParseURL returns a [URL] if the provided string could be parsed. Returns nil
+// if the string could not be parsed, contains a query string, or uses a
+// scheme other than http or https.
+//
+// If the input URL string contains an IP6 address with a zone index. The
+// returned [builtin.URL.Authority] value will contain the percent escaped (%)
+// zone index separator.
+func ParseURL(input string) *URL {
+ u, err := url.Parse(input)
+ if err != nil {
+ return nil
+ }
+
+ if u.RawQuery != "" {
+ return nil
+ }
+
+ if u.Scheme != "http" && u.Scheme != "https" {
+ return nil
+ }
+
+ normalizedPath := u.Path
+ if !strings.HasPrefix(normalizedPath, "/") {
+ normalizedPath = "/" + normalizedPath
+ }
+ if !strings.HasSuffix(normalizedPath, "/") {
+ normalizedPath = normalizedPath + "/"
+ }
+
+ // IP6 hosts may have zone indexes that need to be escaped to be valid in a
+ // URI. The Go URL parser will unescape the `%25` into `%`. This needs to
+ // be reverted since the returned URL will be used in string builders.
+ authority := strings.ReplaceAll(u.Host, "%", "%25")
+
+ return &URL{
+ Scheme: u.Scheme,
+ Authority: authority,
+ Path: u.Path,
+ NormalizedPath: normalizedPath,
+ IsIp: net.ParseIP(hostnameWithoutZone(u)) != nil,
+ }
+}
+
+// URL provides the structure describing the parts of a parsed URL returned by
+// [ParseURL].
+type URL struct {
+ Scheme string // https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+ Authority string // https://www.rfc-editor.org/rfc/rfc3986#section-3.2
+ Path string // https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+ NormalizedPath string // https://www.rfc-editor.org/rfc/rfc3986#section-6.2.3
+ IsIp bool
+}
+
+// URIEncode returns an percent-encoded [RFC3986 section 2.1] version of the
+// input string.
+//
+// [RFC3986 section 2.1]: https://www.rfc-editor.org/rfc/rfc3986#section-2.1
+func URIEncode(input string) string {
+ var output strings.Builder
+ for _, c := range []byte(input) {
+ if validPercentEncodedChar(c) {
+ output.WriteByte(c)
+ continue
+ }
+
+ fmt.Fprintf(&output, "%%%X", c)
+ }
+
+ return output.String()
+}
+
+func validPercentEncodedChar(c byte) bool {
+ return (c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ c == '-' || c == '_' || c == '.' || c == '~'
+}
+
+// hostname implements u.Hostname() but strips the ipv6 zone ID (if present)
+// such that net.ParseIP can still recognize IPv6 addresses with zone IDs.
+//
+// FUTURE(10/2023): netip.ParseAddr handles this natively but we can't take
+// that package as a dependency yet due to our min go version (1.15, netip
+// starts in 1.18). When we align with go runtime deprecation policy in
+// 10/2023, we can remove this.
+func hostnameWithoutZone(u *url.URL) string {
+ full := u.Hostname()
+
+ // this more or less mimics the internals of net/ (see unexported
+ // splitHostZone in that source) but throws the zone away because we don't
+ // need it
+ if i := strings.LastIndex(full, "%"); i > -1 {
+ return full[:i]
+ }
+ return full
+}
diff --git a/vendor/github.com/aws/smithy-go/go_module_metadata.go b/vendor/github.com/aws/smithy-go/go_module_metadata.go
index 08db245f8..945db0af3 100644
--- a/vendor/github.com/aws/smithy-go/go_module_metadata.go
+++ b/vendor/github.com/aws/smithy-go/go_module_metadata.go
@@ -3,4 +3,4 @@
package smithy
// goModuleVersion is the tagged release for this module
-const goModuleVersion = "1.13.3"
+const goModuleVersion = "1.23.0"
diff --git a/vendor/github.com/aws/smithy-go/metrics/metrics.go b/vendor/github.com/aws/smithy-go/metrics/metrics.go
new file mode 100644
index 000000000..c009d9f27
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/metrics/metrics.go
@@ -0,0 +1,136 @@
+// Package metrics defines the metrics APIs used by Smithy clients.
+package metrics
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go"
+)
+
+// MeterProvider is the entry point for creating a Meter.
+type MeterProvider interface {
+ Meter(scope string, opts ...MeterOption) Meter
+}
+
+// MeterOption applies configuration to a Meter.
+type MeterOption func(o *MeterOptions)
+
+// MeterOptions represents configuration for a Meter.
+type MeterOptions struct {
+ Properties smithy.Properties
+}
+
+// Meter is the entry point for creation of measurement instruments.
+type Meter interface {
+ // integer/synchronous
+ Int64Counter(name string, opts ...InstrumentOption) (Int64Counter, error)
+ Int64UpDownCounter(name string, opts ...InstrumentOption) (Int64UpDownCounter, error)
+ Int64Gauge(name string, opts ...InstrumentOption) (Int64Gauge, error)
+ Int64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error)
+
+ // integer/asynchronous
+ Int64AsyncCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+ Int64AsyncUpDownCounter(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+ Int64AsyncGauge(name string, callback Int64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+
+ // floating-point/synchronous
+ Float64Counter(name string, opts ...InstrumentOption) (Float64Counter, error)
+ Float64UpDownCounter(name string, opts ...InstrumentOption) (Float64UpDownCounter, error)
+ Float64Gauge(name string, opts ...InstrumentOption) (Float64Gauge, error)
+ Float64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error)
+
+ // floating-point/asynchronous
+ Float64AsyncCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+ Float64AsyncUpDownCounter(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+ Float64AsyncGauge(name string, callback Float64Callback, opts ...InstrumentOption) (AsyncInstrument, error)
+}
+
+// InstrumentOption applies configuration to an instrument.
+type InstrumentOption func(o *InstrumentOptions)
+
+// InstrumentOptions represents configuration for an instrument.
+type InstrumentOptions struct {
+ UnitLabel string
+ Description string
+}
+
+// Int64Counter measures a monotonically increasing int64 value.
+type Int64Counter interface {
+ Add(context.Context, int64, ...RecordMetricOption)
+}
+
+// Int64UpDownCounter measures a fluctuating int64 value.
+type Int64UpDownCounter interface {
+ Add(context.Context, int64, ...RecordMetricOption)
+}
+
+// Int64Gauge samples a discrete int64 value.
+type Int64Gauge interface {
+ Sample(context.Context, int64, ...RecordMetricOption)
+}
+
+// Int64Histogram records multiple data points for an int64 value.
+type Int64Histogram interface {
+ Record(context.Context, int64, ...RecordMetricOption)
+}
+
+// Float64Counter measures a monotonically increasing float64 value.
+type Float64Counter interface {
+ Add(context.Context, float64, ...RecordMetricOption)
+}
+
+// Float64UpDownCounter measures a fluctuating float64 value.
+type Float64UpDownCounter interface {
+ Add(context.Context, float64, ...RecordMetricOption)
+}
+
+// Float64Gauge samples a discrete float64 value.
+type Float64Gauge interface {
+ Sample(context.Context, float64, ...RecordMetricOption)
+}
+
+// Float64Histogram records multiple data points for an float64 value.
+type Float64Histogram interface {
+ Record(context.Context, float64, ...RecordMetricOption)
+}
+
+// AsyncInstrument is the universal handle returned for creation of all async
+// instruments.
+//
+// Callers use the Stop() API to unregister the callback passed at instrument
+// creation.
+type AsyncInstrument interface {
+ Stop()
+}
+
+// Int64Callback describes a function invoked when an async int64 instrument is
+// read.
+type Int64Callback func(context.Context, Int64Observer)
+
+// Int64Observer is the interface passed to async int64 instruments.
+//
+// Callers use the Observe() API of this interface to report metrics to the
+// underlying collector.
+type Int64Observer interface {
+ Observe(context.Context, int64, ...RecordMetricOption)
+}
+
+// Float64Callback describes a function invoked when an async float64
+// instrument is read.
+type Float64Callback func(context.Context, Float64Observer)
+
+// Float64Observer is the interface passed to async float64 instruments.
+//
+// Callers use the Observe() API of this interface to report metrics to the
+// underlying collector.
+type Float64Observer interface {
+ Observe(context.Context, float64, ...RecordMetricOption)
+}
+
+// RecordMetricOption applies configuration to a recorded metric.
+type RecordMetricOption func(o *RecordMetricOptions)
+
+// RecordMetricOptions represents configuration for a recorded metric.
+type RecordMetricOptions struct {
+ Properties smithy.Properties
+}
diff --git a/vendor/github.com/aws/smithy-go/metrics/nop.go b/vendor/github.com/aws/smithy-go/metrics/nop.go
new file mode 100644
index 000000000..fb374e1fb
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/metrics/nop.go
@@ -0,0 +1,67 @@
+package metrics
+
+import "context"
+
+// NopMeterProvider is a no-op metrics implementation.
+type NopMeterProvider struct{}
+
+var _ MeterProvider = (*NopMeterProvider)(nil)
+
+// Meter returns a meter which creates no-op instruments.
+func (NopMeterProvider) Meter(string, ...MeterOption) Meter {
+ return nopMeter{}
+}
+
+type nopMeter struct{}
+
+var _ Meter = (*nopMeter)(nil)
+
+func (nopMeter) Int64Counter(string, ...InstrumentOption) (Int64Counter, error) {
+ return nopInstrument[int64]{}, nil
+}
+func (nopMeter) Int64UpDownCounter(string, ...InstrumentOption) (Int64UpDownCounter, error) {
+ return nopInstrument[int64]{}, nil
+}
+func (nopMeter) Int64Gauge(string, ...InstrumentOption) (Int64Gauge, error) {
+ return nopInstrument[int64]{}, nil
+}
+func (nopMeter) Int64Histogram(string, ...InstrumentOption) (Int64Histogram, error) {
+ return nopInstrument[int64]{}, nil
+}
+func (nopMeter) Int64AsyncCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrument[int64]{}, nil
+}
+func (nopMeter) Int64AsyncUpDownCounter(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrument[int64]{}, nil
+}
+func (nopMeter) Int64AsyncGauge(string, Int64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrument[int64]{}, nil
+}
+func (nopMeter) Float64Counter(string, ...InstrumentOption) (Float64Counter, error) {
+ return nopInstrument[float64]{}, nil
+}
+func (nopMeter) Float64UpDownCounter(string, ...InstrumentOption) (Float64UpDownCounter, error) {
+ return nopInstrument[float64]{}, nil
+}
+func (nopMeter) Float64Gauge(string, ...InstrumentOption) (Float64Gauge, error) {
+ return nopInstrument[float64]{}, nil
+}
+func (nopMeter) Float64Histogram(string, ...InstrumentOption) (Float64Histogram, error) {
+ return nopInstrument[float64]{}, nil
+}
+func (nopMeter) Float64AsyncCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrument[float64]{}, nil
+}
+func (nopMeter) Float64AsyncUpDownCounter(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrument[float64]{}, nil
+}
+func (nopMeter) Float64AsyncGauge(string, Float64Callback, ...InstrumentOption) (AsyncInstrument, error) {
+ return nopInstrument[float64]{}, nil
+}
+
+type nopInstrument[N any] struct{}
+
+func (nopInstrument[N]) Add(context.Context, N, ...RecordMetricOption) {}
+func (nopInstrument[N]) Sample(context.Context, N, ...RecordMetricOption) {}
+func (nopInstrument[N]) Record(context.Context, N, ...RecordMetricOption) {}
+func (nopInstrument[_]) Stop() {}
diff --git a/vendor/github.com/aws/smithy-go/middleware/context.go b/vendor/github.com/aws/smithy-go/middleware/context.go
new file mode 100644
index 000000000..f51aa4f04
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/middleware/context.go
@@ -0,0 +1,41 @@
+package middleware
+
+import "context"
+
+type (
+ serviceIDKey struct{}
+ operationNameKey struct{}
+)
+
+// WithServiceID adds a service ID to the context, scoped to middleware stack
+// values.
+//
+// This API is called in the client runtime when bootstrapping an operation and
+// should not typically be used directly.
+func WithServiceID(parent context.Context, id string) context.Context {
+ return WithStackValue(parent, serviceIDKey{}, id)
+}
+
+// GetServiceID retrieves the service ID from the context. This is typically
+// the service shape's name from its Smithy model. Service clients for specific
+// systems (e.g. AWS SDK) may use an alternate designated value.
+func GetServiceID(ctx context.Context) string {
+ id, _ := GetStackValue(ctx, serviceIDKey{}).(string)
+ return id
+}
+
+// WithOperationName adds the operation name to the context, scoped to
+// middleware stack values.
+//
+// This API is called in the client runtime when bootstrapping an operation and
+// should not typically be used directly.
+func WithOperationName(parent context.Context, id string) context.Context {
+ return WithStackValue(parent, operationNameKey{}, id)
+}
+
+// GetOperationName retrieves the operation name from the context. This is
+// typically the operation shape's name from its Smithy model.
+func GetOperationName(ctx context.Context) string {
+ name, _ := GetStackValue(ctx, operationNameKey{}).(string)
+ return name
+}
diff --git a/vendor/github.com/aws/smithy-go/modman.toml b/vendor/github.com/aws/smithy-go/modman.toml
index 20295cdd2..aac582fa2 100644
--- a/vendor/github.com/aws/smithy-go/modman.toml
+++ b/vendor/github.com/aws/smithy-go/modman.toml
@@ -1,6 +1,4 @@
[dependencies]
- "github.com/google/go-cmp" = "v0.5.8"
- "github.com/jmespath/go-jmespath" = "v0.4.0"
[modules]
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go
new file mode 100644
index 000000000..004d78f21
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/gzip.go
@@ -0,0 +1,30 @@
+package requestcompression
+
+import (
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+)
+
+func gzipCompress(input io.Reader) ([]byte, error) {
+ var b bytes.Buffer
+ w, err := gzip.NewWriterLevel(&b, gzip.DefaultCompression)
+ if err != nil {
+ return nil, fmt.Errorf("failed to create gzip writer, %v", err)
+ }
+
+ inBytes, err := io.ReadAll(input)
+ if err != nil {
+ return nil, fmt.Errorf("failed read payload to compress, %v", err)
+ }
+
+ if _, err = w.Write(inBytes); err != nil {
+ return nil, fmt.Errorf("failed to write payload to be compressed, %v", err)
+ }
+ if err = w.Close(); err != nil {
+ return nil, fmt.Errorf("failed to flush payload being compressed, %v", err)
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go
new file mode 100644
index 000000000..06c16afc1
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/middleware_capture_request_compression.go
@@ -0,0 +1,52 @@
+package requestcompression
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ smithyhttp "github.com/aws/smithy-go/transport/http"
+ "io"
+ "net/http"
+)
+
+const captureUncompressedRequestID = "CaptureUncompressedRequest"
+
+// AddCaptureUncompressedRequestMiddleware captures the HTTP request before compress encoding, for inspection.
+func AddCaptureUncompressedRequestMiddleware(stack *middleware.Stack, buf *bytes.Buffer) error {
+ return stack.Serialize.Insert(&captureUncompressedRequestMiddleware{
+ buf: buf,
+ }, "RequestCompression", middleware.Before)
+}
+
+type captureUncompressedRequestMiddleware struct {
+ req *http.Request
+ buf *bytes.Buffer
+ bytes []byte
+}
+
+// ID returns id of the captureUncompressedRequestMiddleware
+func (*captureUncompressedRequestMiddleware) ID() string {
+ return captureUncompressedRequestID
+}
+
+// HandleSerialize captures request payload before it is compressed by request compression middleware
+func (m *captureUncompressedRequestMiddleware) HandleSerialize(ctx context.Context, input middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ output middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ request, ok := input.Request.(*smithyhttp.Request)
+ if !ok {
+ return output, metadata, fmt.Errorf("error when retrieving http request")
+ }
+
+ _, err = io.Copy(m.buf, request.GetStream())
+ if err != nil {
+ return output, metadata, fmt.Errorf("error when copying http request stream: %q", err)
+ }
+ if err = request.RewindStream(); err != nil {
+ return output, metadata, fmt.Errorf("error when rewinding request stream: %q", err)
+ }
+
+ return next.HandleSerialize(ctx, input)
+}
diff --git a/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go
new file mode 100644
index 000000000..7c4147603
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/private/requestcompression/request_compression.go
@@ -0,0 +1,103 @@
+// Package requestcompression implements runtime support for smithy-modeled
+// request compression.
+//
+// This package is designated as private and is intended for use only by the
+// smithy client runtime. The exported API therein is not considered stable and
+// is subject to breaking changes without notice.
+package requestcompression
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/transport/http"
+ "io"
+)
+
+const MaxRequestMinCompressSizeBytes = 10485760
+
+// Enumeration values for supported compress Algorithms.
+const (
+ GZIP = "gzip"
+)
+
+type compressFunc func(io.Reader) ([]byte, error)
+
+var allowedAlgorithms = map[string]compressFunc{
+ GZIP: gzipCompress,
+}
+
+// AddRequestCompression adds the requestCompression middleware to the operation stack.
+func AddRequestCompression(stack *middleware.Stack, disabled bool, minBytes int64, algorithms []string) error {
+ return stack.Serialize.Add(&requestCompression{
+ disableRequestCompression: disabled,
+ requestMinCompressSizeBytes: minBytes,
+ compressAlgorithms: algorithms,
+ }, middleware.After)
+}
+
+type requestCompression struct {
+ disableRequestCompression bool
+ requestMinCompressSizeBytes int64
+ compressAlgorithms []string
+}
+
+// ID returns the ID of the middleware
+func (m requestCompression) ID() string {
+ return "RequestCompression"
+}
+
+// HandleSerialize gzip compress the request's stream/body if enabled by config fields
+func (m requestCompression) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, metadata middleware.Metadata, err error,
+) {
+ if m.disableRequestCompression {
+ return next.HandleSerialize(ctx, in)
+ }
+ // still need to check requestMinCompressSizeBytes in case it is out of range after service client config
+ if m.requestMinCompressSizeBytes < 0 || m.requestMinCompressSizeBytes > MaxRequestMinCompressSizeBytes {
+ return out, metadata, fmt.Errorf("invalid range for min request compression size bytes %d, must be within 0 and 10485760 inclusively", m.requestMinCompressSizeBytes)
+ }
+
+ req, ok := in.Request.(*http.Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown request type %T", req)
+ }
+
+ for _, algorithm := range m.compressAlgorithms {
+ compressFunc := allowedAlgorithms[algorithm]
+ if compressFunc != nil {
+ if stream := req.GetStream(); stream != nil {
+ size, found, err := req.StreamLength()
+ if err != nil {
+ return out, metadata, fmt.Errorf("error while finding request stream length, %v", err)
+ } else if !found || size < m.requestMinCompressSizeBytes {
+ return next.HandleSerialize(ctx, in)
+ }
+
+ compressedBytes, err := compressFunc(stream)
+ if err != nil {
+ return out, metadata, fmt.Errorf("failed to compress request stream, %v", err)
+ }
+
+ var newReq *http.Request
+ if newReq, err = req.SetStream(bytes.NewReader(compressedBytes)); err != nil {
+ return out, metadata, fmt.Errorf("failed to set request stream, %v", err)
+ }
+ *req = *newReq
+
+ if val := req.Header.Get("Content-Encoding"); val != "" {
+ req.Header.Set("Content-Encoding", fmt.Sprintf("%s, %s", val, algorithm))
+ } else {
+ req.Header.Set("Content-Encoding", algorithm)
+ }
+ }
+ break
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
diff --git a/vendor/github.com/aws/smithy-go/properties.go b/vendor/github.com/aws/smithy-go/properties.go
new file mode 100644
index 000000000..68df4c4e0
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/properties.go
@@ -0,0 +1,69 @@
+package smithy
+
+import "maps"
+
+// PropertiesReader provides an interface for reading metadata from the
+// underlying metadata container.
+type PropertiesReader interface {
+ Get(key any) any
+}
+
+// Properties provides storing and reading metadata values. Keys may be any
+// comparable value type. Get and Set will panic if a key is not comparable.
+//
+// The zero value for a Properties instance is ready for reads/writes without
+// any additional initialization.
+type Properties struct {
+ values map[any]any
+}
+
+// Get attempts to retrieve the value the key points to. Returns nil if the
+// key was not found.
+//
+// Panics if key type is not comparable.
+func (m *Properties) Get(key any) any {
+ m.lazyInit()
+ return m.values[key]
+}
+
+// Set stores the value pointed to by the key. If a value already exists at
+// that key it will be replaced with the new value.
+//
+// Panics if the key type is not comparable.
+func (m *Properties) Set(key, value any) {
+ m.lazyInit()
+ m.values[key] = value
+}
+
+// Has returns whether the key exists in the metadata.
+//
+// Panics if the key type is not comparable.
+func (m *Properties) Has(key any) bool {
+ m.lazyInit()
+ _, ok := m.values[key]
+ return ok
+}
+
+// SetAll accepts all of the given Properties into the receiver, overwriting
+// any existing keys in the case of conflicts.
+func (m *Properties) SetAll(other *Properties) {
+ if other.values == nil {
+ return
+ }
+
+ m.lazyInit()
+ for k, v := range other.values {
+ m.values[k] = v
+ }
+}
+
+// Values returns a shallow clone of the property set's values.
+func (m *Properties) Values() map[any]any {
+ return maps.Clone(m.values)
+}
+
+func (m *Properties) lazyInit() {
+ if m.values == nil {
+ m.values = map[any]any{}
+ }
+}
diff --git a/vendor/github.com/aws/smithy-go/tracing/context.go b/vendor/github.com/aws/smithy-go/tracing/context.go
new file mode 100644
index 000000000..a404ed9d3
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/tracing/context.go
@@ -0,0 +1,96 @@
+package tracing
+
+import "context"
+
+type (
+ operationTracerKey struct{}
+ spanLineageKey struct{}
+)
+
+// GetSpan returns the active trace Span on the context.
+//
+// The boolean in the return indicates whether a Span was actually in the
+// context, but a no-op implementation will be returned if not, so callers
+// can generally disregard the boolean unless they wish to explicitly confirm
+// presence/absence of a Span.
+func GetSpan(ctx context.Context) (Span, bool) {
+ lineage := getLineage(ctx)
+ if len(lineage) == 0 {
+ return nopSpan{}, false
+ }
+
+ return lineage[len(lineage)-1], true
+}
+
+// WithSpan sets the active trace Span on the context.
+func WithSpan(parent context.Context, span Span) context.Context {
+ lineage := getLineage(parent)
+ if len(lineage) == 0 {
+ return context.WithValue(parent, spanLineageKey{}, []Span{span})
+ }
+
+ lineage = append(lineage, span)
+ return context.WithValue(parent, spanLineageKey{}, lineage)
+}
+
+// PopSpan pops the current Span off the context, setting the active Span on
+// the returned Context back to its parent and returning the REMOVED one.
+//
+// PopSpan on a context with no active Span will return a no-op instance.
+//
+// This is mostly necessary for the runtime to manage base trace spans due to
+// the wrapped-function nature of the middleware stack. End-users of Smithy
+// clients SHOULD NOT generally be using this API.
+func PopSpan(parent context.Context) (context.Context, Span) {
+ lineage := getLineage(parent)
+ if len(lineage) == 0 {
+ return parent, nopSpan{}
+ }
+
+ span := lineage[len(lineage)-1]
+ lineage = lineage[:len(lineage)-1]
+ return context.WithValue(parent, spanLineageKey{}, lineage), span
+}
+
+func getLineage(ctx context.Context) []Span {
+ v := ctx.Value(spanLineageKey{})
+ if v == nil {
+ return nil
+ }
+
+ return v.([]Span)
+}
+
+// GetOperationTracer returns the embedded operation-scoped Tracer on a
+// Context.
+//
+// The boolean in the return indicates whether a Tracer was actually in the
+// context, but a no-op implementation will be returned if not, so callers
+// can generally disregard the boolean unless they wish to explicitly confirm
+// presence/absence of a Tracer.
+func GetOperationTracer(ctx context.Context) (Tracer, bool) {
+ v := ctx.Value(operationTracerKey{})
+ if v == nil {
+ return nopTracer{}, false
+ }
+
+ return v.(Tracer), true
+}
+
+// WithOperationTracer returns a child Context embedding the given Tracer.
+//
+// The runtime will use this to embed a scoped tracer for client operations;
+// Smithy/SDK client callers DO NOT need to do this explicitly.
+func WithOperationTracer(parent context.Context, tracer Tracer) context.Context {
+ return context.WithValue(parent, operationTracerKey{}, tracer)
+}
+
+// StartSpan is a convenience API for creating tracing Spans from a Context.
+//
+// StartSpan uses the operation-scoped Tracer, previously stored using
+// [WithOperationTracer], to start the Span. If a Tracer has not been embedded
+// the returned Span will be a no-op implementation.
+func StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
+ tracer, _ := GetOperationTracer(ctx)
+ return tracer.StartSpan(ctx, name, opts...)
+}
diff --git a/vendor/github.com/aws/smithy-go/tracing/nop.go b/vendor/github.com/aws/smithy-go/tracing/nop.go
new file mode 100644
index 000000000..573d28b1c
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/tracing/nop.go
@@ -0,0 +1,32 @@
+package tracing
+
+import "context"
+
+// NopTracerProvider is a no-op tracing implementation.
+type NopTracerProvider struct{}
+
+var _ TracerProvider = (*NopTracerProvider)(nil)
+
+// Tracer returns a tracer which creates no-op spans.
+func (NopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+ return nopTracer{}
+}
+
+type nopTracer struct{}
+
+var _ Tracer = (*nopTracer)(nil)
+
+func (nopTracer) StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
+ return ctx, nopSpan{}
+}
+
+type nopSpan struct{}
+
+var _ Span = (*nopSpan)(nil)
+
+func (nopSpan) Name() string { return "" }
+func (nopSpan) Context() SpanContext { return SpanContext{} }
+func (nopSpan) AddEvent(string, ...EventOption) {}
+func (nopSpan) SetProperty(any, any) {}
+func (nopSpan) SetStatus(SpanStatus) {}
+func (nopSpan) End() {}
diff --git a/vendor/github.com/aws/smithy-go/tracing/tracing.go b/vendor/github.com/aws/smithy-go/tracing/tracing.go
new file mode 100644
index 000000000..089ed3932
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/tracing/tracing.go
@@ -0,0 +1,95 @@
+// Package tracing defines tracing APIs to be used by Smithy clients.
+package tracing
+
+import (
+ "context"
+
+ "github.com/aws/smithy-go"
+)
+
+// SpanStatus records the "success" state of an observed span.
+type SpanStatus int
+
+// Enumeration of SpanStatus.
+const (
+ SpanStatusUnset SpanStatus = iota
+ SpanStatusOK
+ SpanStatusError
+)
+
+// SpanKind indicates the nature of the work being performed.
+type SpanKind int
+
+// Enumeration of SpanKind.
+const (
+ SpanKindInternal SpanKind = iota
+ SpanKindClient
+ SpanKindServer
+ SpanKindProducer
+ SpanKindConsumer
+)
+
+// TracerProvider is the entry point for creating client traces.
+type TracerProvider interface {
+ Tracer(scope string, opts ...TracerOption) Tracer
+}
+
+// TracerOption applies configuration to a tracer.
+type TracerOption func(o *TracerOptions)
+
+// TracerOptions represent configuration for tracers.
+type TracerOptions struct {
+ Properties smithy.Properties
+}
+
+// Tracer is the entry point for creating observed client Spans.
+//
+// Spans created by tracers propagate by existing on the Context. Consumers of
+// the API can use [GetSpan] to pull the active Span from a Context.
+//
+// Creation of child Spans is implicit through Context persistence. If
+// StartSpan is called with a Context that holds a Span, the result will be a
+// child of that Span.
+type Tracer interface {
+ StartSpan(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span)
+}
+
+// SpanOption applies configuration to a span.
+type SpanOption func(o *SpanOptions)
+
+// SpanOptions represent configuration for span events.
+type SpanOptions struct {
+ Kind SpanKind
+ Properties smithy.Properties
+}
+
+// Span records a conceptually individual unit of work that takes place in a
+// Smithy client operation.
+type Span interface {
+ Name() string
+ Context() SpanContext
+ AddEvent(name string, opts ...EventOption)
+ SetStatus(status SpanStatus)
+ SetProperty(k, v any)
+ End()
+}
+
+// EventOption applies configuration to a span event.
+type EventOption func(o *EventOptions)
+
+// EventOptions represent configuration for span events.
+type EventOptions struct {
+ Properties smithy.Properties
+}
+
+// SpanContext uniquely identifies a Span.
+type SpanContext struct {
+ TraceID string
+ SpanID string
+ IsRemote bool
+}
+
+// IsValid is true when a span has nonzero trace and span IDs.
+func (ctx *SpanContext) IsValid() bool {
+ return len(ctx.TraceID) != 0 && len(ctx.SpanID) != 0
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth.go b/vendor/github.com/aws/smithy-go/transport/http/auth.go
new file mode 100644
index 000000000..58e1ab5ef
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/auth.go
@@ -0,0 +1,21 @@
+package http
+
+import (
+ "context"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+)
+
+// AuthScheme defines an HTTP authentication scheme.
+type AuthScheme interface {
+ SchemeID() string
+ IdentityResolver(auth.IdentityResolverOptions) auth.IdentityResolver
+ Signer() Signer
+}
+
+// Signer defines the interface through which HTTP requests are supplemented
+// with an Identity.
+type Signer interface {
+ SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go
new file mode 100644
index 000000000..d60cf2a60
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/auth_schemes.go
@@ -0,0 +1,45 @@
+package http
+
+import (
+ "context"
+
+ smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/auth"
+)
+
+// NewAnonymousScheme returns the anonymous HTTP auth scheme.
+func NewAnonymousScheme() AuthScheme {
+ return &authScheme{
+ schemeID: auth.SchemeIDAnonymous,
+ signer: &nopSigner{},
+ }
+}
+
+// authScheme is parameterized to generically implement the exported AuthScheme
+// interface
+type authScheme struct {
+ schemeID string
+ signer Signer
+}
+
+var _ AuthScheme = (*authScheme)(nil)
+
+func (s *authScheme) SchemeID() string {
+ return s.schemeID
+}
+
+func (s *authScheme) IdentityResolver(o auth.IdentityResolverOptions) auth.IdentityResolver {
+ return o.GetIdentityResolver(s.schemeID)
+}
+
+func (s *authScheme) Signer() Signer {
+ return s.signer
+}
+
+type nopSigner struct{}
+
+var _ Signer = (*nopSigner)(nil)
+
+func (*nopSigner) SignRequest(context.Context, *Request, auth.Identity, smithy.Properties) error {
+ return nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/client.go b/vendor/github.com/aws/smithy-go/transport/http/client.go
index e691c69bf..0fceae81d 100644
--- a/vendor/github.com/aws/smithy-go/transport/http/client.go
+++ b/vendor/github.com/aws/smithy-go/transport/http/client.go
@@ -6,7 +6,9 @@ import (
"net/http"
smithy "github.com/aws/smithy-go"
+ "github.com/aws/smithy-go/metrics"
"github.com/aws/smithy-go/middleware"
+ "github.com/aws/smithy-go/tracing"
)
// ClientDo provides the interface for custom HTTP client implementations.
@@ -27,13 +29,30 @@ func (fn ClientDoFunc) Do(r *http.Request) (*http.Response, error) {
// implementation is http.Client.
type ClientHandler struct {
client ClientDo
+
+ Meter metrics.Meter // For HTTP client metrics.
}
// NewClientHandler returns an initialized middleware handler for the client.
+//
+// Deprecated: Use [NewClientHandlerWithOptions].
func NewClientHandler(client ClientDo) ClientHandler {
- return ClientHandler{
+ return NewClientHandlerWithOptions(client)
+}
+
+// NewClientHandlerWithOptions returns an initialized middleware handler for the client
+// with applied options.
+func NewClientHandlerWithOptions(client ClientDo, opts ...func(*ClientHandler)) ClientHandler {
+ h := ClientHandler{
client: client,
}
+ for _, opt := range opts {
+ opt(&h)
+ }
+ if h.Meter == nil {
+ h.Meter = metrics.NopMeterProvider{}.Meter("")
+ }
+ return h
}
// Handle implements the middleware Handler interface, that will invoke the
@@ -42,6 +61,14 @@ func NewClientHandler(client ClientDo) ClientHandler {
func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
out interface{}, metadata middleware.Metadata, err error,
) {
+ ctx, span := tracing.StartSpan(ctx, "DoHTTPRequest")
+ defer span.End()
+
+ ctx, client, err := withMetrics(ctx, c.client, c.Meter)
+ if err != nil {
+ return nil, metadata, fmt.Errorf("instrument with HTTP metrics: %w", err)
+ }
+
req, ok := input.(*Request)
if !ok {
return nil, metadata, fmt.Errorf("expect Smithy http.Request value as input, got unsupported type %T", input)
@@ -52,7 +79,17 @@ func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
return nil, metadata, err
}
- resp, err := c.client.Do(builtRequest)
+ span.SetProperty("http.method", req.Method)
+ span.SetProperty("http.request_content_length", -1) // at least indicate unknown
+ length, ok, err := req.StreamLength()
+ if err != nil {
+ return nil, metadata, err
+ }
+ if ok {
+ span.SetProperty("http.request_content_length", length)
+ }
+
+ resp, err := client.Do(builtRequest)
if resp == nil {
// Ensure a http response value is always present to prevent unexpected
// panics.
@@ -79,6 +116,10 @@ func (c ClientHandler) Handle(ctx context.Context, input interface{}) (
_ = builtRequest.Body.Close()
}
+ span.SetProperty("net.protocol.version", fmt.Sprintf("%d.%d", resp.ProtoMajor, resp.ProtoMinor))
+ span.SetProperty("http.status_code", resp.StatusCode)
+ span.SetProperty("http.response_content_length", resp.ContentLength)
+
return &Response{Response: resp}, metadata, err
}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/host.go b/vendor/github.com/aws/smithy-go/transport/http/host.go
index 6b290fec0..db9801bea 100644
--- a/vendor/github.com/aws/smithy-go/transport/http/host.go
+++ b/vendor/github.com/aws/smithy-go/transport/http/host.go
@@ -69,7 +69,7 @@ func ValidPortNumber(port string) bool {
return true
}
-// ValidHostLabel returns whether the label is a valid RFC 3986 host abel.
+// ValidHostLabel returns whether the label is a valid RFC 3986 host label.
func ValidHostLabel(label string) bool {
if l := len(label); l == 0 || l > 63 {
return false
diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go
new file mode 100644
index 000000000..e21f2632a
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor.go
@@ -0,0 +1,321 @@
+package http
+
+import (
+ "context"
+)
+
+func icopy[T any](v []T) []T {
+ s := make([]T, len(v))
+ copy(s, v)
+ return s
+}
+
+// InterceptorContext is all the information available in different
+// interceptors.
+//
+// Not all information is available in each interceptor, see each interface
+// definition for more details.
+type InterceptorContext struct {
+ Input any
+ Request *Request
+
+ Output any
+ Response *Response
+}
+
+// InterceptorRegistry holds a list of operation interceptors.
+//
+// Interceptors allow callers to insert custom behavior at well-defined points
+// within a client's operation lifecycle.
+//
+// # Interceptor context
+//
+// All interceptors are invoked with a context object that contains input and
+// output containers for the operation. The individual fields that are
+// available will depend on what the interceptor is and, in certain
+// interceptors, how far the operation was able to progress. See the
+// documentation for each interface definition for more information about field
+// availability.
+//
+// Implementations MUST NOT directly mutate the values of the fields in the
+// interceptor context. They are free to mutate the existing values _pointed
+// to_ by those fields, however.
+//
+// # Returning errors
+//
+// All interceptors can return errors. If an interceptor returns an error
+// _before_ the client's retry loop, the operation will fail immediately. If
+// one returns an error _within_ the retry loop, the error WILL be considered
+// according to the client's retry policy.
+//
+// # Adding interceptors
+//
+// Idiomatically you will simply use one of the Add() receiver methods to
+// register interceptors as desired. However, the list for each interface is
+// exported on the registry struct and the caller is free to manipulate it
+// directly, for example, to register a number of interceptors all at once, or
+// to remove one that was previously registered.
+//
+// The base SDK client WILL NOT add any interceptors. SDK operations and
+// customizations are implemented in terms of middleware.
+//
+// Modifications to the registry will not persist across operation calls when
+// using per-operation functional options. This means you can register
+// interceptors on a per-operation basis without affecting other operations.
+type InterceptorRegistry struct {
+ BeforeExecution []BeforeExecutionInterceptor
+ BeforeSerialization []BeforeSerializationInterceptor
+ AfterSerialization []AfterSerializationInterceptor
+ BeforeRetryLoop []BeforeRetryLoopInterceptor
+ BeforeAttempt []BeforeAttemptInterceptor
+ BeforeSigning []BeforeSigningInterceptor
+ AfterSigning []AfterSigningInterceptor
+ BeforeTransmit []BeforeTransmitInterceptor
+ AfterTransmit []AfterTransmitInterceptor
+ BeforeDeserialization []BeforeDeserializationInterceptor
+ AfterDeserialization []AfterDeserializationInterceptor
+ AfterAttempt []AfterAttemptInterceptor
+ AfterExecution []AfterExecutionInterceptor
+}
+
+// Copy returns a deep copy of the registry. This is used by SDK clients on
+// each operation call in order to prevent per-op config mutation from
+// persisting.
+func (i *InterceptorRegistry) Copy() InterceptorRegistry {
+ return InterceptorRegistry{
+ BeforeExecution: icopy(i.BeforeExecution),
+ BeforeSerialization: icopy(i.BeforeSerialization),
+ AfterSerialization: icopy(i.AfterSerialization),
+ BeforeRetryLoop: icopy(i.BeforeRetryLoop),
+ BeforeAttempt: icopy(i.BeforeAttempt),
+ BeforeSigning: icopy(i.BeforeSigning),
+ AfterSigning: icopy(i.AfterSigning),
+ BeforeTransmit: icopy(i.BeforeTransmit),
+ AfterTransmit: icopy(i.AfterTransmit),
+ BeforeDeserialization: icopy(i.BeforeDeserialization),
+ AfterDeserialization: icopy(i.AfterDeserialization),
+ AfterAttempt: icopy(i.AfterAttempt),
+ AfterExecution: icopy(i.AfterExecution),
+ }
+}
+
+// AddBeforeExecution registers the provided BeforeExecutionInterceptor.
+func (i *InterceptorRegistry) AddBeforeExecution(v BeforeExecutionInterceptor) {
+ i.BeforeExecution = append(i.BeforeExecution, v)
+}
+
+// AddBeforeSerialization registers the provided BeforeSerializationInterceptor.
+func (i *InterceptorRegistry) AddBeforeSerialization(v BeforeSerializationInterceptor) {
+ i.BeforeSerialization = append(i.BeforeSerialization, v)
+}
+
+// AddAfterSerialization registers the provided AfterSerializationInterceptor.
+func (i *InterceptorRegistry) AddAfterSerialization(v AfterSerializationInterceptor) {
+ i.AfterSerialization = append(i.AfterSerialization, v)
+}
+
+// AddBeforeRetryLoop registers the provided BeforeRetryLoopInterceptor.
+func (i *InterceptorRegistry) AddBeforeRetryLoop(v BeforeRetryLoopInterceptor) {
+ i.BeforeRetryLoop = append(i.BeforeRetryLoop, v)
+}
+
+// AddBeforeAttempt registers the provided BeforeAttemptInterceptor.
+func (i *InterceptorRegistry) AddBeforeAttempt(v BeforeAttemptInterceptor) {
+ i.BeforeAttempt = append(i.BeforeAttempt, v)
+}
+
+// AddBeforeSigning registers the provided BeforeSigningInterceptor.
+func (i *InterceptorRegistry) AddBeforeSigning(v BeforeSigningInterceptor) {
+ i.BeforeSigning = append(i.BeforeSigning, v)
+}
+
+// AddAfterSigning registers the provided AfterSigningInterceptor.
+func (i *InterceptorRegistry) AddAfterSigning(v AfterSigningInterceptor) {
+ i.AfterSigning = append(i.AfterSigning, v)
+}
+
+// AddBeforeTransmit registers the provided BeforeTransmitInterceptor.
+func (i *InterceptorRegistry) AddBeforeTransmit(v BeforeTransmitInterceptor) {
+ i.BeforeTransmit = append(i.BeforeTransmit, v)
+}
+
+// AddAfterTransmit registers the provided AfterTransmitInterceptor.
+func (i *InterceptorRegistry) AddAfterTransmit(v AfterTransmitInterceptor) {
+ i.AfterTransmit = append(i.AfterTransmit, v)
+}
+
+// AddBeforeDeserialization registers the provided BeforeDeserializationInterceptor.
+func (i *InterceptorRegistry) AddBeforeDeserialization(v BeforeDeserializationInterceptor) {
+ i.BeforeDeserialization = append(i.BeforeDeserialization, v)
+}
+
+// AddAfterDeserialization registers the provided AfterDeserializationInterceptor.
+func (i *InterceptorRegistry) AddAfterDeserialization(v AfterDeserializationInterceptor) {
+ i.AfterDeserialization = append(i.AfterDeserialization, v)
+}
+
+// AddAfterAttempt registers the provided AfterAttemptInterceptor.
+func (i *InterceptorRegistry) AddAfterAttempt(v AfterAttemptInterceptor) {
+ i.AfterAttempt = append(i.AfterAttempt, v)
+}
+
+// AddAfterExecution registers the provided AfterExecutionInterceptor.
+func (i *InterceptorRegistry) AddAfterExecution(v AfterExecutionInterceptor) {
+ i.AfterExecution = append(i.AfterExecution, v)
+}
+
+// BeforeExecutionInterceptor runs before anything else in the operation
+// lifecycle.
+//
+// Available InterceptorContext fields:
+// - Input
+type BeforeExecutionInterceptor interface {
+ BeforeExecution(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeSerializationInterceptor runs before the operation input is serialized
+// into its transport request.
+//
+// Serialization occurs before the operation's retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+type BeforeSerializationInterceptor interface {
+ BeforeSerialization(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterSerializationInterceptor runs after the operation input is serialized
+// into its transport request.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type AfterSerializationInterceptor interface {
+ AfterSerialization(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeRetryLoopInterceptor runs right before the operation enters the retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type BeforeRetryLoopInterceptor interface {
+ BeforeRetryLoop(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeAttemptInterceptor runs right before every attempt in the retry loop.
+//
+// If this interceptor returns an error, AfterAttempt interceptors WILL NOT be
+// invoked.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type BeforeAttemptInterceptor interface {
+ BeforeAttempt(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeSigningInterceptor runs right before the request is signed.
+//
+// Signing occurs within the operation's retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type BeforeSigningInterceptor interface {
+ BeforeSigning(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterSigningInterceptor runs right after the request is signed.
+//
+// It is unsafe to modify the outgoing HTTP request at or past this hook, since
+// doing so may invalidate the signature of the request.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type AfterSigningInterceptor interface {
+ AfterSigning(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeTransmitInterceptor runs right before the HTTP request is sent.
+//
+// HTTP transmit occurs within the operation's retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+type BeforeTransmitInterceptor interface {
+ BeforeTransmit(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterTransmitInterceptor runs right after the HTTP response is received.
+//
+// It will always be invoked when a response is received, regardless of its
+// status code. Conversely, it WILL NOT be invoked if the HTTP round-trip was
+// not successful, e.g. because of a DNS resolution error.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+// - Response
+type AfterTransmitInterceptor interface {
+ AfterTransmit(ctx context.Context, in *InterceptorContext) error
+}
+
+// BeforeDeserializationInterceptor runs right before the incoming HTTP response
+// is deserialized.
+//
+// This interceptor IS NOT invoked if the HTTP round-trip was not successful.
+//
+// Deserialization occurs within the operation's retry loop.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Request
+// - Response
+type BeforeDeserializationInterceptor interface {
+ BeforeDeserialization(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterDeserializationInterceptor runs right after the incoming HTTP response
+// is deserialized. This hook is invoked regardless of whether the deserialized
+// result was an error.
+//
+// This interceptor IS NOT invoked if the HTTP round-trip was not successful.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Output (IF the operation had a success-level response)
+// - Request
+// - Response
+type AfterDeserializationInterceptor interface {
+ AfterDeserialization(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterAttemptInterceptor runs right after the incoming HTTP response
+// is deserialized. This hook is invoked regardless of whether the deserialized
+// result was an error, or if another interceptor within the retry loop
+// returned an error.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Output (IF the operation had a success-level response)
+// - Request (IF the operation did not return an error during serialization)
+// - Response (IF the operation was able to transmit the HTTP request)
+type AfterAttemptInterceptor interface {
+ AfterAttempt(ctx context.Context, in *InterceptorContext) error
+}
+
+// AfterExecutionInterceptor runs after everything else. It runs regardless of
+// how far the operation progressed in its lifecycle, and regardless of whether
+// the operation succeeded or failed.
+//
+// Available InterceptorContext fields:
+// - Input
+// - Output (IF the operation had a success-level response)
+// - Request (IF the operation did not return an error during serialization)
+// - Response (IF the operation was able to transmit the HTTP request)
+type AfterExecutionInterceptor interface {
+ AfterExecution(ctx context.Context, in *InterceptorContext) error
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go
new file mode 100644
index 000000000..2cc4b57f8
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/interceptor_middleware.go
@@ -0,0 +1,325 @@
+package http
+
+import (
+ "context"
+ "errors"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+type ictxKey struct{}
+
+func withIctx(ctx context.Context) context.Context {
+ return middleware.WithStackValue(ctx, ictxKey{}, &InterceptorContext{})
+}
+
+func getIctx(ctx context.Context) *InterceptorContext {
+ return middleware.GetStackValue(ctx, ictxKey{}).(*InterceptorContext)
+}
+
+// InterceptExecution runs Before/AfterExecutionInterceptors.
+type InterceptExecution struct {
+ BeforeExecution []BeforeExecutionInterceptor
+ AfterExecution []AfterExecutionInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptExecution) ID() string {
+ return "InterceptExecution"
+}
+
+// HandleInitialize runs the interceptors.
+func (m *InterceptExecution) HandleInitialize(
+ ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler,
+) (
+ out middleware.InitializeOutput, md middleware.Metadata, err error,
+) {
+ ctx = withIctx(ctx)
+ getIctx(ctx).Input = in.Parameters
+
+ for _, i := range m.BeforeExecution {
+ if err := i.BeforeExecution(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ out, md, err = next.HandleInitialize(ctx, in)
+
+ for _, i := range m.AfterExecution {
+ if err := i.AfterExecution(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
+
+// InterceptBeforeSerialization runs BeforeSerializationInterceptors.
+type InterceptBeforeSerialization struct {
+ Interceptors []BeforeSerializationInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptBeforeSerialization) ID() string {
+ return "InterceptBeforeSerialization"
+}
+
+// HandleSerialize runs the interceptors.
+func (m *InterceptBeforeSerialization) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.Interceptors {
+ if err := i.BeforeSerialization(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// InterceptAfterSerialization runs AfterSerializationInterceptors.
+type InterceptAfterSerialization struct {
+ Interceptors []AfterSerializationInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptAfterSerialization) ID() string {
+ return "InterceptAfterSerialization"
+}
+
+// HandleSerialize runs the interceptors.
+func (m *InterceptAfterSerialization) HandleSerialize(
+ ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler,
+) (
+ out middleware.SerializeOutput, md middleware.Metadata, err error,
+) {
+ getIctx(ctx).Request = in.Request.(*Request)
+
+ for _, i := range m.Interceptors {
+ if err := i.AfterSerialization(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleSerialize(ctx, in)
+}
+
+// InterceptBeforeRetryLoop runs BeforeRetryLoopInterceptors.
+type InterceptBeforeRetryLoop struct {
+ Interceptors []BeforeRetryLoopInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptBeforeRetryLoop) ID() string {
+ return "InterceptBeforeRetryLoop"
+}
+
+// HandleFinalize runs the interceptors.
+func (m *InterceptBeforeRetryLoop) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.Interceptors {
+ if err := i.BeforeRetryLoop(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// InterceptBeforeSigning runs BeforeSigningInterceptors.
+type InterceptBeforeSigning struct {
+ Interceptors []BeforeSigningInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptBeforeSigning) ID() string {
+ return "InterceptBeforeSigning"
+}
+
+// HandleFinalize runs the interceptors.
+func (m *InterceptBeforeSigning) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.Interceptors {
+ if err := i.BeforeSigning(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// InterceptAfterSigning runs AfterSigningInterceptors.
+type InterceptAfterSigning struct {
+ Interceptors []AfterSigningInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptAfterSigning) ID() string {
+ return "InterceptAfterSigning"
+}
+
+// HandleFinalize runs the interceptors.
+func (m *InterceptAfterSigning) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.Interceptors {
+ if err := i.AfterSigning(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return next.HandleFinalize(ctx, in)
+}
+
+// InterceptTransmit runs BeforeTransmitInterceptors and AfterTransmitInterceptors.
+type InterceptTransmit struct {
+ BeforeTransmit []BeforeTransmitInterceptor
+ AfterTransmit []AfterTransmitInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptTransmit) ID() string {
+ return "InterceptTransmit"
+}
+
+// HandleDeserialize runs the interceptors.
+func (m *InterceptTransmit) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.BeforeTransmit {
+ if err := i.BeforeTransmit(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ out, md, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ return out, md, err
+ }
+
+ // the root of the decorated middleware guarantees this will be here
+ // (client.go: ClientHandler.Handle)
+ getIctx(ctx).Response = out.RawResponse.(*Response)
+
+ for _, i := range m.AfterTransmit {
+ if err := i.AfterTransmit(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
+
+// InterceptBeforeDeserialization runs BeforeDeserializationInterceptors.
+type InterceptBeforeDeserialization struct {
+ Interceptors []BeforeDeserializationInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptBeforeDeserialization) ID() string {
+ return "InterceptBeforeDeserialization"
+}
+
+// HandleDeserialize runs the interceptors.
+func (m *InterceptBeforeDeserialization) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, md middleware.Metadata, err error,
+) {
+ out, md, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ var terr *RequestSendError
+ if errors.As(err, &terr) {
+ return out, md, err
+ }
+ }
+
+ for _, i := range m.Interceptors {
+ if err := i.BeforeDeserialization(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
+
+// InterceptAfterDeserialization runs AfterDeserializationInterceptors.
+type InterceptAfterDeserialization struct {
+ Interceptors []AfterDeserializationInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptAfterDeserialization) ID() string {
+ return "InterceptAfterDeserialization"
+}
+
+// HandleDeserialize runs the interceptors.
+func (m *InterceptAfterDeserialization) HandleDeserialize(
+ ctx context.Context, in middleware.DeserializeInput, next middleware.DeserializeHandler,
+) (
+ out middleware.DeserializeOutput, md middleware.Metadata, err error,
+) {
+ out, md, err = next.HandleDeserialize(ctx, in)
+ if err != nil {
+ var terr *RequestSendError
+ if errors.As(err, &terr) {
+ return out, md, err
+ }
+ }
+
+ getIctx(ctx).Output = out.Result
+
+ for _, i := range m.Interceptors {
+ if err := i.AfterDeserialization(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
+
+// InterceptAttempt runs BeforeAttemptInterceptors and AfterAttemptInterceptors.
+type InterceptAttempt struct {
+ BeforeAttempt []BeforeAttemptInterceptor
+ AfterAttempt []AfterAttemptInterceptor
+}
+
+// ID identifies the middleware.
+func (m *InterceptAttempt) ID() string {
+ return "InterceptAttempt"
+}
+
+// HandleFinalize runs the interceptors.
+func (m *InterceptAttempt) HandleFinalize(
+ ctx context.Context, in middleware.FinalizeInput, next middleware.FinalizeHandler,
+) (
+ out middleware.FinalizeOutput, md middleware.Metadata, err error,
+) {
+ for _, i := range m.BeforeAttempt {
+ if err := i.BeforeAttempt(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ out, md, err = next.HandleFinalize(ctx, in)
+
+ for _, i := range m.AfterAttempt {
+ if err := i.AfterAttempt(ctx, getIctx(ctx)); err != nil {
+ return out, md, err
+ }
+ }
+
+ return out, md, err
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/metrics.go b/vendor/github.com/aws/smithy-go/transport/http/metrics.go
new file mode 100644
index 000000000..d1beaa595
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/metrics.go
@@ -0,0 +1,198 @@
+package http
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "sync/atomic"
+ "time"
+
+ "github.com/aws/smithy-go/metrics"
+)
+
+var now = time.Now
+
+// withMetrics instruments an HTTP client and context to collect HTTP metrics.
+func withMetrics(parent context.Context, client ClientDo, meter metrics.Meter) (
+ context.Context, ClientDo, error,
+) {
+ hm, err := newHTTPMetrics(meter)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ ctx := httptrace.WithClientTrace(parent, &httptrace.ClientTrace{
+ DNSStart: hm.DNSStart,
+ ConnectStart: hm.ConnectStart,
+ TLSHandshakeStart: hm.TLSHandshakeStart,
+
+ GotConn: hm.GotConn(parent),
+ PutIdleConn: hm.PutIdleConn(parent),
+ ConnectDone: hm.ConnectDone(parent),
+ DNSDone: hm.DNSDone(parent),
+ TLSHandshakeDone: hm.TLSHandshakeDone(parent),
+ GotFirstResponseByte: hm.GotFirstResponseByte(parent),
+ })
+ return ctx, &timedClientDo{client, hm}, nil
+}
+
+type timedClientDo struct {
+ ClientDo
+ hm *httpMetrics
+}
+
+func (c *timedClientDo) Do(r *http.Request) (*http.Response, error) {
+ c.hm.doStart.Store(now())
+ resp, err := c.ClientDo.Do(r)
+
+ c.hm.DoRequestDuration.Record(r.Context(), c.hm.doStart.Elapsed())
+ return resp, err
+}
+
+type httpMetrics struct {
+ DNSLookupDuration metrics.Float64Histogram // client.http.connections.dns_lookup_duration
+ ConnectDuration metrics.Float64Histogram // client.http.connections.acquire_duration
+ TLSHandshakeDuration metrics.Float64Histogram // client.http.connections.tls_handshake_duration
+ ConnectionUsage metrics.Int64UpDownCounter // client.http.connections.usage
+
+ DoRequestDuration metrics.Float64Histogram // client.http.do_request_duration
+ TimeToFirstByte metrics.Float64Histogram // client.http.time_to_first_byte
+
+ doStart safeTime
+ dnsStart safeTime
+ connectStart safeTime
+ tlsStart safeTime
+}
+
+func newHTTPMetrics(meter metrics.Meter) (*httpMetrics, error) {
+ hm := &httpMetrics{}
+
+ var err error
+ hm.DNSLookupDuration, err = meter.Float64Histogram("client.http.connections.dns_lookup_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "The time it takes a request to perform DNS lookup."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.ConnectDuration, err = meter.Float64Histogram("client.http.connections.acquire_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "The time it takes a request to acquire a connection."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.TLSHandshakeDuration, err = meter.Float64Histogram("client.http.connections.tls_handshake_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "The time it takes an HTTP request to perform the TLS handshake."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.ConnectionUsage, err = meter.Int64UpDownCounter("client.http.connections.usage", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "{connection}"
+ o.Description = "Current state of connections pool."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.DoRequestDuration, err = meter.Float64Histogram("client.http.do_request_duration", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "Time spent performing an entire HTTP transaction."
+ })
+ if err != nil {
+ return nil, err
+ }
+ hm.TimeToFirstByte, err = meter.Float64Histogram("client.http.time_to_first_byte", func(o *metrics.InstrumentOptions) {
+ o.UnitLabel = "s"
+ o.Description = "Time from start of transaction to when the first response byte is available."
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return hm, nil
+}
+
+func (m *httpMetrics) DNSStart(httptrace.DNSStartInfo) {
+ m.dnsStart.Store(now())
+}
+
+func (m *httpMetrics) ConnectStart(string, string) {
+ m.connectStart.Store(now())
+}
+
+func (m *httpMetrics) TLSHandshakeStart() {
+ m.tlsStart.Store(now())
+}
+
+func (m *httpMetrics) GotConn(ctx context.Context) func(httptrace.GotConnInfo) {
+ return func(httptrace.GotConnInfo) {
+ m.addConnAcquired(ctx, 1)
+ }
+}
+
+func (m *httpMetrics) PutIdleConn(ctx context.Context) func(error) {
+ return func(error) {
+ m.addConnAcquired(ctx, -1)
+ }
+}
+
+func (m *httpMetrics) DNSDone(ctx context.Context) func(httptrace.DNSDoneInfo) {
+ return func(httptrace.DNSDoneInfo) {
+ m.DNSLookupDuration.Record(ctx, m.dnsStart.Elapsed())
+ }
+}
+
+func (m *httpMetrics) ConnectDone(ctx context.Context) func(string, string, error) {
+ return func(string, string, error) {
+ m.ConnectDuration.Record(ctx, m.connectStart.Elapsed())
+ }
+}
+
+func (m *httpMetrics) TLSHandshakeDone(ctx context.Context) func(tls.ConnectionState, error) {
+ return func(tls.ConnectionState, error) {
+ m.TLSHandshakeDuration.Record(ctx, m.tlsStart.Elapsed())
+ }
+}
+
+func (m *httpMetrics) GotFirstResponseByte(ctx context.Context) func() {
+ return func() {
+ m.TimeToFirstByte.Record(ctx, m.doStart.Elapsed())
+ }
+}
+
+func (m *httpMetrics) addConnAcquired(ctx context.Context, incr int64) {
+ m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("state", "acquired")
+ })
+}
+
+// Not used: it is recommended to track acquired vs idle conn, but we can't
+// determine when something is truly idle with the current HTTP client hooks
+// available to us.
+func (m *httpMetrics) addConnIdle(ctx context.Context, incr int64) {
+ m.ConnectionUsage.Add(ctx, incr, func(o *metrics.RecordMetricOptions) {
+ o.Properties.Set("state", "idle")
+ })
+}
+
+type safeTime struct {
+ atomic.Value // time.Time
+}
+
+func (st *safeTime) Store(v time.Time) {
+ st.Value.Store(v)
+}
+
+func (st *safeTime) Load() time.Time {
+ t, _ := st.Value.Load().(time.Time)
+ return t
+}
+
+func (st *safeTime) Elapsed() float64 {
+ end := now()
+ elapsed := end.Sub(st.Load())
+ return float64(elapsed) / 1e9
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
index 1d3b218a1..914338f2e 100644
--- a/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_close_response_body.go
@@ -2,10 +2,10 @@ package http
import (
"context"
+ "io"
+
"github.com/aws/smithy-go/logging"
"github.com/aws/smithy-go/middleware"
- "io"
- "io/ioutil"
)
// AddErrorCloseResponseBodyMiddleware adds the middleware to automatically
@@ -30,7 +30,7 @@ func (m *errorCloseResponseBodyMiddleware) HandleDeserialize(
if err != nil {
if resp, ok := out.RawResponse.(*Response); ok && resp != nil && resp.Body != nil {
// Consume the full body to prevent TCP connection resets on some platforms
- _, _ = io.Copy(ioutil.Discard, resp.Body)
+ _, _ = io.Copy(io.Discard, resp.Body)
// Do not validate that the response closes successfully.
resp.Body.Close()
}
@@ -64,7 +64,7 @@ func (m *closeResponseBody) HandleDeserialize(
if resp, ok := out.RawResponse.(*Response); ok {
// Consume the full body to prevent TCP connection resets on some platforms
- _, copyErr := io.Copy(ioutil.Discard, resp.Body)
+ _, copyErr := io.Copy(io.Discard, resp.Body)
if copyErr != nil {
middleware.GetLogger(ctx).Logf(logging.Warn, "failed to discard remaining HTTP response body, this may affect connection reuse")
}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
new file mode 100644
index 000000000..855c22720
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/middleware_header_comment.go
@@ -0,0 +1,81 @@
+package http
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+
+ "github.com/aws/smithy-go/middleware"
+)
+
+// WithHeaderComment instruments a middleware stack to append an HTTP field
+// comment to the given header as specified in RFC 9110
+// (https://www.rfc-editor.org/rfc/rfc9110#name-comments).
+//
+// The header is case-insensitive. If the provided header exists when the
+// middleware runs, the content will be inserted as-is enclosed in parentheses.
+//
+// Note that per the HTTP specification, comments are only allowed in fields
+// containing "comment" as part of their field value definition, but this API
+// will NOT verify whether the provided header is one of them.
+//
+// WithHeaderComment MAY be applied more than once to a middleware stack and/or
+// more than once per header.
+func WithHeaderComment(header, content string) func(*middleware.Stack) error {
+ return func(s *middleware.Stack) error {
+ m, err := getOrAddHeaderComment(s)
+ if err != nil {
+ return fmt.Errorf("get or add header comment: %v", err)
+ }
+
+ m.values.Add(header, content)
+ return nil
+ }
+}
+
+type headerCommentMiddleware struct {
+ values http.Header // hijack case-insensitive access APIs
+}
+
+func (*headerCommentMiddleware) ID() string {
+ return "headerComment"
+}
+
+func (m *headerCommentMiddleware) HandleBuild(ctx context.Context, in middleware.BuildInput, next middleware.BuildHandler) (
+ out middleware.BuildOutput, metadata middleware.Metadata, err error,
+) {
+ r, ok := in.Request.(*Request)
+ if !ok {
+ return out, metadata, fmt.Errorf("unknown transport type %T", in.Request)
+ }
+
+ for h, contents := range m.values {
+ for _, c := range contents {
+ if existing := r.Header.Get(h); existing != "" {
+ r.Header.Set(h, fmt.Sprintf("%s (%s)", existing, c))
+ }
+ }
+ }
+
+ return next.HandleBuild(ctx, in)
+}
+
+func getOrAddHeaderComment(s *middleware.Stack) (*headerCommentMiddleware, error) {
+ id := (*headerCommentMiddleware)(nil).ID()
+ m, ok := s.Build.Get(id)
+ if !ok {
+ m := &headerCommentMiddleware{values: http.Header{}}
+ if err := s.Build.Add(m, middleware.After); err != nil {
+ return nil, fmt.Errorf("add build: %v", err)
+ }
+
+ return m, nil
+ }
+
+ hc, ok := m.(*headerCommentMiddleware)
+ if !ok {
+ return nil, fmt.Errorf("existing middleware w/ id %s is not *headerCommentMiddleware", id)
+ }
+
+ return hc, nil
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/properties.go b/vendor/github.com/aws/smithy-go/transport/http/properties.go
new file mode 100644
index 000000000..c65aa3932
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/transport/http/properties.go
@@ -0,0 +1,80 @@
+package http
+
+import smithy "github.com/aws/smithy-go"
+
+type (
+ sigV4SigningNameKey struct{}
+ sigV4SigningRegionKey struct{}
+
+ sigV4ASigningNameKey struct{}
+ sigV4ASigningRegionsKey struct{}
+
+ isUnsignedPayloadKey struct{}
+ disableDoubleEncodingKey struct{}
+)
+
+// GetSigV4SigningName gets the signing name from Properties.
+func GetSigV4SigningName(p *smithy.Properties) (string, bool) {
+ v, ok := p.Get(sigV4SigningNameKey{}).(string)
+ return v, ok
+}
+
+// SetSigV4SigningName sets the signing name on Properties.
+func SetSigV4SigningName(p *smithy.Properties, name string) {
+ p.Set(sigV4SigningNameKey{}, name)
+}
+
+// GetSigV4SigningRegion gets the signing region from Properties.
+func GetSigV4SigningRegion(p *smithy.Properties) (string, bool) {
+ v, ok := p.Get(sigV4SigningRegionKey{}).(string)
+ return v, ok
+}
+
+// SetSigV4SigningRegion sets the signing region on Properties.
+func SetSigV4SigningRegion(p *smithy.Properties, region string) {
+ p.Set(sigV4SigningRegionKey{}, region)
+}
+
+// GetSigV4ASigningName gets the v4a signing name from Properties.
+func GetSigV4ASigningName(p *smithy.Properties) (string, bool) {
+ v, ok := p.Get(sigV4ASigningNameKey{}).(string)
+ return v, ok
+}
+
+// SetSigV4ASigningName sets the v4a signing name on Properties.
+func SetSigV4ASigningName(p *smithy.Properties, name string) {
+ p.Set(sigV4ASigningNameKey{}, name)
+}
+
+// GetSigV4ASigningRegions gets the v4a signing region set from Properties.
+func GetSigV4ASigningRegions(p *smithy.Properties) ([]string, bool) {
+ v, ok := p.Get(sigV4ASigningRegionsKey{}).([]string)
+ return v, ok
+}
+
+// SetSigV4ASigningRegions sets the v4a signing region set on Properties.
+func SetSigV4ASigningRegions(p *smithy.Properties, regions []string) {
+ p.Set(sigV4ASigningRegionsKey{}, regions)
+}
+
+// GetIsUnsignedPayload gets whether the payload is unsigned from Properties.
+func GetIsUnsignedPayload(p *smithy.Properties) (bool, bool) {
+ v, ok := p.Get(isUnsignedPayloadKey{}).(bool)
+ return v, ok
+}
+
+// SetIsUnsignedPayload sets whether the payload is unsigned on Properties.
+func SetIsUnsignedPayload(p *smithy.Properties, isUnsignedPayload bool) {
+ p.Set(isUnsignedPayloadKey{}, isUnsignedPayload)
+}
+
+// GetDisableDoubleEncoding gets whether double URI encoding is disabled from Properties.
+func GetDisableDoubleEncoding(p *smithy.Properties) (bool, bool) {
+ v, ok := p.Get(disableDoubleEncodingKey{}).(bool)
+ return v, ok
+}
+
+// SetDisableDoubleEncoding sets whether double URI encoding is disabled on Properties.
+func SetDisableDoubleEncoding(p *smithy.Properties, disableDoubleEncoding bool) {
+ p.Set(disableDoubleEncodingKey{}, disableDoubleEncoding)
+}
diff --git a/vendor/github.com/aws/smithy-go/transport/http/request.go b/vendor/github.com/aws/smithy-go/transport/http/request.go
index 7177d6f95..5cbf6f10a 100644
--- a/vendor/github.com/aws/smithy-go/transport/http/request.go
+++ b/vendor/github.com/aws/smithy-go/transport/http/request.go
@@ -4,7 +4,6 @@ import (
"context"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"strings"
@@ -167,7 +166,7 @@ func (r *Request) Build(ctx context.Context) *http.Request {
switch stream := r.stream.(type) {
case *io.PipeReader:
- req.Body = ioutil.NopCloser(stream)
+ req.Body = io.NopCloser(stream)
req.ContentLength = -1
default:
// HTTP Client Request must only have a non-nil body if the
@@ -175,7 +174,7 @@ func (r *Request) Build(ctx context.Context) *http.Request {
// Client will interpret a non-nil body and ContentLength 0 as
// "unknown". This is unwanted behavior.
if req.ContentLength != 0 && r.stream != nil {
- req.Body = iointernal.NewSafeReadCloser(ioutil.NopCloser(stream))
+ req.Body = iointernal.NewSafeReadCloser(io.NopCloser(stream))
}
}
diff --git a/vendor/github.com/aws/smithy-go/waiter/logger.go b/vendor/github.com/aws/smithy-go/waiter/logger.go
new file mode 100644
index 000000000..8d70a03ff
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/logger.go
@@ -0,0 +1,36 @@
+package waiter
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/aws/smithy-go/logging"
+ "github.com/aws/smithy-go/middleware"
+)
+
+// Logger is the Logger middleware used by the waiter to log an attempt
+type Logger struct {
+ // Attempt is the current attempt to be logged
+ Attempt int64
+}
+
+// ID representing the Logger middleware
+func (*Logger) ID() string {
+ return "WaiterLogger"
+}
+
+// HandleInitialize performs handling of request in initialize stack step
+func (m *Logger) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
+ out middleware.InitializeOutput, metadata middleware.Metadata, err error,
+) {
+ logger := middleware.GetLogger(ctx)
+
+ logger.Logf(logging.Debug, fmt.Sprintf("attempting waiter request, attempt count: %d", m.Attempt))
+
+ return next.HandleInitialize(ctx, in)
+}
+
+// AddLogger is a helper util to add waiter logger after `SetLogger` middleware in
+func (m Logger) AddLogger(stack *middleware.Stack) error {
+ return stack.Initialize.Insert(&m, "SetLogger", middleware.After)
+}
diff --git a/vendor/github.com/aws/smithy-go/waiter/waiter.go b/vendor/github.com/aws/smithy-go/waiter/waiter.go
new file mode 100644
index 000000000..03e46e2ee
--- /dev/null
+++ b/vendor/github.com/aws/smithy-go/waiter/waiter.go
@@ -0,0 +1,66 @@
+package waiter
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/aws/smithy-go/rand"
+)
+
+// ComputeDelay computes delay between waiter attempts. The function takes in a current attempt count,
+// minimum delay, maximum delay, and remaining wait time for waiter as input. The inputs minDelay and maxDelay
+// must always be greater than 0, and minDelay must be less than or equal to maxDelay.
+//
+// Returns the computed delay and if next attempt count is possible within the given input time constraints.
+// Note that the zeroth attempt results in no delay.
+func ComputeDelay(attempt int64, minDelay, maxDelay, remainingTime time.Duration) (delay time.Duration, err error) {
+ // zeroth attempt, no delay
+ if attempt <= 0 {
+ return 0, nil
+ }
+
+ // remainingTime is zero or less, no delay
+ if remainingTime <= 0 {
+ return 0, nil
+ }
+
+ // validate min delay is greater than 0
+ if minDelay == 0 {
+ return 0, fmt.Errorf("minDelay must be greater than zero when computing Delay")
+ }
+
+ // validate max delay is greater than 0
+ if maxDelay == 0 {
+ return 0, fmt.Errorf("maxDelay must be greater than zero when computing Delay")
+ }
+
+ // Get attempt ceiling to prevent integer overflow.
+ attemptCeiling := (math.Log(float64(maxDelay/minDelay)) / math.Log(2)) + 1
+
+ if attempt > int64(attemptCeiling) {
+ delay = maxDelay
+ } else {
+ // Compute exponential delay based on attempt.
+ ri := 1 << uint64(attempt-1)
+ // compute delay
+ delay = minDelay * time.Duration(ri)
+ }
+
+ if delay != minDelay {
+ // randomize to get jitter between min delay and delay value
+ d, err := rand.CryptoRandInt63n(int64(delay - minDelay))
+ if err != nil {
+ return 0, fmt.Errorf("error computing retry jitter, %w", err)
+ }
+
+ delay = time.Duration(d) + minDelay
+ }
+
+ // check if this is the last attempt possible and compute delay accordingly
+ if remainingTime-delay <= minDelay {
+ delay = remainingTime - minDelay
+ }
+
+ return delay, nil
+}
diff --git a/vendor/github.com/bradfitz/gomemcache/AUTHORS b/vendor/github.com/bradfitz/gomemcache/AUTHORS
new file mode 100644
index 000000000..86ca62074
--- /dev/null
+++ b/vendor/github.com/bradfitz/gomemcache/AUTHORS
@@ -0,0 +1,9 @@
+The following people & companies are the copyright holders of this
+package. Feel free to add to this list if you or your employer cares,
+otherwise it's implicit from the git log.
+
+Authors:
+
+- Brad Fitzpatrick
+- Google, Inc. (from Googlers contributing)
+- Anybody else in the git log.
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
index 5b94ed05d..6f48caac9 100644
--- a/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/memcache.go
@@ -1,5 +1,5 @@
/*
-Copyright 2011 Google Inc.
+Copyright 2011 The gomemcache AUTHORS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,11 +20,12 @@ package memcache
import (
"bufio"
"bytes"
+ "context"
"errors"
"fmt"
"io"
+ "math"
"net"
-
"strconv"
"strings"
"sync"
@@ -65,7 +66,7 @@ var (
const (
// DefaultTimeout is the default socket read/write timeout.
- DefaultTimeout = 100 * time.Millisecond
+ DefaultTimeout = 500 * time.Millisecond
// DefaultMaxIdleConns is the default maximum number of idle connections
// kept for any single address.
@@ -112,6 +113,7 @@ var (
resultTouched = []byte("TOUCHED\r\n")
resultClientErrorPrefix = []byte("CLIENT_ERROR ")
+ versionPrefix = []byte("VERSION")
)
// New returns a memcache client using the provided server(s)
@@ -131,8 +133,14 @@ func NewFromSelector(ss ServerSelector) *Client {
// Client is a memcache client.
// It is safe for unlocked use by multiple concurrent goroutines.
type Client struct {
- // Dialer specifies a custom dialer used to dial new connections to a server.
- DialTimeout func(network, address string, timeout time.Duration) (net.Conn, error)
+ // DialContext connects to the address on the named network using the
+ // provided context.
+ //
+ // To connect to servers using TLS (memcached running with "--enable-ssl"),
+ // use a DialContext func that uses tls.Dialer.DialContext. See this
+ // package's tests as an example.
+ DialContext func(ctx context.Context, network, address string) (net.Conn, error)
+
// Timeout specifies the socket read/write timeout.
// If zero, DefaultTimeout is used.
Timeout time.Duration
@@ -147,7 +155,7 @@ type Client struct {
selector ServerSelector
- lk sync.Mutex
+ mu sync.Mutex
freeconn map[string][]*conn
}
@@ -168,8 +176,11 @@ type Item struct {
// Zero means the Item has no expiration time.
Expiration int32
- // Compare and swap ID.
- casid uint64
+ // CasID is the compare and swap ID.
+ //
+ // It's populated by get requests and then the same value is
+ // required for a CompareAndSwap request to succeed.
+ CasID uint64
}
// conn is a connection to a server.
@@ -202,8 +213,8 @@ func (cn *conn) condRelease(err *error) {
}
func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
- c.lk.Lock()
- defer c.lk.Unlock()
+ c.mu.Lock()
+ defer c.mu.Unlock()
if c.freeconn == nil {
c.freeconn = make(map[string][]*conn)
}
@@ -216,8 +227,8 @@ func (c *Client) putFreeConn(addr net.Addr, cn *conn) {
}
func (c *Client) getFreeConn(addr net.Addr) (cn *conn, ok bool) {
- c.lk.Lock()
- defer c.lk.Unlock()
+ c.mu.Lock()
+ defer c.mu.Unlock()
if c.freeconn == nil {
return nil, false
}
@@ -256,14 +267,18 @@ func (cte *ConnectTimeoutError) Error() string {
}
func (c *Client) dial(addr net.Addr) (net.Conn, error) {
- type connError struct {
- cn net.Conn
- err error
- }
- if c.DialTimeout == nil {
- c.DialTimeout = net.DialTimeout
+ ctx, cancel := context.WithTimeout(context.Background(), c.netTimeout())
+ defer cancel()
+
+ dialerContext := c.DialContext
+ if dialerContext == nil {
+ dialer := net.Dialer{
+ Timeout: c.netTimeout(),
+ }
+ dialerContext = dialer.DialContext
}
- nc, err := c.DialTimeout(addr.Network(), addr.String(), c.netTimeout())
+
+ nc, err := dialerContext(ctx, addr.Network(), addr.String())
if err == nil {
return nc, nil
}
@@ -349,30 +364,31 @@ func (c *Client) withKeyAddr(key string, fn func(net.Addr) error) (err error) {
return fn(addr)
}
-func (c *Client) withAddrRw(addr net.Addr, fn func(*bufio.ReadWriter) error) (err error) {
+func (c *Client) withAddrRw(addr net.Addr, fn func(*conn) error) (err error) {
cn, err := c.getConn(addr)
if err != nil {
return err
}
defer cn.condRelease(&err)
- return fn(cn.rw)
+ return fn(cn)
}
-func (c *Client) withKeyRw(key string, fn func(*bufio.ReadWriter) error) error {
+func (c *Client) withKeyRw(key string, fn func(*conn) error) error {
return c.withKeyAddr(key, func(addr net.Addr) error {
return c.withAddrRw(addr, fn)
})
}
func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error {
- return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ return c.withAddrRw(addr, func(conn *conn) error {
+ rw := conn.rw
if _, err := fmt.Fprintf(rw, "gets %s\r\n", strings.Join(keys, " ")); err != nil {
return err
}
if err := rw.Flush(); err != nil {
return err
}
- if err := parseGetResponse(rw.Reader, cb); err != nil {
+ if err := parseGetResponse(rw.Reader, conn, cb); err != nil {
return err
}
return nil
@@ -381,7 +397,8 @@ func (c *Client) getFromAddr(addr net.Addr, keys []string, cb func(*Item)) error
// flushAllFromAddr send the flush_all command to the given addr
func (c *Client) flushAllFromAddr(addr net.Addr) error {
- return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ return c.withAddrRw(addr, func(conn *conn) error {
+ rw := conn.rw
if _, err := fmt.Fprintf(rw, "flush_all\r\n"); err != nil {
return err
}
@@ -402,8 +419,34 @@ func (c *Client) flushAllFromAddr(addr net.Addr) error {
})
}
+// ping sends the version command to the given addr
+func (c *Client) ping(addr net.Addr) error {
+ return c.withAddrRw(addr, func(conn *conn) error {
+ rw := conn.rw
+ if _, err := fmt.Fprintf(rw, "version\r\n"); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ line, err := rw.ReadSlice('\n')
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case bytes.HasPrefix(line, versionPrefix):
+ break
+ default:
+ return fmt.Errorf("memcache: unexpected response line from ping: %q", string(line))
+ }
+ return nil
+ })
+}
+
func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) error {
- return c.withAddrRw(addr, func(rw *bufio.ReadWriter) error {
+ return c.withAddrRw(addr, func(conn *conn) error {
+ rw := conn.rw
for _, key := range keys {
if _, err := fmt.Fprintf(rw, "touch %s %d\r\n", key, expiration); err != nil {
return err
@@ -433,11 +476,11 @@ func (c *Client) touchFromAddr(addr net.Addr, keys []string, expiration int32) e
// cache misses. Each key must be at most 250 bytes in length.
// If no error is returned, the returned map will also be non-nil.
func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
- var lk sync.Mutex
+ var mu sync.Mutex
m := make(map[string]*Item)
addItemToMap := func(it *Item) {
- lk.Lock()
- defer lk.Unlock()
+ mu.Lock()
+ defer mu.Unlock()
m[it.Key] = it
}
@@ -471,8 +514,12 @@ func (c *Client) GetMulti(keys []string) (map[string]*Item, error) {
// parseGetResponse reads a GET response from r and calls cb for each
// read and allocated Item
-func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
+func parseGetResponse(r *bufio.Reader, conn *conn, cb func(*Item)) error {
for {
+ // extend deadline before each additional call, otherwise all cumulative
+ // calls use the same overall deadline
+ conn.extendDeadline()
+
line, err := r.ReadSlice('\n')
if err != nil {
return err
@@ -503,17 +550,52 @@ func parseGetResponse(r *bufio.Reader, cb func(*Item)) error {
// scanGetResponseLine populates it and returns the declared size of the item.
// It does not read the bytes of the item.
func scanGetResponseLine(line []byte, it *Item) (size int, err error) {
- pattern := "VALUE %s %d %d %d\r\n"
- dest := []interface{}{&it.Key, &it.Flags, &size, &it.casid}
- if bytes.Count(line, space) == 3 {
- pattern = "VALUE %s %d %d\r\n"
- dest = dest[:3]
- }
- n, err := fmt.Sscanf(string(line), pattern, dest...)
- if err != nil || n != len(dest) {
+ errf := func(line []byte) (int, error) {
return -1, fmt.Errorf("memcache: unexpected line in get response: %q", line)
}
- return size, nil
+ if !bytes.HasPrefix(line, []byte("VALUE ")) || !bytes.HasSuffix(line, []byte("\r\n")) {
+ return errf(line)
+ }
+ s := string(line[6 : len(line)-2])
+ var rest string
+ var found bool
+ it.Key, rest, found = cut(s, ' ')
+ if !found {
+ return errf(line)
+ }
+ val, rest, found := cut(rest, ' ')
+ if !found {
+ return errf(line)
+ }
+ flags64, err := strconv.ParseUint(val, 10, 32)
+ if err != nil {
+ return errf(line)
+ }
+ it.Flags = uint32(flags64)
+ val, rest, found = cut(rest, ' ')
+ size64, err := strconv.ParseUint(val, 10, 32)
+ if err != nil {
+ return errf(line)
+ }
+ if size64 > math.MaxInt { // Can happen if int is 32-bit
+ return errf(line)
+ }
+ if !found { // final CAS ID is optional.
+ return int(size64), nil
+ }
+ it.CasID, err = strconv.ParseUint(rest, 10, 64)
+ if err != nil {
+ return errf(line)
+ }
+ return int(size64), nil
+}
+
+// Similar to strings.Cut in Go 1.18, but sep can only be 1 byte.
+func cut(s string, sep byte) (before, after string, found bool) {
+ if i := strings.IndexByte(s, sep); i >= 0 {
+ return s[:i], s[i+1:], true
+ }
+ return s, "", false
}
// Set writes the given item, unconditionally.
@@ -545,6 +627,26 @@ func (c *Client) replace(rw *bufio.ReadWriter, item *Item) error {
return c.populateOne(rw, "replace", item)
}
+// Append appends the given item to the existing item, if a value already
+// exists for its key. ErrNotStored is returned if that condition is not met.
+func (c *Client) Append(item *Item) error {
+ return c.onItem(item, (*Client).append)
+}
+
+func (c *Client) append(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "append", item)
+}
+
+// Prepend prepends the given item to the existing item, if a value already
+// exists for its key. ErrNotStored is returned if that condition is not met.
+func (c *Client) Prepend(item *Item) error {
+ return c.onItem(item, (*Client).prepend)
+}
+
+func (c *Client) prepend(rw *bufio.ReadWriter, item *Item) error {
+ return c.populateOne(rw, "prepend", item)
+}
+
// CompareAndSwap writes the given item that was previously returned
// by Get, if the value was neither modified or evicted between the
// Get and the CompareAndSwap calls. The item's Key should not change
@@ -567,7 +669,7 @@ func (c *Client) populateOne(rw *bufio.ReadWriter, verb string, item *Item) erro
var err error
if verb == "cas" {
_, err = fmt.Fprintf(rw, "%s %s %d %d %d %d\r\n",
- verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.casid)
+ verb, item.Key, item.Flags, item.Expiration, len(item.Value), item.CasID)
} else {
_, err = fmt.Fprintf(rw, "%s %s %d %d %d\r\n",
verb, item.Key, item.Flags, item.Expiration, len(item.Value))
@@ -636,18 +738,52 @@ func writeExpectf(rw *bufio.ReadWriter, expect []byte, format string, args ...in
// Delete deletes the item with the provided key. The error ErrCacheMiss is
// returned if the item didn't already exist in the cache.
func (c *Client) Delete(key string) error {
- return c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
- return writeExpectf(rw, resultDeleted, "delete %s\r\n", key)
+ return c.withKeyRw(key, func(conn *conn) error {
+ return writeExpectf(conn.rw, resultDeleted, "delete %s\r\n", key)
})
}
// DeleteAll deletes all items in the cache.
func (c *Client) DeleteAll() error {
- return c.withKeyRw("", func(rw *bufio.ReadWriter) error {
- return writeExpectf(rw, resultDeleted, "flush_all\r\n")
+ return c.withKeyRw("", func(conn *conn) error {
+ return writeExpectf(conn.rw, resultDeleted, "flush_all\r\n")
+ })
+}
+
+// Get and Touch the item with the provided key. The error ErrCacheMiss is
+// returned if the item didn't already exist in the cache.
+func (c *Client) GetAndTouch(key string, expiration int32) (item *Item, err error) {
+ err = c.withKeyAddr(key, func(addr net.Addr) error {
+ return c.getAndTouchFromAddr(addr, key, expiration, func(it *Item) { item = it })
+ })
+ if err == nil && item == nil {
+ err = ErrCacheMiss
+ }
+ return
+}
+
+func (c *Client) getAndTouchFromAddr(addr net.Addr, key string, expiration int32, cb func(*Item)) error {
+ return c.withAddrRw(addr, func(conn *conn) error {
+ rw := conn.rw
+ if _, err := fmt.Fprintf(rw, "gat %d %s\r\n", expiration, key); err != nil {
+ return err
+ }
+ if err := rw.Flush(); err != nil {
+ return err
+ }
+ if err := parseGetResponse(rw.Reader, conn, cb); err != nil {
+ return err
+ }
+ return nil
})
}
+// Ping checks all instances if they are alive. Returns error if any
+// of them is down.
+func (c *Client) Ping() error {
+ return c.selector.Each(c.ping)
+}
+
// Increment atomically increments key by delta. The return value is
// the new value after being incremented or an error. If the value
// didn't exist in memcached the error is ErrCacheMiss. The value in
@@ -669,7 +805,8 @@ func (c *Client) Decrement(key string, delta uint64) (newValue uint64, err error
func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
var val uint64
- err := c.withKeyRw(key, func(rw *bufio.ReadWriter) error {
+ err := c.withKeyRw(key, func(conn *conn) error {
+ rw := conn.rw
line, err := writeReadLine(rw, "%s %s %d\r\n", verb, key, delta)
if err != nil {
return err
@@ -689,3 +826,24 @@ func (c *Client) incrDecr(verb, key string, delta uint64) (uint64, error) {
})
return val, err
}
+
+// Close closes any open connections.
+//
+// It returns the first error encountered closing connections, but always
+// closes all connections.
+//
+// After Close, the Client may still be used.
+func (c *Client) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ var ret error
+ for _, conns := range c.freeconn {
+ for _, c := range conns {
+ if err := c.nc.Close(); err != nil && ret == nil {
+ ret = err
+ }
+ }
+ }
+ c.freeconn = nil
+ return ret
+}
diff --git a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
index 89ad81e0d..964dbdb6a 100644
--- a/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
+++ b/vendor/github.com/bradfitz/gomemcache/memcache/selector.go
@@ -1,5 +1,5 @@
/*
-Copyright 2011 Google Inc.
+Copyright 2011 The gomemcache AUTHORS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/caio/go-tdigest/.gitignore b/vendor/github.com/caio/go-tdigest/.gitignore
new file mode 100644
index 000000000..f9f915f62
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+go-tdigest.test
diff --git a/vendor/github.com/caio/go-tdigest/CONTRIBUTING.md b/vendor/github.com/caio/go-tdigest/CONTRIBUTING.md
new file mode 100644
index 000000000..3baa1d164
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/CONTRIBUTING.md
@@ -0,0 +1,42 @@
+# Contributing
+
+First and foremost: **thank you very much** for your interest in this
+project. Feel free to skip all this and open your issue / pull request
+if reading contribution guidelines is too much for you at this point.
+We value your contribution a lot more than we value your ability to
+follow rules (and thankfully we can afford to take this approach given
+this project's demand).
+
+Any kind of contribution is welcome. We can always use better docs and
+tests (and code, of course). If you think you can improve this project
+in any dimension _let's talk_ :-)
+
+## Guidelines
+
+Be kind and respectful in all your interactions with people inside
+(outside too!) this community; There is no excuse for not showing
+basic decency. Sarcasm and generally unconstructive remarks are **not
+welcome**.
+
+### Issues
+
+When opening and interacting with issues please:
+
+- Be as clear as possible
+- Provide examples if you can
+
+### Pull Requests
+
+We expect that pull requests:
+
+- Have [good commit messages][commits]
+- Contain tests for new features
+- Target and can be cleanly merged with the `master` branch
+- Pass the tests
+
+[commits]: https://www.git-scm.com/book/en/v2/Distributed-Git-Contributing-to-a-Project#_commit_guidelines
+
+### Project Management
+
+Don't bother with labels, milestones, assignments, etc. We don't make
+use of those.
diff --git a/vendor/github.com/caio/go-tdigest/Gopkg.lock b/vendor/github.com/caio/go-tdigest/Gopkg.lock
new file mode 100644
index 000000000..65bf9067a
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/Gopkg.lock
@@ -0,0 +1,41 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ digest = "1:cf63454c1e81409484ded047413228de0f7a3031f0fcd36d4e1db7620c3c7d1b"
+ name = "github.com/leesper/go_rng"
+ packages = ["."]
+ pruneopts = ""
+ revision = "5344a9259b21627d94279721ab1f27eb029194e7"
+
+[[projects]]
+ branch = "master"
+ digest = "1:ad6d9b2cce40c7c44952d49a6a324a2110db43b4279d9e599db74e45de5ae80c"
+ name = "gonum.org/v1/gonum"
+ packages = [
+ "blas",
+ "blas/blas64",
+ "blas/gonum",
+ "floats",
+ "internal/asm/c128",
+ "internal/asm/f32",
+ "internal/asm/f64",
+ "internal/math32",
+ "lapack",
+ "lapack/gonum",
+ "lapack/lapack64",
+ "mat",
+ "stat",
+ ]
+ pruneopts = ""
+ revision = "f0982070f509ee139841ca385c44dc22a77c8da8"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "github.com/leesper/go_rng",
+ "gonum.org/v1/gonum/stat",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/caio/go-tdigest/Gopkg.toml b/vendor/github.com/caio/go-tdigest/Gopkg.toml
new file mode 100644
index 000000000..323002ca8
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/Gopkg.toml
@@ -0,0 +1,21 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
diff --git a/vendor/github.com/caio/go-tdigest/LICENSE b/vendor/github.com/caio/go-tdigest/LICENSE
new file mode 100644
index 000000000..f5f074401
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Caio Romão Costa Nascimento
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/caio/go-tdigest/README.md b/vendor/github.com/caio/go-tdigest/README.md
new file mode 100644
index 000000000..b63587025
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/README.md
@@ -0,0 +1,94 @@
+# T-Digest
+
+A fast map-reduce and parallel streaming friendly data-structure for accurate
+quantile approximation.
+
+This package provides an implementation of Ted Dunning's t-digest data
+structure in Go.
+
+[](http://godoc.org/github.com/caio/go-tdigest)
+[](https://goreportcard.com/report/github.com/caio/go-tdigest)
+
+## Project Status
+
+This project is actively maintained. We are happy to collaborate on features
+and issues if/when they arrive.
+
+## Installation
+
+Our releases are tagged and signed following the [Semantic Versioning][semver]
+scheme. If you are using a dependency manager such as [dep][], the recommended
+way to is go about your business normally:
+
+ go get github.com/caio/go-tdigest
+
+Otherwise we recommend to use the following so that you don't risk breaking
+your build because of an API change:
+
+ go get gopkg.in/caio/go-tdigest.v2
+
+[semver]: http://semver.org/
+[dep]: https://github.com/golang/dep
+
+## Example Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "math/rand"
+
+ "github.com/caio/go-tdigest"
+)
+
+func main() {
+ // Analogue to tdigest.New(tdigest.Compression(100))
+ t, _ := tdigest.New()
+
+ for i := 0; i < 10000; i++ {
+ // Analogue to t.AddWeighted(rand.Float64(), 1)
+ t.Add(rand.Float64())
+ }
+
+ fmt.Printf("p(.5) = %.6f\n", t.Quantile(0.5))
+ fmt.Printf("CDF(Quantile(.5)) = %.6f\n", t.CDF(t.Quantile(0.5)))
+}
+```
+
+## Configuration
+
+You can configure your digest upon creation with options documented
+at [options.go](options.go). Example:
+
+```go
+// Construct a digest with compression=200 and its own
+// (thread-unsafe) RNG seeded with 0xCA10:
+digest, _ := tdigest.New(
+ tdigest.Compression(200),
+ tdigest.LocalRandomNumberGenerator(0xCA10),
+)
+```
+
+## Porting Existing Code to the v2 API
+
+It's very easy to migrate to the new API:
+
+- Replace `tdigest.New(100)` with `tdigest.New()`
+- Replace `tdigest.New(number)` with `tdigest.New(tdigest.Compression(number))`
+- Replace `Add(x,1)` with `Add(x)`
+- Replace `Add(x, weight)` with `AddWeighted(x, weight)`
+- Remove any use of `tdigest.Len()` (or [open an issue][issues])
+
+[issues]: https://github.com/caio/go-tdigest/issues/new
+
+## References
+
+This is a port of the [reference][1] implementation with some ideas borrowed
+from the [python version][2]. If you wanna get a quick grasp of how it works
+and why it's useful, [this video and companion article is pretty helpful][3].
+
+[1]: https://github.com/tdunning/t-digest
+[2]: https://github.com/CamDavidsonPilon/tdigest
+[3]: https://www.mapr.com/blog/better-anomaly-detection-t-digest-whiteboard-walkthrough
+
diff --git a/vendor/github.com/caio/go-tdigest/options.go b/vendor/github.com/caio/go-tdigest/options.go
new file mode 100644
index 000000000..c30b45954
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/options.go
@@ -0,0 +1,51 @@
+package tdigest
+
+import "errors"
+
+type tdigestOption func(*TDigest) error
+
+// Compression sets the digest compression
+//
+// The compression parameter rules the threshold in which samples are
+// merged together - the more often distinct samples are merged the more
+// precision is lost. Compression should be tuned according to your data
+// distribution, but a value of 100 (the default) is often good enough.
+//
+// A higher compression value means holding more centroids in memory
+// (thus: better precision), which means a bigger serialization payload,
+// higher memory footprint and slower addition of new samples.
+//
+// Compression must be a value greater of equal to 1, will yield an
+// error otherwise.
+func Compression(compression float64) tdigestOption { // nolint
+ return func(t *TDigest) error {
+ if compression < 1 {
+ return errors.New("Compression should be >= 1")
+ }
+ t.compression = compression
+ return nil
+ }
+}
+
+// RandomNumberGenerator sets the RNG to be used internally
+//
+// This allows changing which random number source is used when using
+// the TDigest structure (rngs are used when deciding which candidate
+// centroid to merge with and when compressing or merging with
+// another digest for it increases accuracy). This functionality is
+// particularly useful for testing or when you want to disconnect
+// your sample collection from the (default) shared random source
+// to minimize lock contention.
+func RandomNumberGenerator(rng RNG) tdigestOption { // nolint
+ return func(t *TDigest) error {
+ t.rng = rng
+ return nil
+ }
+}
+
+// LocalRandomNumberGenerator makes the TDigest use the default
+// `math/random` functions but with an unshared source that is
+// seeded with the given `seed` parameter.
+func LocalRandomNumberGenerator(seed int64) tdigestOption { // nolint
+ return RandomNumberGenerator(newLocalRNG(seed))
+}
diff --git a/vendor/github.com/caio/go-tdigest/rng.go b/vendor/github.com/caio/go-tdigest/rng.go
new file mode 100644
index 000000000..856b6ad9f
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/rng.go
@@ -0,0 +1,40 @@
+package tdigest
+
+import (
+ "math/rand"
+)
+
+// RNG is an interface that wraps the needed random number
+// generator calls that tdigest uses during its runtime
+type RNG interface {
+ Float32() float32
+ Intn(int) int
+}
+
+type globalRNG struct{}
+
+func (r globalRNG) Float32() float32 {
+ return rand.Float32()
+}
+
+func (r globalRNG) Intn(i int) int {
+ return rand.Intn(i)
+}
+
+type localRNG struct {
+ localRand *rand.Rand
+}
+
+func newLocalRNG(seed int64) *localRNG {
+ return &localRNG{
+ localRand: rand.New(rand.NewSource(seed)),
+ }
+}
+
+func (r *localRNG) Float32() float32 {
+ return r.localRand.Float32()
+}
+
+func (r *localRNG) Intn(i int) int {
+ return r.localRand.Intn(i)
+}
diff --git a/vendor/github.com/caio/go-tdigest/serialization.go b/vendor/github.com/caio/go-tdigest/serialization.go
new file mode 100644
index 000000000..6acb658b1
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/serialization.go
@@ -0,0 +1,202 @@
+package tdigest
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+)
+
+const smallEncoding int32 = 2
+
+var endianess = binary.BigEndian
+
+// AsBytes serializes the digest into a byte array so it can be
+// saved to disk or sent over the wire.
+func (t TDigest) AsBytes() ([]byte, error) {
+ // TODO get rid of the (now) useless error
+ return t.ToBytes(make([]byte, t.requiredSize())), nil
+}
+
+func (t *TDigest) requiredSize() int {
+ return 16 + (4 * len(t.summary.means)) + (len(t.summary.counts) * binary.MaxVarintLen64)
+}
+
+// ToBytes serializes into the supplied slice, avoiding allocation if the slice
+// is large enough. The result slice is returned.
+func (t *TDigest) ToBytes(b []byte) []byte {
+ requiredSize := t.requiredSize()
+ if cap(b) < requiredSize {
+ b = make([]byte, requiredSize)
+ }
+
+ // The binary.Put* functions helpfully don't extend the slice for you, they
+ // just panic if it's not already long enough. So pre-set the slice length;
+ // we'll return it with the actual encoded length.
+ b = b[:cap(b)]
+
+ endianess.PutUint32(b[0:4], uint32(smallEncoding))
+ endianess.PutUint64(b[4:12], math.Float64bits(t.compression))
+ endianess.PutUint32(b[12:16], uint32(t.summary.Len()))
+
+ var x float64
+ idx := 16
+ for _, mean := range t.summary.means {
+ delta := mean - x
+ x = mean
+ endianess.PutUint32(b[idx:], math.Float32bits(float32(delta)))
+ idx += 4
+ }
+
+ for _, count := range t.summary.counts {
+ idx += binary.PutUvarint(b[idx:], count)
+ }
+ return b[:idx]
+}
+
+// FromBytes reads a byte buffer with a serialized digest (from AsBytes)
+// and deserializes it.
+//
+// This function creates a new tdigest instance with the provided options,
+// but ignores the compression setting since the correct value comes
+// from the buffer.
+func FromBytes(buf *bytes.Reader, options ...tdigestOption) (*TDigest, error) {
+ var encoding int32
+ err := binary.Read(buf, endianess, &encoding)
+ if err != nil {
+ return nil, err
+ }
+
+ if encoding != smallEncoding {
+ return nil, fmt.Errorf("Unsupported encoding version: %d", encoding)
+ }
+
+ t, err := newWithoutSummary(options...)
+
+ if err != nil {
+ return nil, err
+ }
+
+ var compression float64
+ err = binary.Read(buf, endianess, &compression)
+ if err != nil {
+ return nil, err
+ }
+
+ t.compression = compression
+
+ var numCentroids int32
+ err = binary.Read(buf, endianess, &numCentroids)
+ if err != nil {
+ return nil, err
+ }
+
+ if numCentroids < 0 || numCentroids > 1<<22 {
+ return nil, errors.New("bad number of centroids in serialization")
+ }
+
+ t.summary = newSummary(int(numCentroids))
+ t.summary.means = t.summary.means[:numCentroids]
+ t.summary.counts = t.summary.counts[:numCentroids]
+
+ var x float64
+ for i := 0; i < int(numCentroids); i++ {
+ var delta float32
+ err = binary.Read(buf, endianess, &delta)
+ if err != nil {
+ return nil, err
+ }
+ x += float64(delta)
+ t.summary.means[i] = x
+ }
+
+ for i := 0; i < int(numCentroids); i++ {
+ count, err := decodeUint(buf)
+ if err != nil {
+ return nil, err
+ }
+ t.summary.counts[i] = count
+ t.count += count
+ }
+
+ return t, nil
+}
+
+// FromBytes deserializes into the supplied TDigest struct, re-using
+// and overwriting any existing buffers.
+//
+// This method reinitializes the digest from the provided buffer
+// discarding any previously collected data. Notice that in case
+// of errors this may leave the digest in a unusable state.
+func (t *TDigest) FromBytes(buf []byte) error {
+ if len(buf) < 16 {
+ return errors.New("buffer too small for deserialization")
+ }
+
+ encoding := int32(endianess.Uint32(buf))
+ if encoding != smallEncoding {
+ return fmt.Errorf("unsupported encoding version: %d", encoding)
+ }
+
+ compression := math.Float64frombits(endianess.Uint64(buf[4:12]))
+ numCentroids := int(endianess.Uint32(buf[12:16]))
+ if numCentroids < 0 || numCentroids > 1<<22 {
+ return errors.New("bad number of centroids in serialization")
+ }
+
+ if len(buf) < 16+(4*numCentroids) {
+ return errors.New("buffer too small for deserialization")
+ }
+
+ t.count = 0
+ t.compression = compression
+ if t.summary == nil ||
+ cap(t.summary.means) < numCentroids ||
+ cap(t.summary.counts) < numCentroids {
+ t.summary = newSummary(numCentroids)
+ }
+ t.summary.means = t.summary.means[:numCentroids]
+ t.summary.counts = t.summary.counts[:numCentroids]
+
+ idx := 16
+ var x float64
+ for i := 0; i < numCentroids; i++ {
+ delta := math.Float32frombits(endianess.Uint32(buf[idx:]))
+ idx += 4
+ x += float64(delta)
+ t.summary.means[i] = x
+ }
+
+ for i := 0; i < numCentroids; i++ {
+ count, read := binary.Uvarint(buf[idx:])
+ if read < 1 {
+ return errors.New("error decoding varint, this TDigest is now invalid")
+ }
+
+ idx += read
+
+ t.summary.counts[i] = count
+ t.count += count
+ }
+
+ if idx != len(buf) {
+ return errors.New("buffer has unread data")
+ }
+ return nil
+}
+
+func encodeUint(buf *bytes.Buffer, n uint64) error {
+ var b [binary.MaxVarintLen64]byte
+
+ l := binary.PutUvarint(b[:], n)
+
+ _, err := buf.Write(b[:l])
+
+ return err
+}
+
+func decodeUint(buf *bytes.Reader) (uint64, error) {
+ v, err := binary.ReadUvarint(buf)
+ return v, err
+}
diff --git a/vendor/github.com/caio/go-tdigest/summary.go b/vendor/github.com/caio/go-tdigest/summary.go
new file mode 100644
index 000000000..f7c90672e
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/summary.go
@@ -0,0 +1,206 @@
+package tdigest
+
+import (
+ "fmt"
+ "math"
+ "sort"
+)
+
+type summary struct {
+ means []float64
+ counts []uint64
+}
+
+func newSummary(initialCapacity int) *summary {
+ s := &summary{
+ means: make([]float64, 0, initialCapacity),
+ counts: make([]uint64, 0, initialCapacity),
+ }
+ return s
+}
+
+func (s *summary) Len() int {
+ return len(s.means)
+}
+
+func (s *summary) Add(key float64, value uint64) error {
+ if math.IsNaN(key) {
+ return fmt.Errorf("Key must not be NaN")
+ }
+ if value == 0 {
+ return fmt.Errorf("Count must be >0")
+ }
+
+ idx := s.findInsertionIndex(key)
+
+ s.means = append(s.means, math.NaN())
+ s.counts = append(s.counts, 0)
+
+ copy(s.means[idx+1:], s.means[idx:])
+ copy(s.counts[idx+1:], s.counts[idx:])
+
+ s.means[idx] = key
+ s.counts[idx] = value
+
+ return nil
+}
+
+// Always insert to the right
+func (s *summary) findInsertionIndex(x float64) int {
+ // Binary search is only worthwhile if we have a lot of keys.
+ if len(s.means) < 250 {
+ for i, mean := range s.means {
+ if mean > x {
+ return i
+ }
+ }
+ return len(s.means)
+ }
+
+ return sort.Search(len(s.means), func(i int) bool {
+ return s.means[i] > x
+ })
+}
+
+// This method is the hotspot when calling Add(), which in turn is called by
+// Compress() and Merge().
+func (s *summary) HeadSum(idx int) (sum float64) {
+ return float64(sumUntilIndex(s.counts, idx))
+}
+
+func (s *summary) Floor(x float64) int {
+ return s.findIndex(x) - 1
+}
+
+func (s *summary) findIndex(x float64) int {
+ // Binary search is only worthwhile if we have a lot of keys.
+ if len(s.means) < 250 {
+ for i, mean := range s.means {
+ if mean >= x {
+ return i
+ }
+ }
+ return len(s.means)
+ }
+
+ return sort.Search(len(s.means), func(i int) bool {
+ return s.means[i] >= x
+ })
+}
+
+func (s *summary) Mean(uncheckedIndex int) float64 {
+ return s.means[uncheckedIndex]
+}
+
+func (s *summary) Count(uncheckedIndex int) uint64 {
+ return s.counts[uncheckedIndex]
+}
+
+// return the index of the last item which the sum of counts
+// of items before it is less than or equal to `sum`. -1 in
+// case no centroid satisfies the requirement.
+// Since it's cheap, this also returns the `HeadSum` until
+// the found index (i.e. cumSum = HeadSum(FloorSum(x)))
+func (s *summary) FloorSum(sum float64) (index int, cumSum float64) {
+ index = -1
+ for i, count := range s.counts {
+ if cumSum <= sum {
+ index = i
+ } else {
+ break
+ }
+ cumSum += float64(count)
+ }
+ if index != -1 {
+ cumSum -= float64(s.counts[index])
+ }
+ return index, cumSum
+}
+
+func (s *summary) setAt(index int, mean float64, count uint64) {
+ s.means[index] = mean
+ s.counts[index] = count
+ s.adjustRight(index)
+ s.adjustLeft(index)
+}
+
+func (s *summary) adjustRight(index int) {
+ for i := index + 1; i < len(s.means) && s.means[i-1] > s.means[i]; i++ {
+ s.means[i-1], s.means[i] = s.means[i], s.means[i-1]
+ s.counts[i-1], s.counts[i] = s.counts[i], s.counts[i-1]
+ }
+}
+
+func (s *summary) adjustLeft(index int) {
+ for i := index - 1; i >= 0 && s.means[i] > s.means[i+1]; i-- {
+ s.means[i], s.means[i+1] = s.means[i+1], s.means[i]
+ s.counts[i], s.counts[i+1] = s.counts[i+1], s.counts[i]
+ }
+}
+
+func (s *summary) ForEach(f func(float64, uint64) bool) {
+ for i, mean := range s.means {
+ if !f(mean, s.counts[i]) {
+ break
+ }
+ }
+}
+
+func (s *summary) Perm(rng RNG, f func(float64, uint64) bool) {
+ for _, i := range perm(rng, s.Len()) {
+ if !f(s.means[i], s.counts[i]) {
+ break
+ }
+ }
+}
+
+func (s *summary) Clone() *summary {
+ return &summary{
+ means: append([]float64{}, s.means...),
+ counts: append([]uint64{}, s.counts...),
+ }
+}
+
+// Randomly shuffles summary contents, so they can be added to another summary
+// with being pathological. Renders summary invalid.
+func (s *summary) shuffle(rng RNG) {
+ for i := len(s.means) - 1; i > 1; i-- {
+ s.Swap(i, rng.Intn(i+1))
+ }
+}
+
+// for sort.Interface
+func (s *summary) Swap(i, j int) {
+ s.means[i], s.means[j] = s.means[j], s.means[i]
+ s.counts[i], s.counts[j] = s.counts[j], s.counts[i]
+}
+
+func (s *summary) Less(i, j int) bool {
+ return s.means[i] < s.means[j]
+}
+
+// A simple loop unroll saves a surprising amount of time.
+func sumUntilIndex(s []uint64, idx int) uint64 {
+ var cumSum uint64
+ var i int
+ for i = idx - 1; i >= 3; i -= 4 {
+ cumSum += uint64(s[i])
+ cumSum += uint64(s[i-1])
+ cumSum += uint64(s[i-2])
+ cumSum += uint64(s[i-3])
+ }
+ for ; i >= 0; i-- {
+ cumSum += uint64(s[i])
+ }
+ return cumSum
+}
+
+func perm(rng RNG, n int) []int {
+ m := make([]int, n)
+ for i := 1; i < n; i++ {
+ j := rng.Intn(i + 1)
+ m[i] = m[j]
+ m[j] = i
+ }
+ return m
+}
diff --git a/vendor/github.com/caio/go-tdigest/tdigest.go b/vendor/github.com/caio/go-tdigest/tdigest.go
new file mode 100644
index 000000000..e1b932c19
--- /dev/null
+++ b/vendor/github.com/caio/go-tdigest/tdigest.go
@@ -0,0 +1,445 @@
+// Package tdigest provides a highly accurate mergeable data-structure
+// for quantile estimation.
+//
+// Typical T-Digest use cases involve accumulating metrics on several
+// distinct nodes of a cluster and then merging them together to get
+// a system-wide quantile overview. Things such as: sensory data from
+// IoT devices, quantiles over enormous document datasets (think
+// ElasticSearch), performance metrics for distributed systems, etc.
+//
+// After you create (and configure, if desired) the digest:
+// digest, err := tdigest.New(tdigest.Compression(100))
+//
+// You can then use it for registering measurements:
+// digest.Add(number)
+//
+// Estimating quantiles:
+// digest.Quantile(0.99)
+//
+// And merging with another digest:
+// digest.Merge(otherDigest)
+package tdigest
+
+import (
+ "fmt"
+ "math"
+)
+
// TDigest is a quantile approximation data structure.
type TDigest struct {
	summary     *summary // sorted centroids (mean/count pairs)
	compression float64  // accuracy vs. size trade-off; higher keeps more centroids
	count       uint64   // total weight of all samples registered so far
	rng         RNG      // randomness source for merge choice and shuffling
}
+
+// New creates a new digest.
+//
+// By default the digest is constructed with a configuration that
+// should be useful for most use-cases. It comes with compression
+// set to 100 and uses a local random number generator for
+// performance reasons.
+func New(options ...tdigestOption) (*TDigest, error) {
+ tdigest, err := newWithoutSummary(options...)
+
+ if err != nil {
+ return nil, err
+ }
+
+ tdigest.summary = newSummary(estimateCapacity(tdigest.compression))
+ return tdigest, nil
+}
+
+// Creates a tdigest instance without allocating a summary.
+func newWithoutSummary(options ...tdigestOption) (*TDigest, error) {
+ tdigest := &TDigest{
+ compression: 100,
+ count: 0,
+ rng: newLocalRNG(1),
+ }
+
+ for _, option := range options {
+ err := option(tdigest)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return tdigest, nil
+}
+
// _quantile linearly interpolates between two adjacent centroid means,
// weighting each mean by its distance from the requested index.
func _quantile(index float64, previousIndex float64, nextIndex float64, previousMean float64, nextMean float64) float64 {
	span := nextIndex - previousIndex
	wPrev := (nextIndex - index) / span
	wNext := (index - previousIndex) / span
	return previousMean*wPrev + nextMean*wNext
}
+
// Compression returns the TDigest compression parameter (the
// accuracy/size trade-off fixed at construction time).
func (t *TDigest) Compression() float64 {
	return t.compression
}
+
// Quantile returns the desired percentile estimation.
//
// Values of p must be between 0 and 1 (inclusive), will panic otherwise.
func (t *TDigest) Quantile(q float64) float64 {
	if q < 0 || q > 1 {
		panic("q must be between 0 and 1 (inclusive)")
	}

	// Degenerate digests: no data, or a single centroid.
	if t.summary.Len() == 0 {
		return math.NaN()
	} else if t.summary.Len() == 1 {
		return t.summary.Mean(0)
	}

	// Fractional rank of the requested quantile among all samples.
	index := q * float64(t.count-1)
	previousMean := math.NaN()
	previousIndex := float64(0)
	// next: first centroid candidate; total: cumulative weight before it.
	next, total := t.summary.FloorSum(index)

	if next > 0 {
		previousMean = t.summary.Mean(next - 1)
		previousIndex = total - float64(t.summary.Count(next-1)+1)/2
	}

	// Walk forward until a bracket [previousIndex, nextIndex] around
	// index is found, then interpolate between the bracketing means.
	for {
		nextIndex := total + float64(t.summary.Count(next)-1)/2
		if nextIndex >= index {
			if math.IsNaN(previousMean) {
				// the index is before the 1st centroid
				if nextIndex == previousIndex {
					return t.summary.Mean(next)
				}
				// assume linear growth
				nextIndex2 := total + float64(t.summary.Count(next)) + float64(t.summary.Count(next+1)-1)/2
				previousMean = (nextIndex2*t.summary.Mean(next) - nextIndex*t.summary.Mean(next+1)) / (nextIndex2 - nextIndex)
			}
			// common case: two centroids found, the result is in between
			return _quantile(index, previousIndex, nextIndex, previousMean, t.summary.Mean(next))
		} else if next+1 == t.summary.Len() {
			// the index is after the last centroid: extrapolate linearly
			nextIndex2 := float64(t.count - 1)
			nextMean2 := (t.summary.Mean(next)*(nextIndex2-previousIndex) - previousMean*(nextIndex2-nextIndex)) / (nextIndex - previousIndex)
			return _quantile(index, nextIndex, nextIndex2, t.summary.Mean(next), nextMean2)
		}
		total += float64(t.summary.Count(next))
		previousMean = t.summary.Mean(next)
		previousIndex = nextIndex
		next++
	}
	// unreachable
}
+
// boundedWeightedAverage computes the weighted average of two
// centroids guaranteeing that the result will be between x1 and x2,
// inclusive.
//
// Refer to https://github.com/caio/go-tdigest/pull/19 for more details
func boundedWeightedAverage(x1 float64, w1 float64, x2 float64, w2 float64) float64 {
	// Normalize so that x1 <= x2 before averaging.
	if x1 > x2 {
		x1, x2 = x2, x1
		w1, w2 = w2, w1
	}
	avg := (x1*w1 + x2*w2) / (w1 + w2)
	// Clamp to [x1, x2] to defend against floating-point drift.
	return math.Max(x1, math.Min(avg, x2))
}
+
+// AddWeighted registers a new sample in the digest.
+//
+// It's the main entry point for the digest and very likely the only
+// method to be used for collecting samples. The count parameter is for
+// when you are registering a sample that occurred multiple times - the
+// most common value for this is 1.
+//
+// This will emit an error if `value` is NaN of if `count` is zero.
+func (t *TDigest) AddWeighted(value float64, count uint64) (err error) {
+ if count == 0 {
+ return fmt.Errorf("Illegal datapoint ", value, count)
+ }
+
+ if t.summary.Len() == 0 {
+ err = t.summary.Add(value, count)
+ t.count = uint64(count)
+ return err
+ }
+
+ begin := t.summary.Floor(value)
+ if begin == -1 {
+ begin = 0
+ }
+
+ begin, end := t.findNeighbors(begin, value)
+
+ closest := t.chooseMergeCandidate(begin, end, value, count)
+
+ if closest == t.summary.Len() {
+ err = t.summary.Add(value, count)
+ if err != nil {
+ return err
+ }
+ } else {
+ c := float64(t.summary.Count(closest))
+ newMean := boundedWeightedAverage(t.summary.Mean(closest), c, value, float64(count))
+ t.summary.setAt(closest, newMean, uint64(c)+count)
+ }
+ t.count += uint64(count)
+
+ if float64(t.summary.Len()) > 20*t.compression {
+ err = t.Compress()
+ }
+
+ return err
+}
+
// Count returns the total number of samples this digest represents
//
// The result represents how many times Add() was called on a digest
// plus how many samples the digests it has been merged with had.
// This is useful mainly for two scenarios:
//
// - Knowing if there is enough data so you can trust the quantiles
//
// - Knowing if you've registered too many samples already and
// deciding what to do about it.
//
// For the second case one approach would be to create a side empty
// digest and start registering samples on it as well as on the old
// (big) one and then discard the bigger one after a certain criterion
// is reached (say, minimum number of samples or a small relative
// error between new and old digests).
//
// Note the value receiver: this accessor works on a copy of the digest.
func (t TDigest) Count() uint64 {
	return t.count
}
+
// Add is an alias for AddWeighted(x, 1).
// Read the documentation for AddWeighted for more details.
func (t *TDigest) Add(value float64) error {
	return t.AddWeighted(value, 1)
}
+
// Compress tries to reduce the number of individual centroids stored
// in the digest.
//
// Compression trades off accuracy for performance and happens
// automatically after a certain amount of distinct samples have been
// stored.
//
// At any point in time you may call Compress on a digest, but you
// may completely ignore this and it will compress itself automatically
// after it grows too much. If you are minimizing network traffic
// it might be a good idea to compress before serializing.
func (t *TDigest) Compress() (err error) {
	if t.summary.Len() <= 1 {
		return nil
	}

	// Rebuild the digest from scratch: set the old centroids aside, reset
	// state, and re-add them in shuffled order (shuffling avoids a
	// pathological sorted insertion order; see summary.shuffle).
	oldTree := t.summary
	t.summary = newSummary(estimateCapacity(t.compression))
	t.count = 0

	oldTree.shuffle(t.rng)
	oldTree.ForEach(func(mean float64, count uint64) bool {
		// Stop re-adding on the first error; it escapes via the named return.
		err = t.AddWeighted(mean, count)
		return err == nil
	})
	return err
}
+
+// Merge joins a given digest into itself.
+//
+// Merging is useful when you have multiple TDigest instances running
+// in separate threads and you want to compute quantiles over all the
+// samples. This is particularly important on a scatter-gather/map-reduce
+// scenario.
+func (t *TDigest) Merge(other *TDigest) (err error) {
+ if other.summary.Len() == 0 {
+ return nil
+ }
+
+ other.summary.Perm(t.rng, func(mean float64, count uint64) bool {
+ err = t.AddWeighted(mean, count)
+ return err == nil
+ })
+ return err
+}
+
+// MergeDestructive joins a given digest into itself rendering
+// the other digest invalid.
+//
+// This works as Merge above but its faster. Using this method
+// requires caution as it makes 'other' useless - you must make
+// sure you discard it without making further uses of it.
+func (t *TDigest) MergeDestructive(other *TDigest) (err error) {
+ if other.summary.Len() == 0 {
+ return nil
+ }
+
+ other.summary.shuffle(t.rng)
+ other.summary.ForEach(func(mean float64, count uint64) bool {
+ err = t.AddWeighted(mean, count)
+ return err == nil
+ })
+ return err
+}
+
// CDF computes the fraction in which all samples are less than
// or equal to the given value.
func (t *TDigest) CDF(value float64) float64 {
	if t.summary.Len() == 0 {
		return math.NaN()
	} else if t.summary.Len() == 1 {
		// A single centroid degenerates to a step function.
		if value < t.summary.Mean(0) {
			return 0
		}
		return 1
	}

	// We have at least 2 centroids. left/right hold the half-distances
	// to the neighboring centroids, defining each centroid's span.
	left := (t.summary.Mean(1) - t.summary.Mean(0)) / 2
	right := left
	tot := 0.0 // cumulative weight of centroids fully below value

	for i := 1; i < t.summary.Len()-1; i++ {
		prevMean := t.summary.Mean(i - 1)
		if value < prevMean+right {
			// value falls within centroid i-1's span: interpolate inside
			// the span and clamp the result at zero.
			v := (tot + float64(t.summary.Count(i-1))*interpolate(value, prevMean-left, prevMean+right)) / float64(t.Count())
			if v > 0 {
				return v
			}
			return 0
		}

		tot += float64(t.summary.Count(i - 1))
		left = right
		right = (t.summary.Mean(i+1) - t.summary.Mean(i)) / 2
	}

	// last centroid, the summary length is at least two
	aIdx := t.summary.Len() - 2
	aMean := t.summary.Mean(aIdx)
	if value < aMean+right {
		aCount := float64(t.summary.Count(aIdx))
		return (tot + aCount*interpolate(value, aMean-left, aMean+right)) / float64(t.Count())
	}
	return 1
}
+
+// Clone returns a deep copy of a TDigest.
+func (t *TDigest) Clone() *TDigest {
+ return &TDigest{
+ summary: t.summary.Clone(),
+ compression: t.compression,
+ count: t.count,
+ rng: t.rng,
+ }
+}
+
// interpolate returns the relative position of x within [x0, x1].
func interpolate(x, x0, x1 float64) float64 {
	num := x - x0
	den := x1 - x0
	return num / den
}
+
// ForEachCentroid calls the specified function for each centroid.
//
// Iteration stops when the supplied function returns false, or when all
// centroids have been iterated. Centroids are visited in the summary's
// storage order.
func (t *TDigest) ForEachCentroid(f func(mean float64, count uint64) bool) {
	t.summary.ForEach(f)
}
+
// findNeighbors scans forward from start and returns the half-open
// range [begin, end) of centroids whose means are closest to value.
// Since the summary is ordered, distances first shrink and then grow;
// ties (z == minDistance) extend the range rather than ending it.
func (t TDigest) findNeighbors(start int, value float64) (int, int) {
	minDistance := math.MaxFloat64
	lastNeighbor := t.summary.Len()
	for neighbor := start; neighbor < t.summary.Len(); neighbor++ {
		z := math.Abs(t.summary.Mean(neighbor) - value)
		if z < minDistance {
			// Still getting closer: restart the candidate range here.
			start = neighbor
			minDistance = z
		} else if z > minDistance {
			// Distances are growing again; the closest run has ended.
			lastNeighbor = neighbor
			break
		}
	}
	return start, lastNeighbor
}
+
// chooseMergeCandidate picks, via reservoir sampling, one centroid in
// [begin, end) that can absorb `count` additional weight without
// exceeding the t-digest size bound k(q). It returns t.summary.Len()
// when no centroid qualifies, signalling the caller to insert a new one.
func (t TDigest) chooseMergeCandidate(begin, end int, value float64, count uint64) int {
	closest := t.summary.Len()
	sum := t.summary.HeadSum(begin)
	// n counts eligible candidates seen so far; used as the 1/n
	// reservoir-sampling probability below.
	var n float32

	for neighbor := begin; neighbor != end; neighbor++ {
		c := float64(t.summary.Count(neighbor))
		// q approximates the quantile at this centroid's midpoint.
		var q float64
		if t.count == 1 {
			q = 0.5
		} else {
			q = (sum + (c-1)/2) / float64(t.count-1)
		}
		// k is the maximum weight allowed for a centroid at quantile q.
		k := 4 * float64(t.count) * q * (1 - q) / t.compression

		if c+float64(count) <= k {
			// Eligible: keep it with probability 1/n (uniform over
			// all eligible candidates).
			n++
			if t.rng.Float32() < 1/n {
				closest = neighbor
			}
		}
		sum += c
	}
	return closest
}
+
// TrimmedMean returns the mean of the distribution between the two
// percentiles p1 and p2.
//
// Values of p1 and p2 must be between 0 and 1 (inclusive) and p1
// must be less than p2. Will panic otherwise.
func (t *TDigest) TrimmedMean(p1, p2 float64) float64 {
	if p1 < 0 || p1 > 1 {
		panic("p1 must be between 0 and 1 (inclusive)")
	}
	if p2 < 0 || p2 > 1 {
		panic("p2 must be between 0 and 1 (inclusive)")
	}
	if p1 >= p2 {
		panic("p1 must be lower than p2")
	}

	// Weight thresholds delimiting the [p1, p2] window.
	minCount := p1 * float64(t.count)
	maxCount := p2 * float64(t.count)

	var trimmedSum, trimmedCount, currCount float64
	for i, mean := range t.summary.means {
		count := float64(t.summary.counts[i])

		nextCount := currCount + count
		// Skip centroids that lie entirely below the window.
		if nextCount <= minCount {
			currCount = nextCount
			continue
		}

		// Clip the portions of this centroid that fall outside the window.
		if currCount < minCount {
			count = nextCount - minCount
		}
		if nextCount > maxCount {
			count -= nextCount - maxCount
		}

		trimmedSum += count * mean
		trimmedCount += count

		if nextCount >= maxCount {
			break
		}
		currCount = nextCount
	}

	if trimmedCount == 0 {
		return 0
	}
	return trimmedSum / trimmedCount
}
+
// estimateCapacity sizes the summary for a given compression setting.
// Note the truncation happens before scaling: int(compression) * 10.
func estimateCapacity(compression float64) int {
	c := int(compression)
	return c * 10
}
diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt
deleted file mode 100644
index 24b53065f..000000000
--- a/vendor/github.com/cespare/xxhash/LICENSE.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Copyright (c) 2016 Caleb Spare
-
-MIT License
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md
deleted file mode 100644
index 0982fd25e..000000000
--- a/vendor/github.com/cespare/xxhash/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# xxhash
-
-[](https://godoc.org/github.com/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
-high-quality hashing algorithm that is much faster than anything in the Go
-standard library.
-
-The API is very small, taking its cue from the other hashing packages in the
-standard library:
-
- $ go doc github.com/cespare/xxhash !
- package xxhash // import "github.com/cespare/xxhash"
-
- Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
- at http://cyan4973.github.io/xxHash/.
-
- func New() hash.Hash64
- func Sum64(b []byte) uint64
- func Sum64String(s string) uint64
-
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
-
-## Benchmarks
-
-Here are some quick benchmarks comparing the pure-Go and assembly
-implementations of Sum64 against another popular Go XXH64 implementation,
-[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
-
-| input size | OneOfOne | cespare (purego) | cespare |
-| --- | --- | --- | --- |
-| 5 B | 416 MB/s | 720 MB/s | 872 MB/s |
-| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s |
-| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s |
-| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s |
-
-These numbers were generated with:
-
-```
-$ go test -benchtime 10s -bench '/OneOfOne,'
-$ go test -tags purego -benchtime 10s -bench '/xxhash,'
-$ go test -benchtime 10s -bench '/xxhash,'
-```
-
-## Projects using this package
-
-- [InfluxDB](https://github.com/influxdata/influxdb)
-- [Prometheus](https://github.com/prometheus/prometheus)
diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go
deleted file mode 100644
index f3eac5ebc..000000000
--- a/vendor/github.com/cespare/xxhash/rotate.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !go1.9
-
-package xxhash
-
-// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
-
-func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
-func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
-func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
-func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
-func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
-func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
-func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
-func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go
deleted file mode 100644
index b99612bab..000000000
--- a/vendor/github.com/cespare/xxhash/rotate19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build go1.9
-
-package xxhash
-
-import "math/bits"
-
-func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
-func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
-func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
-func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
-func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
-func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
-func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
-func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go
deleted file mode 100644
index f896bd28f..000000000
--- a/vendor/github.com/cespare/xxhash/xxhash.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
-// at http://cyan4973.github.io/xxHash/.
-package xxhash
-
-import (
- "encoding/binary"
- "hash"
-)
-
-const (
- prime1 uint64 = 11400714785074694791
- prime2 uint64 = 14029467366897019727
- prime3 uint64 = 1609587929392839161
- prime4 uint64 = 9650029242287828579
- prime5 uint64 = 2870177450012600261
-)
-
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
-
-type xxh struct {
- v1 uint64
- v2 uint64
- v3 uint64
- v4 uint64
- total int
- mem [32]byte
- n int // how much of mem is used
-}
-
-// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
-func New() hash.Hash64 {
- var x xxh
- x.Reset()
- return &x
-}
-
-func (x *xxh) Reset() {
- x.n = 0
- x.total = 0
- x.v1 = prime1v + prime2
- x.v2 = prime2
- x.v3 = 0
- x.v4 = -prime1v
-}
-
-func (x *xxh) Size() int { return 8 }
-func (x *xxh) BlockSize() int { return 32 }
-
-// Write adds more data to x. It always returns len(b), nil.
-func (x *xxh) Write(b []byte) (n int, err error) {
- n = len(b)
- x.total += len(b)
-
- if x.n+len(b) < 32 {
- // This new data doesn't even fill the current block.
- copy(x.mem[x.n:], b)
- x.n += len(b)
- return
- }
-
- if x.n > 0 {
- // Finish off the partial block.
- copy(x.mem[x.n:], b)
- x.v1 = round(x.v1, u64(x.mem[0:8]))
- x.v2 = round(x.v2, u64(x.mem[8:16]))
- x.v3 = round(x.v3, u64(x.mem[16:24]))
- x.v4 = round(x.v4, u64(x.mem[24:32]))
- b = b[32-x.n:]
- x.n = 0
- }
-
- if len(b) >= 32 {
- // One or more full blocks left.
- b = writeBlocks(x, b)
- }
-
- // Store any remaining partial block.
- copy(x.mem[:], b)
- x.n = len(b)
-
- return
-}
-
-func (x *xxh) Sum(b []byte) []byte {
- s := x.Sum64()
- return append(
- b,
- byte(s>>56),
- byte(s>>48),
- byte(s>>40),
- byte(s>>32),
- byte(s>>24),
- byte(s>>16),
- byte(s>>8),
- byte(s),
- )
-}
-
-func (x *xxh) Sum64() uint64 {
- var h uint64
-
- if x.total >= 32 {
- v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = x.v3 + prime5
- }
-
- h += uint64(x.total)
-
- i, end := 0, x.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(x.mem[i:i+8]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(x.mem[i:i+4])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for i < end {
- h ^= uint64(x.mem[i]) * prime5
- h = rol11(h) * prime1
- i++
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
-func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
-
-func round(acc, input uint64) uint64 {
- acc += input * prime2
- acc = rol31(acc)
- acc *= prime1
- return acc
-}
-
-func mergeRound(acc, val uint64) uint64 {
- val = round(0, val)
- acc ^= val
- acc = acc*prime1 + prime4
- return acc
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go
deleted file mode 100644
index d61765268..000000000
--- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-//
-//go:noescape
-func Sum64(b []byte) uint64
-
-func writeBlocks(x *xxh, b []byte) []byte
diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s
deleted file mode 100644
index 757f2011f..000000000
--- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s
+++ /dev/null
@@ -1,233 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !purego
-
-#include "textflag.h"
-
-// Register allocation:
-// AX h
-// CX pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// R15 prime4v
-
-// round reads from and advances the buffer pointer in CX.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (CX), R12 \
- ADDQ $8, CX \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
-
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ R15, acc
-
-// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
- // Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), R15
-
- // Load slice.
- MOVQ b_base+0(FP), CX
- MOVQ b_len+8(FP), DX
- LEAQ (CX)(DX*1), BX
-
- // The first loop limit will be len(b)-32.
- SUBQ $32, BX
-
- // Check whether we have at least one block.
- CMPQ DX, $32
- JLT noBlocks
-
- // Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
-
- // Loop until CX > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
-
- JMP afterBlocks
-
-noBlocks:
- MOVQ ·prime5v(SB), AX
-
-afterBlocks:
- ADDQ DX, AX
-
- // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
- ADDQ $24, BX
-
- CMPQ CX, BX
- JG fourByte
-
-wordLoop:
- // Calculate k1.
- MOVQ (CX), R8
- ADDQ $8, CX
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
-
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ R15, AX
-
- CMPQ CX, BX
- JLE wordLoop
-
-fourByte:
- ADDQ $4, BX
- CMPQ CX, BX
- JG singles
-
- MOVL (CX), R8
- ADDQ $4, CX
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ CX, BX
- JGE finalize
-
-singlesLoop:
- MOVBQZX (CX), R12
- ADDQ $1, CX
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
-
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ CX, BX
- JL singlesLoop
-
-finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
-
- MOVQ AX, ret+24(FP)
- RET
-
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the x pointer.
-
-// func writeBlocks(x *xxh, b []byte) []byte
-TEXT ·writeBlocks(SB), NOSPLIT, $0-56
- // Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
-
- // Load slice.
- MOVQ b_base+8(FP), CX
- MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
- MOVQ b_len+16(FP), DX
- LEAQ (CX)(DX*1), BX
- SUBQ $32, BX
-
- // Load vN from x.
- MOVQ x+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
-
- // We don't need to check the loop condition here; this function is
- // always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ CX, BX
- JLE blockLoop
-
- // Copy vN back to x.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
-
- // Construct return slice.
- // NOTE: It's important that we don't construct a slice that has a base
- // pointer off the end of the original slice, as in Go 1.7+ this will
- // cause runtime crashes. (See discussion in, for example,
- // https://github.com/golang/go/issues/16772.)
- // Therefore, we calculate the length/cap first, and if they're zero, we
- // keep the old base. This is what the compiler does as well if you
- // write code like
- // b = b[len(b):]
-
- // New length is 32 - (CX - BX) -> BX+32 - CX.
- ADDQ $32, BX
- SUBQ CX, BX
- JZ afterSetBase
-
- MOVQ CX, ret_base+32(FP)
-
-afterSetBase:
- MOVQ BX, ret_len+40(FP)
- MOVQ BX, ret_cap+48(FP) // set cap == len
-
- RET
diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go
deleted file mode 100644
index c68d13f89..000000000
--- a/vendor/github.com/cespare/xxhash/xxhash_other.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// +build !amd64 appengine !gc purego
-
-package xxhash
-
-// Sum64 computes the 64-bit xxHash digest of b.
-func Sum64(b []byte) uint64 {
- // A simpler version would be
- // x := New()
- // x.Write(b)
- // return x.Sum64()
- // but this is faster, particularly for small inputs.
-
- n := len(b)
- var h uint64
-
- if n >= 32 {
- v1 := prime1v + prime2
- v2 := prime2
- v3 := uint64(0)
- v4 := -prime1v
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- h = mergeRound(h, v1)
- h = mergeRound(h, v2)
- h = mergeRound(h, v3)
- h = mergeRound(h, v4)
- } else {
- h = prime5
- }
-
- h += uint64(n)
-
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
- h ^= k1
- h = rol27(h)*prime1 + prime4
- }
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
- h = rol23(h)*prime2 + prime3
- i += 4
- }
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
- h = rol11(h) * prime1
- }
-
- h ^= h >> 33
- h *= prime2
- h ^= h >> 29
- h *= prime3
- h ^= h >> 32
-
- return h
-}
-
-func writeBlocks(x *xxh, b []byte) []byte {
- v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
- for len(b) >= 32 {
- v1 = round(v1, u64(b[0:8:len(b)]))
- v2 = round(v2, u64(b[8:16:len(b)]))
- v3 = round(v3, u64(b[16:24:len(b)]))
- v4 = round(v4, u64(b[24:32:len(b)]))
- b = b[32:len(b):len(b)]
- }
- x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
- return b
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go
deleted file mode 100644
index dfa15ab7e..000000000
--- a/vendor/github.com/cespare/xxhash/xxhash_safe.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build appengine
-
-// This file contains the safe implementations of otherwise unsafe-using code.
-
-package xxhash
-
-// Sum64String computes the 64-bit xxHash digest of s.
-func Sum64String(s string) uint64 {
- return Sum64([]byte(s))
-}
diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
deleted file mode 100644
index d2b64e8bb..000000000
--- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build !appengine
-
-// This file encapsulates usage of unsafe.
-// xxhash_safe.go contains the safe implementations.
-
-package xxhash
-
-import (
- "reflect"
- "unsafe"
-)
-
-// Sum64String computes the 64-bit xxHash digest of s.
-// It may be faster than Sum64([]byte(s)) by avoiding a copy.
-//
-// TODO(caleb): Consider removing this if an optimization is ever added to make
-// it unnecessary: https://golang.org/issue/2205.
-//
-// TODO(caleb): We still have a function call; we could instead write Go/asm
-// copies of Sum64 for strings to squeeze out a bit more speed.
-func Sum64String(s string) uint64 {
- // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
- // for some discussion about this unsafe conversion.
- var b []byte
- bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- bh.Len = len(s)
- bh.Cap = len(s)
- return Sum64(b)
-}
diff --git a/vendor/github.com/prometheus/common/sigv4/LICENSE b/vendor/github.com/cncf/xds/go/LICENSE
similarity index 100%
rename from vendor/github.com/prometheus/common/sigv4/LICENSE
rename to vendor/github.com/cncf/xds/go/LICENSE
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
new file mode 100644
index 000000000..3c751b6ca
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.go
@@ -0,0 +1,411 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/migrate.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+}
+
+func (x *MigrateAnnotation) Reset() {
+ *x = MigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MigrateAnnotation) ProtoMessage() {}
+
+func (x *MigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*MigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MigrateAnnotation) GetRename() string {
+ if x != nil {
+ return x.Rename
+ }
+ return ""
+}
+
+type FieldMigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+ OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"`
+}
+
+func (x *FieldMigrateAnnotation) Reset() {
+ *x = FieldMigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldMigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMigrateAnnotation) ProtoMessage() {}
+
+func (x *FieldMigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FieldMigrateAnnotation) GetRename() string {
+ if x != nil {
+ return x.Rename
+ }
+ return ""
+}
+
+func (x *FieldMigrateAnnotation) GetOneofPromotion() string {
+ if x != nil {
+ return x.OneofPromotion
+ }
+ return ""
+}
+
+type FileMigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"`
+}
+
+func (x *FileMigrateAnnotation) Reset() {
+ *x = FileMigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileMigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileMigrateAnnotation) ProtoMessage() {}
+
+func (x *FileMigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_migrate_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileMigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_migrate_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *FileMigrateAnnotation) GetMoveToPackage() string {
+ if x != nil {
+ return x.MoveToPackage
+ }
+ return ""
+}
+
+var file_udpa_annotations_migrate_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.message_migrate",
+ Tag: "bytes,171962766,opt,name=message_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldMigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.field_migrate",
+ Tag: "bytes,171962766,opt,name=field_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.enum_migrate",
+ Tag: "bytes,171962766,opt,name=enum_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.enum_value_migrate",
+ Tag: "bytes,171962766,opt,name=enum_value_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*FileMigrateAnnotation)(nil),
+ Field: 171962766,
+ Name: "udpa.annotations.file_migrate",
+ Tag: "bytes,171962766,opt,name=file_migrate",
+ Filename: "udpa/annotations/migrate.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional udpa.annotations.MigrateAnnotation message_migrate = 171962766;
+ E_MessageMigrate = &file_udpa_annotations_migrate_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional udpa.annotations.FieldMigrateAnnotation field_migrate = 171962766;
+ E_FieldMigrate = &file_udpa_annotations_migrate_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+ // optional udpa.annotations.MigrateAnnotation enum_migrate = 171962766;
+ E_EnumMigrate = &file_udpa_annotations_migrate_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumValueOptions.
+var (
+ // optional udpa.annotations.MigrateAnnotation enum_value_migrate = 171962766;
+ E_EnumValueMigrate = &file_udpa_annotations_migrate_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional udpa.annotations.FileMigrateAnnotation file_migrate = 171962766;
+ E_FileMigrate = &file_udpa_annotations_migrate_proto_extTypes[4]
+)
+
+var File_udpa_annotations_migrate_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_migrate_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2b, 0x0a, 0x11, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d,
+ 0x65, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74,
+ 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72,
+ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x70, 0x72, 0x6f,
+ 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6f, 0x6e,
+ 0x65, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3f, 0x0a, 0x15,
+ 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x74, 0x6f,
+ 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d,
+ 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x3a, 0x70, 0x0a,
+ 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, 0x70,
+ 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x69,
+ 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a,
+ 0x6f, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x65, 0x6c,
+ 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x3a, 0x67, 0x0a, 0x0c, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x8e,
+ 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e,
+ 0x75, 0x6d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x77, 0x0a, 0x12, 0x65, 0x6e, 0x75,
+ 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12,
+ 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x75, 0x64,
+ 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d,
+ 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x10, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x65, 0x3a, 0x6b, 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x8e, 0xe3, 0xff, 0x51, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x75, 0x64, 0x70, 0x61,
+ 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69, 0x6c,
+ 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x42,
+ 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_udpa_annotations_migrate_proto_rawDescOnce sync.Once
+ file_udpa_annotations_migrate_proto_rawDescData = file_udpa_annotations_migrate_proto_rawDesc
+)
+
+func file_udpa_annotations_migrate_proto_rawDescGZIP() []byte {
+ file_udpa_annotations_migrate_proto_rawDescOnce.Do(func() {
+ file_udpa_annotations_migrate_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_migrate_proto_rawDescData)
+ })
+ return file_udpa_annotations_migrate_proto_rawDescData
+}
+
+var file_udpa_annotations_migrate_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_udpa_annotations_migrate_proto_goTypes = []interface{}{
+ (*MigrateAnnotation)(nil), // 0: udpa.annotations.MigrateAnnotation
+ (*FieldMigrateAnnotation)(nil), // 1: udpa.annotations.FieldMigrateAnnotation
+ (*FileMigrateAnnotation)(nil), // 2: udpa.annotations.FileMigrateAnnotation
+ (*descriptorpb.MessageOptions)(nil), // 3: google.protobuf.MessageOptions
+ (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions
+ (*descriptorpb.EnumOptions)(nil), // 5: google.protobuf.EnumOptions
+ (*descriptorpb.EnumValueOptions)(nil), // 6: google.protobuf.EnumValueOptions
+ (*descriptorpb.FileOptions)(nil), // 7: google.protobuf.FileOptions
+}
+var file_udpa_annotations_migrate_proto_depIdxs = []int32{
+ 3, // 0: udpa.annotations.message_migrate:extendee -> google.protobuf.MessageOptions
+ 4, // 1: udpa.annotations.field_migrate:extendee -> google.protobuf.FieldOptions
+ 5, // 2: udpa.annotations.enum_migrate:extendee -> google.protobuf.EnumOptions
+ 6, // 3: udpa.annotations.enum_value_migrate:extendee -> google.protobuf.EnumValueOptions
+ 7, // 4: udpa.annotations.file_migrate:extendee -> google.protobuf.FileOptions
+ 0, // 5: udpa.annotations.message_migrate:type_name -> udpa.annotations.MigrateAnnotation
+ 1, // 6: udpa.annotations.field_migrate:type_name -> udpa.annotations.FieldMigrateAnnotation
+ 0, // 7: udpa.annotations.enum_migrate:type_name -> udpa.annotations.MigrateAnnotation
+ 0, // 8: udpa.annotations.enum_value_migrate:type_name -> udpa.annotations.MigrateAnnotation
+ 2, // 9: udpa.annotations.file_migrate:type_name -> udpa.annotations.FileMigrateAnnotation
+ 10, // [10:10] is the sub-list for method output_type
+ 10, // [10:10] is the sub-list for method input_type
+ 5, // [5:10] is the sub-list for extension type_name
+ 0, // [0:5] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_migrate_proto_init() }
+func file_udpa_annotations_migrate_proto_init() {
+ if File_udpa_annotations_migrate_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_annotations_migrate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_udpa_annotations_migrate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldMigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_udpa_annotations_migrate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileMigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_migrate_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 5,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_migrate_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_migrate_proto_depIdxs,
+ MessageInfos: file_udpa_annotations_migrate_proto_msgTypes,
+ ExtensionInfos: file_udpa_annotations_migrate_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_migrate_proto = out.File
+ file_udpa_annotations_migrate_proto_rawDesc = nil
+ file_udpa_annotations_migrate_proto_goTypes = nil
+ file_udpa_annotations_migrate_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
new file mode 100644
index 000000000..38196d5eb
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/migrate.pb.validate.go
@@ -0,0 +1,350 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/migrate.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on MigrateAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *MigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MigrateAnnotationMultiError, or nil if none found.
+func (m *MigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Rename
+
+ if len(errors) > 0 {
+ return MigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// MigrateAnnotationMultiError is an error wrapping multiple validation errors
+// returned by MigrateAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type MigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// MigrateAnnotationValidationError is the validation error returned by
+// MigrateAnnotation.Validate if the designated constraints aren't met.
+type MigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MigrateAnnotationValidationError) ErrorName() string {
+ return "MigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MigrateAnnotationValidationError{}
+
+// Validate checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldMigrateAnnotationMultiError, or nil if none found.
+func (m *FieldMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldMigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Rename
+
+ // no validation rules for OneofPromotion
+
+ if len(errors) > 0 {
+ return FieldMigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldMigrateAnnotationValidationError is the validation error returned by
+// FieldMigrateAnnotation.Validate if the designated constraints aren't met.
+type FieldMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldMigrateAnnotationValidationError) ErrorName() string {
+ return "FieldMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldMigrateAnnotationValidationError{}
+
+// Validate checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FileMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileMigrateAnnotationMultiError, or nil if none found.
+func (m *FileMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileMigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for MoveToPackage
+
+ if len(errors) > 0 {
+ return FileMigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FileMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// FileMigrateAnnotationValidationError is the validation error returned by
+// FileMigrateAnnotation.Validate if the designated constraints aren't met.
+type FileMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FileMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FileMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FileMigrateAnnotationValidationError) ErrorName() string {
+ return "FileMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FileMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFileMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FileMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FileMigrateAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
new file mode 100644
index 000000000..7c8339919
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.go
@@ -0,0 +1,196 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/security.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type FieldSecurityAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"`
+ ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"`
+}
+
+func (x *FieldSecurityAnnotation) Reset() {
+ *x = FieldSecurityAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_security_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldSecurityAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldSecurityAnnotation) ProtoMessage() {}
+
+func (x *FieldSecurityAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_security_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldSecurityAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_security_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool {
+ if x != nil {
+ return x.ConfigureForUntrustedDownstream
+ }
+ return false
+}
+
+func (x *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool {
+ if x != nil {
+ return x.ConfigureForUntrustedUpstream
+ }
+ return false
+}
+
+var file_udpa_annotations_security_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldSecurityAnnotation)(nil),
+ Field: 11122993,
+ Name: "udpa.annotations.security",
+ Tag: "bytes,11122993,opt,name=security",
+ Filename: "udpa/annotations/security.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional udpa.annotations.FieldSecurityAnnotation security = 11122993;
+ E_Security = &file_udpa_annotations_security_proto_extTypes[0]
+)
+
+var File_udpa_annotations_security_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_security_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x17, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65,
+ 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x4b, 0x0a, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f,
+ 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73,
+ 0x74, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x47, 0x0a,
+ 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75,
+ 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61,
+ 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x55, 0x70,
+ 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x67, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0xb1, 0xf2, 0xa6, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x75, 0x64, 0x70,
+ 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42,
+ 0x31, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x08, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67,
+ 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_udpa_annotations_security_proto_rawDescOnce sync.Once
+ file_udpa_annotations_security_proto_rawDescData = file_udpa_annotations_security_proto_rawDesc
+)
+
+func file_udpa_annotations_security_proto_rawDescGZIP() []byte {
+ file_udpa_annotations_security_proto_rawDescOnce.Do(func() {
+ file_udpa_annotations_security_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_security_proto_rawDescData)
+ })
+ return file_udpa_annotations_security_proto_rawDescData
+}
+
+var file_udpa_annotations_security_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_udpa_annotations_security_proto_goTypes = []interface{}{
+ (*FieldSecurityAnnotation)(nil), // 0: udpa.annotations.FieldSecurityAnnotation
+ (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions
+}
+var file_udpa_annotations_security_proto_depIdxs = []int32{
+ 1, // 0: udpa.annotations.security:extendee -> google.protobuf.FieldOptions
+ 0, // 1: udpa.annotations.security:type_name -> udpa.annotations.FieldSecurityAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_security_proto_init() }
+func file_udpa_annotations_security_proto_init() {
+ if File_udpa_annotations_security_proto != nil {
+ return
+ }
+ file_udpa_annotations_status_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_annotations_security_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldSecurityAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_security_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_security_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_security_proto_depIdxs,
+ MessageInfos: file_udpa_annotations_security_proto_msgTypes,
+ ExtensionInfos: file_udpa_annotations_security_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_security_proto = out.File
+ file_udpa_annotations_security_proto_rawDesc = nil
+ file_udpa_annotations_security_proto_goTypes = nil
+ file_udpa_annotations_security_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
new file mode 100644
index 000000000..acc9bd7a1
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/security.pb.validate.go
@@ -0,0 +1,142 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/security.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on FieldSecurityAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldSecurityAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldSecurityAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldSecurityAnnotationMultiError, or nil if none found.
+func (m *FieldSecurityAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldSecurityAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for ConfigureForUntrustedDownstream
+
+ // no validation rules for ConfigureForUntrustedUpstream
+
+ if len(errors) > 0 {
+ return FieldSecurityAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldSecurityAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldSecurityAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldSecurityAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldSecurityAnnotationValidationError is the validation error returned by
+// FieldSecurityAnnotation.Validate if the designated constraints aren't met.
+type FieldSecurityAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldSecurityAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldSecurityAnnotationValidationError) ErrorName() string {
+ return "FieldSecurityAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldSecurityAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldSecurityAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldSecurityAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldSecurityAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
new file mode 100644
index 000000000..e2b1a59cb
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.go
@@ -0,0 +1,93 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/sensitive.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_udpa_annotations_sensitive_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 76569463,
+ Name: "udpa.annotations.sensitive",
+ Tag: "varint,76569463,opt,name=sensitive",
+ Filename: "udpa/annotations/sensitive.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional bool sensitive = 76569463;
+ E_Sensitive = &file_udpa_annotations_sensitive_proto_extTypes[0]
+)
+
+var File_udpa_annotations_sensitive_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_sensitive_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3e, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74,
+ 0x69, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xf7, 0xb6, 0xc1, 0x24, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e,
+ 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_udpa_annotations_sensitive_proto_goTypes = []interface{}{
+ (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
+}
+var file_udpa_annotations_sensitive_proto_depIdxs = []int32{
+ 0, // 0: udpa.annotations.sensitive:extendee -> google.protobuf.FieldOptions
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_sensitive_proto_init() }
+func file_udpa_annotations_sensitive_proto_init() {
+ if File_udpa_annotations_sensitive_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_sensitive_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_sensitive_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_sensitive_proto_depIdxs,
+ ExtensionInfos: file_udpa_annotations_sensitive_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_sensitive_proto = out.File
+ file_udpa_annotations_sensitive_proto_rawDesc = nil
+ file_udpa_annotations_sensitive_proto_goTypes = nil
+ file_udpa_annotations_sensitive_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
new file mode 100644
index 000000000..f3fa61974
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/sensitive.pb.validate.go
@@ -0,0 +1,36 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/sensitive.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
new file mode 100644
index 000000000..cf629f751
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.go
@@ -0,0 +1,253 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/status.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type PackageVersionStatus int32
+
+const (
+ PackageVersionStatus_UNKNOWN PackageVersionStatus = 0
+ PackageVersionStatus_FROZEN PackageVersionStatus = 1
+ PackageVersionStatus_ACTIVE PackageVersionStatus = 2
+ PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3
+)
+
+// Enum value maps for PackageVersionStatus.
+var (
+ PackageVersionStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "FROZEN",
+ 2: "ACTIVE",
+ 3: "NEXT_MAJOR_VERSION_CANDIDATE",
+ }
+ PackageVersionStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "FROZEN": 1,
+ "ACTIVE": 2,
+ "NEXT_MAJOR_VERSION_CANDIDATE": 3,
+ }
+)
+
+func (x PackageVersionStatus) Enum() *PackageVersionStatus {
+ p := new(PackageVersionStatus)
+ *p = x
+ return p
+}
+
+func (x PackageVersionStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PackageVersionStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_udpa_annotations_status_proto_enumTypes[0].Descriptor()
+}
+
+func (PackageVersionStatus) Type() protoreflect.EnumType {
+ return &file_udpa_annotations_status_proto_enumTypes[0]
+}
+
+func (x PackageVersionStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PackageVersionStatus.Descriptor instead.
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) {
+ return file_udpa_annotations_status_proto_rawDescGZIP(), []int{0}
+}
+
+type StatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+ PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=udpa.annotations.PackageVersionStatus" json:"package_version_status,omitempty"`
+}
+
+func (x *StatusAnnotation) Reset() {
+ *x = StatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_status_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusAnnotation) ProtoMessage() {}
+
+func (x *StatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_status_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusAnnotation.ProtoReflect.Descriptor instead.
+func (*StatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *StatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+func (x *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus {
+ if x != nil {
+ return x.PackageVersionStatus
+ }
+ return PackageVersionStatus_UNKNOWN
+}
+
+var file_udpa_annotations_status_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*StatusAnnotation)(nil),
+ Field: 222707719,
+ Name: "udpa.annotations.file_status",
+ Tag: "bytes,222707719,opt,name=file_status",
+ Filename: "udpa/annotations/status.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional udpa.annotations.StatusAnnotation file_status = 222707719;
+ E_FileStatus = &file_udpa_annotations_status_proto_extTypes[0]
+)
+
+var File_udpa_annotations_status_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_status_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x22, 0x9a, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b,
+ 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65,
+ 0x73, 0x73, 0x12, 0x5c, 0x0a, 0x16, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x14, 0x70, 0x61, 0x63, 0x6b,
+ 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2a, 0x5d, 0x0a, 0x14, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10,
+ 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x20, 0x0a,
+ 0x1c, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4d, 0x41, 0x4a, 0x4f, 0x52, 0x5f, 0x56, 0x45, 0x52, 0x53,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x3a,
+ 0x64, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x87, 0x80, 0x99,
+ 0x6a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_udpa_annotations_status_proto_rawDescOnce sync.Once
+ file_udpa_annotations_status_proto_rawDescData = file_udpa_annotations_status_proto_rawDesc
+)
+
+func file_udpa_annotations_status_proto_rawDescGZIP() []byte {
+ file_udpa_annotations_status_proto_rawDescOnce.Do(func() {
+ file_udpa_annotations_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_status_proto_rawDescData)
+ })
+ return file_udpa_annotations_status_proto_rawDescData
+}
+
+var file_udpa_annotations_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_udpa_annotations_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_udpa_annotations_status_proto_goTypes = []interface{}{
+ (PackageVersionStatus)(0), // 0: udpa.annotations.PackageVersionStatus
+ (*StatusAnnotation)(nil), // 1: udpa.annotations.StatusAnnotation
+ (*descriptorpb.FileOptions)(nil), // 2: google.protobuf.FileOptions
+}
+var file_udpa_annotations_status_proto_depIdxs = []int32{
+ 0, // 0: udpa.annotations.StatusAnnotation.package_version_status:type_name -> udpa.annotations.PackageVersionStatus
+ 2, // 1: udpa.annotations.file_status:extendee -> google.protobuf.FileOptions
+ 1, // 2: udpa.annotations.file_status:type_name -> udpa.annotations.StatusAnnotation
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 2, // [2:3] is the sub-list for extension type_name
+ 1, // [1:2] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_status_proto_init() }
+func file_udpa_annotations_status_proto_init() {
+ if File_udpa_annotations_status_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_annotations_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_status_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_status_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_status_proto_depIdxs,
+ EnumInfos: file_udpa_annotations_status_proto_enumTypes,
+ MessageInfos: file_udpa_annotations_status_proto_msgTypes,
+ ExtensionInfos: file_udpa_annotations_status_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_status_proto = out.File
+ file_udpa_annotations_status_proto_rawDesc = nil
+ file_udpa_annotations_status_proto_goTypes = nil
+ file_udpa_annotations_status_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
new file mode 100644
index 000000000..5633a8383
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/status.pb.validate.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/status.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on StatusAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *StatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// StatusAnnotationMultiError, or nil if none found.
+func (m *StatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ // no validation rules for PackageVersionStatus
+
+ if len(errors) > 0 {
+ return StatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// StatusAnnotationMultiError is an error wrapping multiple validation errors
+// returned by StatusAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type StatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StatusAnnotationMultiError) AllErrors() []error { return m }
+
+// StatusAnnotationValidationError is the validation error returned by
+// StatusAnnotation.Validate if the designated constraints aren't met.
+type StatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StatusAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
new file mode 100644
index 000000000..8bd950f6b
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/annotations/versioning.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type VersioningAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"`
+}
+
+func (x *VersioningAnnotation) Reset() {
+ *x = VersioningAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_annotations_versioning_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VersioningAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VersioningAnnotation) ProtoMessage() {}
+
+func (x *VersioningAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_annotations_versioning_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VersioningAnnotation.ProtoReflect.Descriptor instead.
+func (*VersioningAnnotation) Descriptor() ([]byte, []int) {
+ return file_udpa_annotations_versioning_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *VersioningAnnotation) GetPreviousMessageType() string {
+ if x != nil {
+ return x.PreviousMessageType
+ }
+ return ""
+}
+
+var file_udpa_annotations_versioning_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*VersioningAnnotation)(nil),
+ Field: 7881811,
+ Name: "udpa.annotations.versioning",
+ Tag: "bytes,7881811,opt,name=versioning",
+ Filename: "udpa/annotations/versioning.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional udpa.annotations.VersioningAnnotation versioning = 7881811;
+ E_Versioning = &file_udpa_annotations_versioning_proto_extTypes[0]
+)
+
+var File_udpa_annotations_versioning_proto protoreflect.FileDescriptor
+
+var file_udpa_annotations_versioning_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13,
+ 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54,
+ 0x79, 0x70, 0x65, 0x3a, 0x6a, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xd3, 0x88, 0xe1, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x75, 0x64,
+ 0x70, 0x61, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x42,
+ 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_udpa_annotations_versioning_proto_rawDescOnce sync.Once
+ file_udpa_annotations_versioning_proto_rawDescData = file_udpa_annotations_versioning_proto_rawDesc
+)
+
+func file_udpa_annotations_versioning_proto_rawDescGZIP() []byte {
+ file_udpa_annotations_versioning_proto_rawDescOnce.Do(func() {
+ file_udpa_annotations_versioning_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_annotations_versioning_proto_rawDescData)
+ })
+ return file_udpa_annotations_versioning_proto_rawDescData
+}
+
+var file_udpa_annotations_versioning_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_udpa_annotations_versioning_proto_goTypes = []interface{}{
+ (*VersioningAnnotation)(nil), // 0: udpa.annotations.VersioningAnnotation
+ (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions
+}
+var file_udpa_annotations_versioning_proto_depIdxs = []int32{
+ 1, // 0: udpa.annotations.versioning:extendee -> google.protobuf.MessageOptions
+ 0, // 1: udpa.annotations.versioning:type_name -> udpa.annotations.VersioningAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_udpa_annotations_versioning_proto_init() }
+func file_udpa_annotations_versioning_proto_init() {
+ if File_udpa_annotations_versioning_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_annotations_versioning_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VersioningAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_annotations_versioning_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_annotations_versioning_proto_goTypes,
+ DependencyIndexes: file_udpa_annotations_versioning_proto_depIdxs,
+ MessageInfos: file_udpa_annotations_versioning_proto_msgTypes,
+ ExtensionInfos: file_udpa_annotations_versioning_proto_extTypes,
+ }.Build()
+ File_udpa_annotations_versioning_proto = out.File
+ file_udpa_annotations_versioning_proto_rawDesc = nil
+ file_udpa_annotations_versioning_proto_goTypes = nil
+ file_udpa_annotations_versioning_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
new file mode 100644
index 000000000..5fd86baff
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/annotations/versioning.pb.validate.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/annotations/versioning.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *VersioningAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// VersioningAnnotationMultiError, or nil if none found.
+func (m *VersioningAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *VersioningAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for PreviousMessageType
+
+ if len(errors) > 0 {
+ return VersioningAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// VersioningAnnotationMultiError is an error wrapping multiple validation
+// errors returned by VersioningAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type VersioningAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m VersioningAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m VersioningAnnotationMultiError) AllErrors() []error { return m }
+
+// VersioningAnnotationValidationError is the validation error returned by
+// VersioningAnnotation.Validate if the designated constraints aren't met.
+type VersioningAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e VersioningAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e VersioningAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e VersioningAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e VersioningAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e VersioningAnnotationValidationError) ErrorName() string {
+ return "VersioningAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e VersioningAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sVersioningAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = VersioningAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = VersioningAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go
new file mode 100644
index 000000000..8eb3b7b24
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: udpa/type/v1/typed_struct.proto
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type TypedStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *TypedStruct) Reset() {
+ *x = TypedStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_udpa_type_v1_typed_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedStruct) ProtoMessage() {}
+
+func (x *TypedStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_udpa_type_v1_typed_struct_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedStruct.ProtoReflect.Descriptor instead.
+func (*TypedStruct) Descriptor() ([]byte, []int) {
+ return file_udpa_type_v1_typed_struct_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TypedStruct) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *TypedStruct) GetValue() *structpb.Struct {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_udpa_type_v1_typed_struct_proto protoreflect.FileDescriptor
+
+var file_udpa_type_v1_typed_struct_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0c, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x31, 0x1a,
+ 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a,
+ 0x0b, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x19, 0x0a, 0x08,
+ 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
+ 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x57, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x75, 0x64, 0x70, 0x61, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72,
+ 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x67, 0x6f, 0x2f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x31, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_udpa_type_v1_typed_struct_proto_rawDescOnce sync.Once
+ file_udpa_type_v1_typed_struct_proto_rawDescData = file_udpa_type_v1_typed_struct_proto_rawDesc
+)
+
+func file_udpa_type_v1_typed_struct_proto_rawDescGZIP() []byte {
+ file_udpa_type_v1_typed_struct_proto_rawDescOnce.Do(func() {
+ file_udpa_type_v1_typed_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_udpa_type_v1_typed_struct_proto_rawDescData)
+ })
+ return file_udpa_type_v1_typed_struct_proto_rawDescData
+}
+
+var file_udpa_type_v1_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_udpa_type_v1_typed_struct_proto_goTypes = []interface{}{
+ (*TypedStruct)(nil), // 0: udpa.type.v1.TypedStruct
+ (*structpb.Struct)(nil), // 1: google.protobuf.Struct
+}
+var file_udpa_type_v1_typed_struct_proto_depIdxs = []int32{
+ 1, // 0: udpa.type.v1.TypedStruct.value:type_name -> google.protobuf.Struct
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_udpa_type_v1_typed_struct_proto_init() }
+func file_udpa_type_v1_typed_struct_proto_init() {
+ if File_udpa_type_v1_typed_struct_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_udpa_type_v1_typed_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypedStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_udpa_type_v1_typed_struct_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_udpa_type_v1_typed_struct_proto_goTypes,
+ DependencyIndexes: file_udpa_type_v1_typed_struct_proto_depIdxs,
+ MessageInfos: file_udpa_type_v1_typed_struct_proto_msgTypes,
+ }.Build()
+ File_udpa_type_v1_typed_struct_proto = out.File
+ file_udpa_type_v1_typed_struct_proto_rawDesc = nil
+ file_udpa_type_v1_typed_struct_proto_goTypes = nil
+ file_udpa_type_v1_typed_struct_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go
new file mode 100644
index 000000000..e336fb4a7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/udpa/type/v1/typed_struct.pb.validate.go
@@ -0,0 +1,166 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: udpa/type/v1/typed_struct.proto
+
+package v1
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TypedStruct with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *TypedStruct) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TypedStruct with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in TypedStructMultiError, or
+// nil if none found.
+func (m *TypedStruct) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TypedStruct) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for TypeUrl
+
+ if all {
+ switch v := interface{}(m.GetValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return TypedStructMultiError(errors)
+ }
+
+ return nil
+}
+
+// TypedStructMultiError is an error wrapping multiple validation errors
+// returned by TypedStruct.ValidateAll() if the designated constraints aren't met.
+type TypedStructMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TypedStructMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TypedStructMultiError) AllErrors() []error { return m }
+
+// TypedStructValidationError is the validation error returned by
+// TypedStruct.Validate if the designated constraints aren't met.
+type TypedStructValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TypedStructValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TypedStructValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TypedStructValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TypedStructValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TypedStructValidationError) ErrorName() string { return "TypedStructValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TypedStructValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTypedStruct.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TypedStructValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TypedStructValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go
new file mode 100644
index 000000000..5211b83c7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.go
@@ -0,0 +1,412 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/migrate.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+}
+
+func (x *MigrateAnnotation) Reset() {
+ *x = MigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MigrateAnnotation) ProtoMessage() {}
+
+func (x *MigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*MigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MigrateAnnotation) GetRename() string {
+ if x != nil {
+ return x.Rename
+ }
+ return ""
+}
+
+type FieldMigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Rename string `protobuf:"bytes,1,opt,name=rename,proto3" json:"rename,omitempty"`
+ OneofPromotion string `protobuf:"bytes,2,opt,name=oneof_promotion,json=oneofPromotion,proto3" json:"oneof_promotion,omitempty"`
+}
+
+func (x *FieldMigrateAnnotation) Reset() {
+ *x = FieldMigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldMigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldMigrateAnnotation) ProtoMessage() {}
+
+func (x *FieldMigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldMigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *FieldMigrateAnnotation) GetRename() string {
+ if x != nil {
+ return x.Rename
+ }
+ return ""
+}
+
+func (x *FieldMigrateAnnotation) GetOneofPromotion() string {
+ if x != nil {
+ return x.OneofPromotion
+ }
+ return ""
+}
+
+type FileMigrateAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ MoveToPackage string `protobuf:"bytes,2,opt,name=move_to_package,json=moveToPackage,proto3" json:"move_to_package,omitempty"`
+}
+
+func (x *FileMigrateAnnotation) Reset() {
+ *x = FileMigrateAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileMigrateAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileMigrateAnnotation) ProtoMessage() {}
+
+func (x *FileMigrateAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_migrate_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileMigrateAnnotation.ProtoReflect.Descriptor instead.
+func (*FileMigrateAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_migrate_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *FileMigrateAnnotation) GetMoveToPackage() string {
+ if x != nil {
+ return x.MoveToPackage
+ }
+ return ""
+}
+
+var file_xds_annotations_v3_migrate_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.message_migrate",
+ Tag: "bytes,112948430,opt,name=message_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldMigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.field_migrate",
+ Tag: "bytes,112948430,opt,name=field_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.enum_migrate",
+ Tag: "bytes,112948430,opt,name=enum_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*MigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.enum_value_migrate",
+ Tag: "bytes,112948430,opt,name=enum_value_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*FileMigrateAnnotation)(nil),
+ Field: 112948430,
+ Name: "xds.annotations.v3.file_migrate",
+ Tag: "bytes,112948430,opt,name=file_migrate",
+ Filename: "xds/annotations/v3/migrate.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional xds.annotations.v3.MigrateAnnotation message_migrate = 112948430;
+ E_MessageMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional xds.annotations.v3.FieldMigrateAnnotation field_migrate = 112948430;
+ E_FieldMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+ // optional xds.annotations.v3.MigrateAnnotation enum_migrate = 112948430;
+ E_EnumMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumValueOptions.
+var (
+ // optional xds.annotations.v3.MigrateAnnotation enum_value_migrate = 112948430;
+ E_EnumValueMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional xds.annotations.v3.FileMigrateAnnotation file_migrate = 112948430;
+ E_FileMigrate = &file_xds_annotations_v3_migrate_proto_extTypes[4]
+)
+
+var File_xds_annotations_v3_migrate_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_migrate_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x2b, 0x0a, 0x11, 0x4d, 0x69, 0x67, 0x72,
+ 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a,
+ 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72,
+ 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x59, 0x0a, 0x16, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69,
+ 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x16, 0x0a, 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x06, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
+ 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0e, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x50, 0x72, 0x6f, 0x6d, 0x6f, 0x74, 0x69, 0x6f, 0x6e,
+ 0x22, 0x3f, 0x0a, 0x15, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x6d, 0x6f, 0x76,
+ 0x65, 0x5f, 0x74, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0d, 0x6d, 0x6f, 0x76, 0x65, 0x54, 0x6f, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67,
+ 0x65, 0x3a, 0x72, 0x0a, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6d, 0x69, 0x67,
+ 0x72, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4d, 0x69,
+ 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x71, 0x0a, 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d,
+ 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65,
+ 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x66, 0x69, 0x65, 0x6c,
+ 0x64, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x69, 0x0a, 0x0c, 0x65, 0x6e, 0x75, 0x6d,
+ 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed, 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x25, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x65, 0x6e, 0x75, 0x6d, 0x4d, 0x69, 0x67, 0x72,
+ 0x61, 0x74, 0x65, 0x3a, 0x79, 0x0a, 0x12, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed,
+ 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x69, 0x67, 0x72, 0x61,
+ 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x65, 0x6e,
+ 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x3a, 0x6d,
+ 0x0a, 0x0c, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x12, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xce, 0xe9, 0xed,
+ 0x35, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d,
+ 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x42, 0x2b, 0x5a,
+ 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x33,
+}
+
+var (
+ file_xds_annotations_v3_migrate_proto_rawDescOnce sync.Once
+ file_xds_annotations_v3_migrate_proto_rawDescData = file_xds_annotations_v3_migrate_proto_rawDesc
+)
+
+func file_xds_annotations_v3_migrate_proto_rawDescGZIP() []byte {
+ file_xds_annotations_v3_migrate_proto_rawDescOnce.Do(func() {
+ file_xds_annotations_v3_migrate_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_migrate_proto_rawDescData)
+ })
+ return file_xds_annotations_v3_migrate_proto_rawDescData
+}
+
+var file_xds_annotations_v3_migrate_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_xds_annotations_v3_migrate_proto_goTypes = []interface{}{
+ (*MigrateAnnotation)(nil), // 0: xds.annotations.v3.MigrateAnnotation
+ (*FieldMigrateAnnotation)(nil), // 1: xds.annotations.v3.FieldMigrateAnnotation
+ (*FileMigrateAnnotation)(nil), // 2: xds.annotations.v3.FileMigrateAnnotation
+ (*descriptorpb.MessageOptions)(nil), // 3: google.protobuf.MessageOptions
+ (*descriptorpb.FieldOptions)(nil), // 4: google.protobuf.FieldOptions
+ (*descriptorpb.EnumOptions)(nil), // 5: google.protobuf.EnumOptions
+ (*descriptorpb.EnumValueOptions)(nil), // 6: google.protobuf.EnumValueOptions
+ (*descriptorpb.FileOptions)(nil), // 7: google.protobuf.FileOptions
+}
+var file_xds_annotations_v3_migrate_proto_depIdxs = []int32{
+ 3, // 0: xds.annotations.v3.message_migrate:extendee -> google.protobuf.MessageOptions
+ 4, // 1: xds.annotations.v3.field_migrate:extendee -> google.protobuf.FieldOptions
+ 5, // 2: xds.annotations.v3.enum_migrate:extendee -> google.protobuf.EnumOptions
+ 6, // 3: xds.annotations.v3.enum_value_migrate:extendee -> google.protobuf.EnumValueOptions
+ 7, // 4: xds.annotations.v3.file_migrate:extendee -> google.protobuf.FileOptions
+ 0, // 5: xds.annotations.v3.message_migrate:type_name -> xds.annotations.v3.MigrateAnnotation
+ 1, // 6: xds.annotations.v3.field_migrate:type_name -> xds.annotations.v3.FieldMigrateAnnotation
+ 0, // 7: xds.annotations.v3.enum_migrate:type_name -> xds.annotations.v3.MigrateAnnotation
+ 0, // 8: xds.annotations.v3.enum_value_migrate:type_name -> xds.annotations.v3.MigrateAnnotation
+ 2, // 9: xds.annotations.v3.file_migrate:type_name -> xds.annotations.v3.FileMigrateAnnotation
+ 10, // [10:10] is the sub-list for method output_type
+ 10, // [10:10] is the sub-list for method input_type
+ 5, // [5:10] is the sub-list for extension type_name
+ 0, // [0:5] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_migrate_proto_init() }
+func file_xds_annotations_v3_migrate_proto_init() {
+ if File_xds_annotations_v3_migrate_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_annotations_v3_migrate_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_migrate_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldMigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_migrate_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileMigrateAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_migrate_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 5,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_migrate_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_migrate_proto_depIdxs,
+ MessageInfos: file_xds_annotations_v3_migrate_proto_msgTypes,
+ ExtensionInfos: file_xds_annotations_v3_migrate_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_migrate_proto = out.File
+ file_xds_annotations_v3_migrate_proto_rawDesc = nil
+ file_xds_annotations_v3_migrate_proto_goTypes = nil
+ file_xds_annotations_v3_migrate_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go
new file mode 100644
index 000000000..d57d77824
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/migrate.pb.validate.go
@@ -0,0 +1,350 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/migrate.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on MigrateAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *MigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MigrateAnnotationMultiError, or nil if none found.
+func (m *MigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Rename
+
+ if len(errors) > 0 {
+ return MigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// MigrateAnnotationMultiError is an error wrapping multiple validation errors
+// returned by MigrateAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type MigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// MigrateAnnotationValidationError is the validation error returned by
+// MigrateAnnotation.Validate if the designated constraints aren't met.
+type MigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MigrateAnnotationValidationError) ErrorName() string {
+ return "MigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MigrateAnnotationValidationError{}
+
+// Validate checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldMigrateAnnotationMultiError, or nil if none found.
+func (m *FieldMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldMigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Rename
+
+ // no validation rules for OneofPromotion
+
+ if len(errors) > 0 {
+ return FieldMigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldMigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldMigrateAnnotationValidationError is the validation error returned by
+// FieldMigrateAnnotation.Validate if the designated constraints aren't met.
+type FieldMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldMigrateAnnotationValidationError) ErrorName() string {
+ return "FieldMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldMigrateAnnotationValidationError{}
+
+// Validate checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FileMigrateAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileMigrateAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileMigrateAnnotationMultiError, or nil if none found.
+func (m *FileMigrateAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileMigrateAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for MoveToPackage
+
+ if len(errors) > 0 {
+ return FileMigrateAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FileMigrateAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileMigrateAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileMigrateAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileMigrateAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileMigrateAnnotationMultiError) AllErrors() []error { return m }
+
+// FileMigrateAnnotationValidationError is the validation error returned by
+// FileMigrateAnnotation.Validate if the designated constraints aren't met.
+type FileMigrateAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FileMigrateAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FileMigrateAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FileMigrateAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FileMigrateAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FileMigrateAnnotationValidationError) ErrorName() string {
+ return "FileMigrateAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FileMigrateAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFileMigrateAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FileMigrateAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FileMigrateAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go
new file mode 100644
index 000000000..14df890c1
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.go
@@ -0,0 +1,197 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/security.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type FieldSecurityAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ConfigureForUntrustedDownstream bool `protobuf:"varint,1,opt,name=configure_for_untrusted_downstream,json=configureForUntrustedDownstream,proto3" json:"configure_for_untrusted_downstream,omitempty"`
+ ConfigureForUntrustedUpstream bool `protobuf:"varint,2,opt,name=configure_for_untrusted_upstream,json=configureForUntrustedUpstream,proto3" json:"configure_for_untrusted_upstream,omitempty"`
+}
+
+func (x *FieldSecurityAnnotation) Reset() {
+ *x = FieldSecurityAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_security_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldSecurityAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldSecurityAnnotation) ProtoMessage() {}
+
+func (x *FieldSecurityAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_security_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldSecurityAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldSecurityAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_security_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldSecurityAnnotation) GetConfigureForUntrustedDownstream() bool {
+ if x != nil {
+ return x.ConfigureForUntrustedDownstream
+ }
+ return false
+}
+
+func (x *FieldSecurityAnnotation) GetConfigureForUntrustedUpstream() bool {
+ if x != nil {
+ return x.ConfigureForUntrustedUpstream
+ }
+ return false
+}
+
+var file_xds_annotations_v3_security_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldSecurityAnnotation)(nil),
+ Field: 99044135,
+ Name: "xds.annotations.v3.security",
+ Tag: "bytes,99044135,opt,name=security",
+ Filename: "xds/annotations/v3/security.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional xds.annotations.v3.FieldSecurityAnnotation security = 99044135;
+ E_Security = &file_xds_annotations_v3_security_proto_extTypes[0]
+)
+
+var File_xds_annotations_v3_security_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_security_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xaf, 0x01, 0x0a, 0x17, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x75, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65,
+ 0x64, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x1f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72,
+ 0x55, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x44, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72,
+ 0x65, 0x61, 0x6d, 0x12, 0x47, 0x0a, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65,
+ 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x74, 0x72, 0x75, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x75,
+ 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x55, 0x6e, 0x74, 0x72, 0x75,
+ 0x73, 0x74, 0x65, 0x64, 0x55, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x3a, 0x69, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xa7, 0x96, 0x9d, 0x2f, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x73,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x42, 0x33, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08,
+ 0x01, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_annotations_v3_security_proto_rawDescOnce sync.Once
+ file_xds_annotations_v3_security_proto_rawDescData = file_xds_annotations_v3_security_proto_rawDesc
+)
+
+func file_xds_annotations_v3_security_proto_rawDescGZIP() []byte {
+ file_xds_annotations_v3_security_proto_rawDescOnce.Do(func() {
+ file_xds_annotations_v3_security_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_security_proto_rawDescData)
+ })
+ return file_xds_annotations_v3_security_proto_rawDescData
+}
+
+var file_xds_annotations_v3_security_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_annotations_v3_security_proto_goTypes = []interface{}{
+ (*FieldSecurityAnnotation)(nil), // 0: xds.annotations.v3.FieldSecurityAnnotation
+ (*descriptorpb.FieldOptions)(nil), // 1: google.protobuf.FieldOptions
+}
+var file_xds_annotations_v3_security_proto_depIdxs = []int32{
+ 1, // 0: xds.annotations.v3.security:extendee -> google.protobuf.FieldOptions
+ 0, // 1: xds.annotations.v3.security:type_name -> xds.annotations.v3.FieldSecurityAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_security_proto_init() }
+func file_xds_annotations_v3_security_proto_init() {
+ if File_xds_annotations_v3_security_proto != nil {
+ return
+ }
+ file_xds_annotations_v3_status_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_annotations_v3_security_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldSecurityAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_security_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_security_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_security_proto_depIdxs,
+ MessageInfos: file_xds_annotations_v3_security_proto_msgTypes,
+ ExtensionInfos: file_xds_annotations_v3_security_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_security_proto = out.File
+ file_xds_annotations_v3_security_proto_rawDesc = nil
+ file_xds_annotations_v3_security_proto_goTypes = nil
+ file_xds_annotations_v3_security_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go
new file mode 100644
index 000000000..ac0143f27
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/security.pb.validate.go
@@ -0,0 +1,142 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/security.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on FieldSecurityAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldSecurityAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldSecurityAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldSecurityAnnotationMultiError, or nil if none found.
+func (m *FieldSecurityAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldSecurityAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for ConfigureForUntrustedDownstream
+
+ // no validation rules for ConfigureForUntrustedUpstream
+
+ if len(errors) > 0 {
+ return FieldSecurityAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldSecurityAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldSecurityAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldSecurityAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldSecurityAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldSecurityAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldSecurityAnnotationValidationError is the validation error returned by
+// FieldSecurityAnnotation.Validate if the designated constraints aren't met.
+type FieldSecurityAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldSecurityAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldSecurityAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldSecurityAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldSecurityAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldSecurityAnnotationValidationError) ErrorName() string {
+ return "FieldSecurityAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldSecurityAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldSecurityAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldSecurityAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldSecurityAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go
new file mode 100644
index 000000000..042b66bff
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.go
@@ -0,0 +1,93 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/sensitive.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_xds_annotations_v3_sensitive_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 61008053,
+ Name: "xds.annotations.v3.sensitive",
+ Tag: "varint,61008053,opt,name=sensitive",
+ Filename: "xds/annotations/v3/sensitive.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional bool sensitive = 61008053;
+ E_Sensitive = &file_xds_annotations_v3_sensitive_proto_extTypes[0]
+)
+
+var File_xds_annotations_v3_sensitive_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_sensitive_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x3e, 0x0a, 0x09, 0x73, 0x65,
+ 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xb5, 0xd1, 0x8b, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64,
+ 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_xds_annotations_v3_sensitive_proto_goTypes = []interface{}{
+ (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
+}
+var file_xds_annotations_v3_sensitive_proto_depIdxs = []int32{
+ 0, // 0: xds.annotations.v3.sensitive:extendee -> google.protobuf.FieldOptions
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_sensitive_proto_init() }
+func file_xds_annotations_v3_sensitive_proto_init() {
+ if File_xds_annotations_v3_sensitive_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_sensitive_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_sensitive_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_sensitive_proto_depIdxs,
+ ExtensionInfos: file_xds_annotations_v3_sensitive_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_sensitive_proto = out.File
+ file_xds_annotations_v3_sensitive_proto_rawDesc = nil
+ file_xds_annotations_v3_sensitive_proto_goTypes = nil
+ file_xds_annotations_v3_sensitive_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go
new file mode 100644
index 000000000..c101d3acc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/sensitive.pb.validate.go
@@ -0,0 +1,36 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/sensitive.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go
new file mode 100644
index 000000000..5d5975ffb
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.go
@@ -0,0 +1,495 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/status.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type PackageVersionStatus int32
+
+const (
+ PackageVersionStatus_UNKNOWN PackageVersionStatus = 0
+ PackageVersionStatus_FROZEN PackageVersionStatus = 1
+ PackageVersionStatus_ACTIVE PackageVersionStatus = 2
+ PackageVersionStatus_NEXT_MAJOR_VERSION_CANDIDATE PackageVersionStatus = 3
+)
+
+// Enum value maps for PackageVersionStatus.
+var (
+ PackageVersionStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "FROZEN",
+ 2: "ACTIVE",
+ 3: "NEXT_MAJOR_VERSION_CANDIDATE",
+ }
+ PackageVersionStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "FROZEN": 1,
+ "ACTIVE": 2,
+ "NEXT_MAJOR_VERSION_CANDIDATE": 3,
+ }
+)
+
+func (x PackageVersionStatus) Enum() *PackageVersionStatus {
+ p := new(PackageVersionStatus)
+ *p = x
+ return p
+}
+
+func (x PackageVersionStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (PackageVersionStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_xds_annotations_v3_status_proto_enumTypes[0].Descriptor()
+}
+
+func (PackageVersionStatus) Type() protoreflect.EnumType {
+ return &file_xds_annotations_v3_status_proto_enumTypes[0]
+}
+
+func (x PackageVersionStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use PackageVersionStatus.Descriptor instead.
+func (PackageVersionStatus) EnumDescriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{0}
+}
+
+type FileStatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+}
+
+func (x *FileStatusAnnotation) Reset() {
+ *x = FileStatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FileStatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileStatusAnnotation) ProtoMessage() {}
+
+func (x *FileStatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileStatusAnnotation.ProtoReflect.Descriptor instead.
+func (*FileStatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FileStatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+type MessageStatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+}
+
+func (x *MessageStatusAnnotation) Reset() {
+ *x = MessageStatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MessageStatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MessageStatusAnnotation) ProtoMessage() {}
+
+func (x *MessageStatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MessageStatusAnnotation.ProtoReflect.Descriptor instead.
+func (*MessageStatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *MessageStatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+type FieldStatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+}
+
+func (x *FieldStatusAnnotation) Reset() {
+ *x = FieldStatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldStatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldStatusAnnotation) ProtoMessage() {}
+
+func (x *FieldStatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldStatusAnnotation.ProtoReflect.Descriptor instead.
+func (*FieldStatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *FieldStatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+type StatusAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ WorkInProgress bool `protobuf:"varint,1,opt,name=work_in_progress,json=workInProgress,proto3" json:"work_in_progress,omitempty"`
+ PackageVersionStatus PackageVersionStatus `protobuf:"varint,2,opt,name=package_version_status,json=packageVersionStatus,proto3,enum=xds.annotations.v3.PackageVersionStatus" json:"package_version_status,omitempty"`
+}
+
+func (x *StatusAnnotation) Reset() {
+ *x = StatusAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusAnnotation) ProtoMessage() {}
+
+func (x *StatusAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_status_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusAnnotation.ProtoReflect.Descriptor instead.
+func (*StatusAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_status_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *StatusAnnotation) GetWorkInProgress() bool {
+ if x != nil {
+ return x.WorkInProgress
+ }
+ return false
+}
+
+func (x *StatusAnnotation) GetPackageVersionStatus() PackageVersionStatus {
+ if x != nil {
+ return x.PackageVersionStatus
+ }
+ return PackageVersionStatus_UNKNOWN
+}
+
+var file_xds_annotations_v3_status_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*FileStatusAnnotation)(nil),
+ Field: 226829418,
+ Name: "xds.annotations.v3.file_status",
+ Tag: "bytes,226829418,opt,name=file_status",
+ Filename: "xds/annotations/v3/status.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*MessageStatusAnnotation)(nil),
+ Field: 226829418,
+ Name: "xds.annotations.v3.message_status",
+ Tag: "bytes,226829418,opt,name=message_status",
+ Filename: "xds/annotations/v3/status.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldStatusAnnotation)(nil),
+ Field: 226829418,
+ Name: "xds.annotations.v3.field_status",
+ Tag: "bytes,226829418,opt,name=field_status",
+ Filename: "xds/annotations/v3/status.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // optional xds.annotations.v3.FileStatusAnnotation file_status = 226829418;
+ E_FileStatus = &file_xds_annotations_v3_status_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional xds.annotations.v3.MessageStatusAnnotation message_status = 226829418;
+ E_MessageStatus = &file_xds_annotations_v3_status_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional xds.annotations.v3.FieldStatusAnnotation field_status = 226829418;
+ E_FieldStatus = &file_xds_annotations_v3_status_proto_extTypes[2]
+)
+
+var File_xds_annotations_v3_status_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_status_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x40, 0x0a, 0x14, 0x46, 0x69, 0x6c, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72,
+ 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49,
+ 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x43, 0x0a, 0x17, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69, 0x6e, 0x5f,
+ 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e,
+ 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0x41,
+ 0x0a, 0x15, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f,
+ 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73,
+ 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x69,
+ 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x49, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73,
+ 0x12, 0x5e, 0x0a, 0x16, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x14, 0x70, 0x61, 0x63, 0x6b,
+ 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2a, 0x5d, 0x0a, 0x14, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e,
+ 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x52, 0x4f, 0x5a, 0x45, 0x4e, 0x10,
+ 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, 0x20, 0x0a,
+ 0x1c, 0x4e, 0x45, 0x58, 0x54, 0x5f, 0x4d, 0x41, 0x4a, 0x4f, 0x52, 0x5f, 0x56, 0x45, 0x52, 0x53,
+ 0x49, 0x4f, 0x4e, 0x5f, 0x43, 0x41, 0x4e, 0x44, 0x49, 0x44, 0x41, 0x54, 0x45, 0x10, 0x03, 0x3a,
+ 0x6a, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xea, 0xc8, 0x94,
+ 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x76, 0x0a, 0x0e, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xea,
+ 0xc8, 0x94, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x3a, 0x6e, 0x0a, 0x0c, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x73, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xea, 0xc8, 0x94, 0x6c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33,
+ 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x41, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64,
+ 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_annotations_v3_status_proto_rawDescOnce sync.Once
+ file_xds_annotations_v3_status_proto_rawDescData = file_xds_annotations_v3_status_proto_rawDesc
+)
+
+func file_xds_annotations_v3_status_proto_rawDescGZIP() []byte {
+ file_xds_annotations_v3_status_proto_rawDescOnce.Do(func() {
+ file_xds_annotations_v3_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_status_proto_rawDescData)
+ })
+ return file_xds_annotations_v3_status_proto_rawDescData
+}
+
+var file_xds_annotations_v3_status_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_xds_annotations_v3_status_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_xds_annotations_v3_status_proto_goTypes = []interface{}{
+ (PackageVersionStatus)(0), // 0: xds.annotations.v3.PackageVersionStatus
+ (*FileStatusAnnotation)(nil), // 1: xds.annotations.v3.FileStatusAnnotation
+ (*MessageStatusAnnotation)(nil), // 2: xds.annotations.v3.MessageStatusAnnotation
+ (*FieldStatusAnnotation)(nil), // 3: xds.annotations.v3.FieldStatusAnnotation
+ (*StatusAnnotation)(nil), // 4: xds.annotations.v3.StatusAnnotation
+ (*descriptorpb.FileOptions)(nil), // 5: google.protobuf.FileOptions
+ (*descriptorpb.MessageOptions)(nil), // 6: google.protobuf.MessageOptions
+ (*descriptorpb.FieldOptions)(nil), // 7: google.protobuf.FieldOptions
+}
+var file_xds_annotations_v3_status_proto_depIdxs = []int32{
+ 0, // 0: xds.annotations.v3.StatusAnnotation.package_version_status:type_name -> xds.annotations.v3.PackageVersionStatus
+ 5, // 1: xds.annotations.v3.file_status:extendee -> google.protobuf.FileOptions
+ 6, // 2: xds.annotations.v3.message_status:extendee -> google.protobuf.MessageOptions
+ 7, // 3: xds.annotations.v3.field_status:extendee -> google.protobuf.FieldOptions
+ 1, // 4: xds.annotations.v3.file_status:type_name -> xds.annotations.v3.FileStatusAnnotation
+ 2, // 5: xds.annotations.v3.message_status:type_name -> xds.annotations.v3.MessageStatusAnnotation
+ 3, // 6: xds.annotations.v3.field_status:type_name -> xds.annotations.v3.FieldStatusAnnotation
+ 7, // [7:7] is the sub-list for method output_type
+ 7, // [7:7] is the sub-list for method input_type
+ 4, // [4:7] is the sub-list for extension type_name
+ 1, // [1:4] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_status_proto_init() }
+func file_xds_annotations_v3_status_proto_init() {
+ if File_xds_annotations_v3_status_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_annotations_v3_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FileStatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_status_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MessageStatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_status_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldStatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_annotations_v3_status_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_status_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 4,
+ NumExtensions: 3,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_status_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_status_proto_depIdxs,
+ EnumInfos: file_xds_annotations_v3_status_proto_enumTypes,
+ MessageInfos: file_xds_annotations_v3_status_proto_msgTypes,
+ ExtensionInfos: file_xds_annotations_v3_status_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_status_proto = out.File
+ file_xds_annotations_v3_status_proto_rawDesc = nil
+ file_xds_annotations_v3_status_proto_goTypes = nil
+ file_xds_annotations_v3_status_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go
new file mode 100644
index 000000000..a87dbee8d
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/status.pb.validate.go
@@ -0,0 +1,452 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/status.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on FileStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FileStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FileStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FileStatusAnnotationMultiError, or nil if none found.
+func (m *FileStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FileStatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ if len(errors) > 0 {
+ return FileStatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FileStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FileStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FileStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FileStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FileStatusAnnotationMultiError) AllErrors() []error { return m }
+
+// FileStatusAnnotationValidationError is the validation error returned by
+// FileStatusAnnotation.Validate if the designated constraints aren't met.
+type FileStatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FileStatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FileStatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FileStatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FileStatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FileStatusAnnotationValidationError) ErrorName() string {
+ return "FileStatusAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FileStatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFileStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FileStatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FileStatusAnnotationValidationError{}
+
+// Validate checks the field values on MessageStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *MessageStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MessageStatusAnnotation with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MessageStatusAnnotationMultiError, or nil if none found.
+func (m *MessageStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MessageStatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ if len(errors) > 0 {
+ return MessageStatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// MessageStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by MessageStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type MessageStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MessageStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MessageStatusAnnotationMultiError) AllErrors() []error { return m }
+
+// MessageStatusAnnotationValidationError is the validation error returned by
+// MessageStatusAnnotation.Validate if the designated constraints aren't met.
+type MessageStatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MessageStatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MessageStatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MessageStatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MessageStatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MessageStatusAnnotationValidationError) ErrorName() string {
+ return "MessageStatusAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MessageStatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMessageStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MessageStatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MessageStatusAnnotationValidationError{}
+
+// Validate checks the field values on FieldStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *FieldStatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FieldStatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// FieldStatusAnnotationMultiError, or nil if none found.
+func (m *FieldStatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FieldStatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ if len(errors) > 0 {
+ return FieldStatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// FieldStatusAnnotationMultiError is an error wrapping multiple validation
+// errors returned by FieldStatusAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type FieldStatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FieldStatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FieldStatusAnnotationMultiError) AllErrors() []error { return m }
+
+// FieldStatusAnnotationValidationError is the validation error returned by
+// FieldStatusAnnotation.Validate if the designated constraints aren't met.
+type FieldStatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FieldStatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FieldStatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FieldStatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FieldStatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FieldStatusAnnotationValidationError) ErrorName() string {
+ return "FieldStatusAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e FieldStatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFieldStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FieldStatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FieldStatusAnnotationValidationError{}
+
+// Validate checks the field values on StatusAnnotation with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *StatusAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StatusAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// StatusAnnotationMultiError, or nil if none found.
+func (m *StatusAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StatusAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for WorkInProgress
+
+ // no validation rules for PackageVersionStatus
+
+ if len(errors) > 0 {
+ return StatusAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// StatusAnnotationMultiError is an error wrapping multiple validation errors
+// returned by StatusAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type StatusAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StatusAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StatusAnnotationMultiError) AllErrors() []error { return m }
+
+// StatusAnnotationValidationError is the validation error returned by
+// StatusAnnotation.Validate if the designated constraints aren't met.
+type StatusAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StatusAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StatusAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StatusAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StatusAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StatusAnnotationValidationError) ErrorName() string { return "StatusAnnotationValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StatusAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStatusAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StatusAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StatusAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go
new file mode 100644
index 000000000..97edd7690
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/annotations/v3/versioning.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type VersioningAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ PreviousMessageType string `protobuf:"bytes,1,opt,name=previous_message_type,json=previousMessageType,proto3" json:"previous_message_type,omitempty"`
+}
+
+func (x *VersioningAnnotation) Reset() {
+ *x = VersioningAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_annotations_v3_versioning_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *VersioningAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*VersioningAnnotation) ProtoMessage() {}
+
+func (x *VersioningAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_annotations_v3_versioning_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use VersioningAnnotation.ProtoReflect.Descriptor instead.
+func (*VersioningAnnotation) Descriptor() ([]byte, []int) {
+ return file_xds_annotations_v3_versioning_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *VersioningAnnotation) GetPreviousMessageType() string {
+ if x != nil {
+ return x.PreviousMessageType
+ }
+ return ""
+}
+
+var file_xds_annotations_v3_versioning_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*VersioningAnnotation)(nil),
+ Field: 92389011,
+ Name: "xds.annotations.v3.versioning",
+ Tag: "bytes,92389011,opt,name=versioning",
+ Filename: "xds/annotations/v3/versioning.proto",
+ },
+}
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // optional xds.annotations.v3.VersioningAnnotation versioning = 92389011;
+ E_Versioning = &file_xds_annotations_v3_versioning_proto_extTypes[0]
+)
+
+var File_xds_annotations_v3_versioning_proto protoreflect.FileDescriptor
+
+var file_xds_annotations_v3_versioning_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4a, 0x0a, 0x14, 0x56,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x13, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x6c, 0x0a, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x93, 0xfd, 0x86, 0x2c, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x41,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_annotations_v3_versioning_proto_rawDescOnce sync.Once
+ file_xds_annotations_v3_versioning_proto_rawDescData = file_xds_annotations_v3_versioning_proto_rawDesc
+)
+
+func file_xds_annotations_v3_versioning_proto_rawDescGZIP() []byte {
+ file_xds_annotations_v3_versioning_proto_rawDescOnce.Do(func() {
+ file_xds_annotations_v3_versioning_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_annotations_v3_versioning_proto_rawDescData)
+ })
+ return file_xds_annotations_v3_versioning_proto_rawDescData
+}
+
+var file_xds_annotations_v3_versioning_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_annotations_v3_versioning_proto_goTypes = []interface{}{
+ (*VersioningAnnotation)(nil), // 0: xds.annotations.v3.VersioningAnnotation
+ (*descriptorpb.MessageOptions)(nil), // 1: google.protobuf.MessageOptions
+}
+var file_xds_annotations_v3_versioning_proto_depIdxs = []int32{
+ 1, // 0: xds.annotations.v3.versioning:extendee -> google.protobuf.MessageOptions
+ 0, // 1: xds.annotations.v3.versioning:type_name -> xds.annotations.v3.VersioningAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_annotations_v3_versioning_proto_init() }
+func file_xds_annotations_v3_versioning_proto_init() {
+ if File_xds_annotations_v3_versioning_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_annotations_v3_versioning_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*VersioningAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_annotations_v3_versioning_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_annotations_v3_versioning_proto_goTypes,
+ DependencyIndexes: file_xds_annotations_v3_versioning_proto_depIdxs,
+ MessageInfos: file_xds_annotations_v3_versioning_proto_msgTypes,
+ ExtensionInfos: file_xds_annotations_v3_versioning_proto_extTypes,
+ }.Build()
+ File_xds_annotations_v3_versioning_proto = out.File
+ file_xds_annotations_v3_versioning_proto_rawDesc = nil
+ file_xds_annotations_v3_versioning_proto_goTypes = nil
+ file_xds_annotations_v3_versioning_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go
new file mode 100644
index 000000000..042c266e1
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/annotations/v3/versioning.pb.validate.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/annotations/v3/versioning.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *VersioningAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on VersioningAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// VersioningAnnotationMultiError, or nil if none found.
+func (m *VersioningAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *VersioningAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for PreviousMessageType
+
+ if len(errors) > 0 {
+ return VersioningAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// VersioningAnnotationMultiError is an error wrapping multiple validation
+// errors returned by VersioningAnnotation.ValidateAll() if the designated
+// constraints aren't met.
+type VersioningAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m VersioningAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m VersioningAnnotationMultiError) AllErrors() []error { return m }
+
+// VersioningAnnotationValidationError is the validation error returned by
+// VersioningAnnotation.Validate if the designated constraints aren't met.
+type VersioningAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e VersioningAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e VersioningAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e VersioningAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e VersioningAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e VersioningAnnotationValidationError) ErrorName() string {
+ return "VersioningAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e VersioningAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sVersioningAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = VersioningAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = VersioningAnnotationValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
new file mode 100644
index 000000000..035b8c010
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.go
@@ -0,0 +1,153 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/authority.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Authority struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *Authority) Reset() {
+ *x = Authority{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_authority_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Authority) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Authority) ProtoMessage() {}
+
+func (x *Authority) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_authority_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Authority.ProtoReflect.Descriptor instead.
+func (*Authority) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_authority_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Authority) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_xds_core_v3_authority_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_authority_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x75,
+ 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78,
+ 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x09, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74,
+ 0x79, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42,
+ 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x56,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_authority_proto_rawDescOnce sync.Once
+ file_xds_core_v3_authority_proto_rawDescData = file_xds_core_v3_authority_proto_rawDesc
+)
+
+func file_xds_core_v3_authority_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_authority_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_authority_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_authority_proto_rawDescData)
+ })
+ return file_xds_core_v3_authority_proto_rawDescData
+}
+
+var file_xds_core_v3_authority_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_authority_proto_goTypes = []interface{}{
+ (*Authority)(nil), // 0: xds.core.v3.Authority
+}
+var file_xds_core_v3_authority_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_authority_proto_init() }
+func file_xds_core_v3_authority_proto_init() {
+ if File_xds_core_v3_authority_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_authority_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Authority); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_authority_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_authority_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_authority_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_authority_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_authority_proto = out.File
+ file_xds_core_v3_authority_proto_rawDesc = nil
+ file_xds_core_v3_authority_proto_goTypes = nil
+ file_xds_core_v3_authority_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
new file mode 100644
index 000000000..94317c2af
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/authority.pb.validate.go
@@ -0,0 +1,146 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/authority.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Authority with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Authority) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Authority with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in AuthorityMultiError, or nil
+// if none found.
+func (m *Authority) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Authority) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := AuthorityValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return AuthorityMultiError(errors)
+ }
+
+ return nil
+}
+
+// AuthorityMultiError is an error wrapping multiple validation errors returned
+// by Authority.ValidateAll() if the designated constraints aren't met.
+type AuthorityMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AuthorityMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AuthorityMultiError) AllErrors() []error { return m }
+
+// AuthorityValidationError is the validation error returned by
+// Authority.Validate if the designated constraints aren't met.
+type AuthorityValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AuthorityValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AuthorityValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AuthorityValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AuthorityValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AuthorityValidationError) ErrorName() string { return "AuthorityValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AuthorityValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAuthority.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AuthorityValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AuthorityValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go
new file mode 100644
index 000000000..58c27d7d3
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.go
@@ -0,0 +1,172 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/cidr.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CidrRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ AddressPrefix string `protobuf:"bytes,1,opt,name=address_prefix,json=addressPrefix,proto3" json:"address_prefix,omitempty"`
+ PrefixLen *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=prefix_len,json=prefixLen,proto3" json:"prefix_len,omitempty"`
+}
+
+func (x *CidrRange) Reset() {
+ *x = CidrRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_cidr_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CidrRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CidrRange) ProtoMessage() {}
+
+func (x *CidrRange) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_cidr_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CidrRange.ProtoReflect.Descriptor instead.
+func (*CidrRange) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_cidr_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CidrRange) GetAddressPrefix() string {
+ if x != nil {
+ return x.AddressPrefix
+ }
+ return ""
+}
+
+func (x *CidrRange) GetPrefixLen() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.PrefixLen
+ }
+ return nil
+}
+
+var File_xds_core_v3_cidr_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_cidr_proto_rawDesc = []byte{
+ 0x0a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69,
+ 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x82, 0x01, 0x0a, 0x09, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2e, 0x0a,
+ 0x0e, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0d,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x45, 0x0a,
+ 0x0a, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0x2a, 0x03, 0x18, 0x80, 0x01, 0x52, 0x09, 0x70, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x4c, 0x65, 0x6e, 0x42, 0x56, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16,
+ 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x43, 0x69, 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_cidr_proto_rawDescOnce sync.Once
+ file_xds_core_v3_cidr_proto_rawDescData = file_xds_core_v3_cidr_proto_rawDesc
+)
+
+func file_xds_core_v3_cidr_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_cidr_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_cidr_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_cidr_proto_rawDescData)
+ })
+ return file_xds_core_v3_cidr_proto_rawDescData
+}
+
+var file_xds_core_v3_cidr_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_cidr_proto_goTypes = []interface{}{
+ (*CidrRange)(nil), // 0: xds.core.v3.CidrRange
+ (*wrapperspb.UInt32Value)(nil), // 1: google.protobuf.UInt32Value
+}
+var file_xds_core_v3_cidr_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.CidrRange.prefix_len:type_name -> google.protobuf.UInt32Value
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_cidr_proto_init() }
+func file_xds_core_v3_cidr_proto_init() {
+ if File_xds_core_v3_cidr_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_cidr_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CidrRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_cidr_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_cidr_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_cidr_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_cidr_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_cidr_proto = out.File
+ file_xds_core_v3_cidr_proto_rawDesc = nil
+ file_xds_core_v3_cidr_proto_goTypes = nil
+ file_xds_core_v3_cidr_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go
new file mode 100644
index 000000000..43327f56b
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/cidr.pb.validate.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/cidr.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on CidrRange with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *CidrRange) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CidrRange with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CidrRangeMultiError, or nil
+// if none found.
+func (m *CidrRange) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CidrRange) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetAddressPrefix()) < 1 {
+ err := CidrRangeValidationError{
+ field: "AddressPrefix",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if wrapper := m.GetPrefixLen(); wrapper != nil {
+
+ if wrapper.GetValue() > 128 {
+ err := CidrRangeValidationError{
+ field: "PrefixLen",
+ reason: "value must be less than or equal to 128",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CidrRangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// CidrRangeMultiError is an error wrapping multiple validation errors returned
+// by CidrRange.ValidateAll() if the designated constraints aren't met.
+type CidrRangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CidrRangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CidrRangeMultiError) AllErrors() []error { return m }
+
+// CidrRangeValidationError is the validation error returned by
+// CidrRange.Validate if the designated constraints aren't met.
+type CidrRangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CidrRangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CidrRangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CidrRangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CidrRangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CidrRangeValidationError) ErrorName() string { return "CidrRangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CidrRangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCidrRange.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CidrRangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CidrRangeValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
new file mode 100644
index 000000000..f0b4c12f2
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.go
@@ -0,0 +1,297 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/collection_entry.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CollectionEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ResourceSpecifier:
+ //
+ // *CollectionEntry_Locator
+ // *CollectionEntry_InlineEntry_
+ ResourceSpecifier isCollectionEntry_ResourceSpecifier `protobuf_oneof:"resource_specifier"`
+}
+
+func (x *CollectionEntry) Reset() {
+ *x = CollectionEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_collection_entry_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionEntry) ProtoMessage() {}
+
+func (x *CollectionEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_collection_entry_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionEntry.ProtoReflect.Descriptor instead.
+func (*CollectionEntry) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_collection_entry_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *CollectionEntry) GetResourceSpecifier() isCollectionEntry_ResourceSpecifier {
+ if m != nil {
+ return m.ResourceSpecifier
+ }
+ return nil
+}
+
+func (x *CollectionEntry) GetLocator() *ResourceLocator {
+ if x, ok := x.GetResourceSpecifier().(*CollectionEntry_Locator); ok {
+ return x.Locator
+ }
+ return nil
+}
+
+func (x *CollectionEntry) GetInlineEntry() *CollectionEntry_InlineEntry {
+ if x, ok := x.GetResourceSpecifier().(*CollectionEntry_InlineEntry_); ok {
+ return x.InlineEntry
+ }
+ return nil
+}
+
+type isCollectionEntry_ResourceSpecifier interface {
+ isCollectionEntry_ResourceSpecifier()
+}
+
+type CollectionEntry_Locator struct {
+ Locator *ResourceLocator `protobuf:"bytes,1,opt,name=locator,proto3,oneof"`
+}
+
+type CollectionEntry_InlineEntry_ struct {
+ InlineEntry *CollectionEntry_InlineEntry `protobuf:"bytes,2,opt,name=inline_entry,json=inlineEntry,proto3,oneof"`
+}
+
+func (*CollectionEntry_Locator) isCollectionEntry_ResourceSpecifier() {}
+
+func (*CollectionEntry_InlineEntry_) isCollectionEntry_ResourceSpecifier() {}
+
+type CollectionEntry_InlineEntry struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *CollectionEntry_InlineEntry) Reset() {
+ *x = CollectionEntry_InlineEntry{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_collection_entry_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CollectionEntry_InlineEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CollectionEntry_InlineEntry) ProtoMessage() {}
+
+func (x *CollectionEntry_InlineEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_collection_entry_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CollectionEntry_InlineEntry.ProtoReflect.Descriptor instead.
+func (*CollectionEntry_InlineEntry) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_collection_entry_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *CollectionEntry_InlineEntry) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *CollectionEntry_InlineEntry) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *CollectionEntry_InlineEntry) GetResource() *anypb.Any {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+var File_xds_core_v3_collection_entry_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_collection_entry_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64,
+ 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x78,
+ 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc3, 0x02, 0x0a, 0x0f, 0x43,
+ 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x38,
+ 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1c, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52,
+ 0x07, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x4d, 0x0a, 0x0c, 0x69, 0x6e, 0x6c, 0x69,
+ 0x6e, 0x65, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6c,
+ 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x49, 0x6e, 0x6c,
+ 0x69, 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x69, 0x6e, 0x6c, 0x69,
+ 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x1a, 0x8b, 0x01, 0x0a, 0x0b, 0x49, 0x6e, 0x6c, 0x69,
+ 0x6e, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x72, 0x17, 0x32, 0x15, 0x5e, 0x5b,
+ 0x30, 0x2d, 0x39, 0x61, 0x2d, 0x7a, 0x41, 0x2d, 0x5a, 0x5f, 0x5c, 0x2d, 0x5c, 0x2e, 0x7e, 0x3a,
+ 0x5d, 0x2b, 0x24, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x19, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x42, 0x5c, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x42, 0x14, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_collection_entry_proto_rawDescOnce sync.Once
+ file_xds_core_v3_collection_entry_proto_rawDescData = file_xds_core_v3_collection_entry_proto_rawDesc
+)
+
+func file_xds_core_v3_collection_entry_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_collection_entry_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_collection_entry_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_collection_entry_proto_rawDescData)
+ })
+ return file_xds_core_v3_collection_entry_proto_rawDescData
+}
+
+var file_xds_core_v3_collection_entry_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_core_v3_collection_entry_proto_goTypes = []interface{}{
+ (*CollectionEntry)(nil), // 0: xds.core.v3.CollectionEntry
+ (*CollectionEntry_InlineEntry)(nil), // 1: xds.core.v3.CollectionEntry.InlineEntry
+ (*ResourceLocator)(nil), // 2: xds.core.v3.ResourceLocator
+ (*anypb.Any)(nil), // 3: google.protobuf.Any
+}
+var file_xds_core_v3_collection_entry_proto_depIdxs = []int32{
+ 2, // 0: xds.core.v3.CollectionEntry.locator:type_name -> xds.core.v3.ResourceLocator
+ 1, // 1: xds.core.v3.CollectionEntry.inline_entry:type_name -> xds.core.v3.CollectionEntry.InlineEntry
+ 3, // 2: xds.core.v3.CollectionEntry.InlineEntry.resource:type_name -> google.protobuf.Any
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_collection_entry_proto_init() }
+func file_xds_core_v3_collection_entry_proto_init() {
+ if File_xds_core_v3_collection_entry_proto != nil {
+ return
+ }
+ file_xds_core_v3_resource_locator_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_collection_entry_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_core_v3_collection_entry_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CollectionEntry_InlineEntry); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_core_v3_collection_entry_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*CollectionEntry_Locator)(nil),
+ (*CollectionEntry_InlineEntry_)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_collection_entry_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_collection_entry_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_collection_entry_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_collection_entry_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_collection_entry_proto = out.File
+ file_xds_core_v3_collection_entry_proto_rawDesc = nil
+ file_xds_core_v3_collection_entry_proto_goTypes = nil
+ file_xds_core_v3_collection_entry_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
new file mode 100644
index 000000000..610990b7f
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/collection_entry.pb.validate.go
@@ -0,0 +1,383 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/collection_entry.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on CollectionEntry with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *CollectionEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CollectionEntry with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CollectionEntryMultiError, or nil if none found.
+func (m *CollectionEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CollectionEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofResourceSpecifierPresent := false
+ switch v := m.ResourceSpecifier.(type) {
+ case *CollectionEntry_Locator:
+ if v == nil {
+ err := CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofResourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetLocator()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocator()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntryValidationError{
+ field: "Locator",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *CollectionEntry_InlineEntry_:
+ if v == nil {
+ err := CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofResourceSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetInlineEntry()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInlineEntry()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntryValidationError{
+ field: "InlineEntry",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofResourceSpecifierPresent {
+ err := CollectionEntryValidationError{
+ field: "ResourceSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return CollectionEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// CollectionEntryMultiError is an error wrapping multiple validation errors
+// returned by CollectionEntry.ValidateAll() if the designated constraints
+// aren't met.
+type CollectionEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CollectionEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CollectionEntryMultiError) AllErrors() []error { return m }
+
+// CollectionEntryValidationError is the validation error returned by
+// CollectionEntry.Validate if the designated constraints aren't met.
+type CollectionEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CollectionEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CollectionEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CollectionEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CollectionEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CollectionEntryValidationError) ErrorName() string { return "CollectionEntryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CollectionEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCollectionEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CollectionEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CollectionEntryValidationError{}
+
+// Validate checks the field values on CollectionEntry_InlineEntry with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CollectionEntry_InlineEntry) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CollectionEntry_InlineEntry with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CollectionEntry_InlineEntryMultiError, or nil if none found.
+func (m *CollectionEntry_InlineEntry) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CollectionEntry_InlineEntry) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if !_CollectionEntry_InlineEntry_Name_Pattern.MatchString(m.GetName()) {
+ err := CollectionEntry_InlineEntryValidationError{
+ field: "Name",
+ reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\.~:]+$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Version
+
+ if all {
+ switch v := interface{}(m.GetResource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CollectionEntry_InlineEntryValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CollectionEntry_InlineEntryMultiError(errors)
+ }
+
+ return nil
+}
+
+// CollectionEntry_InlineEntryMultiError is an error wrapping multiple
+// validation errors returned by CollectionEntry_InlineEntry.ValidateAll() if
+// the designated constraints aren't met.
+type CollectionEntry_InlineEntryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CollectionEntry_InlineEntryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CollectionEntry_InlineEntryMultiError) AllErrors() []error { return m }
+
+// CollectionEntry_InlineEntryValidationError is the validation error returned
+// by CollectionEntry_InlineEntry.Validate if the designated constraints
+// aren't met.
+type CollectionEntry_InlineEntryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CollectionEntry_InlineEntryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CollectionEntry_InlineEntryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CollectionEntry_InlineEntryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CollectionEntry_InlineEntryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CollectionEntry_InlineEntryValidationError) ErrorName() string {
+ return "CollectionEntry_InlineEntryValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CollectionEntry_InlineEntryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCollectionEntry_InlineEntry.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CollectionEntry_InlineEntryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CollectionEntry_InlineEntryValidationError{}
+
+var _CollectionEntry_InlineEntry_Name_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\.~:]+$")
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
new file mode 100644
index 000000000..3e75637ea
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.go
@@ -0,0 +1,160 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/context_params.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ContextParams struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Params map[string]string `protobuf:"bytes,1,rep,name=params,proto3" json:"params,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ContextParams) Reset() {
+ *x = ContextParams{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_context_params_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ContextParams) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ContextParams) ProtoMessage() {}
+
+func (x *ContextParams) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_context_params_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ContextParams.ProtoReflect.Descriptor instead.
+func (*ContextParams) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_context_params_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ContextParams) GetParams() map[string]string {
+ if x != nil {
+ return x.Params
+ }
+ return nil
+}
+
+var File_xds_core_v3_context_params_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_context_params_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x8a, 0x01, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61,
+ 0x6d, 0x73, 0x12, 0x3e, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x50,
+ 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x61, 0x72, 0x61,
+ 0x6d, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5a, 0xd2,
+ 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x12, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64,
+ 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var (
+ file_xds_core_v3_context_params_proto_rawDescOnce sync.Once
+ file_xds_core_v3_context_params_proto_rawDescData = file_xds_core_v3_context_params_proto_rawDesc
+)
+
+func file_xds_core_v3_context_params_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_context_params_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_context_params_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_context_params_proto_rawDescData)
+ })
+ return file_xds_core_v3_context_params_proto_rawDescData
+}
+
+var file_xds_core_v3_context_params_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_core_v3_context_params_proto_goTypes = []interface{}{
+ (*ContextParams)(nil), // 0: xds.core.v3.ContextParams
+ nil, // 1: xds.core.v3.ContextParams.ParamsEntry
+}
+var file_xds_core_v3_context_params_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.ContextParams.params:type_name -> xds.core.v3.ContextParams.ParamsEntry
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_context_params_proto_init() }
+func file_xds_core_v3_context_params_proto_init() {
+ if File_xds_core_v3_context_params_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_context_params_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ContextParams); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_context_params_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_context_params_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_context_params_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_context_params_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_context_params_proto = out.File
+ file_xds_core_v3_context_params_proto_rawDesc = nil
+ file_xds_core_v3_context_params_proto_goTypes = nil
+ file_xds_core_v3_context_params_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
new file mode 100644
index 000000000..1c9accaa3
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/context_params.pb.validate.go
@@ -0,0 +1,138 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/context_params.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ContextParams with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ContextParams) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ContextParams with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ContextParamsMultiError, or
+// nil if none found.
+func (m *ContextParams) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ContextParams) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Params
+
+ if len(errors) > 0 {
+ return ContextParamsMultiError(errors)
+ }
+
+ return nil
+}
+
+// ContextParamsMultiError is an error wrapping multiple validation errors
+// returned by ContextParams.ValidateAll() if the designated constraints
+// aren't met.
+type ContextParamsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ContextParamsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ContextParamsMultiError) AllErrors() []error { return m }
+
+// ContextParamsValidationError is the validation error returned by
+// ContextParams.Validate if the designated constraints aren't met.
+type ContextParamsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ContextParamsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ContextParamsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ContextParamsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ContextParamsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ContextParamsValidationError) ErrorName() string { return "ContextParamsValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ContextParamsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sContextParams.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ContextParamsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ContextParamsValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go
new file mode 100644
index 000000000..7183e1143
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.go
@@ -0,0 +1,167 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/extension.proto
+
+package v3
+
+import (
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type TypedExtensionConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ TypedConfig *anypb.Any `protobuf:"bytes,2,opt,name=typed_config,json=typedConfig,proto3" json:"typed_config,omitempty"`
+}
+
+func (x *TypedExtensionConfig) Reset() {
+ *x = TypedExtensionConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_extension_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedExtensionConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedExtensionConfig) ProtoMessage() {}
+
+func (x *TypedExtensionConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_extension_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedExtensionConfig.ProtoReflect.Descriptor instead.
+func (*TypedExtensionConfig) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_extension_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TypedExtensionConfig) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *TypedExtensionConfig) GetTypedConfig() *anypb.Any {
+ if x != nil {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+var File_xds_core_v3_extension_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_extension_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78,
+ 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x76,
+ 0x0a, 0x14, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0xa2, 0x01, 0x02, 0x08, 0x01, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x4e, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_extension_proto_rawDescOnce sync.Once
+ file_xds_core_v3_extension_proto_rawDescData = file_xds_core_v3_extension_proto_rawDesc
+)
+
+func file_xds_core_v3_extension_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_extension_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_extension_proto_rawDescData)
+ })
+ return file_xds_core_v3_extension_proto_rawDescData
+}
+
+var file_xds_core_v3_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_extension_proto_goTypes = []interface{}{
+ (*TypedExtensionConfig)(nil), // 0: xds.core.v3.TypedExtensionConfig
+ (*anypb.Any)(nil), // 1: google.protobuf.Any
+}
+var file_xds_core_v3_extension_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.TypedExtensionConfig.typed_config:type_name -> google.protobuf.Any
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_extension_proto_init() }
+func file_xds_core_v3_extension_proto_init() {
+ if File_xds_core_v3_extension_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypedExtensionConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_extension_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_extension_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_extension_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_extension_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_extension_proto = out.File
+ file_xds_core_v3_extension_proto_rawDesc = nil
+ file_xds_core_v3_extension_proto_goTypes = nil
+ file_xds_core_v3_extension_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go
new file mode 100644
index 000000000..839f3fef7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/extension.pb.validate.go
@@ -0,0 +1,164 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/extension.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TypedExtensionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *TypedExtensionConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TypedExtensionConfig with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// TypedExtensionConfigMultiError, or nil if none found.
+func (m *TypedExtensionConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TypedExtensionConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := TypedExtensionConfigValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetTypedConfig() == nil {
+ err := TypedExtensionConfigValidationError{
+ field: "TypedConfig",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if a := m.GetTypedConfig(); a != nil {
+
+ }
+
+ if len(errors) > 0 {
+ return TypedExtensionConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// TypedExtensionConfigMultiError is an error wrapping multiple validation
+// errors returned by TypedExtensionConfig.ValidateAll() if the designated
+// constraints aren't met.
+type TypedExtensionConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TypedExtensionConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TypedExtensionConfigMultiError) AllErrors() []error { return m }
+
+// TypedExtensionConfigValidationError is the validation error returned by
+// TypedExtensionConfig.Validate if the designated constraints aren't met.
+type TypedExtensionConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TypedExtensionConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TypedExtensionConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TypedExtensionConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TypedExtensionConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TypedExtensionConfigValidationError) ErrorName() string {
+ return "TypedExtensionConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e TypedExtensionConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTypedExtensionConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TypedExtensionConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TypedExtensionConfigValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
new file mode 100644
index 000000000..ced3bc3f4
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.go
@@ -0,0 +1,182 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/resource.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Resource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *ResourceName `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ Resource *anypb.Any `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+}
+
+func (x *Resource) Reset() {
+ *x = Resource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Resource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource) ProtoMessage() {}
+
+func (x *Resource) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
+func (*Resource) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Resource) GetName() *ResourceName {
+ if x != nil {
+ return x.Name
+ }
+ return nil
+}
+
+func (x *Resource) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *Resource) GetResource() *anypb.Any {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+var File_xds_core_v3_resource_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_resource_proto_rawDesc = []byte{
+ 0x0a, 0x1a, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64,
+ 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f,
+ 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x12, 0x2d, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x55,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_resource_proto_rawDescOnce sync.Once
+ file_xds_core_v3_resource_proto_rawDescData = file_xds_core_v3_resource_proto_rawDesc
+)
+
+func file_xds_core_v3_resource_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_resource_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_proto_rawDescData)
+ })
+ return file_xds_core_v3_resource_proto_rawDescData
+}
+
+var file_xds_core_v3_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_resource_proto_goTypes = []interface{}{
+ (*Resource)(nil), // 0: xds.core.v3.Resource
+ (*ResourceName)(nil), // 1: xds.core.v3.ResourceName
+ (*anypb.Any)(nil), // 2: google.protobuf.Any
+}
+var file_xds_core_v3_resource_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.Resource.name:type_name -> xds.core.v3.ResourceName
+ 2, // 1: xds.core.v3.Resource.resource:type_name -> google.protobuf.Any
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_resource_proto_init() }
+func file_xds_core_v3_resource_proto_init() {
+ if File_xds_core_v3_resource_proto != nil {
+ return
+ }
+ file_xds_core_v3_resource_name_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Resource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_resource_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_resource_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_resource_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_resource_proto = out.File
+ file_xds_core_v3_resource_proto_rawDesc = nil
+ file_xds_core_v3_resource_proto_goTypes = nil
+ file_xds_core_v3_resource_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
new file mode 100644
index 000000000..dc972171c
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource.pb.validate.go
@@ -0,0 +1,195 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Resource with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Resource) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Resource with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ResourceMultiError, or nil
+// if none found.
+func (m *Resource) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Resource) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetName()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetName()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "Name",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Version
+
+ if all {
+ switch v := interface{}(m.GetResource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceValidationError{
+ field: "Resource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ResourceMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceMultiError is an error wrapping multiple validation errors returned
+// by Resource.ValidateAll() if the designated constraints aren't met.
+type ResourceMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceMultiError) AllErrors() []error { return m }
+
+// ResourceValidationError is the validation error returned by
+// Resource.Validate if the designated constraints aren't met.
+type ResourceValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceValidationError) ErrorName() string { return "ResourceValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResource.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
new file mode 100644
index 000000000..f469c18cf
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.go
@@ -0,0 +1,406 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/resource_locator.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ResourceLocator_Scheme int32
+
+const (
+ ResourceLocator_XDSTP ResourceLocator_Scheme = 0
+ ResourceLocator_HTTP ResourceLocator_Scheme = 1
+ ResourceLocator_FILE ResourceLocator_Scheme = 2
+)
+
+// Enum value maps for ResourceLocator_Scheme.
+var (
+ ResourceLocator_Scheme_name = map[int32]string{
+ 0: "XDSTP",
+ 1: "HTTP",
+ 2: "FILE",
+ }
+ ResourceLocator_Scheme_value = map[string]int32{
+ "XDSTP": 0,
+ "HTTP": 1,
+ "FILE": 2,
+ }
+)
+
+func (x ResourceLocator_Scheme) Enum() *ResourceLocator_Scheme {
+ p := new(ResourceLocator_Scheme)
+ *p = x
+ return p
+}
+
+func (x ResourceLocator_Scheme) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ResourceLocator_Scheme) Descriptor() protoreflect.EnumDescriptor {
+ return file_xds_core_v3_resource_locator_proto_enumTypes[0].Descriptor()
+}
+
+func (ResourceLocator_Scheme) Type() protoreflect.EnumType {
+ return &file_xds_core_v3_resource_locator_proto_enumTypes[0]
+}
+
+func (x ResourceLocator_Scheme) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ResourceLocator_Scheme.Descriptor instead.
+func (ResourceLocator_Scheme) EnumDescriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type ResourceLocator struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Scheme ResourceLocator_Scheme `protobuf:"varint,1,opt,name=scheme,proto3,enum=xds.core.v3.ResourceLocator_Scheme" json:"scheme,omitempty"`
+ Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+ Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
+ ResourceType string `protobuf:"bytes,4,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+ // Types that are assignable to ContextParamSpecifier:
+ //
+ // *ResourceLocator_ExactContext
+ ContextParamSpecifier isResourceLocator_ContextParamSpecifier `protobuf_oneof:"context_param_specifier"`
+ Directives []*ResourceLocator_Directive `protobuf:"bytes,6,rep,name=directives,proto3" json:"directives,omitempty"`
+}
+
+func (x *ResourceLocator) Reset() {
+ *x = ResourceLocator{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_resource_locator_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceLocator) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceLocator) ProtoMessage() {}
+
+func (x *ResourceLocator) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_resource_locator_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceLocator.ProtoReflect.Descriptor instead.
+func (*ResourceLocator) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceLocator) GetScheme() ResourceLocator_Scheme {
+ if x != nil {
+ return x.Scheme
+ }
+ return ResourceLocator_XDSTP
+}
+
+func (x *ResourceLocator) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *ResourceLocator) GetAuthority() string {
+ if x != nil {
+ return x.Authority
+ }
+ return ""
+}
+
+func (x *ResourceLocator) GetResourceType() string {
+ if x != nil {
+ return x.ResourceType
+ }
+ return ""
+}
+
+func (m *ResourceLocator) GetContextParamSpecifier() isResourceLocator_ContextParamSpecifier {
+ if m != nil {
+ return m.ContextParamSpecifier
+ }
+ return nil
+}
+
+func (x *ResourceLocator) GetExactContext() *ContextParams {
+ if x, ok := x.GetContextParamSpecifier().(*ResourceLocator_ExactContext); ok {
+ return x.ExactContext
+ }
+ return nil
+}
+
+func (x *ResourceLocator) GetDirectives() []*ResourceLocator_Directive {
+ if x != nil {
+ return x.Directives
+ }
+ return nil
+}
+
+type isResourceLocator_ContextParamSpecifier interface {
+ isResourceLocator_ContextParamSpecifier()
+}
+
+type ResourceLocator_ExactContext struct {
+ ExactContext *ContextParams `protobuf:"bytes,5,opt,name=exact_context,json=exactContext,proto3,oneof"`
+}
+
+func (*ResourceLocator_ExactContext) isResourceLocator_ContextParamSpecifier() {}
+
+type ResourceLocator_Directive struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to Directive:
+ //
+ // *ResourceLocator_Directive_Alt
+ // *ResourceLocator_Directive_Entry
+ Directive isResourceLocator_Directive_Directive `protobuf_oneof:"directive"`
+}
+
+func (x *ResourceLocator_Directive) Reset() {
+ *x = ResourceLocator_Directive{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_resource_locator_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceLocator_Directive) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceLocator_Directive) ProtoMessage() {}
+
+func (x *ResourceLocator_Directive) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_resource_locator_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceLocator_Directive.ProtoReflect.Descriptor instead.
+func (*ResourceLocator_Directive) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_locator_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (m *ResourceLocator_Directive) GetDirective() isResourceLocator_Directive_Directive {
+ if m != nil {
+ return m.Directive
+ }
+ return nil
+}
+
+func (x *ResourceLocator_Directive) GetAlt() *ResourceLocator {
+ if x, ok := x.GetDirective().(*ResourceLocator_Directive_Alt); ok {
+ return x.Alt
+ }
+ return nil
+}
+
+func (x *ResourceLocator_Directive) GetEntry() string {
+ if x, ok := x.GetDirective().(*ResourceLocator_Directive_Entry); ok {
+ return x.Entry
+ }
+ return ""
+}
+
+type isResourceLocator_Directive_Directive interface {
+ isResourceLocator_Directive_Directive()
+}
+
+type ResourceLocator_Directive_Alt struct {
+ Alt *ResourceLocator `protobuf:"bytes,1,opt,name=alt,proto3,oneof"`
+}
+
+type ResourceLocator_Directive_Entry struct {
+ Entry string `protobuf:"bytes,2,opt,name=entry,proto3,oneof"`
+}
+
+func (*ResourceLocator_Directive_Alt) isResourceLocator_Directive_Directive() {}
+
+func (*ResourceLocator_Directive_Entry) isResourceLocator_Directive_Directive() {}
+
+var File_xds_core_v3_resource_locator_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_resource_locator_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76,
+ 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8e, 0x04,
+ 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x12, 0x45, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x23, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e,
+ 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01,
+ 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74,
+ 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa,
+ 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78,
+ 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x78, 0x61, 0x63, 0x74,
+ 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x0a, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x69, 0x76, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x52, 0x0a, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x73, 0x1a,
+ 0x88, 0x01, 0x0a, 0x09, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x30, 0x0a,
+ 0x03, 0x61, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x78, 0x64, 0x73,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6c, 0x74, 0x12,
+ 0x37, 0x0a, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x1f,
+ 0xfa, 0x42, 0x1c, 0x72, 0x1a, 0x10, 0x01, 0x32, 0x16, 0x5e, 0x5b, 0x30, 0x2d, 0x39, 0x61, 0x2d,
+ 0x7a, 0x41, 0x2d, 0x5a, 0x5f, 0x5c, 0x2d, 0x5c, 0x2e, 0x2f, 0x7e, 0x3a, 0x5d, 0x2b, 0x24, 0x48,
+ 0x00, 0x52, 0x05, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x10, 0x0a, 0x09, 0x64, 0x69, 0x72, 0x65,
+ 0x63, 0x74, 0x69, 0x76, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x27, 0x0a, 0x06, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x58, 0x44, 0x53, 0x54, 0x50, 0x10, 0x00, 0x12,
+ 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x49, 0x4c,
+ 0x45, 0x10, 0x02, 0x42, 0x19, 0x0a, 0x17, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70,
+ 0x61, 0x72, 0x61, 0x6d, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x5c,
+ 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x42, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_resource_locator_proto_rawDescOnce sync.Once
+ file_xds_core_v3_resource_locator_proto_rawDescData = file_xds_core_v3_resource_locator_proto_rawDesc
+)
+
+func file_xds_core_v3_resource_locator_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_resource_locator_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_resource_locator_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_locator_proto_rawDescData)
+ })
+ return file_xds_core_v3_resource_locator_proto_rawDescData
+}
+
+var file_xds_core_v3_resource_locator_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_xds_core_v3_resource_locator_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_core_v3_resource_locator_proto_goTypes = []interface{}{
+ (ResourceLocator_Scheme)(0), // 0: xds.core.v3.ResourceLocator.Scheme
+ (*ResourceLocator)(nil), // 1: xds.core.v3.ResourceLocator
+ (*ResourceLocator_Directive)(nil), // 2: xds.core.v3.ResourceLocator.Directive
+ (*ContextParams)(nil), // 3: xds.core.v3.ContextParams
+}
+var file_xds_core_v3_resource_locator_proto_depIdxs = []int32{
+ 0, // 0: xds.core.v3.ResourceLocator.scheme:type_name -> xds.core.v3.ResourceLocator.Scheme
+ 3, // 1: xds.core.v3.ResourceLocator.exact_context:type_name -> xds.core.v3.ContextParams
+ 2, // 2: xds.core.v3.ResourceLocator.directives:type_name -> xds.core.v3.ResourceLocator.Directive
+ 1, // 3: xds.core.v3.ResourceLocator.Directive.alt:type_name -> xds.core.v3.ResourceLocator
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 4, // [4:4] is the sub-list for extension extendee
+ 0, // [0:4] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_resource_locator_proto_init() }
+func file_xds_core_v3_resource_locator_proto_init() {
+ if File_xds_core_v3_resource_locator_proto != nil {
+ return
+ }
+ file_xds_core_v3_context_params_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_resource_locator_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceLocator); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_core_v3_resource_locator_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceLocator_Directive); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_core_v3_resource_locator_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*ResourceLocator_ExactContext)(nil),
+ }
+ file_xds_core_v3_resource_locator_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*ResourceLocator_Directive_Alt)(nil),
+ (*ResourceLocator_Directive_Entry)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_resource_locator_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_resource_locator_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_resource_locator_proto_depIdxs,
+ EnumInfos: file_xds_core_v3_resource_locator_proto_enumTypes,
+ MessageInfos: file_xds_core_v3_resource_locator_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_resource_locator_proto = out.File
+ file_xds_core_v3_resource_locator_proto_rawDesc = nil
+ file_xds_core_v3_resource_locator_proto_goTypes = nil
+ file_xds_core_v3_resource_locator_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
new file mode 100644
index 000000000..1686e98d1
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_locator.pb.validate.go
@@ -0,0 +1,439 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource_locator.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ResourceLocator with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ResourceLocator) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceLocator with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceLocatorMultiError, or nil if none found.
+func (m *ResourceLocator) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceLocator) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := ResourceLocator_Scheme_name[int32(m.GetScheme())]; !ok {
+ err := ResourceLocatorValidationError{
+ field: "Scheme",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Id
+
+ // no validation rules for Authority
+
+ if utf8.RuneCountInString(m.GetResourceType()) < 1 {
+ err := ResourceLocatorValidationError{
+ field: "ResourceType",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetDirectives() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocatorValidationError{
+ field: fmt.Sprintf("Directives[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ switch v := m.ContextParamSpecifier.(type) {
+ case *ResourceLocator_ExactContext:
+ if v == nil {
+ err := ResourceLocatorValidationError{
+ field: "ContextParamSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetExactContext()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExactContext()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocatorValidationError{
+ field: "ExactContext",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return ResourceLocatorMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceLocatorMultiError is an error wrapping multiple validation errors
+// returned by ResourceLocator.ValidateAll() if the designated constraints
+// aren't met.
+type ResourceLocatorMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceLocatorMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceLocatorMultiError) AllErrors() []error { return m }
+
+// ResourceLocatorValidationError is the validation error returned by
+// ResourceLocator.Validate if the designated constraints aren't met.
+type ResourceLocatorValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceLocatorValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceLocatorValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceLocatorValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceLocatorValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceLocatorValidationError) ErrorName() string { return "ResourceLocatorValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceLocatorValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceLocator.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceLocatorValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceLocatorValidationError{}
+
+// Validate checks the field values on ResourceLocator_Directive with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ResourceLocator_Directive) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceLocator_Directive with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceLocator_DirectiveMultiError, or nil if none found.
+func (m *ResourceLocator_Directive) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceLocator_Directive) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofDirectivePresent := false
+ switch v := m.Directive.(type) {
+ case *ResourceLocator_Directive_Alt:
+ if v == nil {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofDirectivePresent = true
+
+ if all {
+ switch v := interface{}(m.GetAlt()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAlt()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceLocator_DirectiveValidationError{
+ field: "Alt",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *ResourceLocator_Directive_Entry:
+ if v == nil {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofDirectivePresent = true
+
+ if utf8.RuneCountInString(m.GetEntry()) < 1 {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Entry",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if !_ResourceLocator_Directive_Entry_Pattern.MatchString(m.GetEntry()) {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Entry",
+ reason: "value does not match regex pattern \"^[0-9a-zA-Z_\\\\-\\\\./~:]+$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofDirectivePresent {
+ err := ResourceLocator_DirectiveValidationError{
+ field: "Directive",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return ResourceLocator_DirectiveMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceLocator_DirectiveMultiError is an error wrapping multiple validation
+// errors returned by ResourceLocator_Directive.ValidateAll() if the
+// designated constraints aren't met.
+type ResourceLocator_DirectiveMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceLocator_DirectiveMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceLocator_DirectiveMultiError) AllErrors() []error { return m }
+
+// ResourceLocator_DirectiveValidationError is the validation error returned by
+// ResourceLocator_Directive.Validate if the designated constraints aren't met.
+type ResourceLocator_DirectiveValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceLocator_DirectiveValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceLocator_DirectiveValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceLocator_DirectiveValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceLocator_DirectiveValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceLocator_DirectiveValidationError) ErrorName() string {
+ return "ResourceLocator_DirectiveValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceLocator_DirectiveValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceLocator_Directive.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceLocator_DirectiveValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceLocator_DirectiveValidationError{}
+
+var _ResourceLocator_Directive_Entry_Pattern = regexp.MustCompile("^[0-9a-zA-Z_\\-\\./~:]+$")
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
new file mode 100644
index 000000000..65f65fdbd
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.go
@@ -0,0 +1,190 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/core/v3/resource_name.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ResourceName struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+ Authority string `protobuf:"bytes,2,opt,name=authority,proto3" json:"authority,omitempty"`
+ ResourceType string `protobuf:"bytes,3,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+ Context *ContextParams `protobuf:"bytes,4,opt,name=context,proto3" json:"context,omitempty"`
+}
+
+func (x *ResourceName) Reset() {
+ *x = ResourceName{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_core_v3_resource_name_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceName) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceName) ProtoMessage() {}
+
+func (x *ResourceName) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_core_v3_resource_name_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceName.ProtoReflect.Descriptor instead.
+func (*ResourceName) Descriptor() ([]byte, []int) {
+ return file_xds_core_v3_resource_name_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceName) GetId() string {
+ if x != nil {
+ return x.Id
+ }
+ return ""
+}
+
+func (x *ResourceName) GetAuthority() string {
+ if x != nil {
+ return x.Authority
+ }
+ return ""
+}
+
+func (x *ResourceName) GetResourceType() string {
+ if x != nil {
+ return x.ResourceType
+ }
+ return ""
+}
+
+func (x *ResourceName) GetContext() *ContextParams {
+ if x != nil {
+ return x.Context
+ }
+ return nil
+}
+
+var File_xds_core_v3_resource_name_proto protoreflect.FileDescriptor
+
+var file_xds_core_v3_resource_name_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1f,
+ 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x20, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e,
+ 0x74, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa0, 0x01, 0x0a, 0x0c, 0x52,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2c, 0x0a, 0x0d, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61,
+ 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x59, 0xd2,
+ 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x42,
+ 0x11, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_core_v3_resource_name_proto_rawDescOnce sync.Once
+ file_xds_core_v3_resource_name_proto_rawDescData = file_xds_core_v3_resource_name_proto_rawDesc
+)
+
+func file_xds_core_v3_resource_name_proto_rawDescGZIP() []byte {
+ file_xds_core_v3_resource_name_proto_rawDescOnce.Do(func() {
+ file_xds_core_v3_resource_name_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_core_v3_resource_name_proto_rawDescData)
+ })
+ return file_xds_core_v3_resource_name_proto_rawDescData
+}
+
+var file_xds_core_v3_resource_name_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_core_v3_resource_name_proto_goTypes = []interface{}{
+ (*ResourceName)(nil), // 0: xds.core.v3.ResourceName
+ (*ContextParams)(nil), // 1: xds.core.v3.ContextParams
+}
+var file_xds_core_v3_resource_name_proto_depIdxs = []int32{
+ 1, // 0: xds.core.v3.ResourceName.context:type_name -> xds.core.v3.ContextParams
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_core_v3_resource_name_proto_init() }
+func file_xds_core_v3_resource_name_proto_init() {
+ if File_xds_core_v3_resource_name_proto != nil {
+ return
+ }
+ file_xds_core_v3_context_params_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_core_v3_resource_name_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceName); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_core_v3_resource_name_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_core_v3_resource_name_proto_goTypes,
+ DependencyIndexes: file_xds_core_v3_resource_name_proto_depIdxs,
+ MessageInfos: file_xds_core_v3_resource_name_proto_msgTypes,
+ }.Build()
+ File_xds_core_v3_resource_name_proto = out.File
+ file_xds_core_v3_resource_name_proto_rawDesc = nil
+ file_xds_core_v3_resource_name_proto_goTypes = nil
+ file_xds_core_v3_resource_name_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
new file mode 100644
index 000000000..270e921bc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/core/v3/resource_name.pb.validate.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/core/v3/resource_name.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ResourceName with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ResourceName) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceName with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ResourceNameMultiError, or
+// nil if none found.
+func (m *ResourceName) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceName) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Id
+
+ // no validation rules for Authority
+
+ if utf8.RuneCountInString(m.GetResourceType()) < 1 {
+ err := ResourceNameValidationError{
+ field: "ResourceType",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetContext()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetContext()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ResourceNameValidationError{
+ field: "Context",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ResourceNameMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceNameMultiError is an error wrapping multiple validation errors
+// returned by ResourceName.ValidateAll() if the designated constraints aren't met.
+type ResourceNameMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceNameMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceNameMultiError) AllErrors() []error { return m }
+
+// ResourceNameValidationError is the validation error returned by
+// ResourceName.Validate if the designated constraints aren't met.
+type ResourceNameValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceNameValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceNameValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceNameValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceNameValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceNameValidationError) ErrorName() string { return "ResourceNameValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ResourceNameValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceName.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceNameValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceNameValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go
new file mode 100644
index 000000000..f929ca637
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.go
@@ -0,0 +1,272 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/data/orca/v3/orca_load_report.proto
+
+package v3
+
+import (
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type OrcaLoadReport struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CpuUtilization float64 `protobuf:"fixed64,1,opt,name=cpu_utilization,json=cpuUtilization,proto3" json:"cpu_utilization,omitempty"`
+ MemUtilization float64 `protobuf:"fixed64,2,opt,name=mem_utilization,json=memUtilization,proto3" json:"mem_utilization,omitempty"`
+ // Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto.
+ Rps uint64 `protobuf:"varint,3,opt,name=rps,proto3" json:"rps,omitempty"`
+ RequestCost map[string]float64 `protobuf:"bytes,4,rep,name=request_cost,json=requestCost,proto3" json:"request_cost,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
+ Utilization map[string]float64 `protobuf:"bytes,5,rep,name=utilization,proto3" json:"utilization,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
+ RpsFractional float64 `protobuf:"fixed64,6,opt,name=rps_fractional,json=rpsFractional,proto3" json:"rps_fractional,omitempty"`
+ Eps float64 `protobuf:"fixed64,7,opt,name=eps,proto3" json:"eps,omitempty"`
+ NamedMetrics map[string]float64 `protobuf:"bytes,8,rep,name=named_metrics,json=namedMetrics,proto3" json:"named_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3"`
+ ApplicationUtilization float64 `protobuf:"fixed64,9,opt,name=application_utilization,json=applicationUtilization,proto3" json:"application_utilization,omitempty"`
+}
+
+func (x *OrcaLoadReport) Reset() {
+ *x = OrcaLoadReport{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OrcaLoadReport) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OrcaLoadReport) ProtoMessage() {}
+
+func (x *OrcaLoadReport) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OrcaLoadReport.ProtoReflect.Descriptor instead.
+func (*OrcaLoadReport) Descriptor() ([]byte, []int) {
+ return file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *OrcaLoadReport) GetCpuUtilization() float64 {
+ if x != nil {
+ return x.CpuUtilization
+ }
+ return 0
+}
+
+func (x *OrcaLoadReport) GetMemUtilization() float64 {
+ if x != nil {
+ return x.MemUtilization
+ }
+ return 0
+}
+
+// Deprecated: Marked as deprecated in xds/data/orca/v3/orca_load_report.proto.
+func (x *OrcaLoadReport) GetRps() uint64 {
+ if x != nil {
+ return x.Rps
+ }
+ return 0
+}
+
+func (x *OrcaLoadReport) GetRequestCost() map[string]float64 {
+ if x != nil {
+ return x.RequestCost
+ }
+ return nil
+}
+
+func (x *OrcaLoadReport) GetUtilization() map[string]float64 {
+ if x != nil {
+ return x.Utilization
+ }
+ return nil
+}
+
+func (x *OrcaLoadReport) GetRpsFractional() float64 {
+ if x != nil {
+ return x.RpsFractional
+ }
+ return 0
+}
+
+func (x *OrcaLoadReport) GetEps() float64 {
+ if x != nil {
+ return x.Eps
+ }
+ return 0
+}
+
+func (x *OrcaLoadReport) GetNamedMetrics() map[string]float64 {
+ if x != nil {
+ return x.NamedMetrics
+ }
+ return nil
+}
+
+func (x *OrcaLoadReport) GetApplicationUtilization() float64 {
+ if x != nil {
+ return x.ApplicationUtilization
+ }
+ return 0
+}
+
+var File_xds_data_orca_v3_orca_load_report_proto protoreflect.FileDescriptor
+
+var file_xds_data_orca_v3_orca_load_report_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f,
+ 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x72, 0x65, 0x70,
+ 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x78, 0x64, 0x73, 0x2e, 0x64,
+ 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa6, 0x06, 0x0a, 0x0e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61,
+ 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x37, 0x0a, 0x0f, 0x63, 0x70, 0x75, 0x5f, 0x75,
+ 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
+ 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x52, 0x0e, 0x63, 0x70, 0x75, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x40, 0x0a, 0x0f, 0x6d, 0x65, 0x6d, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x42, 0x17, 0xfa, 0x42, 0x14, 0x12, 0x12,
+ 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x52, 0x0e, 0x6d, 0x65, 0x6d, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x03, 0x72, 0x70, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x42,
+ 0x02, 0x18, 0x01, 0x52, 0x03, 0x72, 0x70, 0x73, 0x12, 0x54, 0x0a, 0x0c, 0x72, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74,
+ 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0b, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x12, 0x71,
+ 0x0a, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f,
+ 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52,
+ 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42, 0x1c, 0xfa, 0x42, 0x19, 0x9a, 0x01, 0x16, 0x2a, 0x14,
+ 0x12, 0x12, 0x19, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x52, 0x0b, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x12, 0x35, 0x0a, 0x0e, 0x72, 0x70, 0x73, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09,
+ 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x0d, 0x72, 0x70, 0x73, 0x46, 0x72,
+ 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x20, 0x0a, 0x03, 0x65, 0x70, 0x73, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x03, 0x65, 0x70, 0x73, 0x12, 0x57, 0x0a, 0x0d, 0x6e, 0x61,
+ 0x6d, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63,
+ 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70,
+ 0x6f, 0x72, 0x74, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x73, 0x12, 0x47, 0x0a, 0x17, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x75, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09,
+ 0x20, 0x01, 0x28, 0x01, 0x42, 0x0e, 0xfa, 0x42, 0x0b, 0x12, 0x09, 0x29, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x52, 0x16, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, 0x10,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x43, 0x6f, 0x73, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10,
+ 0x55, 0x74, 0x69, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3f, 0x0a, 0x11,
+ 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
+ 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x5d, 0x0a,
+ 0x1b, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x4f, 0x72,
+ 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x64, 0x61, 0x74, 0x61, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_data_orca_v3_orca_load_report_proto_rawDescOnce sync.Once
+ file_xds_data_orca_v3_orca_load_report_proto_rawDescData = file_xds_data_orca_v3_orca_load_report_proto_rawDesc
+)
+
+func file_xds_data_orca_v3_orca_load_report_proto_rawDescGZIP() []byte {
+ file_xds_data_orca_v3_orca_load_report_proto_rawDescOnce.Do(func() {
+ file_xds_data_orca_v3_orca_load_report_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_data_orca_v3_orca_load_report_proto_rawDescData)
+ })
+ return file_xds_data_orca_v3_orca_load_report_proto_rawDescData
+}
+
+var file_xds_data_orca_v3_orca_load_report_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_xds_data_orca_v3_orca_load_report_proto_goTypes = []interface{}{
+ (*OrcaLoadReport)(nil), // 0: xds.data.orca.v3.OrcaLoadReport
+ nil, // 1: xds.data.orca.v3.OrcaLoadReport.RequestCostEntry
+ nil, // 2: xds.data.orca.v3.OrcaLoadReport.UtilizationEntry
+ nil, // 3: xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry
+}
+var file_xds_data_orca_v3_orca_load_report_proto_depIdxs = []int32{
+ 1, // 0: xds.data.orca.v3.OrcaLoadReport.request_cost:type_name -> xds.data.orca.v3.OrcaLoadReport.RequestCostEntry
+ 2, // 1: xds.data.orca.v3.OrcaLoadReport.utilization:type_name -> xds.data.orca.v3.OrcaLoadReport.UtilizationEntry
+ 3, // 2: xds.data.orca.v3.OrcaLoadReport.named_metrics:type_name -> xds.data.orca.v3.OrcaLoadReport.NamedMetricsEntry
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_xds_data_orca_v3_orca_load_report_proto_init() }
+func file_xds_data_orca_v3_orca_load_report_proto_init() {
+ if File_xds_data_orca_v3_orca_load_report_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_data_orca_v3_orca_load_report_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OrcaLoadReport); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_data_orca_v3_orca_load_report_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_data_orca_v3_orca_load_report_proto_goTypes,
+ DependencyIndexes: file_xds_data_orca_v3_orca_load_report_proto_depIdxs,
+ MessageInfos: file_xds_data_orca_v3_orca_load_report_proto_msgTypes,
+ }.Build()
+ File_xds_data_orca_v3_orca_load_report_proto = out.File
+ file_xds_data_orca_v3_orca_load_report_proto_rawDesc = nil
+ file_xds_data_orca_v3_orca_load_report_proto_goTypes = nil
+ file_xds_data_orca_v3_orca_load_report_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go
new file mode 100644
index 000000000..8dd55330a
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/data/orca/v3/orca_load_report.pb.validate.go
@@ -0,0 +1,225 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/data/orca/v3/orca_load_report.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on OrcaLoadReport with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *OrcaLoadReport) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on OrcaLoadReport with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in OrcaLoadReportMultiError,
+// or nil if none found.
+func (m *OrcaLoadReport) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *OrcaLoadReport) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetCpuUtilization() < 0 {
+ err := OrcaLoadReportValidationError{
+ field: "CpuUtilization",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if val := m.GetMemUtilization(); val < 0 || val > 1 {
+ err := OrcaLoadReportValidationError{
+ field: "MemUtilization",
+ reason: "value must be inside range [0, 1]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for Rps
+
+ // no validation rules for RequestCost
+
+ {
+ sorted_keys := make([]string, len(m.GetUtilization()))
+ i := 0
+ for key := range m.GetUtilization() {
+ sorted_keys[i] = key
+ i++
+ }
+ sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
+ for _, key := range sorted_keys {
+ val := m.GetUtilization()[key]
+ _ = val
+
+ // no validation rules for Utilization[key]
+
+ if val := val; val < 0 || val > 1 {
+ err := OrcaLoadReportValidationError{
+ field: fmt.Sprintf("Utilization[%v]", key),
+ reason: "value must be inside range [0, 1]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if m.GetRpsFractional() < 0 {
+ err := OrcaLoadReportValidationError{
+ field: "RpsFractional",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetEps() < 0 {
+ err := OrcaLoadReportValidationError{
+ field: "Eps",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ // no validation rules for NamedMetrics
+
+ if m.GetApplicationUtilization() < 0 {
+ err := OrcaLoadReportValidationError{
+ field: "ApplicationUtilization",
+ reason: "value must be greater than or equal to 0",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return OrcaLoadReportMultiError(errors)
+ }
+
+ return nil
+}
+
+// OrcaLoadReportMultiError is an error wrapping multiple validation errors
+// returned by OrcaLoadReport.ValidateAll() if the designated constraints
+// aren't met.
+type OrcaLoadReportMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m OrcaLoadReportMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m OrcaLoadReportMultiError) AllErrors() []error { return m }
+
+// OrcaLoadReportValidationError is the validation error returned by
+// OrcaLoadReport.Validate if the designated constraints aren't met.
+type OrcaLoadReportValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e OrcaLoadReportValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e OrcaLoadReportValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e OrcaLoadReportValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e OrcaLoadReportValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e OrcaLoadReportValidationError) ErrorName() string { return "OrcaLoadReportValidationError" }
+
+// Error satisfies the builtin error interface
+func (e OrcaLoadReportValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sOrcaLoadReport.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = OrcaLoadReportValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = OrcaLoadReportValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go
new file mode 100644
index 000000000..32e4a37bc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.go
@@ -0,0 +1,182 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/service/orca/v3/orca.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/data/orca/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type OrcaLoadReportRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ReportInterval *durationpb.Duration `protobuf:"bytes,1,opt,name=report_interval,json=reportInterval,proto3" json:"report_interval,omitempty"`
+ RequestCostNames []string `protobuf:"bytes,2,rep,name=request_cost_names,json=requestCostNames,proto3" json:"request_cost_names,omitempty"`
+}
+
+func (x *OrcaLoadReportRequest) Reset() {
+ *x = OrcaLoadReportRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OrcaLoadReportRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OrcaLoadReportRequest) ProtoMessage() {}
+
+func (x *OrcaLoadReportRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_service_orca_v3_orca_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OrcaLoadReportRequest.ProtoReflect.Descriptor instead.
+func (*OrcaLoadReportRequest) Descriptor() ([]byte, []int) {
+ return file_xds_service_orca_v3_orca_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *OrcaLoadReportRequest) GetReportInterval() *durationpb.Duration {
+ if x != nil {
+ return x.ReportInterval
+ }
+ return nil
+}
+
+func (x *OrcaLoadReportRequest) GetRequestCostNames() []string {
+ if x != nil {
+ return x.RequestCostNames
+ }
+ return nil
+}
+
+var File_xds_service_orca_v3_orca_proto protoreflect.FileDescriptor
+
+var file_xds_service_orca_v3_orca_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72,
+ 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72,
+ 0x63, 0x61, 0x2e, 0x76, 0x33, 0x1a, 0x27, 0x78, 0x64, 0x73, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x2f,
+ 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x5f, 0x6c, 0x6f, 0x61,
+ 0x64, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
+ 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x89,
+ 0x01, 0x0a, 0x15, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72,
+ 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x70, 0x6f,
+ 0x72, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65,
+ 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x2c, 0x0a, 0x12,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x73, 0x74, 0x5f, 0x6e, 0x61, 0x6d,
+ 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x43, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x32, 0x75, 0x0a, 0x0e, 0x4f, 0x70,
+ 0x65, 0x6e, 0x52, 0x63, 0x61, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x63, 0x0a, 0x11,
+ 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x43, 0x6f, 0x72, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63,
+ 0x73, 0x12, 0x2a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
+ 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64,
+ 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x6f, 0x72, 0x63, 0x61, 0x2e, 0x76, 0x33,
+ 0x2e, 0x4f, 0x72, 0x63, 0x61, 0x4c, 0x6f, 0x61, 0x64, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x30,
+ 0x01, 0x42, 0x59, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x6f, 0x72, 0x63, 0x61,
+ 0x2e, 0x76, 0x33, 0x42, 0x09, 0x4f, 0x72, 0x63, 0x61, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
+ 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x2f, 0x6f, 0x72, 0x63, 0x61, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_service_orca_v3_orca_proto_rawDescOnce sync.Once
+ file_xds_service_orca_v3_orca_proto_rawDescData = file_xds_service_orca_v3_orca_proto_rawDesc
+)
+
+func file_xds_service_orca_v3_orca_proto_rawDescGZIP() []byte {
+ file_xds_service_orca_v3_orca_proto_rawDescOnce.Do(func() {
+ file_xds_service_orca_v3_orca_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_service_orca_v3_orca_proto_rawDescData)
+ })
+ return file_xds_service_orca_v3_orca_proto_rawDescData
+}
+
+var file_xds_service_orca_v3_orca_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_service_orca_v3_orca_proto_goTypes = []interface{}{
+ (*OrcaLoadReportRequest)(nil), // 0: xds.service.orca.v3.OrcaLoadReportRequest
+ (*durationpb.Duration)(nil), // 1: google.protobuf.Duration
+ (*v3.OrcaLoadReport)(nil), // 2: xds.data.orca.v3.OrcaLoadReport
+}
+var file_xds_service_orca_v3_orca_proto_depIdxs = []int32{
+ 1, // 0: xds.service.orca.v3.OrcaLoadReportRequest.report_interval:type_name -> google.protobuf.Duration
+ 0, // 1: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:input_type -> xds.service.orca.v3.OrcaLoadReportRequest
+ 2, // 2: xds.service.orca.v3.OpenRcaService.StreamCoreMetrics:output_type -> xds.data.orca.v3.OrcaLoadReport
+ 2, // [2:3] is the sub-list for method output_type
+ 1, // [1:2] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_service_orca_v3_orca_proto_init() }
+func file_xds_service_orca_v3_orca_proto_init() {
+ if File_xds_service_orca_v3_orca_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_service_orca_v3_orca_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OrcaLoadReportRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_service_orca_v3_orca_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_xds_service_orca_v3_orca_proto_goTypes,
+ DependencyIndexes: file_xds_service_orca_v3_orca_proto_depIdxs,
+ MessageInfos: file_xds_service_orca_v3_orca_proto_msgTypes,
+ }.Build()
+ File_xds_service_orca_v3_orca_proto = out.File
+ file_xds_service_orca_v3_orca_proto_rawDesc = nil
+ file_xds_service_orca_v3_orca_proto_goTypes = nil
+ file_xds_service_orca_v3_orca_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go
new file mode 100644
index 000000000..8949e8372
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca.pb.validate.go
@@ -0,0 +1,167 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/service/orca/v3/orca.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on OrcaLoadReportRequest with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *OrcaLoadReportRequest) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on OrcaLoadReportRequest with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// OrcaLoadReportRequestMultiError, or nil if none found.
+func (m *OrcaLoadReportRequest) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *OrcaLoadReportRequest) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetReportInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, OrcaLoadReportRequestValidationError{
+ field: "ReportInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, OrcaLoadReportRequestValidationError{
+ field: "ReportInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetReportInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return OrcaLoadReportRequestValidationError{
+ field: "ReportInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return OrcaLoadReportRequestMultiError(errors)
+ }
+
+ return nil
+}
+
+// OrcaLoadReportRequestMultiError is an error wrapping multiple validation
+// errors returned by OrcaLoadReportRequest.ValidateAll() if the designated
+// constraints aren't met.
+type OrcaLoadReportRequestMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m OrcaLoadReportRequestMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m OrcaLoadReportRequestMultiError) AllErrors() []error { return m }
+
+// OrcaLoadReportRequestValidationError is the validation error returned by
+// OrcaLoadReportRequest.Validate if the designated constraints aren't met.
+type OrcaLoadReportRequestValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e OrcaLoadReportRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e OrcaLoadReportRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e OrcaLoadReportRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e OrcaLoadReportRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e OrcaLoadReportRequestValidationError) ErrorName() string {
+ return "OrcaLoadReportRequestValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e OrcaLoadReportRequestValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sOrcaLoadReportRequest.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = OrcaLoadReportRequestValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = OrcaLoadReportRequestValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go
new file mode 100644
index 000000000..8a92439e0
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/service/orca/v3/orca_grpc.pb.go
@@ -0,0 +1,135 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.3.0
+// - protoc v5.29.1
+// source: xds/service/orca/v3/orca.proto
+
+package v3
+
+import (
+ context "context"
+ v3 "github.com/cncf/xds/go/xds/data/orca/v3"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+const (
+ OpenRcaService_StreamCoreMetrics_FullMethodName = "/xds.service.orca.v3.OpenRcaService/StreamCoreMetrics"
+)
+
+// OpenRcaServiceClient is the client API for OpenRcaService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type OpenRcaServiceClient interface {
+ StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error)
+}
+
+type openRcaServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewOpenRcaServiceClient(cc grpc.ClientConnInterface) OpenRcaServiceClient {
+ return &openRcaServiceClient{cc}
+}
+
+func (c *openRcaServiceClient) StreamCoreMetrics(ctx context.Context, in *OrcaLoadReportRequest, opts ...grpc.CallOption) (OpenRcaService_StreamCoreMetricsClient, error) {
+ stream, err := c.cc.NewStream(ctx, &OpenRcaService_ServiceDesc.Streams[0], OpenRcaService_StreamCoreMetrics_FullMethodName, opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &openRcaServiceStreamCoreMetricsClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type OpenRcaService_StreamCoreMetricsClient interface {
+ Recv() (*v3.OrcaLoadReport, error)
+ grpc.ClientStream
+}
+
+type openRcaServiceStreamCoreMetricsClient struct {
+ grpc.ClientStream
+}
+
+func (x *openRcaServiceStreamCoreMetricsClient) Recv() (*v3.OrcaLoadReport, error) {
+ m := new(v3.OrcaLoadReport)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// OpenRcaServiceServer is the server API for OpenRcaService service.
+// All implementations should embed UnimplementedOpenRcaServiceServer
+// for forward compatibility
+type OpenRcaServiceServer interface {
+ StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error
+}
+
+// UnimplementedOpenRcaServiceServer should be embedded to have forward compatible implementations.
+type UnimplementedOpenRcaServiceServer struct {
+}
+
+func (UnimplementedOpenRcaServiceServer) StreamCoreMetrics(*OrcaLoadReportRequest, OpenRcaService_StreamCoreMetricsServer) error {
+ return status.Errorf(codes.Unimplemented, "method StreamCoreMetrics not implemented")
+}
+
+// UnsafeOpenRcaServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to OpenRcaServiceServer will
+// result in compilation errors.
+type UnsafeOpenRcaServiceServer interface {
+ mustEmbedUnimplementedOpenRcaServiceServer()
+}
+
+func RegisterOpenRcaServiceServer(s grpc.ServiceRegistrar, srv OpenRcaServiceServer) {
+ s.RegisterService(&OpenRcaService_ServiceDesc, srv)
+}
+
+func _OpenRcaService_StreamCoreMetrics_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(OrcaLoadReportRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(OpenRcaServiceServer).StreamCoreMetrics(m, &openRcaServiceStreamCoreMetricsServer{stream})
+}
+
+type OpenRcaService_StreamCoreMetricsServer interface {
+ Send(*v3.OrcaLoadReport) error
+ grpc.ServerStream
+}
+
+type openRcaServiceStreamCoreMetricsServer struct {
+ grpc.ServerStream
+}
+
+func (x *openRcaServiceStreamCoreMetricsServer) Send(m *v3.OrcaLoadReport) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// OpenRcaService_ServiceDesc is the grpc.ServiceDesc for OpenRcaService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var OpenRcaService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "xds.service.orca.v3.OpenRcaService",
+ HandlerType: (*OpenRcaServiceServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "StreamCoreMetrics",
+ Handler: _OpenRcaService_StreamCoreMetrics_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "xds/service/orca/v3/orca.proto",
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go
new file mode 100644
index 000000000..1bd4aaf60
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.go
@@ -0,0 +1,168 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/cel.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CelMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ExprMatch *v3.CelExpression `protobuf:"bytes,1,opt,name=expr_match,json=exprMatch,proto3" json:"expr_match,omitempty"`
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *CelMatcher) Reset() {
+ *x = CelMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_cel_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CelMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CelMatcher) ProtoMessage() {}
+
+func (x *CelMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_cel_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CelMatcher.ProtoReflect.Descriptor instead.
+func (*CelMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_cel_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CelMatcher) GetExprMatch() *v3.CelExpression {
+ if x != nil {
+ return x.ExprMatch
+ }
+ return nil
+}
+
+func (x *CelMatcher) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+var File_xds_type_matcher_v3_cel_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_cel_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x63, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x73, 0x0a, 0x0a, 0x43, 0x65, 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x12, 0x43, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x65, 0x78,
+ 0x70, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x58, 0x0a, 0x1e, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_cel_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_cel_proto_rawDescData = file_xds_type_matcher_v3_cel_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_cel_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_cel_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_cel_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_cel_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_cel_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_type_matcher_v3_cel_proto_goTypes = []interface{}{
+ (*CelMatcher)(nil), // 0: xds.type.matcher.v3.CelMatcher
+ (*v3.CelExpression)(nil), // 1: xds.type.v3.CelExpression
+}
+var file_xds_type_matcher_v3_cel_proto_depIdxs = []int32{
+ 1, // 0: xds.type.matcher.v3.CelMatcher.expr_match:type_name -> xds.type.v3.CelExpression
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_cel_proto_init() }
+func file_xds_type_matcher_v3_cel_proto_init() {
+ if File_xds_type_matcher_v3_cel_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_cel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CelMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_cel_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_cel_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_cel_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_cel_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_cel_proto = out.File
+ file_xds_type_matcher_v3_cel_proto_rawDesc = nil
+ file_xds_type_matcher_v3_cel_proto_goTypes = nil
+ file_xds_type_matcher_v3_cel_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go
new file mode 100644
index 000000000..091267b0c
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/cel.pb.validate.go
@@ -0,0 +1,177 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/cel.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on CelMatcher with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *CelMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CelMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CelMatcherMultiError, or
+// nil if none found.
+func (m *CelMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CelMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetExprMatch() == nil {
+ err := CelMatcherValidationError{
+ field: "ExprMatch",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetExprMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelMatcherValidationError{
+ field: "ExprMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelMatcherValidationError{
+ field: "ExprMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExprMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelMatcherValidationError{
+ field: "ExprMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Description
+
+ if len(errors) > 0 {
+ return CelMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// CelMatcherMultiError is an error wrapping multiple validation errors
+// returned by CelMatcher.ValidateAll() if the designated constraints aren't met.
+type CelMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CelMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CelMatcherMultiError) AllErrors() []error { return m }
+
+// CelMatcherValidationError is the validation error returned by
+// CelMatcher.Validate if the designated constraints aren't met.
+type CelMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CelMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CelMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CelMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CelMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CelMatcherValidationError) ErrorName() string { return "CelMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CelMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCelMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CelMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CelMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go
new file mode 100644
index 000000000..3053b35f9
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.go
@@ -0,0 +1,242 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/domain.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ServerNameMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ DomainMatchers []*ServerNameMatcher_DomainMatcher `protobuf:"bytes,1,rep,name=domain_matchers,json=domainMatchers,proto3" json:"domain_matchers,omitempty"`
+}
+
+func (x *ServerNameMatcher) Reset() {
+ *x = ServerNameMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServerNameMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerNameMatcher) ProtoMessage() {}
+
+func (x *ServerNameMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerNameMatcher.ProtoReflect.Descriptor instead.
+func (*ServerNameMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_domain_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ServerNameMatcher) GetDomainMatchers() []*ServerNameMatcher_DomainMatcher {
+ if x != nil {
+ return x.DomainMatchers
+ }
+ return nil
+}
+
+type ServerNameMatcher_DomainMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Domains []string `protobuf:"bytes,1,rep,name=domains,proto3" json:"domains,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *ServerNameMatcher_DomainMatcher) Reset() {
+ *x = ServerNameMatcher_DomainMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServerNameMatcher_DomainMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerNameMatcher_DomainMatcher) ProtoMessage() {}
+
+func (x *ServerNameMatcher_DomainMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_domain_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerNameMatcher_DomainMatcher.ProtoReflect.Descriptor instead.
+func (*ServerNameMatcher_DomainMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_domain_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *ServerNameMatcher_DomainMatcher) GetDomains() []string {
+ if x != nil {
+ return x.Domains
+ }
+ return nil
+}
+
+func (x *ServerNameMatcher_DomainMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+var File_xds_type_matcher_v3_domain_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_domain_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe8, 0x01, 0x0a, 0x11, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x4e,
+ 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x0f, 0x64, 0x6f,
+ 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x44, 0x6f, 0x6d, 0x61,
+ 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0e, 0x64, 0x6f, 0x6d, 0x61, 0x69,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x74, 0x0a, 0x0d, 0x44, 0x6f, 0x6d,
+ 0x61, 0x69, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x22, 0x0a, 0x07, 0x64, 0x6f,
+ 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x08, 0xfa, 0x42, 0x05,
+ 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x07, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x73, 0x12, 0x3f,
+ 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f,
+ 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42,
+ 0x6e, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
+ 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x16, 0x53, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_domain_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_domain_proto_rawDescData = file_xds_type_matcher_v3_domain_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_domain_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_domain_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_domain_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_domain_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_domain_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_domain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_matcher_v3_domain_proto_goTypes = []interface{}{
+ (*ServerNameMatcher)(nil), // 0: xds.type.matcher.v3.ServerNameMatcher
+ (*ServerNameMatcher_DomainMatcher)(nil), // 1: xds.type.matcher.v3.ServerNameMatcher.DomainMatcher
+ (*Matcher_OnMatch)(nil), // 2: xds.type.matcher.v3.Matcher.OnMatch
+}
+var file_xds_type_matcher_v3_domain_proto_depIdxs = []int32{
+ 1, // 0: xds.type.matcher.v3.ServerNameMatcher.domain_matchers:type_name -> xds.type.matcher.v3.ServerNameMatcher.DomainMatcher
+ 2, // 1: xds.type.matcher.v3.ServerNameMatcher.DomainMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_domain_proto_init() }
+func file_xds_type_matcher_v3_domain_proto_init() {
+ if File_xds_type_matcher_v3_domain_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_matcher_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_domain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServerNameMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_domain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServerNameMatcher_DomainMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_domain_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_domain_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_domain_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_domain_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_domain_proto = out.File
+ file_xds_type_matcher_v3_domain_proto_rawDesc = nil
+ file_xds_type_matcher_v3_domain_proto_goTypes = nil
+ file_xds_type_matcher_v3_domain_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go
new file mode 100644
index 000000000..e95bdfa28
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/domain.pb.validate.go
@@ -0,0 +1,315 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/domain.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ServerNameMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ServerNameMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ServerNameMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ServerNameMatcherMultiError, or nil if none found.
+func (m *ServerNameMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ServerNameMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetDomainMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerNameMatcherValidationError{
+ field: fmt.Sprintf("DomainMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerNameMatcherValidationError{
+ field: fmt.Sprintf("DomainMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerNameMatcherValidationError{
+ field: fmt.Sprintf("DomainMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ServerNameMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// ServerNameMatcherMultiError is an error wrapping multiple validation errors
+// returned by ServerNameMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type ServerNameMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ServerNameMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ServerNameMatcherMultiError) AllErrors() []error { return m }
+
+// ServerNameMatcherValidationError is the validation error returned by
+// ServerNameMatcher.Validate if the designated constraints aren't met.
+type ServerNameMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ServerNameMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ServerNameMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ServerNameMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ServerNameMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ServerNameMatcherValidationError) ErrorName() string {
+ return "ServerNameMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ServerNameMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sServerNameMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ServerNameMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ServerNameMatcherValidationError{}
+
+// Validate checks the field values on ServerNameMatcher_DomainMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ServerNameMatcher_DomainMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ServerNameMatcher_DomainMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// ServerNameMatcher_DomainMatcherMultiError, or nil if none found.
+func (m *ServerNameMatcher_DomainMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ServerNameMatcher_DomainMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetDomains()) < 1 {
+ err := ServerNameMatcher_DomainMatcherValidationError{
+ field: "Domains",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerNameMatcher_DomainMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerNameMatcher_DomainMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerNameMatcher_DomainMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ServerNameMatcher_DomainMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// ServerNameMatcher_DomainMatcherMultiError is an error wrapping multiple
+// validation errors returned by ServerNameMatcher_DomainMatcher.ValidateAll()
+// if the designated constraints aren't met.
+type ServerNameMatcher_DomainMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ServerNameMatcher_DomainMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ServerNameMatcher_DomainMatcherMultiError) AllErrors() []error { return m }
+
+// ServerNameMatcher_DomainMatcherValidationError is the validation error
+// returned by ServerNameMatcher_DomainMatcher.Validate if the designated
+// constraints aren't met.
+type ServerNameMatcher_DomainMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ServerNameMatcher_DomainMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ServerNameMatcher_DomainMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ServerNameMatcher_DomainMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ServerNameMatcher_DomainMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ServerNameMatcher_DomainMatcherValidationError) ErrorName() string {
+ return "ServerNameMatcher_DomainMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ServerNameMatcher_DomainMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sServerNameMatcher_DomainMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ServerNameMatcher_DomainMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ServerNameMatcher_DomainMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go
new file mode 100644
index 000000000..eedcacec6
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.go
@@ -0,0 +1,140 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/http_inputs.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type HttpAttributesCelMatchInput struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *HttpAttributesCelMatchInput) Reset() {
+ *x = HttpAttributesCelMatchInput{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HttpAttributesCelMatchInput) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HttpAttributesCelMatchInput) ProtoMessage() {}
+
+func (x *HttpAttributesCelMatchInput) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HttpAttributesCelMatchInput.ProtoReflect.Descriptor instead.
+func (*HttpAttributesCelMatchInput) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_http_inputs_proto_rawDescGZIP(), []int{0}
+}
+
+var File_xds_type_matcher_v3_http_inputs_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_http_inputs_proto_rawDesc = []byte{
+ 0x0a, 0x25, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x5f, 0x69, 0x6e, 0x70, 0x75, 0x74,
+ 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x22, 0x1d, 0x0a, 0x1b,
+ 0x48, 0x74, 0x74, 0x70, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x65,
+ 0x6c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x42, 0x5f, 0x0a, 0x1e, 0x63,
+ 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x48,
+ 0x74, 0x74, 0x70, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
+ 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70,
+ 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_http_inputs_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_http_inputs_proto_rawDescData = file_xds_type_matcher_v3_http_inputs_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_http_inputs_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_http_inputs_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_http_inputs_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_http_inputs_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_http_inputs_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_http_inputs_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_type_matcher_v3_http_inputs_proto_goTypes = []interface{}{
+ (*HttpAttributesCelMatchInput)(nil), // 0: xds.type.matcher.v3.HttpAttributesCelMatchInput
+}
+var file_xds_type_matcher_v3_http_inputs_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_http_inputs_proto_init() }
+func file_xds_type_matcher_v3_http_inputs_proto_init() {
+ if File_xds_type_matcher_v3_http_inputs_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_http_inputs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HttpAttributesCelMatchInput); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_http_inputs_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_http_inputs_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_http_inputs_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_http_inputs_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_http_inputs_proto = out.File
+ file_xds_type_matcher_v3_http_inputs_proto_rawDesc = nil
+ file_xds_type_matcher_v3_http_inputs_proto_goTypes = nil
+ file_xds_type_matcher_v3_http_inputs_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go
new file mode 100644
index 000000000..5d8742927
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/http_inputs.pb.validate.go
@@ -0,0 +1,139 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/http_inputs.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on HttpAttributesCelMatchInput with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *HttpAttributesCelMatchInput) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HttpAttributesCelMatchInput with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HttpAttributesCelMatchInputMultiError, or nil if none found.
+func (m *HttpAttributesCelMatchInput) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HttpAttributesCelMatchInput) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return HttpAttributesCelMatchInputMultiError(errors)
+ }
+
+ return nil
+}
+
+// HttpAttributesCelMatchInputMultiError is an error wrapping multiple
+// validation errors returned by HttpAttributesCelMatchInput.ValidateAll() if
+// the designated constraints aren't met.
+type HttpAttributesCelMatchInputMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HttpAttributesCelMatchInputMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HttpAttributesCelMatchInputMultiError) AllErrors() []error { return m }
+
+// HttpAttributesCelMatchInputValidationError is the validation error returned
+// by HttpAttributesCelMatchInput.Validate if the designated constraints
+// aren't met.
+type HttpAttributesCelMatchInputValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HttpAttributesCelMatchInputValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HttpAttributesCelMatchInputValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HttpAttributesCelMatchInputValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HttpAttributesCelMatchInputValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HttpAttributesCelMatchInputValidationError) ErrorName() string {
+ return "HttpAttributesCelMatchInputValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e HttpAttributesCelMatchInputValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHttpAttributesCelMatchInput.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HttpAttributesCelMatchInputValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HttpAttributesCelMatchInputValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go
new file mode 100644
index 000000000..6facd7aeb
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.go
@@ -0,0 +1,256 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/ip.proto
+
+package v3
+
+import (
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type IPMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RangeMatchers []*IPMatcher_IPRangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
+}
+
+func (x *IPMatcher) Reset() {
+ *x = IPMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IPMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IPMatcher) ProtoMessage() {}
+
+func (x *IPMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IPMatcher.ProtoReflect.Descriptor instead.
+func (*IPMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *IPMatcher) GetRangeMatchers() []*IPMatcher_IPRangeMatcher {
+ if x != nil {
+ return x.RangeMatchers
+ }
+ return nil
+}
+
+type IPMatcher_IPRangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ranges []*v3.CidrRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+ Exclusive bool `protobuf:"varint,3,opt,name=exclusive,proto3" json:"exclusive,omitempty"`
+}
+
+func (x *IPMatcher_IPRangeMatcher) Reset() {
+ *x = IPMatcher_IPRangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *IPMatcher_IPRangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*IPMatcher_IPRangeMatcher) ProtoMessage() {}
+
+func (x *IPMatcher_IPRangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_ip_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use IPMatcher_IPRangeMatcher.ProtoReflect.Descriptor instead.
+func (*IPMatcher_IPRangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_ip_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *IPMatcher_IPRangeMatcher) GetRanges() []*v3.CidrRange {
+ if x != nil {
+ return x.Ranges
+ }
+ return nil
+}
+
+func (x *IPMatcher_IPRangeMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+func (x *IPMatcher_IPRangeMatcher) GetExclusive() bool {
+ if x != nil {
+ return x.Exclusive
+ }
+ return false
+}
+
+var File_xds_type_matcher_v3_ip_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_ip_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x69, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x13,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76,
+ 0x33, 0x2f, 0x63, 0x69, 0x64, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x78, 0x64,
+ 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76,
+ 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x02, 0x0a, 0x09, 0x49, 0x50, 0x4d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x49,
+ 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0xa9, 0x01, 0x0a,
+ 0x0e, 0x49, 0x50, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12,
+ 0x38, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x16, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69,
+ 0x64, 0x72, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08,
+ 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65,
+ 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x42, 0x66, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02,
+ 0x08, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78,
+ 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x42, 0x0e, 0x49, 0x50, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f,
+ 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_ip_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_ip_proto_rawDescData = file_xds_type_matcher_v3_ip_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_ip_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_ip_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_ip_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_ip_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_ip_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_ip_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_matcher_v3_ip_proto_goTypes = []interface{}{
+ (*IPMatcher)(nil), // 0: xds.type.matcher.v3.IPMatcher
+ (*IPMatcher_IPRangeMatcher)(nil), // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher
+ (*v3.CidrRange)(nil), // 2: xds.core.v3.CidrRange
+ (*Matcher_OnMatch)(nil), // 3: xds.type.matcher.v3.Matcher.OnMatch
+}
+var file_xds_type_matcher_v3_ip_proto_depIdxs = []int32{
+ 1, // 0: xds.type.matcher.v3.IPMatcher.range_matchers:type_name -> xds.type.matcher.v3.IPMatcher.IPRangeMatcher
+ 2, // 1: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.ranges:type_name -> xds.core.v3.CidrRange
+ 3, // 2: xds.type.matcher.v3.IPMatcher.IPRangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_ip_proto_init() }
+func file_xds_type_matcher_v3_ip_proto_init() {
+ if File_xds_type_matcher_v3_ip_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_matcher_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_ip_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IPMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_ip_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*IPMatcher_IPRangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_ip_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_ip_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_ip_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_ip_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_ip_proto = out.File
+ file_xds_type_matcher_v3_ip_proto_rawDesc = nil
+ file_xds_type_matcher_v3_ip_proto_goTypes = nil
+ file_xds_type_matcher_v3_ip_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go
new file mode 100644
index 000000000..c1fca03bc
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/ip.pb.validate.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/ip.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on IPMatcher with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *IPMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on IPMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in IPMatcherMultiError, or nil
+// if none found.
+func (m *IPMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *IPMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetRangeMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, IPMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, IPMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return IPMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return IPMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// IPMatcherMultiError is an error wrapping multiple validation errors returned
+// by IPMatcher.ValidateAll() if the designated constraints aren't met.
+type IPMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m IPMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m IPMatcherMultiError) AllErrors() []error { return m }
+
+// IPMatcherValidationError is the validation error returned by
+// IPMatcher.Validate if the designated constraints aren't met.
+type IPMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e IPMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e IPMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e IPMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e IPMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e IPMatcherValidationError) ErrorName() string { return "IPMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e IPMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sIPMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = IPMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = IPMatcherValidationError{}
+
+// Validate checks the field values on IPMatcher_IPRangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *IPMatcher_IPRangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on IPMatcher_IPRangeMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// IPMatcher_IPRangeMatcherMultiError, or nil if none found.
+func (m *IPMatcher_IPRangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *IPMatcher_IPRangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetRanges()) < 1 {
+ err := IPMatcher_IPRangeMatcherValidationError{
+ field: "Ranges",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRanges() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, IPMatcher_IPRangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, IPMatcher_IPRangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return IPMatcher_IPRangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, IPMatcher_IPRangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, IPMatcher_IPRangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return IPMatcher_IPRangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Exclusive
+
+ if len(errors) > 0 {
+ return IPMatcher_IPRangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// IPMatcher_IPRangeMatcherMultiError is an error wrapping multiple validation
+// errors returned by IPMatcher_IPRangeMatcher.ValidateAll() if the designated
+// constraints aren't met.
+type IPMatcher_IPRangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m IPMatcher_IPRangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m IPMatcher_IPRangeMatcherMultiError) AllErrors() []error { return m }
+
+// IPMatcher_IPRangeMatcherValidationError is the validation error returned by
+// IPMatcher_IPRangeMatcher.Validate if the designated constraints aren't met.
+type IPMatcher_IPRangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e IPMatcher_IPRangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e IPMatcher_IPRangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e IPMatcher_IPRangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e IPMatcher_IPRangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e IPMatcher_IPRangeMatcherValidationError) ErrorName() string {
+ return "IPMatcher_IPRangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e IPMatcher_IPRangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sIPMatcher_IPRangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = IPMatcher_IPRangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = IPMatcher_IPRangeMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go
new file mode 100644
index 000000000..1d1607b9c
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.go
@@ -0,0 +1,1067 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/matcher.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Matcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatcherType:
+ //
+ // *Matcher_MatcherList_
+ // *Matcher_MatcherTree_
+ MatcherType isMatcher_MatcherType `protobuf_oneof:"matcher_type"`
+ OnNoMatch *Matcher_OnMatch `protobuf:"bytes,3,opt,name=on_no_match,json=onNoMatch,proto3" json:"on_no_match,omitempty"`
+}
+
+func (x *Matcher) Reset() {
+ *x = Matcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher) ProtoMessage() {}
+
+func (x *Matcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher.ProtoReflect.Descriptor instead.
+func (*Matcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *Matcher) GetMatcherType() isMatcher_MatcherType {
+ if m != nil {
+ return m.MatcherType
+ }
+ return nil
+}
+
+func (x *Matcher) GetMatcherList() *Matcher_MatcherList {
+ if x, ok := x.GetMatcherType().(*Matcher_MatcherList_); ok {
+ return x.MatcherList
+ }
+ return nil
+}
+
+func (x *Matcher) GetMatcherTree() *Matcher_MatcherTree {
+ if x, ok := x.GetMatcherType().(*Matcher_MatcherTree_); ok {
+ return x.MatcherTree
+ }
+ return nil
+}
+
+func (x *Matcher) GetOnNoMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnNoMatch
+ }
+ return nil
+}
+
+type isMatcher_MatcherType interface {
+ isMatcher_MatcherType()
+}
+
+type Matcher_MatcherList_ struct {
+ MatcherList *Matcher_MatcherList `protobuf:"bytes,1,opt,name=matcher_list,json=matcherList,proto3,oneof"`
+}
+
+type Matcher_MatcherTree_ struct {
+ MatcherTree *Matcher_MatcherTree `protobuf:"bytes,2,opt,name=matcher_tree,json=matcherTree,proto3,oneof"`
+}
+
+func (*Matcher_MatcherList_) isMatcher_MatcherType() {}
+
+func (*Matcher_MatcherTree_) isMatcher_MatcherType() {}
+
+type Matcher_OnMatch struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to OnMatch:
+ //
+ // *Matcher_OnMatch_Matcher
+ // *Matcher_OnMatch_Action
+ OnMatch isMatcher_OnMatch_OnMatch `protobuf_oneof:"on_match"`
+ KeepMatching bool `protobuf:"varint,3,opt,name=keep_matching,json=keepMatching,proto3" json:"keep_matching,omitempty"`
+}
+
+func (x *Matcher_OnMatch) Reset() {
+ *x = Matcher_OnMatch{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_OnMatch) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_OnMatch) ProtoMessage() {}
+
+func (x *Matcher_OnMatch) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_OnMatch.ProtoReflect.Descriptor instead.
+func (*Matcher_OnMatch) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (m *Matcher_OnMatch) GetOnMatch() isMatcher_OnMatch_OnMatch {
+ if m != nil {
+ return m.OnMatch
+ }
+ return nil
+}
+
+func (x *Matcher_OnMatch) GetMatcher() *Matcher {
+ if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Matcher); ok {
+ return x.Matcher
+ }
+ return nil
+}
+
+func (x *Matcher_OnMatch) GetAction() *v3.TypedExtensionConfig {
+ if x, ok := x.GetOnMatch().(*Matcher_OnMatch_Action); ok {
+ return x.Action
+ }
+ return nil
+}
+
+func (x *Matcher_OnMatch) GetKeepMatching() bool {
+ if x != nil {
+ return x.KeepMatching
+ }
+ return false
+}
+
+type isMatcher_OnMatch_OnMatch interface {
+ isMatcher_OnMatch_OnMatch()
+}
+
+type Matcher_OnMatch_Matcher struct {
+ Matcher *Matcher `protobuf:"bytes,1,opt,name=matcher,proto3,oneof"`
+}
+
+type Matcher_OnMatch_Action struct {
+ Action *v3.TypedExtensionConfig `protobuf:"bytes,2,opt,name=action,proto3,oneof"`
+}
+
+func (*Matcher_OnMatch_Matcher) isMatcher_OnMatch_OnMatch() {}
+
+func (*Matcher_OnMatch_Action) isMatcher_OnMatch_OnMatch() {}
+
+type Matcher_MatcherList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Matchers []*Matcher_MatcherList_FieldMatcher `protobuf:"bytes,1,rep,name=matchers,proto3" json:"matchers,omitempty"`
+}
+
+func (x *Matcher_MatcherList) Reset() {
+ *x = Matcher_MatcherList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList) ProtoMessage() {}
+
+func (x *Matcher_MatcherList) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *Matcher_MatcherList) GetMatchers() []*Matcher_MatcherList_FieldMatcher {
+ if x != nil {
+ return x.Matchers
+ }
+ return nil
+}
+
+type Matcher_MatcherTree struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Input *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"`
+ // Types that are assignable to TreeType:
+ //
+ // *Matcher_MatcherTree_ExactMatchMap
+ // *Matcher_MatcherTree_PrefixMatchMap
+ // *Matcher_MatcherTree_CustomMatch
+ TreeType isMatcher_MatcherTree_TreeType `protobuf_oneof:"tree_type"`
+}
+
+func (x *Matcher_MatcherTree) Reset() {
+ *x = Matcher_MatcherTree{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherTree) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherTree) ProtoMessage() {}
+
+func (x *Matcher_MatcherTree) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherTree.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherTree) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *Matcher_MatcherTree) GetInput() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.Input
+ }
+ return nil
+}
+
+func (m *Matcher_MatcherTree) GetTreeType() isMatcher_MatcherTree_TreeType {
+ if m != nil {
+ return m.TreeType
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherTree) GetExactMatchMap() *Matcher_MatcherTree_MatchMap {
+ if x, ok := x.GetTreeType().(*Matcher_MatcherTree_ExactMatchMap); ok {
+ return x.ExactMatchMap
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherTree) GetPrefixMatchMap() *Matcher_MatcherTree_MatchMap {
+ if x, ok := x.GetTreeType().(*Matcher_MatcherTree_PrefixMatchMap); ok {
+ return x.PrefixMatchMap
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherTree) GetCustomMatch() *v3.TypedExtensionConfig {
+ if x, ok := x.GetTreeType().(*Matcher_MatcherTree_CustomMatch); ok {
+ return x.CustomMatch
+ }
+ return nil
+}
+
+type isMatcher_MatcherTree_TreeType interface {
+ isMatcher_MatcherTree_TreeType()
+}
+
+type Matcher_MatcherTree_ExactMatchMap struct {
+ ExactMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,2,opt,name=exact_match_map,json=exactMatchMap,proto3,oneof"`
+}
+
+type Matcher_MatcherTree_PrefixMatchMap struct {
+ PrefixMatchMap *Matcher_MatcherTree_MatchMap `protobuf:"bytes,3,opt,name=prefix_match_map,json=prefixMatchMap,proto3,oneof"`
+}
+
+type Matcher_MatcherTree_CustomMatch struct {
+ CustomMatch *v3.TypedExtensionConfig `protobuf:"bytes,4,opt,name=custom_match,json=customMatch,proto3,oneof"`
+}
+
+func (*Matcher_MatcherTree_ExactMatchMap) isMatcher_MatcherTree_TreeType() {}
+
+func (*Matcher_MatcherTree_PrefixMatchMap) isMatcher_MatcherTree_TreeType() {}
+
+func (*Matcher_MatcherTree_CustomMatch) isMatcher_MatcherTree_TreeType() {}
+
+type Matcher_MatcherList_Predicate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatchType:
+ //
+ // *Matcher_MatcherList_Predicate_SinglePredicate_
+ // *Matcher_MatcherList_Predicate_OrMatcher
+ // *Matcher_MatcherList_Predicate_AndMatcher
+ // *Matcher_MatcherList_Predicate_NotMatcher
+ MatchType isMatcher_MatcherList_Predicate_MatchType `protobuf_oneof:"match_type"`
+}
+
+func (x *Matcher_MatcherList_Predicate) Reset() {
+ *x = Matcher_MatcherList_Predicate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList_Predicate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList_Predicate) ProtoMessage() {}
+
+func (x *Matcher_MatcherList_Predicate) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList_Predicate.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList_Predicate) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0}
+}
+
+func (m *Matcher_MatcherList_Predicate) GetMatchType() isMatcher_MatcherList_Predicate_MatchType {
+ if m != nil {
+ return m.MatchType
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate) GetSinglePredicate() *Matcher_MatcherList_Predicate_SinglePredicate {
+ if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_SinglePredicate_); ok {
+ return x.SinglePredicate
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate) GetOrMatcher() *Matcher_MatcherList_Predicate_PredicateList {
+ if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_OrMatcher); ok {
+ return x.OrMatcher
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate) GetAndMatcher() *Matcher_MatcherList_Predicate_PredicateList {
+ if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_AndMatcher); ok {
+ return x.AndMatcher
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate) GetNotMatcher() *Matcher_MatcherList_Predicate {
+ if x, ok := x.GetMatchType().(*Matcher_MatcherList_Predicate_NotMatcher); ok {
+ return x.NotMatcher
+ }
+ return nil
+}
+
+type isMatcher_MatcherList_Predicate_MatchType interface {
+ isMatcher_MatcherList_Predicate_MatchType()
+}
+
+type Matcher_MatcherList_Predicate_SinglePredicate_ struct {
+ SinglePredicate *Matcher_MatcherList_Predicate_SinglePredicate `protobuf:"bytes,1,opt,name=single_predicate,json=singlePredicate,proto3,oneof"`
+}
+
+type Matcher_MatcherList_Predicate_OrMatcher struct {
+ OrMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,2,opt,name=or_matcher,json=orMatcher,proto3,oneof"`
+}
+
+type Matcher_MatcherList_Predicate_AndMatcher struct {
+ AndMatcher *Matcher_MatcherList_Predicate_PredicateList `protobuf:"bytes,3,opt,name=and_matcher,json=andMatcher,proto3,oneof"`
+}
+
+type Matcher_MatcherList_Predicate_NotMatcher struct {
+ NotMatcher *Matcher_MatcherList_Predicate `protobuf:"bytes,4,opt,name=not_matcher,json=notMatcher,proto3,oneof"`
+}
+
+func (*Matcher_MatcherList_Predicate_SinglePredicate_) isMatcher_MatcherList_Predicate_MatchType() {}
+
+func (*Matcher_MatcherList_Predicate_OrMatcher) isMatcher_MatcherList_Predicate_MatchType() {}
+
+func (*Matcher_MatcherList_Predicate_AndMatcher) isMatcher_MatcherList_Predicate_MatchType() {}
+
+func (*Matcher_MatcherList_Predicate_NotMatcher) isMatcher_MatcherList_Predicate_MatchType() {}
+
+type Matcher_MatcherList_FieldMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Predicate *Matcher_MatcherList_Predicate `protobuf:"bytes,1,opt,name=predicate,proto3" json:"predicate,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *Matcher_MatcherList_FieldMatcher) Reset() {
+ *x = Matcher_MatcherList_FieldMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList_FieldMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList_FieldMatcher) ProtoMessage() {}
+
+func (x *Matcher_MatcherList_FieldMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList_FieldMatcher.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList_FieldMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 1}
+}
+
+func (x *Matcher_MatcherList_FieldMatcher) GetPredicate() *Matcher_MatcherList_Predicate {
+ if x != nil {
+ return x.Predicate
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_FieldMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+type Matcher_MatcherList_Predicate_SinglePredicate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Input *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=input,proto3" json:"input,omitempty"`
+ // Types that are assignable to Matcher:
+ //
+ // *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch
+ // *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch
+ Matcher isMatcher_MatcherList_Predicate_SinglePredicate_Matcher `protobuf_oneof:"matcher"`
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) Reset() {
+ *x = Matcher_MatcherList_Predicate_SinglePredicate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList_Predicate_SinglePredicate) ProtoMessage() {}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList_Predicate_SinglePredicate.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList_Predicate_SinglePredicate) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 0}
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetInput() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.Input
+ }
+ return nil
+}
+
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) GetMatcher() isMatcher_MatcherList_Predicate_SinglePredicate_Matcher {
+ if m != nil {
+ return m.Matcher
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetValueMatch() *StringMatcher {
+ if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch); ok {
+ return x.ValueMatch
+ }
+ return nil
+}
+
+func (x *Matcher_MatcherList_Predicate_SinglePredicate) GetCustomMatch() *v3.TypedExtensionConfig {
+ if x, ok := x.GetMatcher().(*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch); ok {
+ return x.CustomMatch
+ }
+ return nil
+}
+
+type isMatcher_MatcherList_Predicate_SinglePredicate_Matcher interface {
+ isMatcher_MatcherList_Predicate_SinglePredicate_Matcher()
+}
+
+type Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch struct {
+ ValueMatch *StringMatcher `protobuf:"bytes,2,opt,name=value_match,json=valueMatch,proto3,oneof"`
+}
+
+type Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch struct {
+ CustomMatch *v3.TypedExtensionConfig `protobuf:"bytes,3,opt,name=custom_match,json=customMatch,proto3,oneof"`
+}
+
+func (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() {
+}
+
+func (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch) isMatcher_MatcherList_Predicate_SinglePredicate_Matcher() {
+}
+
+type Matcher_MatcherList_Predicate_PredicateList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Predicate []*Matcher_MatcherList_Predicate `protobuf:"bytes,1,rep,name=predicate,proto3" json:"predicate,omitempty"`
+}
+
+func (x *Matcher_MatcherList_Predicate_PredicateList) Reset() {
+ *x = Matcher_MatcherList_Predicate_PredicateList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherList_Predicate_PredicateList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherList_Predicate_PredicateList) ProtoMessage() {}
+
+func (x *Matcher_MatcherList_Predicate_PredicateList) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherList_Predicate_PredicateList.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherList_Predicate_PredicateList) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 1, 0, 1}
+}
+
+func (x *Matcher_MatcherList_Predicate_PredicateList) GetPredicate() []*Matcher_MatcherList_Predicate {
+ if x != nil {
+ return x.Predicate
+ }
+ return nil
+}
+
+type Matcher_MatcherTree_MatchMap struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Map map[string]*Matcher_OnMatch `protobuf:"bytes,1,rep,name=map,proto3" json:"map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *Matcher_MatcherTree_MatchMap) Reset() {
+ *x = Matcher_MatcherTree_MatchMap{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Matcher_MatcherTree_MatchMap) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Matcher_MatcherTree_MatchMap) ProtoMessage() {}
+
+func (x *Matcher_MatcherTree_MatchMap) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_matcher_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Matcher_MatcherTree_MatchMap.ProtoReflect.Descriptor instead.
+func (*Matcher_MatcherTree_MatchMap) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_matcher_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+func (x *Matcher_MatcherTree_MatchMap) GetMap() map[string]*Matcher_OnMatch {
+ if x != nil {
+ return x.Map
+ }
+ return nil
+}
+
+var File_xds_type_matcher_v3_matcher_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_matcher_proto_rawDesc = []byte{
+ 0x0a, 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f,
+ 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0x9b, 0x10, 0x0a, 0x07, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4d, 0x0a, 0x0c,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x28, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x48, 0x00, 0x52, 0x0b, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6f, 0x6e,
+ 0x5f, 0x6e, 0x6f, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x6f, 0x6e, 0x4e, 0x6f, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x1a, 0xb6, 0x01, 0x0a, 0x07, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, 0x38, 0x0a, 0x07,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x07, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x61, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6b, 0x65, 0x65, 0x70,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x42, 0x0f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb6, 0x08, 0x0a, 0x0b, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x08, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x91, 0x06, 0x0a, 0x09, 0x50, 0x72, 0x65, 0x64, 0x69,
+ 0x63, 0x61, 0x74, 0x65, 0x12, 0x6f, 0x0a, 0x10, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x5f, 0x70,
+ 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x48, 0x00, 0x52, 0x0f, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x65, 0x64,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x61, 0x0a, 0x0a, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c,
+ 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x72,
+ 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x09, 0x6f,
+ 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x63, 0x0a, 0x0b, 0x61, 0x6e, 0x64, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x48,
+ 0x00, 0x52, 0x0a, 0x61, 0x6e, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x55, 0x0a,
+ 0x0b, 0x6e, 0x6f, 0x74, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65,
+ 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x6e, 0x6f, 0x74, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x1a, 0xf3, 0x01, 0x0a, 0x0f, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50,
+ 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a,
+ 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0b, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x0e, 0x0a, 0x07, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x6b, 0x0a, 0x0d, 0x50, 0x72,
+ 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x5a, 0x0a, 0x09, 0x70,
+ 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x09, 0x70, 0x72,
+ 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x42, 0x11, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0xb5, 0x01, 0x0a, 0x0c, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x09, 0x70,
+ 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x4c, 0x69, 0x73, 0x74, 0x2e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x70, 0x72,
+ 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x1a, 0xa9, 0x04, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72,
+ 0x65, 0x65, 0x12, 0x41, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05,
+ 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x5b, 0x0a, 0x0f, 0x65, 0x78, 0x61, 0x63, 0x74, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61,
+ 0x70, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x78, 0x61, 0x63, 0x74, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d,
+ 0x61, 0x70, 0x12, 0x5d, 0x0a, 0x10, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x5f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x78,
+ 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x48,
+ 0x00, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x4d, 0x61,
+ 0x70, 0x12, 0x46, 0x0a, 0x0c, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x0b, 0x63, 0x75,
+ 0x73, 0x74, 0x6f, 0x6d, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x1a, 0xc0, 0x01, 0x0a, 0x08, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x12, 0x56, 0x0a, 0x03, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x54, 0x72, 0x65, 0x65, 0x2e, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x4d, 0x61, 0x70, 0x2e, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x42,
+ 0x08, 0xfa, 0x42, 0x05, 0x9a, 0x01, 0x02, 0x08, 0x01, 0x52, 0x03, 0x6d, 0x61, 0x70, 0x1a, 0x5c,
+ 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3a, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x10, 0x0a, 0x09,
+ 0x74, 0x72, 0x65, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x0e,
+ 0x0a, 0x0c, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x42, 0x5c,
+ 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
+ 0x42, 0x0c, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63,
+ 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70,
+ 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_matcher_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_matcher_proto_rawDescData = file_xds_type_matcher_v3_matcher_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_matcher_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_matcher_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_matcher_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_matcher_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_matcher_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_matcher_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
+var file_xds_type_matcher_v3_matcher_proto_goTypes = []interface{}{
+ (*Matcher)(nil), // 0: xds.type.matcher.v3.Matcher
+ (*Matcher_OnMatch)(nil), // 1: xds.type.matcher.v3.Matcher.OnMatch
+ (*Matcher_MatcherList)(nil), // 2: xds.type.matcher.v3.Matcher.MatcherList
+ (*Matcher_MatcherTree)(nil), // 3: xds.type.matcher.v3.Matcher.MatcherTree
+ (*Matcher_MatcherList_Predicate)(nil), // 4: xds.type.matcher.v3.Matcher.MatcherList.Predicate
+ (*Matcher_MatcherList_FieldMatcher)(nil), // 5: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher
+ (*Matcher_MatcherList_Predicate_SinglePredicate)(nil), // 6: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate
+ (*Matcher_MatcherList_Predicate_PredicateList)(nil), // 7: xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList
+ (*Matcher_MatcherTree_MatchMap)(nil), // 8: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap
+ nil, // 9: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry
+ (*v3.TypedExtensionConfig)(nil), // 10: xds.core.v3.TypedExtensionConfig
+ (*StringMatcher)(nil), // 11: xds.type.matcher.v3.StringMatcher
+}
+var file_xds_type_matcher_v3_matcher_proto_depIdxs = []int32{
+ 2, // 0: xds.type.matcher.v3.Matcher.matcher_list:type_name -> xds.type.matcher.v3.Matcher.MatcherList
+ 3, // 1: xds.type.matcher.v3.Matcher.matcher_tree:type_name -> xds.type.matcher.v3.Matcher.MatcherTree
+ 1, // 2: xds.type.matcher.v3.Matcher.on_no_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 0, // 3: xds.type.matcher.v3.Matcher.OnMatch.matcher:type_name -> xds.type.matcher.v3.Matcher
+ 10, // 4: xds.type.matcher.v3.Matcher.OnMatch.action:type_name -> xds.core.v3.TypedExtensionConfig
+ 5, // 5: xds.type.matcher.v3.Matcher.MatcherList.matchers:type_name -> xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher
+ 10, // 6: xds.type.matcher.v3.Matcher.MatcherTree.input:type_name -> xds.core.v3.TypedExtensionConfig
+ 8, // 7: xds.type.matcher.v3.Matcher.MatcherTree.exact_match_map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap
+ 8, // 8: xds.type.matcher.v3.Matcher.MatcherTree.prefix_match_map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap
+ 10, // 9: xds.type.matcher.v3.Matcher.MatcherTree.custom_match:type_name -> xds.core.v3.TypedExtensionConfig
+ 6, // 10: xds.type.matcher.v3.Matcher.MatcherList.Predicate.single_predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate
+ 7, // 11: xds.type.matcher.v3.Matcher.MatcherList.Predicate.or_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList
+ 7, // 12: xds.type.matcher.v3.Matcher.MatcherList.Predicate.and_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList
+ 4, // 13: xds.type.matcher.v3.Matcher.MatcherList.Predicate.not_matcher:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate
+ 4, // 14: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher.predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate
+ 1, // 15: xds.type.matcher.v3.Matcher.MatcherList.FieldMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 10, // 16: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.input:type_name -> xds.core.v3.TypedExtensionConfig
+ 11, // 17: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.value_match:type_name -> xds.type.matcher.v3.StringMatcher
+ 10, // 18: xds.type.matcher.v3.Matcher.MatcherList.Predicate.SinglePredicate.custom_match:type_name -> xds.core.v3.TypedExtensionConfig
+ 4, // 19: xds.type.matcher.v3.Matcher.MatcherList.Predicate.PredicateList.predicate:type_name -> xds.type.matcher.v3.Matcher.MatcherList.Predicate
+ 9, // 20: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.map:type_name -> xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry
+ 1, // 21: xds.type.matcher.v3.Matcher.MatcherTree.MatchMap.MapEntry.value:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 22, // [22:22] is the sub-list for method output_type
+ 22, // [22:22] is the sub-list for method input_type
+ 22, // [22:22] is the sub-list for extension type_name
+ 22, // [22:22] is the sub-list for extension extendee
+ 0, // [0:22] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_matcher_proto_init() }
+func file_xds_type_matcher_v3_matcher_proto_init() {
+ if File_xds_type_matcher_v3_matcher_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_string_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_OnMatch); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherTree); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList_Predicate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList_FieldMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList_Predicate_SinglePredicate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherList_Predicate_PredicateList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Matcher_MatcherTree_MatchMap); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Matcher_MatcherList_)(nil),
+ (*Matcher_MatcherTree_)(nil),
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*Matcher_OnMatch_Matcher)(nil),
+ (*Matcher_OnMatch_Action)(nil),
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[3].OneofWrappers = []interface{}{
+ (*Matcher_MatcherTree_ExactMatchMap)(nil),
+ (*Matcher_MatcherTree_PrefixMatchMap)(nil),
+ (*Matcher_MatcherTree_CustomMatch)(nil),
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[4].OneofWrappers = []interface{}{
+ (*Matcher_MatcherList_Predicate_SinglePredicate_)(nil),
+ (*Matcher_MatcherList_Predicate_OrMatcher)(nil),
+ (*Matcher_MatcherList_Predicate_AndMatcher)(nil),
+ (*Matcher_MatcherList_Predicate_NotMatcher)(nil),
+ }
+ file_xds_type_matcher_v3_matcher_proto_msgTypes[6].OneofWrappers = []interface{}{
+ (*Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch)(nil),
+ (*Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_matcher_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 10,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_matcher_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_matcher_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_matcher_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_matcher_proto = out.File
+ file_xds_type_matcher_v3_matcher_proto_rawDesc = nil
+ file_xds_type_matcher_v3_matcher_proto_goTypes = nil
+ file_xds_type_matcher_v3_matcher_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go
new file mode 100644
index 000000000..edd1dd8aa
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/matcher.pb.validate.go
@@ -0,0 +1,1915 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/matcher.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Matcher with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Matcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in MatcherMultiError, or nil if none found.
+func (m *Matcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetOnNoMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "OnNoMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "OnNoMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnNoMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MatcherValidationError{
+ field: "OnNoMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ switch v := m.MatcherType.(type) {
+ case *Matcher_MatcherList_:
+ if v == nil {
+ err := MatcherValidationError{
+ field: "MatcherType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetMatcherList()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "MatcherList",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "MatcherList",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatcherList()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MatcherValidationError{
+ field: "MatcherList",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherTree_:
+ if v == nil {
+ err := MatcherValidationError{
+ field: "MatcherType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetMatcherTree()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "MatcherTree",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MatcherValidationError{
+ field: "MatcherTree",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatcherTree()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MatcherValidationError{
+ field: "MatcherTree",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return MatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// MatcherMultiError is an error wrapping multiple validation errors returned
+// by Matcher.ValidateAll() if the designated constraints aren't met.
+type MatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MatcherMultiError) AllErrors() []error { return m }
+
+// MatcherValidationError is the validation error returned by Matcher.Validate
+// if the designated constraints aren't met.
+type MatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MatcherValidationError) ErrorName() string { return "MatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MatcherValidationError{}
+
+// Validate checks the field values on Matcher_OnMatch with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_OnMatch) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_OnMatch with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Matcher_OnMatchMultiError, or nil if none found.
+func (m *Matcher_OnMatch) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_OnMatch) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for KeepMatching
+
+ oneofOnMatchPresent := false
+ switch v := m.OnMatch.(type) {
+ case *Matcher_OnMatch_Matcher:
+ if v == nil {
+ err := Matcher_OnMatchValidationError{
+ field: "OnMatch",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofOnMatchPresent = true
+
+ if all {
+ switch v := interface{}(m.GetMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_OnMatchValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_OnMatchValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_OnMatchValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_OnMatch_Action:
+ if v == nil {
+ err := Matcher_OnMatchValidationError{
+ field: "OnMatch",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofOnMatchPresent = true
+
+ if all {
+ switch v := interface{}(m.GetAction()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_OnMatchValidationError{
+ field: "Action",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_OnMatchValidationError{
+ field: "Action",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAction()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_OnMatchValidationError{
+ field: "Action",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofOnMatchPresent {
+ err := Matcher_OnMatchValidationError{
+ field: "OnMatch",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Matcher_OnMatchMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_OnMatchMultiError is an error wrapping multiple validation errors
+// returned by Matcher_OnMatch.ValidateAll() if the designated constraints
+// aren't met.
+type Matcher_OnMatchMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_OnMatchMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_OnMatchMultiError) AllErrors() []error { return m }
+
+// Matcher_OnMatchValidationError is the validation error returned by
+// Matcher_OnMatch.Validate if the designated constraints aren't met.
+type Matcher_OnMatchValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_OnMatchValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_OnMatchValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_OnMatchValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_OnMatchValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_OnMatchValidationError) ErrorName() string { return "Matcher_OnMatchValidationError" }
+
+// Error satisfies the builtin error interface
+func (e Matcher_OnMatchValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_OnMatch.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_OnMatchValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_OnMatchValidationError{}
+
+// Validate checks the field values on Matcher_MatcherList with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherList) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherList with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherListMultiError, or nil if none found.
+func (m *Matcher_MatcherList) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetMatchers()) < 1 {
+ err := Matcher_MatcherListValidationError{
+ field: "Matchers",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherListValidationError{
+ field: fmt.Sprintf("Matchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherListValidationError{
+ field: fmt.Sprintf("Matchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherListValidationError{
+ field: fmt.Sprintf("Matchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherListMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherListMultiError is an error wrapping multiple validation
+// errors returned by Matcher_MatcherList.ValidateAll() if the designated
+// constraints aren't met.
+type Matcher_MatcherListMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherListMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherListMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherListValidationError is the validation error returned by
+// Matcher_MatcherList.Validate if the designated constraints aren't met.
+type Matcher_MatcherListValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherListValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherListValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherListValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherListValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherListValidationError) ErrorName() string {
+ return "Matcher_MatcherListValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherListValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherListValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherListValidationError{}
+
+// Validate checks the field values on Matcher_MatcherTree with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherTree) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherTree with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherTreeMultiError, or nil if none found.
+func (m *Matcher_MatcherTree) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherTree) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetInput() == nil {
+ err := Matcher_MatcherTreeValidationError{
+ field: "Input",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetInput()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTreeValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ oneofTreeTypePresent := false
+ switch v := m.TreeType.(type) {
+ case *Matcher_MatcherTree_ExactMatchMap:
+ if v == nil {
+ err := Matcher_MatcherTreeValidationError{
+ field: "TreeType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofTreeTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetExactMatchMap()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "ExactMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "ExactMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExactMatchMap()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTreeValidationError{
+ field: "ExactMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherTree_PrefixMatchMap:
+ if v == nil {
+ err := Matcher_MatcherTreeValidationError{
+ field: "TreeType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofTreeTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetPrefixMatchMap()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "PrefixMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "PrefixMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPrefixMatchMap()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTreeValidationError{
+ field: "PrefixMatchMap",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherTree_CustomMatch:
+ if v == nil {
+ err := Matcher_MatcherTreeValidationError{
+ field: "TreeType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofTreeTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetCustomMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTreeValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTreeValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofTreeTypePresent {
+ err := Matcher_MatcherTreeValidationError{
+ field: "TreeType",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherTreeMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherTreeMultiError is an error wrapping multiple validation
+// errors returned by Matcher_MatcherTree.ValidateAll() if the designated
+// constraints aren't met.
+type Matcher_MatcherTreeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherTreeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherTreeMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherTreeValidationError is the validation error returned by
+// Matcher_MatcherTree.Validate if the designated constraints aren't met.
+type Matcher_MatcherTreeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherTreeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherTreeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherTreeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherTreeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherTreeValidationError) ErrorName() string {
+ return "Matcher_MatcherTreeValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherTreeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherTree.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherTreeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherTreeValidationError{}
+
+// Validate checks the field values on Matcher_MatcherList_Predicate with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherList_Predicate) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherList_Predicate with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherList_PredicateMultiError, or nil if none found.
+func (m *Matcher_MatcherList_Predicate) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList_Predicate) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofMatchTypePresent := false
+ switch v := m.MatchType.(type) {
+ case *Matcher_MatcherList_Predicate_SinglePredicate_:
+ if v == nil {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetSinglePredicate()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "SinglePredicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "SinglePredicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSinglePredicate()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_PredicateValidationError{
+ field: "SinglePredicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherList_Predicate_OrMatcher:
+ if v == nil {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetOrMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "OrMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "OrMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOrMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_PredicateValidationError{
+ field: "OrMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherList_Predicate_AndMatcher:
+ if v == nil {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetAndMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "AndMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "AndMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAndMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_PredicateValidationError{
+ field: "AndMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherList_Predicate_NotMatcher:
+ if v == nil {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchTypePresent = true
+
+ if all {
+ switch v := interface{}(m.GetNotMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "NotMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_PredicateValidationError{
+ field: "NotMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNotMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_PredicateValidationError{
+ field: "NotMatcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofMatchTypePresent {
+ err := Matcher_MatcherList_PredicateValidationError{
+ field: "MatchType",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherList_PredicateMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherList_PredicateMultiError is an error wrapping multiple
+// validation errors returned by Matcher_MatcherList_Predicate.ValidateAll()
+// if the designated constraints aren't met.
+type Matcher_MatcherList_PredicateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherList_PredicateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherList_PredicateMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherList_PredicateValidationError is the validation error
+// returned by Matcher_MatcherList_Predicate.Validate if the designated
+// constraints aren't met.
+type Matcher_MatcherList_PredicateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherList_PredicateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherList_PredicateValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherList_PredicateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherList_PredicateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherList_PredicateValidationError) ErrorName() string {
+ return "Matcher_MatcherList_PredicateValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherList_PredicateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList_Predicate.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherList_PredicateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherList_PredicateValidationError{}
+
+// Validate checks the field values on Matcher_MatcherList_FieldMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *Matcher_MatcherList_FieldMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherList_FieldMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherList_FieldMatcherMultiError, or nil if none found.
+func (m *Matcher_MatcherList_FieldMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList_FieldMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetPredicate() == nil {
+ err := Matcher_MatcherList_FieldMatcherValidationError{
+ field: "Predicate",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetPredicate()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{
+ field: "Predicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{
+ field: "Predicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPredicate()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_FieldMatcherValidationError{
+ field: "Predicate",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if m.GetOnMatch() == nil {
+ err := Matcher_MatcherList_FieldMatcherValidationError{
+ field: "OnMatch",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_FieldMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_FieldMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherList_FieldMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherList_FieldMatcherMultiError is an error wrapping multiple
+// validation errors returned by
+// Matcher_MatcherList_FieldMatcher.ValidateAll() if the designated
+// constraints aren't met.
+type Matcher_MatcherList_FieldMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherList_FieldMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherList_FieldMatcherMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherList_FieldMatcherValidationError is the validation error
+// returned by Matcher_MatcherList_FieldMatcher.Validate if the designated
+// constraints aren't met.
+type Matcher_MatcherList_FieldMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherList_FieldMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherList_FieldMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherList_FieldMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherList_FieldMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherList_FieldMatcherValidationError) ErrorName() string {
+ return "Matcher_MatcherList_FieldMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherList_FieldMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList_FieldMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherList_FieldMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherList_FieldMatcherValidationError{}
+
+// Validate checks the field values on
+// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Matcher_MatcherList_Predicate_SinglePredicate with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// Matcher_MatcherList_Predicate_SinglePredicateMultiError, or nil if none found.
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList_Predicate_SinglePredicate) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetInput() == nil {
+ err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Input",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetInput()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetInput()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Input",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ oneofMatcherPresent := false
+ switch v := m.Matcher.(type) {
+ case *Matcher_MatcherList_Predicate_SinglePredicate_ValueMatch:
+ if v == nil {
+ err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Matcher",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatcherPresent = true
+
+ if all {
+ switch v := interface{}(m.GetValueMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "ValueMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "ValueMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValueMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "ValueMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Matcher_MatcherList_Predicate_SinglePredicate_CustomMatch:
+ if v == nil {
+ err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Matcher",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatcherPresent = true
+
+ if all {
+ switch v := interface{}(m.GetCustomMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCustomMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "CustomMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofMatcherPresent {
+ err := Matcher_MatcherList_Predicate_SinglePredicateValidationError{
+ field: "Matcher",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherList_Predicate_SinglePredicateMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherList_Predicate_SinglePredicateMultiError is an error wrapping
+// multiple validation errors returned by
+// Matcher_MatcherList_Predicate_SinglePredicate.ValidateAll() if the
+// designated constraints aren't met.
+type Matcher_MatcherList_Predicate_SinglePredicateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherList_Predicate_SinglePredicateMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherList_Predicate_SinglePredicateValidationError is the
+// validation error returned by
+// Matcher_MatcherList_Predicate_SinglePredicate.Validate if the designated
+// constraints aren't met.
+type Matcher_MatcherList_Predicate_SinglePredicateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) ErrorName() string {
+ return "Matcher_MatcherList_Predicate_SinglePredicateValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherList_Predicate_SinglePredicateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList_Predicate_SinglePredicate.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherList_Predicate_SinglePredicateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherList_Predicate_SinglePredicateValidationError{}
+
+// Validate checks the field values on
+// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherList_Predicate_PredicateList) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Matcher_MatcherList_Predicate_PredicateList with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// Matcher_MatcherList_Predicate_PredicateListMultiError, or nil if none found.
+func (m *Matcher_MatcherList_Predicate_PredicateList) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherList_Predicate_PredicateList) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetPredicate()) < 2 {
+ err := Matcher_MatcherList_Predicate_PredicateListValidationError{
+ field: "Predicate",
+ reason: "value must contain at least 2 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetPredicate() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{
+ field: fmt.Sprintf("Predicate[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherList_Predicate_PredicateListValidationError{
+ field: fmt.Sprintf("Predicate[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherList_Predicate_PredicateListValidationError{
+ field: fmt.Sprintf("Predicate[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherList_Predicate_PredicateListMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherList_Predicate_PredicateListMultiError is an error wrapping
+// multiple validation errors returned by
+// Matcher_MatcherList_Predicate_PredicateList.ValidateAll() if the designated
+// constraints aren't met.
+type Matcher_MatcherList_Predicate_PredicateListMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherList_Predicate_PredicateListMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherList_Predicate_PredicateListMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherList_Predicate_PredicateListValidationError is the validation
+// error returned by Matcher_MatcherList_Predicate_PredicateList.Validate if
+// the designated constraints aren't met.
+type Matcher_MatcherList_Predicate_PredicateListValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) ErrorName() string {
+ return "Matcher_MatcherList_Predicate_PredicateListValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherList_Predicate_PredicateListValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherList_Predicate_PredicateList.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherList_Predicate_PredicateListValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherList_Predicate_PredicateListValidationError{}
+
+// Validate checks the field values on Matcher_MatcherTree_MatchMap with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Matcher_MatcherTree_MatchMap) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Matcher_MatcherTree_MatchMap with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Matcher_MatcherTree_MatchMapMultiError, or nil if none found.
+func (m *Matcher_MatcherTree_MatchMap) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Matcher_MatcherTree_MatchMap) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetMap()) < 1 {
+ err := Matcher_MatcherTree_MatchMapValidationError{
+ field: "Map",
+ reason: "value must contain at least 1 pair(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ {
+ sorted_keys := make([]string, len(m.GetMap()))
+ i := 0
+ for key := range m.GetMap() {
+ sorted_keys[i] = key
+ i++
+ }
+ sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
+ for _, key := range sorted_keys {
+ val := m.GetMap()[key]
+ _ = val
+
+ // no validation rules for Map[key]
+
+ if all {
+ switch v := interface{}(val).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{
+ field: fmt.Sprintf("Map[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Matcher_MatcherTree_MatchMapValidationError{
+ field: fmt.Sprintf("Map[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Matcher_MatcherTree_MatchMapValidationError{
+ field: fmt.Sprintf("Map[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return Matcher_MatcherTree_MatchMapMultiError(errors)
+ }
+
+ return nil
+}
+
+// Matcher_MatcherTree_MatchMapMultiError is an error wrapping multiple
+// validation errors returned by Matcher_MatcherTree_MatchMap.ValidateAll() if
+// the designated constraints aren't met.
+type Matcher_MatcherTree_MatchMapMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Matcher_MatcherTree_MatchMapMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Matcher_MatcherTree_MatchMapMultiError) AllErrors() []error { return m }
+
+// Matcher_MatcherTree_MatchMapValidationError is the validation error returned
+// by Matcher_MatcherTree_MatchMap.Validate if the designated constraints
+// aren't met.
+type Matcher_MatcherTree_MatchMapValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Matcher_MatcherTree_MatchMapValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Matcher_MatcherTree_MatchMapValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Matcher_MatcherTree_MatchMapValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Matcher_MatcherTree_MatchMapValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Matcher_MatcherTree_MatchMapValidationError) ErrorName() string {
+ return "Matcher_MatcherTree_MatchMapValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Matcher_MatcherTree_MatchMapValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMatcher_MatcherTree_MatchMap.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Matcher_MatcherTree_MatchMapValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Matcher_MatcherTree_MatchMapValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go
new file mode 100644
index 000000000..bc811ecb2
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.go
@@ -0,0 +1,539 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/range.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Int64RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RangeMatchers []*Int64RangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
+}
+
+func (x *Int64RangeMatcher) Reset() {
+ *x = Int64RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int64RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64RangeMatcher) ProtoMessage() {}
+
+func (x *Int64RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64RangeMatcher.ProtoReflect.Descriptor instead.
+func (*Int64RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Int64RangeMatcher) GetRangeMatchers() []*Int64RangeMatcher_RangeMatcher {
+ if x != nil {
+ return x.RangeMatchers
+ }
+ return nil
+}
+
+type Int32RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RangeMatchers []*Int32RangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
+}
+
+func (x *Int32RangeMatcher) Reset() {
+ *x = Int32RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int32RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32RangeMatcher) ProtoMessage() {}
+
+func (x *Int32RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32RangeMatcher.ProtoReflect.Descriptor instead.
+func (*Int32RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Int32RangeMatcher) GetRangeMatchers() []*Int32RangeMatcher_RangeMatcher {
+ if x != nil {
+ return x.RangeMatchers
+ }
+ return nil
+}
+
+type DoubleRangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ RangeMatchers []*DoubleRangeMatcher_RangeMatcher `protobuf:"bytes,1,rep,name=range_matchers,json=rangeMatchers,proto3" json:"range_matchers,omitempty"`
+}
+
+func (x *DoubleRangeMatcher) Reset() {
+ *x = DoubleRangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleRangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleRangeMatcher) ProtoMessage() {}
+
+func (x *DoubleRangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleRangeMatcher.ProtoReflect.Descriptor instead.
+func (*DoubleRangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DoubleRangeMatcher) GetRangeMatchers() []*DoubleRangeMatcher_RangeMatcher {
+ if x != nil {
+ return x.RangeMatchers
+ }
+ return nil
+}
+
+type Int64RangeMatcher_RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ranges []*v3.Int64Range `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *Int64RangeMatcher_RangeMatcher) Reset() {
+ *x = Int64RangeMatcher_RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int64RangeMatcher_RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64RangeMatcher_RangeMatcher) ProtoMessage() {}
+
+func (x *Int64RangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64RangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead.
+func (*Int64RangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Int64RangeMatcher_RangeMatcher) GetRanges() []*v3.Int64Range {
+ if x != nil {
+ return x.Ranges
+ }
+ return nil
+}
+
+func (x *Int64RangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+type Int32RangeMatcher_RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ranges []*v3.Int32Range `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *Int32RangeMatcher_RangeMatcher) Reset() {
+ *x = Int32RangeMatcher_RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int32RangeMatcher_RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32RangeMatcher_RangeMatcher) ProtoMessage() {}
+
+func (x *Int32RangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32RangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead.
+func (*Int32RangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *Int32RangeMatcher_RangeMatcher) GetRanges() []*v3.Int32Range {
+ if x != nil {
+ return x.Ranges
+ }
+ return nil
+}
+
+func (x *Int32RangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+type DoubleRangeMatcher_RangeMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Ranges []*v3.DoubleRange `protobuf:"bytes,1,rep,name=ranges,proto3" json:"ranges,omitempty"`
+ OnMatch *Matcher_OnMatch `protobuf:"bytes,2,opt,name=on_match,json=onMatch,proto3" json:"on_match,omitempty"`
+}
+
+func (x *DoubleRangeMatcher_RangeMatcher) Reset() {
+ *x = DoubleRangeMatcher_RangeMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleRangeMatcher_RangeMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleRangeMatcher_RangeMatcher) ProtoMessage() {}
+
+func (x *DoubleRangeMatcher_RangeMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_range_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleRangeMatcher_RangeMatcher.ProtoReflect.Descriptor instead.
+func (*DoubleRangeMatcher_RangeMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_range_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *DoubleRangeMatcher_RangeMatcher) GetRanges() []*v3.DoubleRange {
+ if x != nil {
+ return x.Ranges
+ }
+ return nil
+}
+
+func (x *DoubleRangeMatcher_RangeMatcher) GetOnMatch() *Matcher_OnMatch {
+ if x != nil {
+ return x.OnMatch
+ }
+ return nil
+}
+
+var File_xds_type_matcher_v3_range_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_range_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
+ 0x21, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c,
+ 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xfc, 0x01, 0x0a, 0x11,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x12, 0x5a, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e,
+ 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8a, 0x01,
+ 0x0a, 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39,
+ 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74,
+ 0x36, 0x34, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08,
+ 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64,
+ 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63,
+ 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xfc, 0x01, 0x0a, 0x11, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x5a, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x49,
+ 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d, 0x72,
+ 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8a, 0x01, 0x0a,
+ 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x39, 0x0a,
+ 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e,
+ 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01,
+ 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e, 0x5f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78, 0x64, 0x73,
+ 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33,
+ 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x22, 0xff, 0x01, 0x0a, 0x12, 0x44, 0x6f,
+ 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x5b, 0x0a, 0x0e, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x44,
+ 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x52, 0x0d,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x73, 0x1a, 0x8b, 0x01,
+ 0x0a, 0x0c, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x3a,
+ 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x75,
+ 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02,
+ 0x08, 0x01, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x08, 0x6f, 0x6e,
+ 0x5f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x78,
+ 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x4f, 0x6e, 0x4d, 0x61, 0x74,
+ 0x63, 0x68, 0x52, 0x07, 0x6f, 0x6e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x42, 0x5a, 0x0a, 0x1e, 0x63,
+ 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79,
+ 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74,
+ 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73,
+ 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_range_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_range_proto_rawDescData = file_xds_type_matcher_v3_range_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_range_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_range_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_range_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_range_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 6)
+var file_xds_type_matcher_v3_range_proto_goTypes = []interface{}{
+ (*Int64RangeMatcher)(nil), // 0: xds.type.matcher.v3.Int64RangeMatcher
+ (*Int32RangeMatcher)(nil), // 1: xds.type.matcher.v3.Int32RangeMatcher
+ (*DoubleRangeMatcher)(nil), // 2: xds.type.matcher.v3.DoubleRangeMatcher
+ (*Int64RangeMatcher_RangeMatcher)(nil), // 3: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher
+ (*Int32RangeMatcher_RangeMatcher)(nil), // 4: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher
+ (*DoubleRangeMatcher_RangeMatcher)(nil), // 5: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher
+ (*v3.Int64Range)(nil), // 6: xds.type.v3.Int64Range
+ (*Matcher_OnMatch)(nil), // 7: xds.type.matcher.v3.Matcher.OnMatch
+ (*v3.Int32Range)(nil), // 8: xds.type.v3.Int32Range
+ (*v3.DoubleRange)(nil), // 9: xds.type.v3.DoubleRange
+}
+var file_xds_type_matcher_v3_range_proto_depIdxs = []int32{
+ 3, // 0: xds.type.matcher.v3.Int64RangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher
+ 4, // 1: xds.type.matcher.v3.Int32RangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher
+ 5, // 2: xds.type.matcher.v3.DoubleRangeMatcher.range_matchers:type_name -> xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher
+ 6, // 3: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.Int64Range
+ 7, // 4: xds.type.matcher.v3.Int64RangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 8, // 5: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.Int32Range
+ 7, // 6: xds.type.matcher.v3.Int32RangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 9, // 7: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher.ranges:type_name -> xds.type.v3.DoubleRange
+ 7, // 8: xds.type.matcher.v3.DoubleRangeMatcher.RangeMatcher.on_match:type_name -> xds.type.matcher.v3.Matcher.OnMatch
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_range_proto_init() }
+func file_xds_type_matcher_v3_range_proto_init() {
+ if File_xds_type_matcher_v3_range_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_matcher_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int64RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int32RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleRangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int64RangeMatcher_RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int32RangeMatcher_RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_range_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleRangeMatcher_RangeMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_range_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 6,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_range_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_range_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_range_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_range_proto = out.File
+ file_xds_type_matcher_v3_range_proto_rawDesc = nil
+ file_xds_type_matcher_v3_range_proto_goTypes = nil
+ file_xds_type_matcher_v3_range_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go
new file mode 100644
index 000000000..8cb598643
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/range.pb.validate.go
@@ -0,0 +1,975 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/range.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Int64RangeMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *Int64RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int64RangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Int64RangeMatcherMultiError, or nil if none found.
+func (m *Int64RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int64RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetRangeMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int64RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int64RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int64RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Int64RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int64RangeMatcherMultiError is an error wrapping multiple validation errors
+// returned by Int64RangeMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type Int64RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int64RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int64RangeMatcherMultiError) AllErrors() []error { return m }
+
+// Int64RangeMatcherValidationError is the validation error returned by
+// Int64RangeMatcher.Validate if the designated constraints aren't met.
+type Int64RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int64RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int64RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int64RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int64RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int64RangeMatcherValidationError) ErrorName() string {
+ return "Int64RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Int64RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt64RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int64RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int64RangeMatcherValidationError{}
+
+// Validate checks the field values on Int32RangeMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *Int32RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int32RangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Int32RangeMatcherMultiError, or nil if none found.
+func (m *Int32RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int32RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetRangeMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int32RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int32RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int32RangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Int32RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int32RangeMatcherMultiError is an error wrapping multiple validation errors
+// returned by Int32RangeMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type Int32RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int32RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int32RangeMatcherMultiError) AllErrors() []error { return m }
+
+// Int32RangeMatcherValidationError is the validation error returned by
+// Int32RangeMatcher.Validate if the designated constraints aren't met.
+type Int32RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int32RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int32RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int32RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int32RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int32RangeMatcherValidationError) ErrorName() string {
+ return "Int32RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Int32RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt32RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int32RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int32RangeMatcherValidationError{}
+
+// Validate checks the field values on DoubleRangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *DoubleRangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DoubleRangeMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// DoubleRangeMatcherMultiError, or nil if none found.
+func (m *DoubleRangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DoubleRangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetRangeMatchers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DoubleRangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DoubleRangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DoubleRangeMatcherValidationError{
+ field: fmt.Sprintf("RangeMatchers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return DoubleRangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// DoubleRangeMatcherMultiError is an error wrapping multiple validation errors
+// returned by DoubleRangeMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type DoubleRangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DoubleRangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DoubleRangeMatcherMultiError) AllErrors() []error { return m }
+
+// DoubleRangeMatcherValidationError is the validation error returned by
+// DoubleRangeMatcher.Validate if the designated constraints aren't met.
+type DoubleRangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DoubleRangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DoubleRangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DoubleRangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DoubleRangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DoubleRangeMatcherValidationError) ErrorName() string {
+ return "DoubleRangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DoubleRangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDoubleRangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DoubleRangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DoubleRangeMatcherValidationError{}
+
+// Validate checks the field values on Int64RangeMatcher_RangeMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Int64RangeMatcher_RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int64RangeMatcher_RangeMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Int64RangeMatcher_RangeMatcherMultiError, or nil if none found.
+func (m *Int64RangeMatcher_RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int64RangeMatcher_RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetRanges()) < 1 {
+ err := Int64RangeMatcher_RangeMatcherValidationError{
+ field: "Ranges",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRanges() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int64RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int64RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int64RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Int64RangeMatcher_RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int64RangeMatcher_RangeMatcherMultiError is an error wrapping multiple
+// validation errors returned by Int64RangeMatcher_RangeMatcher.ValidateAll()
+// if the designated constraints aren't met.
+type Int64RangeMatcher_RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int64RangeMatcher_RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int64RangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m }
+
+// Int64RangeMatcher_RangeMatcherValidationError is the validation error
+// returned by Int64RangeMatcher_RangeMatcher.Validate if the designated
+// constraints aren't met.
+type Int64RangeMatcher_RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int64RangeMatcher_RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int64RangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int64RangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int64RangeMatcher_RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int64RangeMatcher_RangeMatcherValidationError) ErrorName() string {
+ return "Int64RangeMatcher_RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Int64RangeMatcher_RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt64RangeMatcher_RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int64RangeMatcher_RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int64RangeMatcher_RangeMatcherValidationError{}
+
+// Validate checks the field values on Int32RangeMatcher_RangeMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Int32RangeMatcher_RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int32RangeMatcher_RangeMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Int32RangeMatcher_RangeMatcherMultiError, or nil if none found.
+func (m *Int32RangeMatcher_RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int32RangeMatcher_RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetRanges()) < 1 {
+ err := Int32RangeMatcher_RangeMatcherValidationError{
+ field: "Ranges",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRanges() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int32RangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Int32RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Int32RangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Int32RangeMatcher_RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int32RangeMatcher_RangeMatcherMultiError is an error wrapping multiple
+// validation errors returned by Int32RangeMatcher_RangeMatcher.ValidateAll()
+// if the designated constraints aren't met.
+type Int32RangeMatcher_RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int32RangeMatcher_RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int32RangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m }
+
+// Int32RangeMatcher_RangeMatcherValidationError is the validation error
+// returned by Int32RangeMatcher_RangeMatcher.Validate if the designated
+// constraints aren't met.
+type Int32RangeMatcher_RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int32RangeMatcher_RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int32RangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int32RangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int32RangeMatcher_RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int32RangeMatcher_RangeMatcherValidationError) ErrorName() string {
+ return "Int32RangeMatcher_RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Int32RangeMatcher_RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt32RangeMatcher_RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int32RangeMatcher_RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int32RangeMatcher_RangeMatcherValidationError{}
+
+// Validate checks the field values on DoubleRangeMatcher_RangeMatcher with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *DoubleRangeMatcher_RangeMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DoubleRangeMatcher_RangeMatcher with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// DoubleRangeMatcher_RangeMatcherMultiError, or nil if none found.
+func (m *DoubleRangeMatcher_RangeMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DoubleRangeMatcher_RangeMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetRanges()) < 1 {
+ err := DoubleRangeMatcher_RangeMatcherValidationError{
+ field: "Ranges",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetRanges() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DoubleRangeMatcher_RangeMatcherValidationError{
+ field: fmt.Sprintf("Ranges[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetOnMatch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DoubleRangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOnMatch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DoubleRangeMatcher_RangeMatcherValidationError{
+ field: "OnMatch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return DoubleRangeMatcher_RangeMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// DoubleRangeMatcher_RangeMatcherMultiError is an error wrapping multiple
+// validation errors returned by DoubleRangeMatcher_RangeMatcher.ValidateAll()
+// if the designated constraints aren't met.
+type DoubleRangeMatcher_RangeMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DoubleRangeMatcher_RangeMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DoubleRangeMatcher_RangeMatcherMultiError) AllErrors() []error { return m }
+
+// DoubleRangeMatcher_RangeMatcherValidationError is the validation error
+// returned by DoubleRangeMatcher_RangeMatcher.Validate if the designated
+// constraints aren't met.
+type DoubleRangeMatcher_RangeMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DoubleRangeMatcher_RangeMatcherValidationError) ErrorName() string {
+ return "DoubleRangeMatcher_RangeMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e DoubleRangeMatcher_RangeMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDoubleRangeMatcher_RangeMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DoubleRangeMatcher_RangeMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DoubleRangeMatcher_RangeMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go
new file mode 100644
index 000000000..c02ec2a91
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.go
@@ -0,0 +1,242 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/regex.proto
+
+package v3
+
+import (
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type RegexMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to EngineType:
+ //
+ // *RegexMatcher_GoogleRe2
+ EngineType isRegexMatcher_EngineType `protobuf_oneof:"engine_type"`
+ Regex string `protobuf:"bytes,2,opt,name=regex,proto3" json:"regex,omitempty"`
+}
+
+func (x *RegexMatcher) Reset() {
+ *x = RegexMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegexMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegexMatcher) ProtoMessage() {}
+
+func (x *RegexMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegexMatcher.ProtoReflect.Descriptor instead.
+func (*RegexMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *RegexMatcher) GetEngineType() isRegexMatcher_EngineType {
+ if m != nil {
+ return m.EngineType
+ }
+ return nil
+}
+
+func (x *RegexMatcher) GetGoogleRe2() *RegexMatcher_GoogleRE2 {
+ if x, ok := x.GetEngineType().(*RegexMatcher_GoogleRe2); ok {
+ return x.GoogleRe2
+ }
+ return nil
+}
+
+func (x *RegexMatcher) GetRegex() string {
+ if x != nil {
+ return x.Regex
+ }
+ return ""
+}
+
+type isRegexMatcher_EngineType interface {
+ isRegexMatcher_EngineType()
+}
+
+type RegexMatcher_GoogleRe2 struct {
+ GoogleRe2 *RegexMatcher_GoogleRE2 `protobuf:"bytes,1,opt,name=google_re2,json=googleRe2,proto3,oneof"`
+}
+
+func (*RegexMatcher_GoogleRe2) isRegexMatcher_EngineType() {}
+
+type RegexMatcher_GoogleRE2 struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *RegexMatcher_GoogleRE2) Reset() {
+ *x = RegexMatcher_GoogleRE2{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RegexMatcher_GoogleRE2) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RegexMatcher_GoogleRE2) ProtoMessage() {}
+
+func (x *RegexMatcher_GoogleRE2) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_regex_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RegexMatcher_GoogleRE2.ProtoReflect.Descriptor instead.
+func (*RegexMatcher_GoogleRE2) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_regex_proto_rawDescGZIP(), []int{0, 0}
+}
+
+var File_xds_type_matcher_v3_regex_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_regex_proto_rawDesc = []byte{
+ 0x0a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
+ 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0xa6, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x56, 0x0a, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5f, 0x72, 0x65, 0x32, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78,
+ 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x45,
+ 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x09, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x52, 0x65, 0x32, 0x12, 0x1d, 0x0a, 0x05, 0x72, 0x65, 0x67, 0x65,
+ 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01,
+ 0x52, 0x05, 0x72, 0x65, 0x67, 0x65, 0x78, 0x1a, 0x0b, 0x0a, 0x09, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x52, 0x45, 0x32, 0x42, 0x12, 0x0a, 0x0b, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x5f, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x42, 0x5a, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x65, 0x67, 0x65,
+ 0x78, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f,
+ 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_regex_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_regex_proto_rawDescData = file_xds_type_matcher_v3_regex_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_regex_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_regex_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_regex_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_regex_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_regex_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_regex_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_matcher_v3_regex_proto_goTypes = []interface{}{
+ (*RegexMatcher)(nil), // 0: xds.type.matcher.v3.RegexMatcher
+ (*RegexMatcher_GoogleRE2)(nil), // 1: xds.type.matcher.v3.RegexMatcher.GoogleRE2
+}
+var file_xds_type_matcher_v3_regex_proto_depIdxs = []int32{
+ 1, // 0: xds.type.matcher.v3.RegexMatcher.google_re2:type_name -> xds.type.matcher.v3.RegexMatcher.GoogleRE2
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_regex_proto_init() }
+func file_xds_type_matcher_v3_regex_proto_init() {
+ if File_xds_type_matcher_v3_regex_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_regex_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegexMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_regex_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RegexMatcher_GoogleRE2); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_type_matcher_v3_regex_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*RegexMatcher_GoogleRe2)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_regex_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_regex_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_regex_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_regex_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_regex_proto = out.File
+ file_xds_type_matcher_v3_regex_proto_rawDesc = nil
+ file_xds_type_matcher_v3_regex_proto_goTypes = nil
+ file_xds_type_matcher_v3_regex_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go
new file mode 100644
index 000000000..8b7682964
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/regex.pb.validate.go
@@ -0,0 +1,317 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/regex.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on RegexMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *RegexMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RegexMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in RegexMatcherMultiError, or
+// nil if none found.
+func (m *RegexMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RegexMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetRegex()) < 1 {
+ err := RegexMatcherValidationError{
+ field: "Regex",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ oneofEngineTypePresent := false
+ switch v := m.EngineType.(type) {
+ case *RegexMatcher_GoogleRe2:
+ if v == nil {
+ err := RegexMatcherValidationError{
+ field: "EngineType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofEngineTypePresent = true
+
+ if m.GetGoogleRe2() == nil {
+ err := RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetGoogleRe2()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGoogleRe2()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RegexMatcherValidationError{
+ field: "GoogleRe2",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofEngineTypePresent {
+ err := RegexMatcherValidationError{
+ field: "EngineType",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return RegexMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// RegexMatcherMultiError is an error wrapping multiple validation errors
+// returned by RegexMatcher.ValidateAll() if the designated constraints aren't met.
+type RegexMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RegexMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RegexMatcherMultiError) AllErrors() []error { return m }
+
+// RegexMatcherValidationError is the validation error returned by
+// RegexMatcher.Validate if the designated constraints aren't met.
+type RegexMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RegexMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RegexMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RegexMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RegexMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RegexMatcherValidationError) ErrorName() string { return "RegexMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RegexMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRegexMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RegexMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RegexMatcherValidationError{}
+
+// Validate checks the field values on RegexMatcher_GoogleRE2 with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RegexMatcher_GoogleRE2) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RegexMatcher_GoogleRE2 with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RegexMatcher_GoogleRE2MultiError, or nil if none found.
+func (m *RegexMatcher_GoogleRE2) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RegexMatcher_GoogleRE2) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return RegexMatcher_GoogleRE2MultiError(errors)
+ }
+
+ return nil
+}
+
+// RegexMatcher_GoogleRE2MultiError is an error wrapping multiple validation
+// errors returned by RegexMatcher_GoogleRE2.ValidateAll() if the designated
+// constraints aren't met.
+type RegexMatcher_GoogleRE2MultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RegexMatcher_GoogleRE2MultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RegexMatcher_GoogleRE2MultiError) AllErrors() []error { return m }
+
+// RegexMatcher_GoogleRE2ValidationError is the validation error returned by
+// RegexMatcher_GoogleRE2.Validate if the designated constraints aren't met.
+type RegexMatcher_GoogleRE2ValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RegexMatcher_GoogleRE2ValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RegexMatcher_GoogleRE2ValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RegexMatcher_GoogleRE2ValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RegexMatcher_GoogleRE2ValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RegexMatcher_GoogleRE2ValidationError) ErrorName() string {
+ return "RegexMatcher_GoogleRE2ValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RegexMatcher_GoogleRE2ValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRegexMatcher_GoogleRE2.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RegexMatcher_GoogleRE2ValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RegexMatcher_GoogleRE2ValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go
new file mode 100644
index 000000000..79b70bcb7
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.go
@@ -0,0 +1,353 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/matcher/v3/string.proto
+
+package v3
+
+import (
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type StringMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to MatchPattern:
+ //
+ // *StringMatcher_Exact
+ // *StringMatcher_Prefix
+ // *StringMatcher_Suffix
+ // *StringMatcher_SafeRegex
+ // *StringMatcher_Contains
+ // *StringMatcher_Custom
+ MatchPattern isStringMatcher_MatchPattern `protobuf_oneof:"match_pattern"`
+ IgnoreCase bool `protobuf:"varint,6,opt,name=ignore_case,json=ignoreCase,proto3" json:"ignore_case,omitempty"`
+}
+
+func (x *StringMatcher) Reset() {
+ *x = StringMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_string_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StringMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StringMatcher) ProtoMessage() {}
+
+func (x *StringMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_string_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StringMatcher.ProtoReflect.Descriptor instead.
+func (*StringMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_string_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *StringMatcher) GetMatchPattern() isStringMatcher_MatchPattern {
+ if m != nil {
+ return m.MatchPattern
+ }
+ return nil
+}
+
+func (x *StringMatcher) GetExact() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Exact); ok {
+ return x.Exact
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetPrefix() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Prefix); ok {
+ return x.Prefix
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetSuffix() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Suffix); ok {
+ return x.Suffix
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetSafeRegex() *RegexMatcher {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_SafeRegex); ok {
+ return x.SafeRegex
+ }
+ return nil
+}
+
+func (x *StringMatcher) GetContains() string {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Contains); ok {
+ return x.Contains
+ }
+ return ""
+}
+
+func (x *StringMatcher) GetCustom() *v3.TypedExtensionConfig {
+ if x, ok := x.GetMatchPattern().(*StringMatcher_Custom); ok {
+ return x.Custom
+ }
+ return nil
+}
+
+func (x *StringMatcher) GetIgnoreCase() bool {
+ if x != nil {
+ return x.IgnoreCase
+ }
+ return false
+}
+
+type isStringMatcher_MatchPattern interface {
+ isStringMatcher_MatchPattern()
+}
+
+type StringMatcher_Exact struct {
+ Exact string `protobuf:"bytes,1,opt,name=exact,proto3,oneof"`
+}
+
+type StringMatcher_Prefix struct {
+ Prefix string `protobuf:"bytes,2,opt,name=prefix,proto3,oneof"`
+}
+
+type StringMatcher_Suffix struct {
+ Suffix string `protobuf:"bytes,3,opt,name=suffix,proto3,oneof"`
+}
+
+type StringMatcher_SafeRegex struct {
+ SafeRegex *RegexMatcher `protobuf:"bytes,5,opt,name=safe_regex,json=safeRegex,proto3,oneof"`
+}
+
+type StringMatcher_Contains struct {
+ Contains string `protobuf:"bytes,7,opt,name=contains,proto3,oneof"`
+}
+
+type StringMatcher_Custom struct {
+ Custom *v3.TypedExtensionConfig `protobuf:"bytes,8,opt,name=custom,proto3,oneof"`
+}
+
+func (*StringMatcher_Exact) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Prefix) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Suffix) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_SafeRegex) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Contains) isStringMatcher_MatchPattern() {}
+
+func (*StringMatcher_Custom) isStringMatcher_MatchPattern() {}
+
+type ListStringMatcher struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Patterns []*StringMatcher `protobuf:"bytes,1,rep,name=patterns,proto3" json:"patterns,omitempty"`
+}
+
+func (x *ListStringMatcher) Reset() {
+ *x = ListStringMatcher{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_matcher_v3_string_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListStringMatcher) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListStringMatcher) ProtoMessage() {}
+
+func (x *ListStringMatcher) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_matcher_v3_string_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListStringMatcher.ProtoReflect.Descriptor instead.
+func (*ListStringMatcher) Descriptor() ([]byte, []int) {
+ return file_xds_type_matcher_v3_string_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListStringMatcher) GetPatterns() []*StringMatcher {
+ if x != nil {
+ return x.Patterns
+ }
+ return nil
+}
+
+var File_xds_type_matcher_v3_string_proto protoreflect.FileDescriptor
+
+var file_xds_type_matcher_v3_string_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x13, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1b, 0x78, 0x64, 0x73, 0x2f, 0x63, 0x6f, 0x72,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xd6,
+ 0x02, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x12, 0x16, 0x0a, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48,
+ 0x00, 0x52, 0x05, 0x65, 0x78, 0x61, 0x63, 0x74, 0x12, 0x21, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66,
+ 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10,
+ 0x01, 0x48, 0x00, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x06, 0x73,
+ 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04,
+ 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x4c,
+ 0x0a, 0x0a, 0x73, 0x61, 0x66, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x67, 0x65, 0x78, 0x4d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x48,
+ 0x00, 0x52, 0x09, 0x73, 0x61, 0x66, 0x65, 0x52, 0x65, 0x67, 0x65, 0x78, 0x12, 0x25, 0x0a, 0x08,
+ 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07,
+ 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x61,
+ 0x69, 0x6e, 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x48, 0x00, 0x52, 0x06, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x12, 0x1f, 0x0a, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x73, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x73,
+ 0x65, 0x42, 0x14, 0x0a, 0x0d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65,
+ 0x72, 0x6e, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0x5d, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53,
+ 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x08,
+ 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22,
+ 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x70, 0x61,
+ 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x42, 0x5b, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69,
+ 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61,
+ 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f,
+ 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72,
+ 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_matcher_v3_string_proto_rawDescOnce sync.Once
+ file_xds_type_matcher_v3_string_proto_rawDescData = file_xds_type_matcher_v3_string_proto_rawDesc
+)
+
+func file_xds_type_matcher_v3_string_proto_rawDescGZIP() []byte {
+ file_xds_type_matcher_v3_string_proto_rawDescOnce.Do(func() {
+ file_xds_type_matcher_v3_string_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_matcher_v3_string_proto_rawDescData)
+ })
+ return file_xds_type_matcher_v3_string_proto_rawDescData
+}
+
+var file_xds_type_matcher_v3_string_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_matcher_v3_string_proto_goTypes = []interface{}{
+ (*StringMatcher)(nil), // 0: xds.type.matcher.v3.StringMatcher
+ (*ListStringMatcher)(nil), // 1: xds.type.matcher.v3.ListStringMatcher
+ (*RegexMatcher)(nil), // 2: xds.type.matcher.v3.RegexMatcher
+ (*v3.TypedExtensionConfig)(nil), // 3: xds.core.v3.TypedExtensionConfig
+}
+var file_xds_type_matcher_v3_string_proto_depIdxs = []int32{
+ 2, // 0: xds.type.matcher.v3.StringMatcher.safe_regex:type_name -> xds.type.matcher.v3.RegexMatcher
+ 3, // 1: xds.type.matcher.v3.StringMatcher.custom:type_name -> xds.core.v3.TypedExtensionConfig
+ 0, // 2: xds.type.matcher.v3.ListStringMatcher.patterns:type_name -> xds.type.matcher.v3.StringMatcher
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_matcher_v3_string_proto_init() }
+func file_xds_type_matcher_v3_string_proto_init() {
+ if File_xds_type_matcher_v3_string_proto != nil {
+ return
+ }
+ file_xds_type_matcher_v3_regex_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_matcher_v3_string_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StringMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_matcher_v3_string_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListStringMatcher); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_type_matcher_v3_string_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*StringMatcher_Exact)(nil),
+ (*StringMatcher_Prefix)(nil),
+ (*StringMatcher_Suffix)(nil),
+ (*StringMatcher_SafeRegex)(nil),
+ (*StringMatcher_Contains)(nil),
+ (*StringMatcher_Custom)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_matcher_v3_string_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_matcher_v3_string_proto_goTypes,
+ DependencyIndexes: file_xds_type_matcher_v3_string_proto_depIdxs,
+ MessageInfos: file_xds_type_matcher_v3_string_proto_msgTypes,
+ }.Build()
+ File_xds_type_matcher_v3_string_proto = out.File
+ file_xds_type_matcher_v3_string_proto_rawDesc = nil
+ file_xds_type_matcher_v3_string_proto_goTypes = nil
+ file_xds_type_matcher_v3_string_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go
new file mode 100644
index 000000000..339d3b631
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/matcher/v3/string.pb.validate.go
@@ -0,0 +1,481 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/matcher/v3/string.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on StringMatcher with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *StringMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StringMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in StringMatcherMultiError, or
+// nil if none found.
+func (m *StringMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StringMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for IgnoreCase
+
+ oneofMatchPatternPresent := false
+ switch v := m.MatchPattern.(type) {
+ case *StringMatcher_Exact:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+ // no validation rules for Exact
+ case *StringMatcher_Prefix:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if utf8.RuneCountInString(m.GetPrefix()) < 1 {
+ err := StringMatcherValidationError{
+ field: "Prefix",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *StringMatcher_Suffix:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if utf8.RuneCountInString(m.GetSuffix()) < 1 {
+ err := StringMatcherValidationError{
+ field: "Suffix",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *StringMatcher_SafeRegex:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if m.GetSafeRegex() == nil {
+ err := StringMatcherValidationError{
+ field: "SafeRegex",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetSafeRegex()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, StringMatcherValidationError{
+ field: "SafeRegex",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, StringMatcherValidationError{
+ field: "SafeRegex",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSafeRegex()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return StringMatcherValidationError{
+ field: "SafeRegex",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *StringMatcher_Contains:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if utf8.RuneCountInString(m.GetContains()) < 1 {
+ err := StringMatcherValidationError{
+ field: "Contains",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ case *StringMatcher_Custom:
+ if v == nil {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofMatchPatternPresent = true
+
+ if all {
+ switch v := interface{}(m.GetCustom()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, StringMatcherValidationError{
+ field: "Custom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, StringMatcherValidationError{
+ field: "Custom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCustom()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return StringMatcherValidationError{
+ field: "Custom",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofMatchPatternPresent {
+ err := StringMatcherValidationError{
+ field: "MatchPattern",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return StringMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// StringMatcherMultiError is an error wrapping multiple validation errors
+// returned by StringMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type StringMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StringMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StringMatcherMultiError) AllErrors() []error { return m }
+
+// StringMatcherValidationError is the validation error returned by
+// StringMatcher.Validate if the designated constraints aren't met.
+type StringMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StringMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StringMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StringMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StringMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StringMatcherValidationError) ErrorName() string { return "StringMatcherValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StringMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStringMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StringMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StringMatcherValidationError{}
+
+// Validate checks the field values on ListStringMatcher with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ListStringMatcher) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListStringMatcher with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ListStringMatcherMultiError, or nil if none found.
+func (m *ListStringMatcher) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListStringMatcher) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetPatterns()) < 1 {
+ err := ListStringMatcherValidationError{
+ field: "Patterns",
+ reason: "value must contain at least 1 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetPatterns() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListStringMatcherValidationError{
+ field: fmt.Sprintf("Patterns[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListStringMatcherValidationError{
+ field: fmt.Sprintf("Patterns[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListStringMatcherValidationError{
+ field: fmt.Sprintf("Patterns[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListStringMatcherMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListStringMatcherMultiError is an error wrapping multiple validation errors
+// returned by ListStringMatcher.ValidateAll() if the designated constraints
+// aren't met.
+type ListStringMatcherMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListStringMatcherMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListStringMatcherMultiError) AllErrors() []error { return m }
+
+// ListStringMatcherValidationError is the validation error returned by
+// ListStringMatcher.Validate if the designated constraints aren't met.
+type ListStringMatcherValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListStringMatcherValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListStringMatcherValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListStringMatcherValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListStringMatcherValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListStringMatcherValidationError) ErrorName() string {
+ return "ListStringMatcherValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListStringMatcherValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListStringMatcher.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListStringMatcherValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListStringMatcherValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go
new file mode 100644
index 000000000..98c13d9b2
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.go
@@ -0,0 +1,340 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/v3/cel.proto
+
+package v3
+
+import (
+ expr "cel.dev/expr"
+ _ "github.com/cncf/xds/go/xds/annotations/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ v1alpha1 "google.golang.org/genproto/googleapis/api/expr/v1alpha1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type CelExpression struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to ExprSpecifier:
+ //
+ // *CelExpression_ParsedExpr
+ // *CelExpression_CheckedExpr
+ ExprSpecifier isCelExpression_ExprSpecifier `protobuf_oneof:"expr_specifier"`
+ CelExprParsed *expr.ParsedExpr `protobuf:"bytes,3,opt,name=cel_expr_parsed,json=celExprParsed,proto3" json:"cel_expr_parsed,omitempty"`
+ CelExprChecked *expr.CheckedExpr `protobuf:"bytes,4,opt,name=cel_expr_checked,json=celExprChecked,proto3" json:"cel_expr_checked,omitempty"`
+ CelExprString string `protobuf:"bytes,5,opt,name=cel_expr_string,json=celExprString,proto3" json:"cel_expr_string,omitempty"`
+}
+
+func (x *CelExpression) Reset() {
+ *x = CelExpression{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_cel_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CelExpression) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CelExpression) ProtoMessage() {}
+
+func (x *CelExpression) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_cel_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CelExpression.ProtoReflect.Descriptor instead.
+func (*CelExpression) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_cel_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *CelExpression) GetExprSpecifier() isCelExpression_ExprSpecifier {
+ if m != nil {
+ return m.ExprSpecifier
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
+func (x *CelExpression) GetParsedExpr() *v1alpha1.ParsedExpr {
+ if x, ok := x.GetExprSpecifier().(*CelExpression_ParsedExpr); ok {
+ return x.ParsedExpr
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
+func (x *CelExpression) GetCheckedExpr() *v1alpha1.CheckedExpr {
+ if x, ok := x.GetExprSpecifier().(*CelExpression_CheckedExpr); ok {
+ return x.CheckedExpr
+ }
+ return nil
+}
+
+func (x *CelExpression) GetCelExprParsed() *expr.ParsedExpr {
+ if x != nil {
+ return x.CelExprParsed
+ }
+ return nil
+}
+
+func (x *CelExpression) GetCelExprChecked() *expr.CheckedExpr {
+ if x != nil {
+ return x.CelExprChecked
+ }
+ return nil
+}
+
+func (x *CelExpression) GetCelExprString() string {
+ if x != nil {
+ return x.CelExprString
+ }
+ return ""
+}
+
+type isCelExpression_ExprSpecifier interface {
+ isCelExpression_ExprSpecifier()
+}
+
+type CelExpression_ParsedExpr struct {
+ // Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
+ ParsedExpr *v1alpha1.ParsedExpr `protobuf:"bytes,1,opt,name=parsed_expr,json=parsedExpr,proto3,oneof"`
+}
+
+type CelExpression_CheckedExpr struct {
+ // Deprecated: Marked as deprecated in xds/type/v3/cel.proto.
+ CheckedExpr *v1alpha1.CheckedExpr `protobuf:"bytes,2,opt,name=checked_expr,json=checkedExpr,proto3,oneof"`
+}
+
+func (*CelExpression_ParsedExpr) isCelExpression_ExprSpecifier() {}
+
+func (*CelExpression_CheckedExpr) isCelExpression_ExprSpecifier() {}
+
+type CelExtractString struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ ExprExtract *CelExpression `protobuf:"bytes,1,opt,name=expr_extract,json=exprExtract,proto3" json:"expr_extract,omitempty"`
+ DefaultValue *wrapperspb.StringValue `protobuf:"bytes,2,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"`
+}
+
+func (x *CelExtractString) Reset() {
+ *x = CelExtractString{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_cel_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CelExtractString) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CelExtractString) ProtoMessage() {}
+
+func (x *CelExtractString) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_cel_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CelExtractString.ProtoReflect.Descriptor instead.
+func (*CelExtractString) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_cel_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CelExtractString) GetExprExtract() *CelExpression {
+ if x != nil {
+ return x.ExprExtract
+ }
+ return nil
+}
+
+func (x *CelExtractString) GetDefaultValue() *wrapperspb.StringValue {
+ if x != nil {
+ return x.DefaultValue
+ }
+ return nil
+}
+
+var File_xds_type_v3_cel_proto protoreflect.FileDescriptor
+
+var file_xds_type_v3_cel_proto_rawDesc = []byte{
+ 0x0a, 0x15, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x65,
+ 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x26, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
+ 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x25, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x31,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x15, 0x63, 0x65, 0x6c,
+ 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1f, 0x78, 0x64, 0x73, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x02, 0x0a,
+ 0x0d, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x4b,
+ 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50,
+ 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52,
+ 0x0a, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x4e, 0x0a, 0x0c, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65,
+ 0x78, 0x70, 0x72, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x65,
+ 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x42, 0x02, 0x18, 0x01, 0x48, 0x00, 0x52, 0x0b,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x12, 0x3c, 0x0a, 0x0f, 0x63,
+ 0x65, 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x70, 0x61, 0x72, 0x73, 0x65, 0x64, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
+ 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0d, 0x63, 0x65, 0x6c, 0x45,
+ 0x78, 0x70, 0x72, 0x50, 0x61, 0x72, 0x73, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x63, 0x65, 0x6c,
+ 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x43,
+ 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x45, 0x78, 0x70, 0x72, 0x52, 0x0e, 0x63, 0x65, 0x6c, 0x45,
+ 0x78, 0x70, 0x72, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0f, 0x63, 0x65,
+ 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x65, 0x6c, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x42, 0x10, 0x0a, 0x0e, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69,
+ 0x66, 0x69, 0x65, 0x72, 0x22, 0x9e, 0x01, 0x0a, 0x10, 0x43, 0x65, 0x6c, 0x45, 0x78, 0x74, 0x72,
+ 0x61, 0x63, 0x74, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x0c, 0x65, 0x78, 0x70,
+ 0x72, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65,
+ 0x6c, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42, 0x05,
+ 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x72, 0x45, 0x78, 0x74, 0x72, 0x61,
+ 0x63, 0x74, 0x12, 0x41, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0xd2, 0xc6, 0xa4, 0xe1, 0x06, 0x02, 0x08, 0x01, 0x0a,
+ 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x43, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x63, 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f,
+ 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_v3_cel_proto_rawDescOnce sync.Once
+ file_xds_type_v3_cel_proto_rawDescData = file_xds_type_v3_cel_proto_rawDesc
+)
+
+func file_xds_type_v3_cel_proto_rawDescGZIP() []byte {
+ file_xds_type_v3_cel_proto_rawDescOnce.Do(func() {
+ file_xds_type_v3_cel_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_cel_proto_rawDescData)
+ })
+ return file_xds_type_v3_cel_proto_rawDescData
+}
+
+var file_xds_type_v3_cel_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_xds_type_v3_cel_proto_goTypes = []interface{}{
+ (*CelExpression)(nil), // 0: xds.type.v3.CelExpression
+ (*CelExtractString)(nil), // 1: xds.type.v3.CelExtractString
+ (*v1alpha1.ParsedExpr)(nil), // 2: google.api.expr.v1alpha1.ParsedExpr
+ (*v1alpha1.CheckedExpr)(nil), // 3: google.api.expr.v1alpha1.CheckedExpr
+ (*expr.ParsedExpr)(nil), // 4: cel.expr.ParsedExpr
+ (*expr.CheckedExpr)(nil), // 5: cel.expr.CheckedExpr
+ (*wrapperspb.StringValue)(nil), // 6: google.protobuf.StringValue
+}
+var file_xds_type_v3_cel_proto_depIdxs = []int32{
+ 2, // 0: xds.type.v3.CelExpression.parsed_expr:type_name -> google.api.expr.v1alpha1.ParsedExpr
+ 3, // 1: xds.type.v3.CelExpression.checked_expr:type_name -> google.api.expr.v1alpha1.CheckedExpr
+ 4, // 2: xds.type.v3.CelExpression.cel_expr_parsed:type_name -> cel.expr.ParsedExpr
+ 5, // 3: xds.type.v3.CelExpression.cel_expr_checked:type_name -> cel.expr.CheckedExpr
+ 0, // 4: xds.type.v3.CelExtractString.expr_extract:type_name -> xds.type.v3.CelExpression
+ 6, // 5: xds.type.v3.CelExtractString.default_value:type_name -> google.protobuf.StringValue
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_v3_cel_proto_init() }
+func file_xds_type_v3_cel_proto_init() {
+ if File_xds_type_v3_cel_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_v3_cel_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CelExpression); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_v3_cel_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CelExtractString); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_xds_type_v3_cel_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*CelExpression_ParsedExpr)(nil),
+ (*CelExpression_CheckedExpr)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_v3_cel_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_v3_cel_proto_goTypes,
+ DependencyIndexes: file_xds_type_v3_cel_proto_depIdxs,
+ MessageInfos: file_xds_type_v3_cel_proto_msgTypes,
+ }.Build()
+ File_xds_type_v3_cel_proto = out.File
+ file_xds_type_v3_cel_proto_rawDesc = nil
+ file_xds_type_v3_cel_proto_goTypes = nil
+ file_xds_type_v3_cel_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go
new file mode 100644
index 000000000..2643709be
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/cel.pb.validate.go
@@ -0,0 +1,452 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/v3/cel.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on CelExpression with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *CelExpression) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CelExpression with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CelExpressionMultiError, or
+// nil if none found.
+func (m *CelExpression) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CelExpression) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetCelExprParsed()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CelExprParsed",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CelExprParsed",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCelExprParsed()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExpressionValidationError{
+ field: "CelExprParsed",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetCelExprChecked()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CelExprChecked",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CelExprChecked",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCelExprChecked()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExpressionValidationError{
+ field: "CelExprChecked",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for CelExprString
+
+ switch v := m.ExprSpecifier.(type) {
+ case *CelExpression_ParsedExpr:
+ if v == nil {
+ err := CelExpressionValidationError{
+ field: "ExprSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetParsedExpr()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "ParsedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "ParsedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetParsedExpr()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExpressionValidationError{
+ field: "ParsedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *CelExpression_CheckedExpr:
+ if v == nil {
+ err := CelExpressionValidationError{
+ field: "ExprSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetCheckedExpr()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CheckedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExpressionValidationError{
+ field: "CheckedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCheckedExpr()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExpressionValidationError{
+ field: "CheckedExpr",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return CelExpressionMultiError(errors)
+ }
+
+ return nil
+}
+
+// CelExpressionMultiError is an error wrapping multiple validation errors
+// returned by CelExpression.ValidateAll() if the designated constraints
+// aren't met.
+type CelExpressionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CelExpressionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CelExpressionMultiError) AllErrors() []error { return m }
+
+// CelExpressionValidationError is the validation error returned by
+// CelExpression.Validate if the designated constraints aren't met.
+type CelExpressionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CelExpressionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CelExpressionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CelExpressionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CelExpressionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CelExpressionValidationError) ErrorName() string { return "CelExpressionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CelExpressionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCelExpression.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CelExpressionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CelExpressionValidationError{}
+
+// Validate checks the field values on CelExtractString with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *CelExtractString) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CelExtractString with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CelExtractStringMultiError, or nil if none found.
+func (m *CelExtractString) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CelExtractString) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetExprExtract() == nil {
+ err := CelExtractStringValidationError{
+ field: "ExprExtract",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetExprExtract()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExtractStringValidationError{
+ field: "ExprExtract",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExtractStringValidationError{
+ field: "ExprExtract",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExprExtract()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExtractStringValidationError{
+ field: "ExprExtract",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetDefaultValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CelExtractStringValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CelExtractStringValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDefaultValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CelExtractStringValidationError{
+ field: "DefaultValue",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CelExtractStringMultiError(errors)
+ }
+
+ return nil
+}
+
+// CelExtractStringMultiError is an error wrapping multiple validation errors
+// returned by CelExtractString.ValidateAll() if the designated constraints
+// aren't met.
+type CelExtractStringMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CelExtractStringMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CelExtractStringMultiError) AllErrors() []error { return m }
+
+// CelExtractStringValidationError is the validation error returned by
+// CelExtractString.Validate if the designated constraints aren't met.
+type CelExtractStringValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CelExtractStringValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CelExtractStringValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CelExtractStringValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CelExtractStringValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CelExtractStringValidationError) ErrorName() string { return "CelExtractStringValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CelExtractStringValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCelExtractString.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CelExtractStringValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CelExtractStringValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go
new file mode 100644
index 000000000..c6f8bb9ba
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.go
@@ -0,0 +1,298 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/v3/range.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type Int64Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
+ End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *Int64Range) Reset() {
+ *x = Int64Range{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_range_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int64Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int64Range) ProtoMessage() {}
+
+func (x *Int64Range) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_range_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int64Range.ProtoReflect.Descriptor instead.
+func (*Int64Range) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_range_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Int64Range) GetStart() int64 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *Int64Range) GetEnd() int64 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+type Int32Range struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start int32 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`
+ End int32 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *Int32Range) Reset() {
+ *x = Int32Range{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_range_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Int32Range) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Int32Range) ProtoMessage() {}
+
+func (x *Int32Range) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_range_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Int32Range.ProtoReflect.Descriptor instead.
+func (*Int32Range) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_range_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Int32Range) GetStart() int32 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *Int32Range) GetEnd() int32 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+type DoubleRange struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Start float64 `protobuf:"fixed64,1,opt,name=start,proto3" json:"start,omitempty"`
+ End float64 `protobuf:"fixed64,2,opt,name=end,proto3" json:"end,omitempty"`
+}
+
+func (x *DoubleRange) Reset() {
+ *x = DoubleRange{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_range_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DoubleRange) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DoubleRange) ProtoMessage() {}
+
+func (x *DoubleRange) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_range_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DoubleRange.ProtoReflect.Descriptor instead.
+func (*DoubleRange) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_range_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DoubleRange) GetStart() float64 {
+ if x != nil {
+ return x.Start
+ }
+ return 0
+}
+
+func (x *DoubleRange) GetEnd() float64 {
+ if x != nil {
+ return x.End
+ }
+ return 0
+}
+
+var File_xds_type_v3_range_proto protoreflect.FileDescriptor
+
+var file_xds_type_v3_range_proto_rawDesc = []byte{
+ 0x0a, 0x17, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x61,
+ 0x6e, 0x67, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x22, 0x34, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x52,
+ 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
+ 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x34, 0x0a, 0x0a,
+ 0x49, 0x6e, 0x74, 0x33, 0x32, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65,
+ 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x52, 0x61, 0x6e, 0x67,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
+ 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x01, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x42, 0x4a, 0x0a, 0x16, 0x63, 0x6f, 0x6d,
+ 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65,
+ 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6e,
+ 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_v3_range_proto_rawDescOnce sync.Once
+ file_xds_type_v3_range_proto_rawDescData = file_xds_type_v3_range_proto_rawDesc
+)
+
+func file_xds_type_v3_range_proto_rawDescGZIP() []byte {
+ file_xds_type_v3_range_proto_rawDescOnce.Do(func() {
+ file_xds_type_v3_range_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_range_proto_rawDescData)
+ })
+ return file_xds_type_v3_range_proto_rawDescData
+}
+
+var file_xds_type_v3_range_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_xds_type_v3_range_proto_goTypes = []interface{}{
+ (*Int64Range)(nil), // 0: xds.type.v3.Int64Range
+ (*Int32Range)(nil), // 1: xds.type.v3.Int32Range
+ (*DoubleRange)(nil), // 2: xds.type.v3.DoubleRange
+}
+var file_xds_type_v3_range_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_v3_range_proto_init() }
+func file_xds_type_v3_range_proto_init() {
+ if File_xds_type_v3_range_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_v3_range_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int64Range); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_v3_range_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Int32Range); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_xds_type_v3_range_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DoubleRange); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_v3_range_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_v3_range_proto_goTypes,
+ DependencyIndexes: file_xds_type_v3_range_proto_depIdxs,
+ MessageInfos: file_xds_type_v3_range_proto_msgTypes,
+ }.Build()
+ File_xds_type_v3_range_proto = out.File
+ file_xds_type_v3_range_proto_rawDesc = nil
+ file_xds_type_v3_range_proto_goTypes = nil
+ file_xds_type_v3_range_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go
new file mode 100644
index 000000000..ccaf418e5
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/range.pb.validate.go
@@ -0,0 +1,345 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/v3/range.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Int64Range with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Int64Range) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int64Range with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in Int64RangeMultiError, or
+// nil if none found.
+func (m *Int64Range) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int64Range) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ if len(errors) > 0 {
+ return Int64RangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int64RangeMultiError is an error wrapping multiple validation errors
+// returned by Int64Range.ValidateAll() if the designated constraints aren't met.
+type Int64RangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int64RangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int64RangeMultiError) AllErrors() []error { return m }
+
+// Int64RangeValidationError is the validation error returned by
+// Int64Range.Validate if the designated constraints aren't met.
+type Int64RangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int64RangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int64RangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int64RangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int64RangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int64RangeValidationError) ErrorName() string { return "Int64RangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e Int64RangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt64Range.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int64RangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int64RangeValidationError{}
+
+// Validate checks the field values on Int32Range with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Int32Range) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Int32Range with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in Int32RangeMultiError, or
+// nil if none found.
+func (m *Int32Range) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Int32Range) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ if len(errors) > 0 {
+ return Int32RangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// Int32RangeMultiError is an error wrapping multiple validation errors
+// returned by Int32Range.ValidateAll() if the designated constraints aren't met.
+type Int32RangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Int32RangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Int32RangeMultiError) AllErrors() []error { return m }
+
+// Int32RangeValidationError is the validation error returned by
+// Int32Range.Validate if the designated constraints aren't met.
+type Int32RangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Int32RangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Int32RangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Int32RangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Int32RangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Int32RangeValidationError) ErrorName() string { return "Int32RangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e Int32RangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sInt32Range.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Int32RangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Int32RangeValidationError{}
+
+// Validate checks the field values on DoubleRange with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *DoubleRange) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DoubleRange with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in DoubleRangeMultiError, or
+// nil if none found.
+func (m *DoubleRange) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DoubleRange) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Start
+
+ // no validation rules for End
+
+ if len(errors) > 0 {
+ return DoubleRangeMultiError(errors)
+ }
+
+ return nil
+}
+
+// DoubleRangeMultiError is an error wrapping multiple validation errors
+// returned by DoubleRange.ValidateAll() if the designated constraints aren't met.
+type DoubleRangeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DoubleRangeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DoubleRangeMultiError) AllErrors() []error { return m }
+
+// DoubleRangeValidationError is the validation error returned by
+// DoubleRange.Validate if the designated constraints aren't met.
+type DoubleRangeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DoubleRangeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DoubleRangeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DoubleRangeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DoubleRangeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DoubleRangeValidationError) ErrorName() string { return "DoubleRangeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DoubleRangeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDoubleRange.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DoubleRangeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DoubleRangeValidationError{}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go
new file mode 100644
index 000000000..ba42cb0e8
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.go
@@ -0,0 +1,163 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.33.0
+// protoc v5.29.1
+// source: xds/type/v3/typed_struct.proto
+
+package v3
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type TypedStruct struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+ Value *structpb.Struct `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *TypedStruct) Reset() {
+ *x = TypedStruct{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_xds_type_v3_typed_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypedStruct) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypedStruct) ProtoMessage() {}
+
+func (x *TypedStruct) ProtoReflect() protoreflect.Message {
+ mi := &file_xds_type_v3_typed_struct_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypedStruct.ProtoReflect.Descriptor instead.
+func (*TypedStruct) Descriptor() ([]byte, []int) {
+ return file_xds_type_v3_typed_struct_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TypedStruct) GetTypeUrl() string {
+ if x != nil {
+ return x.TypeUrl
+ }
+ return ""
+}
+
+func (x *TypedStruct) GetValue() *structpb.Struct {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+var File_xds_type_v3_typed_struct_proto protoreflect.FileDescriptor
+
+var file_xds_type_v3_typed_struct_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x78, 0x64, 0x73, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x74, 0x79,
+ 0x70, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0b, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x1a, 0x1c, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73,
+ 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x57, 0x0a, 0x0b, 0x54,
+ 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79,
+ 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79,
+ 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x42, 0x50, 0x0a, 0x16, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x78, 0x64, 0x73, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x42, 0x10,
+ 0x54, 0x79, 0x70, 0x65, 0x64, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x22, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63,
+ 0x6e, 0x63, 0x66, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x78, 0x64, 0x73, 0x2f, 0x74,
+ 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_xds_type_v3_typed_struct_proto_rawDescOnce sync.Once
+ file_xds_type_v3_typed_struct_proto_rawDescData = file_xds_type_v3_typed_struct_proto_rawDesc
+)
+
+func file_xds_type_v3_typed_struct_proto_rawDescGZIP() []byte {
+ file_xds_type_v3_typed_struct_proto_rawDescOnce.Do(func() {
+ file_xds_type_v3_typed_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_xds_type_v3_typed_struct_proto_rawDescData)
+ })
+ return file_xds_type_v3_typed_struct_proto_rawDescData
+}
+
+var file_xds_type_v3_typed_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_xds_type_v3_typed_struct_proto_goTypes = []interface{}{
+ (*TypedStruct)(nil), // 0: xds.type.v3.TypedStruct
+ (*structpb.Struct)(nil), // 1: google.protobuf.Struct
+}
+var file_xds_type_v3_typed_struct_proto_depIdxs = []int32{
+ 1, // 0: xds.type.v3.TypedStruct.value:type_name -> google.protobuf.Struct
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_xds_type_v3_typed_struct_proto_init() }
+func file_xds_type_v3_typed_struct_proto_init() {
+ if File_xds_type_v3_typed_struct_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_xds_type_v3_typed_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypedStruct); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_xds_type_v3_typed_struct_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_xds_type_v3_typed_struct_proto_goTypes,
+ DependencyIndexes: file_xds_type_v3_typed_struct_proto_depIdxs,
+ MessageInfos: file_xds_type_v3_typed_struct_proto_msgTypes,
+ }.Build()
+ File_xds_type_v3_typed_struct_proto = out.File
+ file_xds_type_v3_typed_struct_proto_rawDesc = nil
+ file_xds_type_v3_typed_struct_proto_goTypes = nil
+ file_xds_type_v3_typed_struct_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go
new file mode 100644
index 000000000..f39bce906
--- /dev/null
+++ b/vendor/github.com/cncf/xds/go/xds/type/v3/typed_struct.pb.validate.go
@@ -0,0 +1,166 @@
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: xds/type/v3/typed_struct.proto
+
+package v3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TypedStruct with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *TypedStruct) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TypedStruct with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in TypedStructMultiError, or
+// nil if none found.
+func (m *TypedStruct) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TypedStruct) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for TypeUrl
+
+ if all {
+ switch v := interface{}(m.GetValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TypedStructValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return TypedStructMultiError(errors)
+ }
+
+ return nil
+}
+
+// TypedStructMultiError is an error wrapping multiple validation errors
+// returned by TypedStruct.ValidateAll() if the designated constraints aren't met.
+type TypedStructMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TypedStructMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TypedStructMultiError) AllErrors() []error { return m }
+
+// TypedStructValidationError is the validation error returned by
+// TypedStruct.Validate if the designated constraints aren't met.
+type TypedStructValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TypedStructValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TypedStructValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TypedStructValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TypedStructValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TypedStructValidationError) ErrorName() string { return "TypedStructValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TypedStructValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTypedStruct.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TypedStructValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TypedStructValidationError{}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go
index 9227415db..433cd8c27 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go
@@ -8,22 +8,22 @@ import (
)
// BenchmarkLabels is a real example from Kubernetes' embedded cAdvisor metrics, lightly obfuscated
-var BenchmarkLabels = labels.Labels{
- {Name: model.MetricNameLabel, Value: "container_cpu_usage_seconds_total"},
- {Name: "beta_kubernetes_io_arch", Value: "amd64"},
- {Name: "beta_kubernetes_io_instance_type", Value: "c3.somesize"},
- {Name: "beta_kubernetes_io_os", Value: "linux"},
- {Name: "container_name", Value: "some-name"},
- {Name: "cpu", Value: "cpu01"},
- {Name: "failure_domain_beta_kubernetes_io_region", Value: "somewhere-1"},
- {Name: "failure_domain_beta_kubernetes_io_zone", Value: "somewhere-1b"},
- {Name: "id", Value: "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28"},
- {Name: "image", Value: "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506"},
- {Name: "instance", Value: "ip-111-11-1-11.ec2.internal"},
- {Name: "job", Value: "kubernetes-cadvisor"},
- {Name: "kubernetes_io_hostname", Value: "ip-111-11-1-11"},
- {Name: "monitor", Value: "prod"},
- {Name: "name", Value: "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0"},
- {Name: "namespace", Value: "kube-system"},
- {Name: "pod_name", Value: "some-other-name-5j8s8"},
-}
+var BenchmarkLabels = labels.FromStrings(
+ model.MetricNameLabel, "container_cpu_usage_seconds_total",
+ "beta_kubernetes_io_arch", "amd64",
+ "beta_kubernetes_io_instance_type", "c3.somesize",
+ "beta_kubernetes_io_os", "linux",
+ "container_name", "some-name",
+ "cpu", "cpu01",
+ "failure_domain_beta_kubernetes_io_region", "somewhere-1",
+ "failure_domain_beta_kubernetes_io_zone", "somewhere-1b",
+ "id", "/kubepods/burstable/pod6e91c467-e4c5-11e7-ace3-0a97ed59c75e/a3c8498918bd6866349fed5a6f8c643b77c91836427fb6327913276ebc6bde28",
+ "image", "registry/organisation/name@sha256:dca3d877a80008b45d71d7edc4fd2e44c0c8c8e7102ba5cbabec63a374d1d506",
+ "instance", "ip-111-11-1-11.ec2.internal",
+ "job", "kubernetes-cadvisor",
+ "kubernetes_io_hostname", "ip-111-11-1-11",
+ "monitor", "prod",
+ "name", "k8s_some-name_some-other-name-5j8s8_kube-system_6e91c467-e4c5-11e7-ace3-0a97ed59c75e_0",
+ "namespace", "kube-system",
+ "pod_name", "some-other-name-5j8s8",
+)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/json_helpers.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/json_helpers.go
index 831fc5fa3..217111493 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/chunk/json_helpers.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/json_helpers.go
@@ -1,7 +1,6 @@
package chunk
import (
- "sort"
"unsafe"
jsoniter "github.com/json-iterator/go"
@@ -10,44 +9,49 @@ import (
)
func init() {
- jsoniter.RegisterTypeDecoderFunc("labels.Labels", decodeLabels)
- jsoniter.RegisterTypeEncoderFunc("labels.Labels", encodeLabels, labelsIsEmpty)
+ jsoniter.RegisterTypeDecoderFunc("labels.Labels", DecodeLabels)
+ jsoniter.RegisterTypeEncoderFunc("labels.Labels", EncodeLabels, labelsIsEmpty)
jsoniter.RegisterTypeDecoderFunc("model.Time", decodeModelTime)
jsoniter.RegisterTypeEncoderFunc("model.Time", encodeModelTime, modelTimeIsEmpty)
}
// Override Prometheus' labels.Labels decoder which goes via a map
-func decodeLabels(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
+func DecodeLabels(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
labelsPtr := (*labels.Labels)(ptr)
- *labelsPtr = make(labels.Labels, 0, 10)
+ b := labels.NewBuilder(labels.EmptyLabels())
+
iter.ReadMapCB(func(iter *jsoniter.Iterator, key string) bool {
value := iter.ReadString()
- *labelsPtr = append(*labelsPtr, labels.Label{Name: key, Value: value})
+ b.Set(key, value)
return true
})
- // Labels are always sorted, but earlier Cortex using a map would
- // output in any order so we have to sort on read in
- sort.Sort(*labelsPtr)
+ *labelsPtr = b.Labels()
}
// Override Prometheus' labels.Labels encoder which goes via a map
-func encodeLabels(ptr unsafe.Pointer, stream *jsoniter.Stream) {
- labelsPtr := (*labels.Labels)(ptr)
+func EncodeLabels(ptr unsafe.Pointer, stream *jsoniter.Stream) {
+ lbls := *(*labels.Labels)(ptr)
+
stream.WriteObjectStart()
- for i, v := range *labelsPtr {
- if i != 0 {
+ first := true
+
+ lbls.Range(func(l labels.Label) {
+ if !first {
stream.WriteMore()
}
- stream.WriteString(v.Name)
+ first = false
+
+ stream.WriteString(l.Name)
stream.WriteRaw(`:`)
- stream.WriteString(v.Value)
- }
+ stream.WriteString(l.Value)
+ })
+
stream.WriteObjectEnd()
}
func labelsIsEmpty(ptr unsafe.Pointer) bool {
- labelsPtr := (*labels.Labels)(ptr)
- return len(*labelsPtr) == 0
+ labelsPtr := *(*labels.Labels)(ptr)
+ return labelsPtr.Len() == 0
}
// Decode via jsoniter's float64 routine is faster than getting the string data and decoding as two integers
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/codec.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/codec.go
new file mode 100644
index 000000000..0bf037ac9
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/codec.go
@@ -0,0 +1,153 @@
+package cortexpb
+
+import (
+ "fmt"
+
+ gogoproto "github.com/gogo/protobuf/proto"
+ "google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/protoadapt"
+)
+
+// Name is the name registered for the proto codec.
+const Name = "proto"
+
+func init() {
+ encoding.RegisterCodecV2(&cortexCodec{
+ noOpBufferPool: mem.NopBufferPool{},
+ defaultBufferPool: mem.DefaultBufferPool(),
+ })
+}
+
+type ReleasableMessage interface {
+ RegisterBuffer(mem.Buffer)
+}
+
+type GogoProtoMessage interface {
+ MarshalToSizedBuffer(dAtA []byte) (int, error)
+}
+
+type cortexCodec struct {
+ noOpBufferPool mem.BufferPool
+ defaultBufferPool mem.BufferPool
+}
+
+func (c cortexCodec) Name() string {
+ return Name
+}
+
+// Marshal is basically the same as https://github.com/grpc/grpc-go/blob/d2e836604b36400a54fbf04af495d12b38fa1e3a/encoding/proto/proto.go#L43-L67
+// but it uses gogo proto methods where applicable.
+func (c *cortexCodec) Marshal(v any) (data mem.BufferSlice, err error) {
+ vv := messageV2Of(v)
+ if vv == nil {
+ return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
+ }
+
+ var size int
+ if sizer, ok := v.(gogoproto.Sizer); ok {
+ size = sizer.Size()
+ } else {
+ size = proto.Size(vv)
+ }
+
+ if mem.IsBelowBufferPoolingThreshold(size) {
+ var buf mem.SliceBuffer
+
+ // If v implements MarshalToSizedBuffer we should use it as it is more optimized
+ if m, ok := v.(GogoProtoMessage); ok {
+ buf = make([]byte, size)
+ if _, err := m.MarshalToSizedBuffer(buf[:size]); err != nil {
+ return nil, err
+ }
+ } else {
+ buf, err = proto.Marshal(vv)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ data = append(data, buf)
+ } else {
+ pool := c.defaultBufferPool
+ buf := pool.Get(size)
+
+ // If v implements MarshalToSizedBuffer we should use it as it is more optimized
+ if m, ok := v.(GogoProtoMessage); ok {
+ if _, err := m.MarshalToSizedBuffer((*buf)[:size]); err != nil {
+ pool.Put(buf)
+ return nil, err
+ }
+ } else {
+ if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
+ pool.Put(buf)
+ return nil, err
+ }
+ }
+
+ data = append(data, mem.NewBuffer(buf, pool))
+ }
+
+ return data, nil
+}
+
+// Unmarshal Copied from https://github.com/grpc/grpc-go/blob/d2e836604b36400a54fbf04af495d12b38fa1e3a/encoding/proto/proto.go#L69-L81
+// but without releasing the buffer
+func (c *cortexCodec) Unmarshal(data mem.BufferSlice, v any) error {
+ vv := messageV2Of(v)
+ if vv == nil {
+ return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
+ }
+
+ // To be safe, we avoid automatically releasing the buffer used to unmarshal the message.
+ // Additionally, we avoid using a pooled byte slice unless the message implements ReleasableMessage.
+ // This mimics the behavior of gRPC versions 1.65.0 and earlier.
+ rm, ok := v.(ReleasableMessage)
+ bufferPool := c.defaultBufferPool
+
+ if !ok {
+ bufferPool = c.noOpBufferPool
+ }
+
+ buf := data.MaterializeToBuffer(bufferPool)
+ err := proto.Unmarshal(buf.ReadOnlyData(), vv)
+
+ if err != nil {
+ defer buf.Free()
+ return err
+ }
+
+ // If v implements ReleasableMessage interface, we add the buff to be freed later when the request is no longer being used
+ if rm != nil {
+ rm.RegisterBuffer(buf)
+ }
+
+ return err
+}
+
+func messageV2Of(v any) proto.Message {
+ switch v := v.(type) {
+ case protoadapt.MessageV1:
+ return protoadapt.MessageV2Of(v)
+ case protoadapt.MessageV2:
+ return v
+ }
+
+ return nil
+}
+
+var _ ReleasableMessage = &MessageWithBufRef{}
+
+type MessageWithBufRef struct {
+ bs mem.BufferSlice
+}
+
+func (m *MessageWithBufRef) RegisterBuffer(buffer mem.Buffer) {
+ m.bs = append(m.bs, buffer)
+}
+
+func (m *MessageWithBufRef) Free() {
+ m.bs.Free()
+ m.bs = m.bs[:0]
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/compat.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/compat.go
index 6de2423d5..9579eb37e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/compat.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/compat.go
@@ -20,7 +20,7 @@ import (
// ToWriteRequest converts matched slices of Labels, Samples, Metadata and Histograms into a WriteRequest proto.
// It gets timeseries from the pool, so ReuseSlice() should be called when done.
-func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, histograms []Histogram, source WriteRequest_SourceEnum) *WriteRequest {
+func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMetadata, histograms []Histogram, source SourceEnum) *WriteRequest {
req := &WriteRequest{
Timeseries: PreallocTimeseriesSliceFromPool(),
Metadata: metadata,
@@ -45,7 +45,7 @@ func ToWriteRequest(lbls []labels.Labels, samples []Sample, metadata []*MetricMe
}
func (w *WriteRequest) AddHistogramTimeSeries(lbls []labels.Labels, histograms []Histogram) {
- for i := 0; i < len(lbls); i++ {
+ for i := range lbls {
ts := TimeseriesFromPool()
ts.Labels = append(ts.Labels, FromLabelsToLabelAdapters(lbls[i])...)
ts.Histograms = append(ts.Histograms, histograms[i])
@@ -67,13 +67,13 @@ func FromLabelAdaptersToLabels(ls []LabelAdapter) labels.Labels {
// Do NOT use unsafe to convert between data types because this function may
// get in input labels whose data structure is reused.
func FromLabelAdaptersToLabelsWithCopy(input []LabelAdapter) labels.Labels {
- return CopyLabels(FromLabelAdaptersToLabels(input))
+ return CopyLabels(input)
}
// Efficiently copies labels input slice. To be used in cases where input slice
// can be reused, but long-term copy is needed.
-func CopyLabels(input []labels.Label) labels.Labels {
- result := make(labels.Labels, len(input))
+func CopyLabels(input []LabelAdapter) labels.Labels {
+ builder := labels.NewBuilder(labels.EmptyLabels())
size := 0
for _, l := range input {
@@ -84,12 +84,14 @@ func CopyLabels(input []labels.Label) labels.Labels {
// Copy all strings into the buffer, and use 'yoloString' to convert buffer
// slices to strings.
buf := make([]byte, size)
+ var name, value string
- for i, l := range input {
- result[i].Name, buf = copyStringToBuffer(l.Name, buf)
- result[i].Value, buf = copyStringToBuffer(l.Value, buf)
+ for _, l := range input {
+ name, buf = copyStringToBuffer(l.Name, buf)
+ value, buf = copyStringToBuffer(l.Value, buf)
+ builder.Set(name, value)
}
- return result
+ return builder.Labels()
}
// Copies string to buffer (which must be big enough), and converts buffer slice containing
@@ -211,7 +213,7 @@ func (s Sample) MarshalJSON() ([]byte, error) {
if err != nil {
return nil, err
}
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+ return fmt.Appendf(nil, "[%s,%s]", t, v), nil
}
// UnmarshalJSON implements json.Unmarshaler.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/compatv2.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/compatv2.go
new file mode 100644
index 000000000..c1cbda900
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/compatv2.go
@@ -0,0 +1,22 @@
+package cortexpb
+
+import "github.com/prometheus/prometheus/model/labels"
+
+func (e *ExemplarV2) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+ return desymbolizeLabels(b, e.GetLabelsRefs(), symbols)
+}
+
+func (t *TimeSeriesV2) ToLabels(b *labels.ScratchBuilder, symbols []string) labels.Labels {
+ return desymbolizeLabels(b, t.GetLabelsRefs(), symbols)
+}
+
+// desymbolizeLabels decodes label references, with given symbols to labels.
+// Copied from the Prometheus: https://github.com/prometheus/prometheus/blob/v3.5.0/prompb/io/prometheus/write/v2/symbols.go#L76
+func desymbolizeLabels(b *labels.ScratchBuilder, labelRefs []uint32, symbols []string) labels.Labels {
+ b.Reset()
+ for i := 0; i < len(labelRefs); i += 2 {
+ b.Add(symbols[labelRefs[i]], symbols[labelRefs[i+1]])
+ }
+ b.Sort()
+ return b.Labels()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go
index 3b63e1590..03028c309 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.pb.go
@@ -28,25 +28,64 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-type WriteRequest_SourceEnum int32
+type SourceEnum int32
const (
- API WriteRequest_SourceEnum = 0
- RULE WriteRequest_SourceEnum = 1
+ API SourceEnum = 0
+ RULE SourceEnum = 1
)
-var WriteRequest_SourceEnum_name = map[int32]string{
+var SourceEnum_name = map[int32]string{
0: "API",
1: "RULE",
}
-var WriteRequest_SourceEnum_value = map[string]int32{
+var SourceEnum_value = map[string]int32{
"API": 0,
"RULE": 1,
}
-func (WriteRequest_SourceEnum) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_893a47d0a749d749, []int{0, 0}
+func (SourceEnum) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{0}
+}
+
+type MetadataV2_MetricType int32
+
+const (
+ METRIC_TYPE_UNSPECIFIED MetadataV2_MetricType = 0
+ METRIC_TYPE_COUNTER MetadataV2_MetricType = 1
+ METRIC_TYPE_GAUGE MetadataV2_MetricType = 2
+ METRIC_TYPE_HISTOGRAM MetadataV2_MetricType = 3
+ METRIC_TYPE_GAUGEHISTOGRAM MetadataV2_MetricType = 4
+ METRIC_TYPE_SUMMARY MetadataV2_MetricType = 5
+ METRIC_TYPE_INFO MetadataV2_MetricType = 6
+ METRIC_TYPE_STATESET MetadataV2_MetricType = 7
+)
+
+var MetadataV2_MetricType_name = map[int32]string{
+ 0: "METRIC_TYPE_UNSPECIFIED",
+ 1: "METRIC_TYPE_COUNTER",
+ 2: "METRIC_TYPE_GAUGE",
+ 3: "METRIC_TYPE_HISTOGRAM",
+ 4: "METRIC_TYPE_GAUGEHISTOGRAM",
+ 5: "METRIC_TYPE_SUMMARY",
+ 6: "METRIC_TYPE_INFO",
+ 7: "METRIC_TYPE_STATESET",
+}
+
+var MetadataV2_MetricType_value = map[string]int32{
+ "METRIC_TYPE_UNSPECIFIED": 0,
+ "METRIC_TYPE_COUNTER": 1,
+ "METRIC_TYPE_GAUGE": 2,
+ "METRIC_TYPE_HISTOGRAM": 3,
+ "METRIC_TYPE_GAUGEHISTOGRAM": 4,
+ "METRIC_TYPE_SUMMARY": 5,
+ "METRIC_TYPE_INFO": 6,
+ "METRIC_TYPE_STATESET": 7,
+}
+
+func (MetadataV2_MetricType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{5, 0}
}
type MetricMetadata_MetricType int32
@@ -85,7 +124,7 @@ var MetricMetadata_MetricType_value = map[string]int32{
}
func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_893a47d0a749d749, []int{5, 0}
+ return fileDescriptor_893a47d0a749d749, []int{11, 0}
}
type Histogram_ResetHint int32
@@ -112,20 +151,53 @@ var Histogram_ResetHint_value = map[string]int32{
}
func (Histogram_ResetHint) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_893a47d0a749d749, []int{8, 0}
+ return fileDescriptor_893a47d0a749d749, []int{14, 0}
+}
+
+func (m *MessageWithBufRef) Reset() { *m = MessageWithBufRef{} }
+func (*MessageWithBufRef) ProtoMessage() {}
+func (*MessageWithBufRef) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{0}
+}
+func (m *MessageWithBufRef) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MessageWithBufRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MessageWithBufRef.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MessageWithBufRef) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MessageWithBufRef.Merge(m, src)
+}
+func (m *MessageWithBufRef) XXX_Size() int {
+ return m.Size()
+}
+func (m *MessageWithBufRef) XXX_DiscardUnknown() {
+ xxx_messageInfo_MessageWithBufRef.DiscardUnknown(m)
}
+var xxx_messageInfo_MessageWithBufRef proto.InternalMessageInfo
+
type WriteRequest struct {
- Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"`
- Source WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,proto3,enum=cortexpb.WriteRequest_SourceEnum" json:"Source,omitempty"`
- Metadata []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"`
- SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"`
+ Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"`
+ Source SourceEnum `protobuf:"varint,2,opt,name=Source,proto3,enum=cortexpb.SourceEnum" json:"Source,omitempty"`
+ Metadata []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"`
+ SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"`
+ MessageWithBufRef `protobuf:"bytes,1001,opt,name=Ref,proto3,embedded=Ref,customtype=MessageWithBufRef" json:"Ref"`
}
func (m *WriteRequest) Reset() { *m = WriteRequest{} }
func (*WriteRequest) ProtoMessage() {}
func (*WriteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_893a47d0a749d749, []int{0}
+ return fileDescriptor_893a47d0a749d749, []int{1}
}
func (m *WriteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
@@ -154,7 +226,7 @@ func (m *WriteRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_WriteRequest proto.InternalMessageInfo
-func (m *WriteRequest) GetSource() WriteRequest_SourceEnum {
+func (m *WriteRequest) GetSource() SourceEnum {
if m != nil {
return m.Source
}
@@ -175,20 +247,27 @@ func (m *WriteRequest) GetSkipLabelNameValidation() bool {
return false
}
-type WriteResponse struct {
+// refer to https://github.com/prometheus/prometheus/blob/v3.5.0/prompb/io/prometheus/write/v2/types.proto
+// The histogram and Sample are shared with PRW1.
+type WriteRequestV2 struct {
+ Symbols []string `protobuf:"bytes,4,rep,name=symbols,proto3" json:"symbols,omitempty"`
+ Timeseries []PreallocTimeseriesV2 `protobuf:"bytes,5,rep,name=timeseries,proto3,customtype=PreallocTimeseriesV2" json:"timeseries"`
+ Source SourceEnum `protobuf:"varint,6,opt,name=Source,proto3,enum=cortexpb.SourceEnum" json:"Source,omitempty"`
+ SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"`
+ MessageWithBufRef `protobuf:"bytes,1001,opt,name=Ref,proto3,embedded=Ref,customtype=MessageWithBufRef" json:"Ref"`
}
-func (m *WriteResponse) Reset() { *m = WriteResponse{} }
-func (*WriteResponse) ProtoMessage() {}
-func (*WriteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_893a47d0a749d749, []int{1}
+func (m *WriteRequestV2) Reset() { *m = WriteRequestV2{} }
+func (*WriteRequestV2) ProtoMessage() {}
+func (*WriteRequestV2) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{2}
}
-func (m *WriteResponse) XXX_Unmarshal(b []byte) error {
+func (m *WriteRequestV2) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *WriteRequestV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic)
+ return xxx_messageInfo_WriteRequestV2.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -198,37 +277,82 @@ func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error
return b[:n], nil
}
}
-func (m *WriteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WriteResponse.Merge(m, src)
+func (m *WriteRequestV2) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WriteRequestV2.Merge(m, src)
}
-func (m *WriteResponse) XXX_Size() int {
+func (m *WriteRequestV2) XXX_Size() int {
return m.Size()
}
-func (m *WriteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_WriteResponse.DiscardUnknown(m)
+func (m *WriteRequestV2) XXX_DiscardUnknown() {
+ xxx_messageInfo_WriteRequestV2.DiscardUnknown(m)
}
-var xxx_messageInfo_WriteResponse proto.InternalMessageInfo
+var xxx_messageInfo_WriteRequestV2 proto.InternalMessageInfo
-type TimeSeries struct {
- Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
- // Sorted by time, oldest sample first.
+func (m *WriteRequestV2) GetSymbols() []string {
+ if m != nil {
+ return m.Symbols
+ }
+ return nil
+}
+
+func (m *WriteRequestV2) GetSource() SourceEnum {
+ if m != nil {
+ return m.Source
+ }
+ return API
+}
+
+func (m *WriteRequestV2) GetSkipLabelNameValidation() bool {
+ if m != nil {
+ return m.SkipLabelNameValidation
+ }
+ return false
+}
+
+type TimeSeriesV2 struct {
+ LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
+ // Timeseries messages can either specify samples or (native) histogram samples
+ // (histogram field), but not both. For a typical sender (real-time metric
+ // streaming), in healthy cases, there will be only one sample or histogram.
+ //
+ // Samples and histograms are sorted by timestamp (older first).
Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
- Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"`
- Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"`
+ Histograms []Histogram `protobuf:"bytes,3,rep,name=histograms,proto3" json:"histograms"`
+ // exemplars represents an optional set of exemplars attached to this series' samples.
+ Exemplars []ExemplarV2 `protobuf:"bytes,4,rep,name=exemplars,proto3" json:"exemplars"`
+ // metadata represents the metadata associated with the given series' samples.
+ Metadata MetadataV2 `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata"`
+ // created_timestamp represents an optional created timestamp associated with
+ // this series' samples in ms format, typically for counter or histogram type
+ // metrics. Created timestamp represents the time when the counter started
+ // counting (sometimes referred to as start timestamp), which can increase
+ // the accuracy of query results.
+ //
+ // Note that some receivers might require this and in return fail to
+ // ingest such samples within the Request.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ //
+ // Note that the "optional" keyword is omitted due to
+ // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+ // Zero value means value not set. If you need to use exactly zero value for
+ // the timestamp, use 1 millisecond before or after.
+ CreatedTimestamp int64 `protobuf:"varint,6,opt,name=created_timestamp,json=createdTimestamp,proto3" json:"created_timestamp,omitempty"`
}
-func (m *TimeSeries) Reset() { *m = TimeSeries{} }
-func (*TimeSeries) ProtoMessage() {}
-func (*TimeSeries) Descriptor() ([]byte, []int) {
- return fileDescriptor_893a47d0a749d749, []int{2}
+func (m *TimeSeriesV2) Reset() { *m = TimeSeriesV2{} }
+func (*TimeSeriesV2) ProtoMessage() {}
+func (*TimeSeriesV2) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{3}
}
-func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
+func (m *TimeSeriesV2) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *TimeSeriesV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
+ return xxx_messageInfo_TimeSeriesV2.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -238,106 +362,90 @@ func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (m *TimeSeries) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TimeSeries.Merge(m, src)
+func (m *TimeSeriesV2) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeriesV2.Merge(m, src)
}
-func (m *TimeSeries) XXX_Size() int {
+func (m *TimeSeriesV2) XXX_Size() int {
return m.Size()
}
-func (m *TimeSeries) XXX_DiscardUnknown() {
- xxx_messageInfo_TimeSeries.DiscardUnknown(m)
+func (m *TimeSeriesV2) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeSeriesV2.DiscardUnknown(m)
}
-var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
+var xxx_messageInfo_TimeSeriesV2 proto.InternalMessageInfo
-func (m *TimeSeries) GetSamples() []Sample {
+func (m *TimeSeriesV2) GetLabelsRefs() []uint32 {
if m != nil {
- return m.Samples
+ return m.LabelsRefs
}
return nil
}
-func (m *TimeSeries) GetExemplars() []Exemplar {
+func (m *TimeSeriesV2) GetSamples() []Sample {
if m != nil {
- return m.Exemplars
+ return m.Samples
}
return nil
}
-func (m *TimeSeries) GetHistograms() []Histogram {
+func (m *TimeSeriesV2) GetHistograms() []Histogram {
if m != nil {
return m.Histograms
}
return nil
}
-type LabelPair struct {
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-}
-
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (*LabelPair) ProtoMessage() {}
-func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_893a47d0a749d749, []int{3}
-}
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
+func (m *TimeSeriesV2) GetExemplars() []ExemplarV2 {
+ if m != nil {
+ return m.Exemplars
}
+ return nil
}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(m, src)
-}
-func (m *LabelPair) XXX_Size() int {
- return m.Size()
-}
-func (m *LabelPair) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelPair.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
-func (m *LabelPair) GetName() []byte {
+func (m *TimeSeriesV2) GetMetadata() MetadataV2 {
if m != nil {
- return m.Name
+ return m.Metadata
}
- return nil
+ return MetadataV2{}
}
-func (m *LabelPair) GetValue() []byte {
+func (m *TimeSeriesV2) GetCreatedTimestamp() int64 {
if m != nil {
- return m.Value
+ return m.CreatedTimestamp
}
- return nil
+ return 0
}
-type Sample struct {
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
- TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
+// Exemplar is an additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+type ExemplarV2 struct {
+ // labels_refs is an optional list of label name-value pair references, encoded
+ // as indices to the Request.symbols array. This list's len is always
+ // a multiple of 2, and the underlying labels should be sorted lexicographically.
+ // If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
+ LabelsRefs []uint32 `protobuf:"varint,1,rep,packed,name=labels_refs,json=labelsRefs,proto3" json:"labels_refs,omitempty"`
+ // value represents an exact example value. This can be useful when the exemplar
+ // is attached to a histogram, which only gives an estimated value through buckets.
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+ // timestamp represents the timestamp of the exemplar in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
}
-func (m *Sample) Reset() { *m = Sample{} }
-func (*Sample) ProtoMessage() {}
-func (*Sample) Descriptor() ([]byte, []int) {
+func (m *ExemplarV2) Reset() { *m = ExemplarV2{} }
+func (*ExemplarV2) ProtoMessage() {}
+func (*ExemplarV2) Descriptor() ([]byte, []int) {
return fileDescriptor_893a47d0a749d749, []int{4}
}
-func (m *Sample) XXX_Unmarshal(b []byte) error {
+func (m *ExemplarV2) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *ExemplarV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
+ return xxx_messageInfo_ExemplarV2.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -347,50 +455,63 @@ func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (m *Sample) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Sample.Merge(m, src)
+func (m *ExemplarV2) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExemplarV2.Merge(m, src)
}
-func (m *Sample) XXX_Size() int {
+func (m *ExemplarV2) XXX_Size() int {
return m.Size()
}
-func (m *Sample) XXX_DiscardUnknown() {
- xxx_messageInfo_Sample.DiscardUnknown(m)
+func (m *ExemplarV2) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExemplarV2.DiscardUnknown(m)
}
-var xxx_messageInfo_Sample proto.InternalMessageInfo
+var xxx_messageInfo_ExemplarV2 proto.InternalMessageInfo
-func (m *Sample) GetValue() float64 {
+func (m *ExemplarV2) GetLabelsRefs() []uint32 {
+ if m != nil {
+ return m.LabelsRefs
+ }
+ return nil
+}
+
+func (m *ExemplarV2) GetValue() float64 {
if m != nil {
return m.Value
}
return 0
}
-func (m *Sample) GetTimestampMs() int64 {
+func (m *ExemplarV2) GetTimestamp() int64 {
if m != nil {
- return m.TimestampMs
+ return m.Timestamp
}
return 0
}
-type MetricMetadata struct {
- Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricMetadata_MetricType" json:"type,omitempty"`
- MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"`
- Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"`
- Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
+// Metadata represents the metadata associated with the given series' samples.
+type MetadataV2 struct {
+ Type MetadataV2_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetadataV2_MetricType" json:"type,omitempty"`
+ // help_ref is a reference to the Request.symbols array representing help
+ // text for the metric. Help is optional, reference should point to an empty string in
+ // such a case.
+ HelpRef uint32 `protobuf:"varint,3,opt,name=help_ref,json=helpRef,proto3" json:"help_ref,omitempty"`
+ // unit_ref is a reference to the Request.symbols array representing a unit
+ // for the metric. Unit is optional, reference should point to an empty string in
+ // such a case.
+ UnitRef uint32 `protobuf:"varint,4,opt,name=unit_ref,json=unitRef,proto3" json:"unit_ref,omitempty"`
}
-func (m *MetricMetadata) Reset() { *m = MetricMetadata{} }
-func (*MetricMetadata) ProtoMessage() {}
-func (*MetricMetadata) Descriptor() ([]byte, []int) {
+func (m *MetadataV2) Reset() { *m = MetadataV2{} }
+func (*MetadataV2) ProtoMessage() {}
+func (*MetadataV2) Descriptor() ([]byte, []int) {
return fileDescriptor_893a47d0a749d749, []int{5}
}
-func (m *MetricMetadata) XXX_Unmarshal(b []byte) error {
+func (m *MetadataV2) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *MetadataV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_MetricMetadata.Marshal(b, m, deterministic)
+ return xxx_messageInfo_MetadataV2.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -400,61 +521,56 @@ func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, erro
return b[:n], nil
}
}
-func (m *MetricMetadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricMetadata.Merge(m, src)
+func (m *MetadataV2) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetadataV2.Merge(m, src)
}
-func (m *MetricMetadata) XXX_Size() int {
+func (m *MetadataV2) XXX_Size() int {
return m.Size()
}
-func (m *MetricMetadata) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricMetadata.DiscardUnknown(m)
+func (m *MetadataV2) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetadataV2.DiscardUnknown(m)
}
-var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo
+var xxx_messageInfo_MetadataV2 proto.InternalMessageInfo
-func (m *MetricMetadata) GetType() MetricMetadata_MetricType {
+func (m *MetadataV2) GetType() MetadataV2_MetricType {
if m != nil {
return m.Type
}
- return UNKNOWN
-}
-
-func (m *MetricMetadata) GetMetricFamilyName() string {
- if m != nil {
- return m.MetricFamilyName
- }
- return ""
+ return METRIC_TYPE_UNSPECIFIED
}
-func (m *MetricMetadata) GetHelp() string {
+func (m *MetadataV2) GetHelpRef() uint32 {
if m != nil {
- return m.Help
+ return m.HelpRef
}
- return ""
+ return 0
}
-func (m *MetricMetadata) GetUnit() string {
+func (m *MetadataV2) GetUnitRef() uint32 {
if m != nil {
- return m.Unit
+ return m.UnitRef
}
- return ""
+ return 0
}
-type Metric struct {
- Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
+type StreamWriteRequest struct {
+ TenantID string `protobuf:"bytes,1,opt,name=TenantID,proto3" json:"TenantID,omitempty"`
+ Request *WriteRequest `protobuf:"bytes,2,opt,name=Request,proto3" json:"Request,omitempty"`
+ MessageWithBufRef `protobuf:"bytes,1000,opt,name=Ref,proto3,embedded=Ref,customtype=MessageWithBufRef" json:"Ref"`
}
-func (m *Metric) Reset() { *m = Metric{} }
-func (*Metric) ProtoMessage() {}
-func (*Metric) Descriptor() ([]byte, []int) {
+func (m *StreamWriteRequest) Reset() { *m = StreamWriteRequest{} }
+func (*StreamWriteRequest) ProtoMessage() {}
+func (*StreamWriteRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_893a47d0a749d749, []int{6}
}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
+func (m *StreamWriteRequest) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *StreamWriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+ return xxx_messageInfo_StreamWriteRequest.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -464,36 +580,54 @@ func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
+func (m *StreamWriteRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StreamWriteRequest.Merge(m, src)
}
-func (m *Metric) XXX_Size() int {
+func (m *StreamWriteRequest) XXX_Size() int {
return m.Size()
}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
+func (m *StreamWriteRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_StreamWriteRequest.DiscardUnknown(m)
}
-var xxx_messageInfo_Metric proto.InternalMessageInfo
+var xxx_messageInfo_StreamWriteRequest proto.InternalMessageInfo
-type Exemplar struct {
- // Exemplar labels, different than series labels
- Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
- Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
- TimestampMs int64 `protobuf:"varint,3,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
+func (m *StreamWriteRequest) GetTenantID() string {
+ if m != nil {
+ return m.TenantID
+ }
+ return ""
}
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (*Exemplar) ProtoMessage() {}
-func (*Exemplar) Descriptor() ([]byte, []int) {
+func (m *StreamWriteRequest) GetRequest() *WriteRequest {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+type WriteResponse struct {
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ // Samples represents X-Prometheus-Remote-Write-Written-Samples
+ Samples int64 `protobuf:"varint,3,opt,name=Samples,proto3" json:"Samples,omitempty"`
+ // Histograms represents X-Prometheus-Remote-Write-Written-Histograms
+ Histograms int64 `protobuf:"varint,4,opt,name=Histograms,proto3" json:"Histograms,omitempty"`
+ // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
+ Exemplars int64 `protobuf:"varint,5,opt,name=Exemplars,proto3" json:"Exemplars,omitempty"`
+}
+
+func (m *WriteResponse) Reset() { *m = WriteResponse{} }
+func (*WriteResponse) ProtoMessage() {}
+func (*WriteResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_893a47d0a749d749, []int{7}
}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
+func (m *WriteResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
+ return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -503,90 +637,72 @@ func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
+func (m *WriteResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WriteResponse.Merge(m, src)
}
-func (m *Exemplar) XXX_Size() int {
+func (m *WriteResponse) XXX_Size() int {
return m.Size()
}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
+func (m *WriteResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_WriteResponse.DiscardUnknown(m)
}
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+var xxx_messageInfo_WriteResponse proto.InternalMessageInfo
-func (m *Exemplar) GetValue() float64 {
+func (m *WriteResponse) GetCode() int32 {
if m != nil {
- return m.Value
+ return m.Code
}
return 0
}
-func (m *Exemplar) GetTimestampMs() int64 {
+func (m *WriteResponse) GetMessage() string {
if m != nil {
- return m.TimestampMs
+ return m.Message
}
- return 0
+ return ""
}
-// A native histogram, also known as a sparse histogram.
-// Original design doc:
-// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
-// The appendix of this design doc also explains the concept of float
-// histograms. This Histogram message can represent both, the usual
-// integer histogram as well as a float histogram.
-type Histogram struct {
- // Types that are valid to be assigned to Count:
- //
- // *Histogram_CountInt
- // *Histogram_CountFloat
- Count isHistogram_Count `protobuf_oneof:"count"`
- Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"`
- // The schema defines the bucket schema. Currently, valid numbers
- // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
- // is a bucket boundary in each case, and then each power of two is
- // divided into 2^n logarithmic buckets. Or in other words, each
- // bucket boundary is the previous boundary times 2^(2^-n). In the
- // future, more bucket schemas may be added using numbers < -4 or >
- // 8.
- Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"`
- ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
- // Types that are valid to be assigned to ZeroCount:
- //
- // *Histogram_ZeroCountInt
- // *Histogram_ZeroCountFloat
- ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
- // Negative Buckets.
- NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"`
- // Use either "negative_deltas" or "negative_counts", the former for
- // regular histograms with integer counts, the latter for float
- // histograms.
- NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
- NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
- // Positive Buckets.
- PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
- // Use either "positive_deltas" or "positive_counts", the former for
- // regular histograms with integer counts, the latter for float
- // histograms.
- PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"`
- PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`
- ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=cortexpb.Histogram_ResetHint" json:"reset_hint,omitempty"`
- // timestamp is in ms format, see model/timestamp/timestamp.go for
- // conversion from time.Time to Prometheus timestamp.
- TimestampMs int64 `protobuf:"varint,15,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
+func (m *WriteResponse) GetSamples() int64 {
+ if m != nil {
+ return m.Samples
+ }
+ return 0
}
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (*Histogram) ProtoMessage() {}
-func (*Histogram) Descriptor() ([]byte, []int) {
+func (m *WriteResponse) GetHistograms() int64 {
+ if m != nil {
+ return m.Histograms
+ }
+ return 0
+}
+
+func (m *WriteResponse) GetExemplars() int64 {
+ if m != nil {
+ return m.Exemplars
+ }
+ return 0
+}
+
+type TimeSeries struct {
+ Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
+ // Sorted by time, oldest sample first.
+ Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"`
+ Exemplars []Exemplar `protobuf:"bytes,3,rep,name=exemplars,proto3" json:"exemplars"`
+ Histograms []Histogram `protobuf:"bytes,4,rep,name=histograms,proto3" json:"histograms"`
+}
+
+func (m *TimeSeries) Reset() { *m = TimeSeries{} }
+func (*TimeSeries) ProtoMessage() {}
+func (*TimeSeries) Descriptor() ([]byte, []int) {
return fileDescriptor_893a47d0a749d749, []int{8}
}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
+func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+ return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -596,198 +712,223 @@ func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
+func (m *TimeSeries) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeries.Merge(m, src)
}
-func (m *Histogram) XXX_Size() int {
+func (m *TimeSeries) XXX_Size() int {
return m.Size()
}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
-
-type isHistogram_Count interface {
- isHistogram_Count()
- Equal(interface{}) bool
- MarshalTo([]byte) (int, error)
- Size() int
-}
-type isHistogram_ZeroCount interface {
- isHistogram_ZeroCount()
- Equal(interface{}) bool
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Histogram_CountInt struct {
- CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof"`
-}
-type Histogram_CountFloat struct {
- CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof"`
-}
-type Histogram_ZeroCountInt struct {
- ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof"`
-}
-type Histogram_ZeroCountFloat struct {
- ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof"`
+func (m *TimeSeries) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeSeries.DiscardUnknown(m)
}
-func (*Histogram_CountInt) isHistogram_Count() {}
-func (*Histogram_CountFloat) isHistogram_Count() {}
-func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {}
-func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
+var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
-func (m *Histogram) GetCount() isHistogram_Count {
+func (m *TimeSeries) GetSamples() []Sample {
if m != nil {
- return m.Count
+ return m.Samples
}
return nil
}
-func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
+
+func (m *TimeSeries) GetExemplars() []Exemplar {
if m != nil {
- return m.ZeroCount
+ return m.Exemplars
}
return nil
}
-func (m *Histogram) GetCountInt() uint64 {
- if x, ok := m.GetCount().(*Histogram_CountInt); ok {
- return x.CountInt
+func (m *TimeSeries) GetHistograms() []Histogram {
+ if m != nil {
+ return m.Histograms
}
- return 0
+ return nil
}
-func (m *Histogram) GetCountFloat() float64 {
- if x, ok := m.GetCount().(*Histogram_CountFloat); ok {
- return x.CountFloat
- }
- return 0
+type LabelPair struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
}
-func (m *Histogram) GetSum() float64 {
- if m != nil {
- return m.Sum
- }
- return 0
+func (m *LabelPair) Reset() { *m = LabelPair{} }
+func (*LabelPair) ProtoMessage() {}
+func (*LabelPair) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{9}
}
-
-func (m *Histogram) GetSchema() int32 {
- if m != nil {
- return m.Schema
- }
- return 0
+func (m *LabelPair) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
}
-
-func (m *Histogram) GetZeroThreshold() float64 {
- if m != nil {
- return m.ZeroThreshold
+func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- return 0
}
-
-func (m *Histogram) GetZeroCountInt() uint64 {
- if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok {
- return x.ZeroCountInt
- }
- return 0
+func (m *LabelPair) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelPair.Merge(m, src)
}
-
-func (m *Histogram) GetZeroCountFloat() float64 {
- if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
- return x.ZeroCountFloat
- }
- return 0
+func (m *LabelPair) XXX_Size() int {
+ return m.Size()
+}
+func (m *LabelPair) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelPair.DiscardUnknown(m)
}
-func (m *Histogram) GetNegativeSpans() []BucketSpan {
+var xxx_messageInfo_LabelPair proto.InternalMessageInfo
+
+func (m *LabelPair) GetName() []byte {
if m != nil {
- return m.NegativeSpans
+ return m.Name
}
return nil
}
-func (m *Histogram) GetNegativeDeltas() []int64 {
+func (m *LabelPair) GetValue() []byte {
if m != nil {
- return m.NegativeDeltas
+ return m.Value
}
return nil
}
-func (m *Histogram) GetNegativeCounts() []float64 {
- if m != nil {
- return m.NegativeCounts
- }
- return nil
+type Sample struct {
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ TimestampMs int64 `protobuf:"varint,2,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
}
-func (m *Histogram) GetPositiveSpans() []BucketSpan {
- if m != nil {
- return m.PositiveSpans
+func (m *Sample) Reset() { *m = Sample{} }
+func (*Sample) ProtoMessage() {}
+func (*Sample) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{10}
+}
+func (m *Sample) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Sample) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Sample.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- return nil
+}
+func (m *Sample) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Sample.Merge(m, src)
+}
+func (m *Sample) XXX_Size() int {
+ return m.Size()
+}
+func (m *Sample) XXX_DiscardUnknown() {
+ xxx_messageInfo_Sample.DiscardUnknown(m)
}
-func (m *Histogram) GetPositiveDeltas() []int64 {
+var xxx_messageInfo_Sample proto.InternalMessageInfo
+
+func (m *Sample) GetValue() float64 {
if m != nil {
- return m.PositiveDeltas
+ return m.Value
}
- return nil
+ return 0
}
-func (m *Histogram) GetPositiveCounts() []float64 {
+func (m *Sample) GetTimestampMs() int64 {
if m != nil {
- return m.PositiveCounts
+ return m.TimestampMs
}
- return nil
+ return 0
}
-func (m *Histogram) GetResetHint() Histogram_ResetHint {
+type MetricMetadata struct {
+ Type MetricMetadata_MetricType `protobuf:"varint,1,opt,name=type,proto3,enum=cortexpb.MetricMetadata_MetricType" json:"type,omitempty"`
+ MetricFamilyName string `protobuf:"bytes,2,opt,name=metric_family_name,json=metricFamilyName,proto3" json:"metric_family_name,omitempty"`
+ Help string `protobuf:"bytes,4,opt,name=help,proto3" json:"help,omitempty"`
+ Unit string `protobuf:"bytes,5,opt,name=unit,proto3" json:"unit,omitempty"`
+}
+
+func (m *MetricMetadata) Reset() { *m = MetricMetadata{} }
+func (*MetricMetadata) ProtoMessage() {}
+func (*MetricMetadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{11}
+}
+func (m *MetricMetadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MetricMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MetricMetadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MetricMetadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricMetadata.Merge(m, src)
+}
+func (m *MetricMetadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *MetricMetadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricMetadata proto.InternalMessageInfo
+
+func (m *MetricMetadata) GetType() MetricMetadata_MetricType {
if m != nil {
- return m.ResetHint
+ return m.Type
}
- return Histogram_UNKNOWN
+ return UNKNOWN
}
-func (m *Histogram) GetTimestampMs() int64 {
+func (m *MetricMetadata) GetMetricFamilyName() string {
if m != nil {
- return m.TimestampMs
+ return m.MetricFamilyName
}
- return 0
+ return ""
}
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*Histogram) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*Histogram_CountInt)(nil),
- (*Histogram_CountFloat)(nil),
- (*Histogram_ZeroCountInt)(nil),
- (*Histogram_ZeroCountFloat)(nil),
+func (m *MetricMetadata) GetHelp() string {
+ if m != nil {
+ return m.Help
}
+ return ""
}
-// A BucketSpan defines a number of consecutive buckets with their
-// offset. Logically, it would be more straightforward to include the
-// bucket counts in the Span. However, the protobuf representation is
-// more compact in the way the data is structured here (with all the
-// buckets in a single array separate from the Spans).
-type BucketSpan struct {
- Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
- Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
+func (m *MetricMetadata) GetUnit() string {
+ if m != nil {
+ return m.Unit
+ }
+ return ""
}
-func (m *BucketSpan) Reset() { *m = BucketSpan{} }
-func (*BucketSpan) ProtoMessage() {}
-func (*BucketSpan) Descriptor() ([]byte, []int) {
- return fileDescriptor_893a47d0a749d749, []int{9}
+type Metric struct {
+ Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
}
-func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{12}
+}
+func (m *Metric) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
-func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
- return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
+ return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
@@ -797,319 +938,522 @@ func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return b[:n], nil
}
}
-func (m *BucketSpan) XXX_Merge(src proto.Message) {
- xxx_messageInfo_BucketSpan.Merge(m, src)
+func (m *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(m, src)
}
-func (m *BucketSpan) XXX_Size() int {
+func (m *Metric) XXX_Size() int {
return m.Size()
}
-func (m *BucketSpan) XXX_DiscardUnknown() {
- xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+func (m *Metric) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metric.DiscardUnknown(m)
}
-var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+var xxx_messageInfo_Metric proto.InternalMessageInfo
-func (m *BucketSpan) GetOffset() int32 {
+type Exemplar struct {
+ // Exemplar labels, different than series labels
+ Labels []LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=LabelAdapter" json:"labels"`
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+ TimestampMs int64 `protobuf:"varint,3,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
+}
+
+func (m *Exemplar) Reset() { *m = Exemplar{} }
+func (*Exemplar) ProtoMessage() {}
+func (*Exemplar) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{13}
+}
+func (m *Exemplar) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Exemplar) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Exemplar.Merge(m, src)
+}
+func (m *Exemplar) XXX_Size() int {
+ return m.Size()
+}
+func (m *Exemplar) XXX_DiscardUnknown() {
+ xxx_messageInfo_Exemplar.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Exemplar proto.InternalMessageInfo
+
+func (m *Exemplar) GetValue() float64 {
if m != nil {
- return m.Offset
+ return m.Value
}
return 0
}
-func (m *BucketSpan) GetLength() uint32 {
+func (m *Exemplar) GetTimestampMs() int64 {
if m != nil {
- return m.Length
+ return m.TimestampMs
}
return 0
}
-func init() {
- proto.RegisterEnum("cortexpb.WriteRequest_SourceEnum", WriteRequest_SourceEnum_name, WriteRequest_SourceEnum_value)
- proto.RegisterEnum("cortexpb.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value)
- proto.RegisterEnum("cortexpb.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value)
- proto.RegisterType((*WriteRequest)(nil), "cortexpb.WriteRequest")
- proto.RegisterType((*WriteResponse)(nil), "cortexpb.WriteResponse")
- proto.RegisterType((*TimeSeries)(nil), "cortexpb.TimeSeries")
- proto.RegisterType((*LabelPair)(nil), "cortexpb.LabelPair")
- proto.RegisterType((*Sample)(nil), "cortexpb.Sample")
- proto.RegisterType((*MetricMetadata)(nil), "cortexpb.MetricMetadata")
- proto.RegisterType((*Metric)(nil), "cortexpb.Metric")
- proto.RegisterType((*Exemplar)(nil), "cortexpb.Exemplar")
- proto.RegisterType((*Histogram)(nil), "cortexpb.Histogram")
- proto.RegisterType((*BucketSpan)(nil), "cortexpb.BucketSpan")
+// A native histogram, also known as a sparse histogram.
+// Original design doc:
+// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
+// The appendix of this design doc also explains the concept of float
+// histograms. This Histogram message can represent both, the usual
+// integer histogram as well as a float histogram.
+type Histogram struct {
+ // Types that are valid to be assigned to Count:
+ //
+ // *Histogram_CountInt
+ // *Histogram_CountFloat
+ Count isHistogram_Count `protobuf_oneof:"count"`
+ Sum float64 `protobuf:"fixed64,3,opt,name=sum,proto3" json:"sum,omitempty"`
+ // The schema defines the bucket schema. Currently, valid numbers
+ // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
+ // is a bucket boundary in each case, and then each power of two is
+ // divided into 2^n logarithmic buckets. Or in other words, each
+ // bucket boundary is the previous boundary times 2^(2^-n). In the
+ // future, more bucket schemas may be added using numbers < -4 or >
+ // 8.
+ Schema int32 `protobuf:"zigzag32,4,opt,name=schema,proto3" json:"schema,omitempty"`
+ ZeroThreshold float64 `protobuf:"fixed64,5,opt,name=zero_threshold,json=zeroThreshold,proto3" json:"zero_threshold,omitempty"`
+ // Types that are valid to be assigned to ZeroCount:
+ //
+ // *Histogram_ZeroCountInt
+ // *Histogram_ZeroCountFloat
+ ZeroCount isHistogram_ZeroCount `protobuf_oneof:"zero_count"`
+ // Negative Buckets.
+ NegativeSpans []BucketSpan `protobuf:"bytes,8,rep,name=negative_spans,json=negativeSpans,proto3" json:"negative_spans"`
+ // Use either "negative_deltas" or "negative_counts", the former for
+ // regular histograms with integer counts, the latter for float
+ // histograms.
+ NegativeDeltas []int64 `protobuf:"zigzag64,9,rep,packed,name=negative_deltas,json=negativeDeltas,proto3" json:"negative_deltas,omitempty"`
+ NegativeCounts []float64 `protobuf:"fixed64,10,rep,packed,name=negative_counts,json=negativeCounts,proto3" json:"negative_counts,omitempty"`
+ // Positive Buckets.
+ PositiveSpans []BucketSpan `protobuf:"bytes,11,rep,name=positive_spans,json=positiveSpans,proto3" json:"positive_spans"`
+ // Use either "positive_deltas" or "positive_counts", the former for
+ // regular histograms with integer counts, the latter for float
+ // histograms.
+ PositiveDeltas []int64 `protobuf:"zigzag64,12,rep,packed,name=positive_deltas,json=positiveDeltas,proto3" json:"positive_deltas,omitempty"`
+ PositiveCounts []float64 `protobuf:"fixed64,13,rep,packed,name=positive_counts,json=positiveCounts,proto3" json:"positive_counts,omitempty"`
+ ResetHint Histogram_ResetHint `protobuf:"varint,14,opt,name=reset_hint,json=resetHint,proto3,enum=cortexpb.Histogram_ResetHint" json:"reset_hint,omitempty"`
+ // timestamp is in ms format, see model/timestamp/timestamp.go for
+ // conversion from time.Time to Prometheus timestamp.
+ TimestampMs int64 `protobuf:"varint,15,opt,name=timestamp_ms,json=timestampMs,proto3" json:"timestamp_ms,omitempty"`
}
-func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) }
-
-var fileDescriptor_893a47d0a749d749 = []byte{
- // 1031 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x4b, 0x6f, 0x23, 0x45,
- 0x17, 0xed, 0x72, 0xfb, 0x79, 0x63, 0x3b, 0x3d, 0xf5, 0x45, 0x1f, 0xad, 0x48, 0xd3, 0x71, 0x1a,
- 0x01, 0x16, 0x42, 0x01, 0x05, 0x01, 0x9a, 0x51, 0x84, 0x64, 0x0f, 0xce, 0x43, 0x33, 0x76, 0xa2,
- 0xb2, 0xc3, 0x68, 0xd8, 0x58, 0x15, 0xa7, 0x12, 0xb7, 0xa6, 0x5f, 0x74, 0x95, 0xa3, 0x09, 0x2b,
- 0x56, 0x88, 0x25, 0x6b, 0xb6, 0x6c, 0xf8, 0x05, 0xfc, 0x86, 0x2c, 0xb3, 0x1c, 0xb1, 0x88, 0x88,
- 0xb3, 0x99, 0xe5, 0x2c, 0xf8, 0x01, 0xa8, 0xaa, 0x5f, 0xce, 0x84, 0x11, 0x9b, 0xd9, 0x55, 0x9d,
- 0x7b, 0xcf, 0xbd, 0xa7, 0xea, 0x9e, 0x2e, 0x35, 0xd4, 0x27, 0x41, 0x24, 0xd8, 0x8b, 0x8d, 0x30,
- 0x0a, 0x44, 0x80, 0xab, 0xf1, 0x2e, 0x3c, 0x5a, 0x5d, 0x39, 0x0d, 0x4e, 0x03, 0x05, 0x7e, 0x2a,
- 0x57, 0x71, 0xdc, 0xfe, 0xa3, 0x00, 0xf5, 0xa7, 0x91, 0x23, 0x18, 0x61, 0xdf, 0xcf, 0x18, 0x17,
- 0xf8, 0x00, 0x40, 0x38, 0x1e, 0xe3, 0x2c, 0x72, 0x18, 0x37, 0x51, 0x4b, 0x6f, 0x2f, 0x6d, 0xae,
- 0x6c, 0xa4, 0x55, 0x36, 0x46, 0x8e, 0xc7, 0x86, 0x2a, 0xd6, 0x5d, 0xbd, 0xb8, 0x5a, 0xd3, 0xfe,
- 0xbc, 0x5a, 0xc3, 0x07, 0x11, 0xa3, 0xae, 0x1b, 0x4c, 0x46, 0x19, 0x8f, 0x2c, 0xd4, 0xc0, 0x0f,
- 0xa0, 0x3c, 0x0c, 0x66, 0xd1, 0x84, 0x99, 0x85, 0x16, 0x6a, 0x37, 0x37, 0xd7, 0xf3, 0x6a, 0x8b,
- 0x9d, 0x37, 0xe2, 0xa4, 0x9e, 0x3f, 0xf3, 0x48, 0x42, 0xc0, 0x0f, 0xa1, 0xea, 0x31, 0x41, 0x8f,
- 0xa9, 0xa0, 0xa6, 0xae, 0xa4, 0x98, 0x39, 0xb9, 0xcf, 0x44, 0xe4, 0x4c, 0xfa, 0x49, 0xbc, 0x5b,
- 0xbc, 0xb8, 0x5a, 0x43, 0x24, 0xcb, 0xc7, 0x5b, 0xb0, 0xca, 0x9f, 0x3b, 0xe1, 0xd8, 0xa5, 0x47,
- 0xcc, 0x1d, 0xfb, 0xd4, 0x63, 0xe3, 0x33, 0xea, 0x3a, 0xc7, 0x54, 0x38, 0x81, 0x6f, 0xbe, 0xaa,
- 0xb4, 0x50, 0xbb, 0x4a, 0xde, 0x93, 0x29, 0x4f, 0x64, 0xc6, 0x80, 0x7a, 0xec, 0xdb, 0x2c, 0x6e,
- 0xaf, 0x01, 0xe4, 0x7a, 0x70, 0x05, 0xf4, 0xce, 0xc1, 0x9e, 0xa1, 0xe1, 0x2a, 0x14, 0xc9, 0xe1,
- 0x93, 0x9e, 0x81, 0xec, 0x65, 0x68, 0x24, 0xea, 0x79, 0x18, 0xf8, 0x9c, 0xd9, 0x7f, 0x23, 0x80,
- 0xfc, 0x76, 0x70, 0x07, 0xca, 0xaa, 0x73, 0x7a, 0x87, 0xff, 0xcb, 0x85, 0xab, 0x7e, 0x07, 0xd4,
- 0x89, 0xba, 0x2b, 0xc9, 0x15, 0xd6, 0x15, 0xd4, 0x39, 0xa6, 0xa1, 0x60, 0x11, 0x49, 0x88, 0xf8,
- 0x33, 0xa8, 0x70, 0xea, 0x85, 0x2e, 0xe3, 0x66, 0x41, 0xd5, 0x30, 0xf2, 0x1a, 0x43, 0x15, 0x50,
- 0x87, 0xd6, 0x48, 0x9a, 0x86, 0xbf, 0x84, 0x1a, 0x7b, 0xc1, 0xbc, 0xd0, 0xa5, 0x11, 0x4f, 0x2e,
- 0x0c, 0xe7, 0x9c, 0x5e, 0x12, 0x4a, 0x58, 0x79, 0x2a, 0x7e, 0x00, 0x30, 0x75, 0xb8, 0x08, 0x4e,
- 0x23, 0xea, 0x71, 0xb3, 0xf8, 0xa6, 0xe0, 0xdd, 0x34, 0x96, 0x30, 0x17, 0x92, 0xed, 0x2f, 0xa0,
- 0x96, 0x9d, 0x07, 0x63, 0x28, 0xca, 0x8b, 0x36, 0x51, 0x0b, 0xb5, 0xeb, 0x44, 0xad, 0xf1, 0x0a,
- 0x94, 0xce, 0xa8, 0x3b, 0x8b, 0xa7, 0x5f, 0x27, 0xf1, 0xc6, 0xee, 0x40, 0x39, 0x3e, 0x42, 0x1e,
- 0x97, 0x24, 0x94, 0xc4, 0xf1, 0x3a, 0xd4, 0x95, 0x85, 0x04, 0xf5, 0xc2, 0xb1, 0xc7, 0x15, 0x59,
- 0x27, 0x4b, 0x19, 0xd6, 0xe7, 0xf6, 0xaf, 0x05, 0x68, 0xde, 0xf6, 0x00, 0xfe, 0x0a, 0x8a, 0xe2,
- 0x3c, 0x8c, 0x4b, 0x35, 0x37, 0xdf, 0x7f, 0x9b, 0x57, 0x92, 0xed, 0xe8, 0x3c, 0x64, 0x44, 0x11,
- 0xf0, 0x27, 0x80, 0x3d, 0x85, 0x8d, 0x4f, 0xa8, 0xe7, 0xb8, 0xe7, 0xca, 0x2f, 0xaa, 0x69, 0x8d,
- 0x18, 0x71, 0x64, 0x5b, 0x05, 0xa4, 0x4d, 0xe4, 0x31, 0xa7, 0xcc, 0x0d, 0xcd, 0xa2, 0x8a, 0xab,
- 0xb5, 0xc4, 0x66, 0xbe, 0x23, 0xcc, 0x52, 0x8c, 0xc9, 0xb5, 0x7d, 0x0e, 0x90, 0x77, 0xc2, 0x4b,
- 0x50, 0x39, 0x1c, 0x3c, 0x1e, 0xec, 0x3f, 0x1d, 0x18, 0x9a, 0xdc, 0x3c, 0xda, 0x3f, 0x1c, 0x8c,
- 0x7a, 0xc4, 0x40, 0xb8, 0x06, 0xa5, 0x9d, 0xce, 0xe1, 0x4e, 0xcf, 0x28, 0xe0, 0x06, 0xd4, 0x76,
- 0xf7, 0x86, 0xa3, 0xfd, 0x1d, 0xd2, 0xe9, 0x1b, 0x3a, 0xc6, 0xd0, 0x54, 0x91, 0x1c, 0x2b, 0x4a,
- 0xea, 0xf0, 0xb0, 0xdf, 0xef, 0x90, 0x67, 0x46, 0x49, 0x1a, 0x72, 0x6f, 0xb0, 0xbd, 0x6f, 0x94,
- 0x71, 0x1d, 0xaa, 0xc3, 0x51, 0x67, 0xd4, 0x1b, 0xf6, 0x46, 0x46, 0xc5, 0x7e, 0x0c, 0xe5, 0xb8,
- 0xf5, 0x3b, 0x30, 0xa2, 0xfd, 0x13, 0x82, 0x6a, 0x6a, 0x9e, 0x77, 0x61, 0xec, 0x5b, 0x96, 0x78,
- 0xeb, 0xc8, 0xf5, 0xbb, 0x23, 0xbf, 0x2c, 0x41, 0x2d, 0x33, 0x23, 0xbe, 0x0f, 0xb5, 0x49, 0x30,
- 0xf3, 0xc5, 0xd8, 0xf1, 0x85, 0x1a, 0x79, 0x71, 0x57, 0x23, 0x55, 0x05, 0xed, 0xf9, 0x02, 0xaf,
- 0xc3, 0x52, 0x1c, 0x3e, 0x71, 0x03, 0x2a, 0xe2, 0x5e, 0xbb, 0x1a, 0x01, 0x05, 0x6e, 0x4b, 0x0c,
- 0x1b, 0xa0, 0xf3, 0x99, 0xa7, 0x3a, 0x21, 0x22, 0x97, 0xf8, 0xff, 0x50, 0xe6, 0x93, 0x29, 0xf3,
- 0xa8, 0x1a, 0xee, 0x3d, 0x92, 0xec, 0xf0, 0x07, 0xd0, 0xfc, 0x81, 0x45, 0xc1, 0x58, 0x4c, 0x23,
- 0xc6, 0xa7, 0x81, 0x7b, 0xac, 0x06, 0x8d, 0x48, 0x43, 0xa2, 0xa3, 0x14, 0xc4, 0x1f, 0x26, 0x69,
- 0xb9, 0xae, 0xb2, 0xd2, 0x85, 0x48, 0x5d, 0xe2, 0x8f, 0x52, 0x6d, 0x1f, 0x83, 0xb1, 0x90, 0x17,
- 0x0b, 0xac, 0x28, 0x81, 0x88, 0x34, 0xb3, 0xcc, 0x58, 0x64, 0x07, 0x9a, 0x3e, 0x3b, 0xa5, 0xc2,
- 0x39, 0x63, 0x63, 0x1e, 0x52, 0x9f, 0x9b, 0xd5, 0x37, 0x5f, 0xe5, 0xee, 0x6c, 0xf2, 0x9c, 0x89,
- 0x61, 0x48, 0xfd, 0xe4, 0x0b, 0x6d, 0xa4, 0x0c, 0x89, 0x71, 0xfc, 0x11, 0x2c, 0x67, 0x25, 0x8e,
- 0x99, 0x2b, 0x28, 0x37, 0x6b, 0x2d, 0xbd, 0x8d, 0x49, 0x56, 0xf9, 0x1b, 0x85, 0xde, 0x4a, 0x54,
- 0xda, 0xb8, 0x09, 0x2d, 0xbd, 0x8d, 0xf2, 0x44, 0x25, 0x4c, 0x3e, 0x6f, 0xcd, 0x30, 0xe0, 0xce,
- 0x82, 0xa8, 0xa5, 0xff, 0x16, 0x95, 0x32, 0x32, 0x51, 0x59, 0x89, 0x44, 0x54, 0x3d, 0x16, 0x95,
- 0xc2, 0xb9, 0xa8, 0x2c, 0x31, 0x11, 0xd5, 0x88, 0x45, 0xa5, 0x70, 0x22, 0x6a, 0x0b, 0x20, 0x62,
- 0x9c, 0x89, 0xf1, 0x54, 0xde, 0x7c, 0x53, 0x3d, 0x02, 0xf7, 0xff, 0xe5, 0x19, 0xdb, 0x20, 0x32,
- 0x6b, 0xd7, 0xf1, 0x05, 0xa9, 0x45, 0xe9, 0xf2, 0x8e, 0xff, 0x96, 0xef, 0xfa, 0xef, 0x21, 0xd4,
- 0x32, 0xea, 0xed, 0xef, 0xb9, 0x02, 0xfa, 0xb3, 0xde, 0xd0, 0x40, 0xb8, 0x0c, 0x85, 0xc1, 0xbe,
- 0x51, 0xc8, 0xbf, 0x69, 0x7d, 0xb5, 0xf8, 0xf3, 0x6f, 0x16, 0xea, 0x56, 0xa0, 0xa4, 0xc4, 0x77,
- 0xeb, 0x00, 0xf9, 0xec, 0xed, 0x2d, 0x80, 0xfc, 0xa2, 0xa4, 0xfd, 0x82, 0x93, 0x13, 0xce, 0x62,
- 0x3f, 0xdf, 0x23, 0xc9, 0x4e, 0xe2, 0x2e, 0xf3, 0x4f, 0xc5, 0x54, 0xd9, 0xb8, 0x41, 0x92, 0x5d,
- 0xf7, 0xeb, 0xcb, 0x6b, 0x4b, 0x7b, 0x79, 0x6d, 0x69, 0xaf, 0xaf, 0x2d, 0xf4, 0xe3, 0xdc, 0x42,
- 0xbf, 0xcf, 0x2d, 0x74, 0x31, 0xb7, 0xd0, 0xe5, 0xdc, 0x42, 0x7f, 0xcd, 0x2d, 0xf4, 0x6a, 0x6e,
- 0x69, 0xaf, 0xe7, 0x16, 0xfa, 0xe5, 0xc6, 0xd2, 0x2e, 0x6f, 0x2c, 0xed, 0xe5, 0x8d, 0xa5, 0x7d,
- 0x97, 0xfd, 0x14, 0x1c, 0x95, 0xd5, 0x5f, 0xc0, 0xe7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x4b,
- 0xb6, 0xdb, 0xd4, 0x35, 0x08, 0x00, 0x00,
-}
-
-func (x WriteRequest_SourceEnum) String() string {
- s, ok := WriteRequest_SourceEnum_name[int32(x)]
- if ok {
- return s
- }
- return strconv.Itoa(int(x))
+func (m *Histogram) Reset() { *m = Histogram{} }
+func (*Histogram) ProtoMessage() {}
+func (*Histogram) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{14}
}
-func (x MetricMetadata_MetricType) String() string {
- s, ok := MetricMetadata_MetricType_name[int32(x)]
- if ok {
- return s
- }
- return strconv.Itoa(int(x))
+func (m *Histogram) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
}
-func (x Histogram_ResetHint) String() string {
- s, ok := Histogram_ResetHint_name[int32(x)]
- if ok {
- return s
+func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- return strconv.Itoa(int(x))
}
-func (this *WriteRequest) Equal(that interface{}) bool {
- if that == nil {
- return this == nil
- }
-
- that1, ok := that.(*WriteRequest)
- if !ok {
- that2, ok := that.(WriteRequest)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- return this == nil
- } else if this == nil {
- return false
- }
- if len(this.Timeseries) != len(that1.Timeseries) {
- return false
- }
- for i := range this.Timeseries {
- if !this.Timeseries[i].Equal(that1.Timeseries[i]) {
- return false
- }
- }
- if this.Source != that1.Source {
- return false
- }
- if len(this.Metadata) != len(that1.Metadata) {
- return false
- }
- for i := range this.Metadata {
- if !this.Metadata[i].Equal(that1.Metadata[i]) {
- return false
- }
- }
- if this.SkipLabelNameValidation != that1.SkipLabelNameValidation {
- return false
- }
- return true
+func (m *Histogram) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Histogram.Merge(m, src)
+}
+func (m *Histogram) XXX_Size() int {
+ return m.Size()
+}
+func (m *Histogram) XXX_DiscardUnknown() {
+ xxx_messageInfo_Histogram.DiscardUnknown(m)
}
-func (this *WriteResponse) Equal(that interface{}) bool {
- if that == nil {
- return this == nil
- }
- that1, ok := that.(*WriteResponse)
- if !ok {
- that2, ok := that.(WriteResponse)
- if ok {
- that1 = &that2
- } else {
- return false
- }
- }
- if that1 == nil {
- return this == nil
- } else if this == nil {
- return false
- }
- return true
+var xxx_messageInfo_Histogram proto.InternalMessageInfo
+
+type isHistogram_Count interface {
+ isHistogram_Count()
+ Equal(interface{}) bool
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+type isHistogram_ZeroCount interface {
+ isHistogram_ZeroCount()
+ Equal(interface{}) bool
+ MarshalTo([]byte) (int, error)
+ Size() int
}
-func (this *TimeSeries) Equal(that interface{}) bool {
- if that == nil {
- return this == nil
- }
- that1, ok := that.(*TimeSeries)
- if !ok {
- that2, ok := that.(TimeSeries)
- if ok {
- that1 = &that2
- } else {
- return false
- }
+type Histogram_CountInt struct {
+ CountInt uint64 `protobuf:"varint,1,opt,name=count_int,json=countInt,proto3,oneof"`
+}
+type Histogram_CountFloat struct {
+ CountFloat float64 `protobuf:"fixed64,2,opt,name=count_float,json=countFloat,proto3,oneof"`
+}
+type Histogram_ZeroCountInt struct {
+ ZeroCountInt uint64 `protobuf:"varint,6,opt,name=zero_count_int,json=zeroCountInt,proto3,oneof"`
+}
+type Histogram_ZeroCountFloat struct {
+ ZeroCountFloat float64 `protobuf:"fixed64,7,opt,name=zero_count_float,json=zeroCountFloat,proto3,oneof"`
+}
+
+func (*Histogram_CountInt) isHistogram_Count() {}
+func (*Histogram_CountFloat) isHistogram_Count() {}
+func (*Histogram_ZeroCountInt) isHistogram_ZeroCount() {}
+func (*Histogram_ZeroCountFloat) isHistogram_ZeroCount() {}
+
+func (m *Histogram) GetCount() isHistogram_Count {
+ if m != nil {
+ return m.Count
}
- if that1 == nil {
- return this == nil
- } else if this == nil {
- return false
+ return nil
+}
+func (m *Histogram) GetZeroCount() isHistogram_ZeroCount {
+ if m != nil {
+ return m.ZeroCount
}
- if len(this.Labels) != len(that1.Labels) {
- return false
+ return nil
+}
+
+func (m *Histogram) GetCountInt() uint64 {
+ if x, ok := m.GetCount().(*Histogram_CountInt); ok {
+ return x.CountInt
}
- for i := range this.Labels {
- if !this.Labels[i].Equal(that1.Labels[i]) {
- return false
- }
+ return 0
+}
+
+func (m *Histogram) GetCountFloat() float64 {
+ if x, ok := m.GetCount().(*Histogram_CountFloat); ok {
+ return x.CountFloat
}
- if len(this.Samples) != len(that1.Samples) {
- return false
+ return 0
+}
+
+func (m *Histogram) GetSum() float64 {
+ if m != nil {
+ return m.Sum
}
- for i := range this.Samples {
- if !this.Samples[i].Equal(&that1.Samples[i]) {
- return false
- }
+ return 0
+}
+
+func (m *Histogram) GetSchema() int32 {
+ if m != nil {
+ return m.Schema
}
- if len(this.Exemplars) != len(that1.Exemplars) {
- return false
+ return 0
+}
+
+func (m *Histogram) GetZeroThreshold() float64 {
+ if m != nil {
+ return m.ZeroThreshold
}
- for i := range this.Exemplars {
- if !this.Exemplars[i].Equal(&that1.Exemplars[i]) {
- return false
- }
+ return 0
+}
+
+func (m *Histogram) GetZeroCountInt() uint64 {
+ if x, ok := m.GetZeroCount().(*Histogram_ZeroCountInt); ok {
+ return x.ZeroCountInt
}
- if len(this.Histograms) != len(that1.Histograms) {
- return false
+ return 0
+}
+
+func (m *Histogram) GetZeroCountFloat() float64 {
+ if x, ok := m.GetZeroCount().(*Histogram_ZeroCountFloat); ok {
+ return x.ZeroCountFloat
}
- for i := range this.Histograms {
- if !this.Histograms[i].Equal(&that1.Histograms[i]) {
- return false
- }
+ return 0
+}
+
+func (m *Histogram) GetNegativeSpans() []BucketSpan {
+ if m != nil {
+ return m.NegativeSpans
}
- return true
+ return nil
}
-func (this *LabelPair) Equal(that interface{}) bool {
- if that == nil {
- return this == nil
+
+func (m *Histogram) GetNegativeDeltas() []int64 {
+ if m != nil {
+ return m.NegativeDeltas
}
+ return nil
+}
- that1, ok := that.(*LabelPair)
- if !ok {
- that2, ok := that.(LabelPair)
- if ok {
- that1 = &that2
- } else {
- return false
- }
+func (m *Histogram) GetNegativeCounts() []float64 {
+ if m != nil {
+ return m.NegativeCounts
}
- if that1 == nil {
- return this == nil
- } else if this == nil {
- return false
+ return nil
+}
+
+func (m *Histogram) GetPositiveSpans() []BucketSpan {
+ if m != nil {
+ return m.PositiveSpans
}
- if !bytes.Equal(this.Name, that1.Name) {
- return false
+ return nil
+}
+
+func (m *Histogram) GetPositiveDeltas() []int64 {
+ if m != nil {
+ return m.PositiveDeltas
}
- if !bytes.Equal(this.Value, that1.Value) {
- return false
+ return nil
+}
+
+func (m *Histogram) GetPositiveCounts() []float64 {
+ if m != nil {
+ return m.PositiveCounts
}
- return true
+ return nil
}
-func (this *Sample) Equal(that interface{}) bool {
- if that == nil {
- return this == nil
+
+func (m *Histogram) GetResetHint() Histogram_ResetHint {
+ if m != nil {
+ return m.ResetHint
}
+ return Histogram_UNKNOWN
+}
- that1, ok := that.(*Sample)
- if !ok {
- that2, ok := that.(Sample)
- if ok {
- that1 = &that2
- } else {
- return false
- }
+func (m *Histogram) GetTimestampMs() int64 {
+ if m != nil {
+ return m.TimestampMs
}
- if that1 == nil {
- return this == nil
- } else if this == nil {
- return false
+ return 0
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Histogram) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Histogram_CountInt)(nil),
+ (*Histogram_CountFloat)(nil),
+ (*Histogram_ZeroCountInt)(nil),
+ (*Histogram_ZeroCountFloat)(nil),
}
- if this.Value != that1.Value {
- return false
+}
+
+// A BucketSpan defines a number of consecutive buckets with their
+// offset. Logically, it would be more straightforward to include the
+// bucket counts in the Span. However, the protobuf representation is
+// more compact in the way the data is structured here (with all the
+// buckets in a single array separate from the Spans).
+type BucketSpan struct {
+ Offset int32 `protobuf:"zigzag32,1,opt,name=offset,proto3" json:"offset,omitempty"`
+ Length uint32 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"`
+}
+
+func (m *BucketSpan) Reset() { *m = BucketSpan{} }
+func (*BucketSpan) ProtoMessage() {}
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return fileDescriptor_893a47d0a749d749, []int{15}
+}
+func (m *BucketSpan) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *BucketSpan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_BucketSpan.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
}
- if this.TimestampMs != that1.TimestampMs {
- return false
+}
+func (m *BucketSpan) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_BucketSpan.Merge(m, src)
+}
+func (m *BucketSpan) XXX_Size() int {
+ return m.Size()
+}
+func (m *BucketSpan) XXX_DiscardUnknown() {
+ xxx_messageInfo_BucketSpan.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BucketSpan proto.InternalMessageInfo
+
+func (m *BucketSpan) GetOffset() int32 {
+ if m != nil {
+ return m.Offset
}
- return true
+ return 0
}
-func (this *MetricMetadata) Equal(that interface{}) bool {
+
+func (m *BucketSpan) GetLength() uint32 {
+ if m != nil {
+ return m.Length
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("cortexpb.SourceEnum", SourceEnum_name, SourceEnum_value)
+ proto.RegisterEnum("cortexpb.MetadataV2_MetricType", MetadataV2_MetricType_name, MetadataV2_MetricType_value)
+ proto.RegisterEnum("cortexpb.MetricMetadata_MetricType", MetricMetadata_MetricType_name, MetricMetadata_MetricType_value)
+ proto.RegisterEnum("cortexpb.Histogram_ResetHint", Histogram_ResetHint_name, Histogram_ResetHint_value)
+ proto.RegisterType((*MessageWithBufRef)(nil), "cortexpb.MessageWithBufRef")
+ proto.RegisterType((*WriteRequest)(nil), "cortexpb.WriteRequest")
+ proto.RegisterType((*WriteRequestV2)(nil), "cortexpb.WriteRequestV2")
+ proto.RegisterType((*TimeSeriesV2)(nil), "cortexpb.TimeSeriesV2")
+ proto.RegisterType((*ExemplarV2)(nil), "cortexpb.ExemplarV2")
+ proto.RegisterType((*MetadataV2)(nil), "cortexpb.MetadataV2")
+ proto.RegisterType((*StreamWriteRequest)(nil), "cortexpb.StreamWriteRequest")
+ proto.RegisterType((*WriteResponse)(nil), "cortexpb.WriteResponse")
+ proto.RegisterType((*TimeSeries)(nil), "cortexpb.TimeSeries")
+ proto.RegisterType((*LabelPair)(nil), "cortexpb.LabelPair")
+ proto.RegisterType((*Sample)(nil), "cortexpb.Sample")
+ proto.RegisterType((*MetricMetadata)(nil), "cortexpb.MetricMetadata")
+ proto.RegisterType((*Metric)(nil), "cortexpb.Metric")
+ proto.RegisterType((*Exemplar)(nil), "cortexpb.Exemplar")
+ proto.RegisterType((*Histogram)(nil), "cortexpb.Histogram")
+ proto.RegisterType((*BucketSpan)(nil), "cortexpb.BucketSpan")
+}
+
+func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) }
+
+var fileDescriptor_893a47d0a749d749 = []byte{
+ // 1458 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0xcb, 0x6f, 0x53, 0x47,
+ 0x17, 0xf7, 0xf8, 0x7d, 0x8f, 0x1f, 0xdc, 0x0c, 0x06, 0x4c, 0x80, 0xeb, 0xe0, 0x4f, 0xdf, 0xf7,
+ 0x45, 0x14, 0xa5, 0x55, 0x50, 0x69, 0x8b, 0x50, 0x25, 0x3b, 0x38, 0xc4, 0x02, 0x3b, 0xd1, 0xd8,
+ 0x09, 0xa2, 0x1b, 0xeb, 0xc6, 0x19, 0xc7, 0x57, 0xdc, 0x87, 0x7b, 0x67, 0x8c, 0x48, 0x57, 0x5d,
+ 0x55, 0x5d, 0x76, 0xd3, 0x4d, 0x77, 0x55, 0x37, 0xdd, 0x76, 0xdd, 0x7f, 0x80, 0x65, 0x76, 0x45,
+ 0x48, 0x8d, 0x4a, 0xd8, 0xd0, 0x1d, 0x8b, 0xfe, 0x01, 0xd5, 0xcc, 0x7d, 0x3a, 0x0e, 0xa2, 0xad,
+ 0x58, 0x74, 0x37, 0x73, 0x1e, 0x33, 0xbf, 0x39, 0xe7, 0x77, 0x7e, 0xd7, 0x86, 0xe2, 0xd0, 0x71,
+ 0x39, 0x7d, 0xb2, 0x32, 0x71, 0x1d, 0xee, 0xe0, 0xbc, 0xb7, 0x9b, 0xec, 0x2e, 0x56, 0xf6, 0x9d,
+ 0x7d, 0x47, 0x1a, 0xdf, 0x17, 0x2b, 0xcf, 0x5f, 0xbf, 0x08, 0x0b, 0x1d, 0xca, 0x98, 0xbe, 0x4f,
+ 0x1f, 0x18, 0x7c, 0xdc, 0x9c, 0x8e, 0x08, 0x1d, 0xdd, 0x4a, 0xbf, 0xfe, 0xbe, 0x96, 0xa8, 0xff,
+ 0x92, 0x84, 0xe2, 0x03, 0xd7, 0xe0, 0x94, 0xd0, 0xcf, 0xa7, 0x94, 0x71, 0xbc, 0x05, 0xc0, 0x0d,
+ 0x8b, 0x32, 0xea, 0x1a, 0x94, 0x55, 0xd1, 0x52, 0x6a, 0xb9, 0xb0, 0x5a, 0x59, 0x09, 0x2e, 0x58,
+ 0xe9, 0x1b, 0x16, 0xed, 0x49, 0x5f, 0x73, 0xf1, 0xe9, 0x51, 0x2d, 0xf1, 0xfc, 0xa8, 0x86, 0xb7,
+ 0x5c, 0xaa, 0x9b, 0xa6, 0x33, 0xec, 0x87, 0x79, 0x24, 0x76, 0x06, 0xbe, 0x0e, 0xd9, 0x9e, 0x33,
+ 0x75, 0x87, 0xb4, 0x9a, 0x5c, 0x42, 0xcb, 0xe5, 0xf8, 0x69, 0x9e, 0xbd, 0x65, 0x4f, 0x2d, 0xe2,
+ 0xc7, 0xe0, 0x5b, 0x90, 0xb7, 0x28, 0xd7, 0xf7, 0x74, 0xae, 0x57, 0x53, 0xf2, 0xf6, 0x6a, 0x14,
+ 0xdf, 0xa1, 0xdc, 0x35, 0x86, 0x1d, 0xdf, 0xdf, 0x4c, 0x3f, 0x3d, 0xaa, 0x21, 0x12, 0xc6, 0xe3,
+ 0xdb, 0xb0, 0xc8, 0x1e, 0x19, 0x93, 0x81, 0xa9, 0xef, 0x52, 0x73, 0x60, 0xeb, 0x16, 0x1d, 0x3c,
+ 0xd6, 0x4d, 0x63, 0x4f, 0xe7, 0x86, 0x63, 0x57, 0x5f, 0xe5, 0x96, 0xd0, 0x72, 0x9e, 0x5c, 0x10,
+ 0x21, 0xf7, 0x45, 0x44, 0x57, 0xb7, 0xe8, 0x4e, 0xe8, 0xc7, 0x1d, 0x48, 0x11, 0x3a, 0xaa, 0xfe,
+ 0x2e, 0xc2, 0x0a, 0xab, 0x97, 0xe2, 0xb7, 0x9e, 0xa8, 0x5d, 0xf3, 0x8a, 0x78, 0xfa, 0xe1, 0x51,
+ 0x0d, 0x3d, 0x3f, 0xaa, 0xcd, 0x97, 0x96, 0x88, 0x73, 0xea, 0x3f, 0x27, 0xa1, 0x1c, 0xaf, 0xec,
+ 0xce, 0x2a, 0xae, 0x42, 0x8e, 0x1d, 0x58, 0xbb, 0x8e, 0xc9, 0xaa, 0xe9, 0xa5, 0xd4, 0xb2, 0x42,
+ 0x82, 0x2d, 0xee, 0xcf, 0x54, 0x3d, 0x23, 0xdf, 0x7d, 0xfe, 0xb4, 0xaa, 0xef, 0xac, 0x36, 0x2f,
+ 0xfb, 0x75, 0xaf, 0xcc, 0xd7, 0x7d, 0x67, 0xf5, 0x0d, 0x95, 0xcf, 0xfe, 0x85, 0xca, 0xff, 0xdb,
+ 0xaa, 0x57, 0x8c, 0xbf, 0x1a, 0xd7, 0xa0, 0x20, 0x81, 0xb1, 0x81, 0x4b, 0x47, 0x1e, 0x31, 0x4b,
+ 0x04, 0x3c, 0x13, 0xa1, 0x23, 0x86, 0x3f, 0x80, 0x1c, 0xd3, 0xad, 0x89, 0x49, 0x59, 0x35, 0x29,
+ 0xeb, 0xa7, 0xc6, 0x5e, 0x2b, 0x1d, 0x92, 0x2f, 0x09, 0x12, 0x84, 0xe1, 0x4f, 0x00, 0xc6, 0x06,
+ 0xe3, 0xce, 0xbe, 0xab, 0x5b, 0xcc, 0x27, 0xdb, 0xd9, 0x28, 0x69, 0x23, 0xf0, 0xf9, 0x79, 0xb1,
+ 0x60, 0xfc, 0x31, 0x28, 0xf4, 0x09, 0xb5, 0x26, 0xa6, 0xee, 0x7a, 0xbd, 0x9c, 0x19, 0x92, 0x96,
+ 0xef, 0xda, 0x59, 0xf5, 0x53, 0xa3, 0x60, 0x7c, 0x33, 0xc6, 0xef, 0x8c, 0xac, 0x55, 0x65, 0x86,
+ 0xdf, 0xd2, 0x13, 0x26, 0x46, 0xdc, 0x7e, 0x0f, 0x16, 0x86, 0x2e, 0xd5, 0x39, 0xdd, 0x1b, 0xc8,
+ 0x0e, 0x73, 0xdd, 0x9a, 0xc8, 0xb6, 0xa6, 0x88, 0xea, 0x3b, 0xfa, 0x81, 0xbd, 0xae, 0x03, 0x44,
+ 0x18, 0xde, 0x5e, 0xba, 0x0a, 0x64, 0x1e, 0xeb, 0xe6, 0xd4, 0x1b, 0x50, 0x44, 0xbc, 0x0d, 0xbe,
+ 0x0c, 0x4a, 0x74, 0x53, 0x4a, 0xde, 0x14, 0x19, 0x84, 0x70, 0x40, 0x04, 0x17, 0xdf, 0x80, 0x34,
+ 0x3f, 0x98, 0xd0, 0x2a, 0x92, 0x44, 0xab, 0x9d, 0xf6, 0x24, 0x7f, 0x7a, 0xfb, 0x07, 0x13, 0x4a,
+ 0x64, 0x30, 0xbe, 0x08, 0xf9, 0x31, 0x35, 0x27, 0x02, 0x96, 0xbc, 0xa0, 0x44, 0x72, 0x62, 0x4f,
+ 0xe8, 0x48, 0xb8, 0xa6, 0xb6, 0xc1, 0xa5, 0x2b, 0xed, 0xb9, 0xc4, 0x5e, 0x50, 0xe3, 0x57, 0x24,
+ 0x6f, 0xf6, 0x8f, 0xc2, 0x97, 0xe0, 0x42, 0xa7, 0xd5, 0x27, 0xed, 0xb5, 0x41, 0xff, 0xe1, 0x56,
+ 0x6b, 0xb0, 0xdd, 0xed, 0x6d, 0xb5, 0xd6, 0xda, 0xeb, 0xed, 0xd6, 0x1d, 0x35, 0x81, 0x2f, 0xc0,
+ 0xd9, 0xb8, 0x73, 0x6d, 0x73, 0xbb, 0xdb, 0x6f, 0x11, 0x15, 0xe1, 0x73, 0xb0, 0x10, 0x77, 0xdc,
+ 0x6d, 0x6c, 0xdf, 0x6d, 0xa9, 0x49, 0x7c, 0x11, 0xce, 0xc5, 0xcd, 0x1b, 0xed, 0x5e, 0x7f, 0xf3,
+ 0x2e, 0x69, 0x74, 0xd4, 0x14, 0xd6, 0x60, 0x71, 0x2e, 0x23, 0xf2, 0xa7, 0x4f, 0x5e, 0xd5, 0xdb,
+ 0xee, 0x74, 0x1a, 0xe4, 0xa1, 0x9a, 0xc1, 0x15, 0x50, 0xe3, 0x8e, 0x76, 0x77, 0x7d, 0x53, 0xcd,
+ 0xe2, 0x2a, 0x54, 0x66, 0xc2, 0xfb, 0x8d, 0x7e, 0xab, 0xd7, 0xea, 0xab, 0xb9, 0xfa, 0x4f, 0x08,
+ 0x70, 0x8f, 0xbb, 0x54, 0xb7, 0x66, 0x84, 0x79, 0x11, 0xf2, 0x7d, 0x6a, 0xeb, 0x36, 0x6f, 0xdf,
+ 0x91, 0x55, 0x56, 0x48, 0xb8, 0x17, 0xdc, 0xf7, 0xc3, 0x64, 0x0b, 0x67, 0xb4, 0x23, 0x7e, 0x08,
+ 0x09, 0xc2, 0x82, 0x71, 0x7d, 0xf5, 0x8e, 0xc6, 0xf5, 0x5b, 0x04, 0x25, 0xff, 0x22, 0x36, 0x71,
+ 0x6c, 0x46, 0x31, 0x86, 0xf4, 0xd0, 0xd9, 0xf3, 0x08, 0x91, 0x21, 0x72, 0x2d, 0xf4, 0xcf, 0xf2,
+ 0xf2, 0x25, 0x4c, 0x85, 0x04, 0x5b, 0xe1, 0xe9, 0xf9, 0xc3, 0xeb, 0x31, 0x2d, 0xd8, 0x62, 0x0d,
+ 0x60, 0x23, 0x1a, 0xd2, 0xb4, 0x74, 0xc6, 0x2c, 0x82, 0xa5, 0xad, 0x70, 0x12, 0x33, 0x1e, 0x4b,
+ 0x43, 0x43, 0xfd, 0x0f, 0x04, 0x10, 0xc9, 0x08, 0x6e, 0x40, 0xd6, 0xa3, 0xbd, 0xff, 0x61, 0x8b,
+ 0x4d, 0xbb, 0xd4, 0xb4, 0x2d, 0xdd, 0x70, 0x9b, 0x15, 0x5f, 0x5f, 0x8b, 0xd2, 0xd4, 0xd8, 0xd3,
+ 0x27, 0x9c, 0xba, 0xc4, 0x4f, 0xfc, 0x07, 0x32, 0x73, 0x33, 0xae, 0x15, 0x9e, 0xca, 0xe0, 0x79,
+ 0xad, 0x98, 0x57, 0x8a, 0x59, 0x79, 0x4a, 0xff, 0x0d, 0x79, 0xaa, 0x7f, 0x08, 0x4a, 0xf8, 0x1e,
+ 0xd1, 0x09, 0x21, 0xe6, 0xb2, 0x13, 0x45, 0x22, 0xd7, 0xb3, 0x13, 0x5f, 0xf4, 0x27, 0xbe, 0xde,
+ 0x80, 0xac, 0xf7, 0x84, 0xc8, 0x8f, 0xe2, 0x8a, 0x70, 0x15, 0x8a, 0xa1, 0x00, 0x0c, 0x2c, 0x26,
+ 0x93, 0x53, 0xa4, 0x10, 0xda, 0x3a, 0xac, 0xfe, 0x5d, 0x12, 0xca, 0xb3, 0x5f, 0x69, 0xfc, 0xd1,
+ 0x8c, 0x34, 0xfc, 0xe7, 0x4d, 0x5f, 0xf3, 0x79, 0x79, 0xb8, 0x0e, 0xd8, 0x92, 0xb6, 0xc1, 0x48,
+ 0xb7, 0x0c, 0xf3, 0x40, 0x7e, 0x93, 0x7c, 0xe6, 0xa8, 0x9e, 0x67, 0x5d, 0x3a, 0xc4, 0xa7, 0x48,
+ 0x3c, 0x53, 0x88, 0x87, 0xa4, 0x88, 0x42, 0xe4, 0x5a, 0xd8, 0x84, 0x6a, 0x48, 0x5e, 0x28, 0x44,
+ 0xae, 0xeb, 0x07, 0x33, 0xea, 0x51, 0x80, 0xdc, 0x76, 0xf7, 0x5e, 0x77, 0xf3, 0x41, 0x57, 0x4d,
+ 0x88, 0x4d, 0xa4, 0x10, 0x0a, 0x64, 0x02, 0x55, 0x28, 0x81, 0x12, 0x57, 0x02, 0x0c, 0xe5, 0xb9,
+ 0xe9, 0x2f, 0x40, 0x2e, 0x9a, 0xf8, 0x3c, 0xa4, 0xfd, 0x29, 0x2f, 0x42, 0x3e, 0x36, 0xd9, 0xf7,
+ 0x20, 0xeb, 0x5d, 0xfd, 0x0e, 0x88, 0x58, 0xff, 0x0a, 0x41, 0x3e, 0x20, 0xcf, 0xbb, 0x20, 0xf6,
+ 0xe9, 0x1f, 0x81, 0x93, 0x2d, 0x4f, 0xcd, 0xb7, 0xfc, 0x30, 0x03, 0x4a, 0x48, 0x46, 0x7c, 0x05,
+ 0x94, 0xa1, 0x33, 0xb5, 0xf9, 0xc0, 0xb0, 0xb9, 0x6c, 0x79, 0x7a, 0x23, 0x41, 0xf2, 0xd2, 0xd4,
+ 0xb6, 0x39, 0xbe, 0x0a, 0x05, 0xcf, 0x3d, 0x32, 0x1d, 0xdd, 0x53, 0x2b, 0xb4, 0x91, 0x20, 0x20,
+ 0x8d, 0xeb, 0xc2, 0x86, 0x55, 0x48, 0xb1, 0xa9, 0x25, 0x6f, 0x42, 0x44, 0x2c, 0xf1, 0x79, 0xc8,
+ 0xb2, 0xe1, 0x98, 0x5a, 0xba, 0x6c, 0xee, 0x02, 0xf1, 0x77, 0xf8, 0xbf, 0x50, 0xfe, 0x82, 0xba,
+ 0xce, 0x80, 0x8f, 0x5d, 0xca, 0xc6, 0x8e, 0xb9, 0x27, 0x1b, 0x8d, 0x48, 0x49, 0x58, 0xfb, 0x81,
+ 0x11, 0xff, 0xcf, 0x0f, 0x8b, 0x70, 0x65, 0x25, 0x2e, 0x44, 0x8a, 0xc2, 0xbe, 0x16, 0x60, 0xbb,
+ 0x06, 0x6a, 0x2c, 0xce, 0x03, 0x98, 0x93, 0x00, 0x11, 0x29, 0x87, 0x91, 0x1e, 0xc8, 0x06, 0x94,
+ 0x6d, 0xba, 0xaf, 0x73, 0xe3, 0x31, 0x1d, 0xb0, 0x89, 0x6e, 0xb3, 0x6a, 0xfe, 0xe4, 0xaf, 0x80,
+ 0xe6, 0x74, 0xf8, 0x88, 0xf2, 0xde, 0x44, 0xb7, 0xfd, 0x09, 0x2d, 0x05, 0x19, 0xc2, 0xc6, 0xf0,
+ 0xff, 0xe1, 0x4c, 0x78, 0xc4, 0x1e, 0x35, 0xb9, 0xce, 0xaa, 0xca, 0x52, 0x6a, 0x19, 0x93, 0xf0,
+ 0xe4, 0x3b, 0xd2, 0x3a, 0x13, 0x28, 0xb1, 0xb1, 0x2a, 0x2c, 0xa5, 0x96, 0x51, 0x14, 0x28, 0x81,
+ 0x09, 0x79, 0x2b, 0x4f, 0x1c, 0x66, 0xc4, 0x40, 0x15, 0xde, 0x0e, 0x2a, 0xc8, 0x08, 0x41, 0x85,
+ 0x47, 0xf8, 0xa0, 0x8a, 0x1e, 0xa8, 0xc0, 0x1c, 0x81, 0x0a, 0x03, 0x7d, 0x50, 0x25, 0x0f, 0x54,
+ 0x60, 0xf6, 0x41, 0xdd, 0x06, 0x70, 0x29, 0xa3, 0x7c, 0x30, 0x16, 0x95, 0x2f, 0x4b, 0x11, 0xb8,
+ 0x72, 0x8a, 0x8c, 0xad, 0x10, 0x11, 0xb5, 0x61, 0xd8, 0x9c, 0x28, 0x6e, 0xb0, 0x9c, 0xe3, 0xdf,
+ 0x99, 0x79, 0xfe, 0xdd, 0x02, 0x25, 0x4c, 0x9d, 0x9d, 0xe7, 0x1c, 0xa4, 0x1e, 0xb6, 0x7a, 0x2a,
+ 0xc2, 0x59, 0x48, 0x76, 0x37, 0xd5, 0x64, 0x34, 0xd3, 0xa9, 0xc5, 0xf4, 0xd7, 0x3f, 0x68, 0xa8,
+ 0x99, 0x83, 0x8c, 0x04, 0xdf, 0x2c, 0x02, 0x44, 0xbd, 0xaf, 0xdf, 0x06, 0x88, 0x0a, 0x25, 0xe8,
+ 0xe7, 0x8c, 0x46, 0x8c, 0x7a, 0x7c, 0x5e, 0x20, 0xfe, 0x4e, 0xd8, 0x4d, 0x6a, 0xef, 0xf3, 0xb1,
+ 0xa4, 0x71, 0x89, 0xf8, 0xbb, 0x6b, 0x35, 0x80, 0xe8, 0xe7, 0xb5, 0x00, 0xd1, 0xd8, 0x6a, 0xab,
+ 0x09, 0xa1, 0x0a, 0x64, 0xfb, 0x7e, 0x4b, 0x45, 0xcd, 0x4f, 0x0f, 0x5f, 0x68, 0x89, 0x67, 0x2f,
+ 0xb4, 0xc4, 0xeb, 0x17, 0x1a, 0xfa, 0xf2, 0x58, 0x43, 0x3f, 0x1e, 0x6b, 0xe8, 0xe9, 0xb1, 0x86,
+ 0x0e, 0x8f, 0x35, 0xf4, 0xdb, 0xb1, 0x86, 0x5e, 0x1d, 0x6b, 0x89, 0xd7, 0xc7, 0x1a, 0xfa, 0xe6,
+ 0xa5, 0x96, 0x38, 0x7c, 0xa9, 0x25, 0x9e, 0xbd, 0xd4, 0x12, 0x9f, 0x85, 0xff, 0xf2, 0x76, 0xb3,
+ 0xf2, 0x6f, 0xdd, 0x8d, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x7b, 0x33, 0xce, 0xcd, 0x06, 0x0e,
+ 0x00, 0x00,
+}
+
+func (x SourceEnum) String() string {
+ s, ok := SourceEnum_name[int32(x)]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(x))
+}
+func (x MetadataV2_MetricType) String() string {
+ s, ok := MetadataV2_MetricType_name[int32(x)]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(x))
+}
+func (x MetricMetadata_MetricType) String() string {
+ s, ok := MetricMetadata_MetricType_name[int32(x)]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(x))
+}
+func (x Histogram_ResetHint) String() string {
+ s, ok := Histogram_ResetHint_name[int32(x)]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(x))
+}
+func (this *MessageWithBufRef) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*MetricMetadata)
+ that1, ok := that.(*MessageWithBufRef)
if !ok {
- that2, ok := that.(MetricMetadata)
+ that2, ok := that.(MessageWithBufRef)
if ok {
that1 = &that2
} else {
@@ -1121,28 +1465,16 @@ func (this *MetricMetadata) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.Type != that1.Type {
- return false
- }
- if this.MetricFamilyName != that1.MetricFamilyName {
- return false
- }
- if this.Help != that1.Help {
- return false
- }
- if this.Unit != that1.Unit {
- return false
- }
return true
}
-func (this *Metric) Equal(that interface{}) bool {
+func (this *WriteRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*Metric)
+ that1, ok := that.(*WriteRequest)
if !ok {
- that2, ok := that.(Metric)
+ that2, ok := that.(WriteRequest)
if ok {
that1 = &that2
} else {
@@ -1154,24 +1486,41 @@ func (this *Metric) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if len(this.Labels) != len(that1.Labels) {
+ if len(this.Timeseries) != len(that1.Timeseries) {
return false
}
- for i := range this.Labels {
- if !this.Labels[i].Equal(that1.Labels[i]) {
+ for i := range this.Timeseries {
+ if !this.Timeseries[i].Equal(that1.Timeseries[i]) {
+ return false
+ }
+ }
+ if this.Source != that1.Source {
+ return false
+ }
+ if len(this.Metadata) != len(that1.Metadata) {
+ return false
+ }
+ for i := range this.Metadata {
+ if !this.Metadata[i].Equal(that1.Metadata[i]) {
return false
}
}
+ if this.SkipLabelNameValidation != that1.SkipLabelNameValidation {
+ return false
+ }
+ if !this.MessageWithBufRef.Equal(that1.MessageWithBufRef) {
+ return false
+ }
return true
}
-func (this *Exemplar) Equal(that interface{}) bool {
+func (this *WriteRequestV2) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*Exemplar)
+ that1, ok := that.(*WriteRequestV2)
if !ok {
- that2, ok := that.(Exemplar)
+ that2, ok := that.(WriteRequestV2)
if ok {
that1 = &that2
} else {
@@ -1183,30 +1532,41 @@ func (this *Exemplar) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if len(this.Labels) != len(that1.Labels) {
+ if len(this.Symbols) != len(that1.Symbols) {
return false
}
- for i := range this.Labels {
- if !this.Labels[i].Equal(that1.Labels[i]) {
+ for i := range this.Symbols {
+ if this.Symbols[i] != that1.Symbols[i] {
return false
}
}
- if this.Value != that1.Value {
+ if len(this.Timeseries) != len(that1.Timeseries) {
return false
}
- if this.TimestampMs != that1.TimestampMs {
+ for i := range this.Timeseries {
+ if !this.Timeseries[i].Equal(that1.Timeseries[i]) {
+ return false
+ }
+ }
+ if this.Source != that1.Source {
+ return false
+ }
+ if this.SkipLabelNameValidation != that1.SkipLabelNameValidation {
+ return false
+ }
+ if !this.MessageWithBufRef.Equal(that1.MessageWithBufRef) {
return false
}
return true
}
-func (this *Histogram) Equal(that interface{}) bool {
+func (this *TimeSeriesV2) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*Histogram)
+ that1, ok := that.(*TimeSeriesV2)
if !ok {
- that2, ok := that.(Histogram)
+ that2, ok := that.(TimeSeriesV2)
if ok {
that1 = &that2
} else {
@@ -1218,97 +1578,89 @@ func (this *Histogram) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if that1.Count == nil {
- if this.Count != nil {
- return false
- }
- } else if this.Count == nil {
- return false
- } else if !this.Count.Equal(that1.Count) {
- return false
- }
- if this.Sum != that1.Sum {
- return false
- }
- if this.Schema != that1.Schema {
- return false
- }
- if this.ZeroThreshold != that1.ZeroThreshold {
+ if len(this.LabelsRefs) != len(that1.LabelsRefs) {
return false
}
- if that1.ZeroCount == nil {
- if this.ZeroCount != nil {
+ for i := range this.LabelsRefs {
+ if this.LabelsRefs[i] != that1.LabelsRefs[i] {
return false
}
- } else if this.ZeroCount == nil {
- return false
- } else if !this.ZeroCount.Equal(that1.ZeroCount) {
- return false
}
- if len(this.NegativeSpans) != len(that1.NegativeSpans) {
+ if len(this.Samples) != len(that1.Samples) {
return false
}
- for i := range this.NegativeSpans {
- if !this.NegativeSpans[i].Equal(&that1.NegativeSpans[i]) {
+ for i := range this.Samples {
+ if !this.Samples[i].Equal(&that1.Samples[i]) {
return false
}
}
- if len(this.NegativeDeltas) != len(that1.NegativeDeltas) {
+ if len(this.Histograms) != len(that1.Histograms) {
return false
}
- for i := range this.NegativeDeltas {
- if this.NegativeDeltas[i] != that1.NegativeDeltas[i] {
+ for i := range this.Histograms {
+ if !this.Histograms[i].Equal(&that1.Histograms[i]) {
return false
}
}
- if len(this.NegativeCounts) != len(that1.NegativeCounts) {
+ if len(this.Exemplars) != len(that1.Exemplars) {
return false
}
- for i := range this.NegativeCounts {
- if this.NegativeCounts[i] != that1.NegativeCounts[i] {
+ for i := range this.Exemplars {
+ if !this.Exemplars[i].Equal(&that1.Exemplars[i]) {
return false
}
}
- if len(this.PositiveSpans) != len(that1.PositiveSpans) {
+ if !this.Metadata.Equal(&that1.Metadata) {
return false
}
- for i := range this.PositiveSpans {
- if !this.PositiveSpans[i].Equal(&that1.PositiveSpans[i]) {
- return false
- }
- }
- if len(this.PositiveDeltas) != len(that1.PositiveDeltas) {
+ if this.CreatedTimestamp != that1.CreatedTimestamp {
return false
}
- for i := range this.PositiveDeltas {
- if this.PositiveDeltas[i] != that1.PositiveDeltas[i] {
+ return true
+}
+func (this *ExemplarV2) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
+
+ that1, ok := that.(*ExemplarV2)
+ if !ok {
+ that2, ok := that.(ExemplarV2)
+ if ok {
+ that1 = &that2
+ } else {
return false
}
}
- if len(this.PositiveCounts) != len(that1.PositiveCounts) {
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
return false
}
- for i := range this.PositiveCounts {
- if this.PositiveCounts[i] != that1.PositiveCounts[i] {
+ if len(this.LabelsRefs) != len(that1.LabelsRefs) {
+ return false
+ }
+ for i := range this.LabelsRefs {
+ if this.LabelsRefs[i] != that1.LabelsRefs[i] {
return false
}
}
- if this.ResetHint != that1.ResetHint {
+ if this.Value != that1.Value {
return false
}
- if this.TimestampMs != that1.TimestampMs {
+ if this.Timestamp != that1.Timestamp {
return false
}
return true
}
-func (this *Histogram_CountInt) Equal(that interface{}) bool {
+func (this *MetadataV2) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*Histogram_CountInt)
+ that1, ok := that.(*MetadataV2)
if !ok {
- that2, ok := that.(Histogram_CountInt)
+ that2, ok := that.(MetadataV2)
if ok {
that1 = &that2
} else {
@@ -1320,19 +1672,25 @@ func (this *Histogram_CountInt) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.CountInt != that1.CountInt {
+ if this.Type != that1.Type {
return false
}
- return true
+ if this.HelpRef != that1.HelpRef {
+ return false
+ }
+ if this.UnitRef != that1.UnitRef {
+ return false
+ }
+ return true
}
-func (this *Histogram_CountFloat) Equal(that interface{}) bool {
+func (this *StreamWriteRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*Histogram_CountFloat)
+ that1, ok := that.(*StreamWriteRequest)
if !ok {
- that2, ok := that.(Histogram_CountFloat)
+ that2, ok := that.(StreamWriteRequest)
if ok {
that1 = &that2
} else {
@@ -1344,19 +1702,25 @@ func (this *Histogram_CountFloat) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.CountFloat != that1.CountFloat {
+ if this.TenantID != that1.TenantID {
+ return false
+ }
+ if !this.Request.Equal(that1.Request) {
+ return false
+ }
+ if !this.MessageWithBufRef.Equal(that1.MessageWithBufRef) {
return false
}
return true
}
-func (this *Histogram_ZeroCountInt) Equal(that interface{}) bool {
+func (this *WriteResponse) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*Histogram_ZeroCountInt)
+ that1, ok := that.(*WriteResponse)
if !ok {
- that2, ok := that.(Histogram_ZeroCountInt)
+ that2, ok := that.(WriteResponse)
if ok {
that1 = &that2
} else {
@@ -1368,19 +1732,31 @@ func (this *Histogram_ZeroCountInt) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.ZeroCountInt != that1.ZeroCountInt {
+ if this.Code != that1.Code {
+ return false
+ }
+ if this.Message != that1.Message {
+ return false
+ }
+ if this.Samples != that1.Samples {
+ return false
+ }
+ if this.Histograms != that1.Histograms {
+ return false
+ }
+ if this.Exemplars != that1.Exemplars {
return false
}
return true
}
-func (this *Histogram_ZeroCountFloat) Equal(that interface{}) bool {
+func (this *TimeSeries) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*Histogram_ZeroCountFloat)
+ that1, ok := that.(*TimeSeries)
if !ok {
- that2, ok := that.(Histogram_ZeroCountFloat)
+ that2, ok := that.(TimeSeries)
if ok {
that1 = &that2
} else {
@@ -1392,19 +1768,48 @@ func (this *Histogram_ZeroCountFloat) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.ZeroCountFloat != that1.ZeroCountFloat {
+ if len(this.Labels) != len(that1.Labels) {
+ return false
+ }
+ for i := range this.Labels {
+ if !this.Labels[i].Equal(that1.Labels[i]) {
+ return false
+ }
+ }
+ if len(this.Samples) != len(that1.Samples) {
+ return false
+ }
+ for i := range this.Samples {
+ if !this.Samples[i].Equal(&that1.Samples[i]) {
+ return false
+ }
+ }
+ if len(this.Exemplars) != len(that1.Exemplars) {
+ return false
+ }
+ for i := range this.Exemplars {
+ if !this.Exemplars[i].Equal(&that1.Exemplars[i]) {
+ return false
+ }
+ }
+ if len(this.Histograms) != len(that1.Histograms) {
return false
}
+ for i := range this.Histograms {
+ if !this.Histograms[i].Equal(&that1.Histograms[i]) {
+ return false
+ }
+ }
return true
}
-func (this *BucketSpan) Equal(that interface{}) bool {
+func (this *LabelPair) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
- that1, ok := that.(*BucketSpan)
+ that1, ok := that.(*LabelPair)
if !ok {
- that2, ok := that.(BucketSpan)
+ that2, ok := that.(LabelPair)
if ok {
that1 = &that2
} else {
@@ -1416,791 +1821,665 @@ func (this *BucketSpan) Equal(that interface{}) bool {
} else if this == nil {
return false
}
- if this.Offset != that1.Offset {
+ if !bytes.Equal(this.Name, that1.Name) {
return false
}
- if this.Length != that1.Length {
+ if !bytes.Equal(this.Value, that1.Value) {
return false
}
return true
}
-func (this *WriteRequest) GoString() string {
- if this == nil {
- return "nil"
+func (this *Sample) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
}
- s := make([]string, 0, 8)
- s = append(s, "&cortexpb.WriteRequest{")
- s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n")
- s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n")
- if this.Metadata != nil {
- s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n")
+
+ that1, ok := that.(*Sample)
+ if !ok {
+ that2, ok := that.(Sample)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
}
- s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *WriteResponse) GoString() string {
- if this == nil {
- return "nil"
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- s := make([]string, 0, 4)
- s = append(s, "&cortexpb.WriteResponse{")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *TimeSeries) GoString() string {
- if this == nil {
- return "nil"
+ if this.Value != that1.Value {
+ return false
}
- s := make([]string, 0, 8)
- s = append(s, "&cortexpb.TimeSeries{")
- s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
- if this.Samples != nil {
- vs := make([]*Sample, len(this.Samples))
- for i := range vs {
- vs[i] = &this.Samples[i]
- }
- s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n")
+ if this.TimestampMs != that1.TimestampMs {
+ return false
}
- if this.Exemplars != nil {
- vs := make([]*Exemplar, len(this.Exemplars))
- for i := range vs {
- vs[i] = &this.Exemplars[i]
- }
- s = append(s, "Exemplars: "+fmt.Sprintf("%#v", vs)+",\n")
+ return true
+}
+func (this *MetricMetadata) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
}
- if this.Histograms != nil {
- vs := make([]*Histogram, len(this.Histograms))
- for i := range vs {
- vs[i] = &this.Histograms[i]
+
+ that1, ok := that.(*MetricMetadata)
+ if !ok {
+ that2, ok := that.(MetricMetadata)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
}
- s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n")
}
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *LabelPair) GoString() string {
- if this == nil {
- return "nil"
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- s := make([]string, 0, 6)
- s = append(s, "&cortexpb.LabelPair{")
- s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
- s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *Sample) GoString() string {
- if this == nil {
- return "nil"
+ if this.Type != that1.Type {
+ return false
}
- s := make([]string, 0, 6)
- s = append(s, "&cortexpb.Sample{")
- s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
- s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *MetricMetadata) GoString() string {
- if this == nil {
- return "nil"
+ if this.MetricFamilyName != that1.MetricFamilyName {
+ return false
}
- s := make([]string, 0, 8)
- s = append(s, "&cortexpb.MetricMetadata{")
- s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
- s = append(s, "MetricFamilyName: "+fmt.Sprintf("%#v", this.MetricFamilyName)+",\n")
- s = append(s, "Help: "+fmt.Sprintf("%#v", this.Help)+",\n")
- s = append(s, "Unit: "+fmt.Sprintf("%#v", this.Unit)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *Metric) GoString() string {
- if this == nil {
- return "nil"
+ if this.Help != that1.Help {
+ return false
}
- s := make([]string, 0, 5)
- s = append(s, "&cortexpb.Metric{")
- s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func (this *Exemplar) GoString() string {
- if this == nil {
- return "nil"
+ if this.Unit != that1.Unit {
+ return false
}
- s := make([]string, 0, 7)
- s = append(s, "&cortexpb.Exemplar{")
- s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
- s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
- s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
+ return true
}
-func (this *Histogram) GoString() string {
- if this == nil {
- return "nil"
+func (this *Metric) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
}
- s := make([]string, 0, 19)
- s = append(s, "&cortexpb.Histogram{")
- if this.Count != nil {
- s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n")
+
+ that1, ok := that.(*Metric)
+ if !ok {
+ that2, ok := that.(Metric)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
}
- s = append(s, "Sum: "+fmt.Sprintf("%#v", this.Sum)+",\n")
- s = append(s, "Schema: "+fmt.Sprintf("%#v", this.Schema)+",\n")
- s = append(s, "ZeroThreshold: "+fmt.Sprintf("%#v", this.ZeroThreshold)+",\n")
- if this.ZeroCount != nil {
- s = append(s, "ZeroCount: "+fmt.Sprintf("%#v", this.ZeroCount)+",\n")
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- if this.NegativeSpans != nil {
- vs := make([]*BucketSpan, len(this.NegativeSpans))
- for i := range vs {
- vs[i] = &this.NegativeSpans[i]
- }
- s = append(s, "NegativeSpans: "+fmt.Sprintf("%#v", vs)+",\n")
+ if len(this.Labels) != len(that1.Labels) {
+ return false
}
- s = append(s, "NegativeDeltas: "+fmt.Sprintf("%#v", this.NegativeDeltas)+",\n")
- s = append(s, "NegativeCounts: "+fmt.Sprintf("%#v", this.NegativeCounts)+",\n")
- if this.PositiveSpans != nil {
- vs := make([]*BucketSpan, len(this.PositiveSpans))
- for i := range vs {
- vs[i] = &this.PositiveSpans[i]
+ for i := range this.Labels {
+ if !this.Labels[i].Equal(that1.Labels[i]) {
+ return false
}
- s = append(s, "PositiveSpans: "+fmt.Sprintf("%#v", vs)+",\n")
}
- s = append(s, "PositiveDeltas: "+fmt.Sprintf("%#v", this.PositiveDeltas)+",\n")
- s = append(s, "PositiveCounts: "+fmt.Sprintf("%#v", this.PositiveCounts)+",\n")
- s = append(s, "ResetHint: "+fmt.Sprintf("%#v", this.ResetHint)+",\n")
- s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
+ return true
}
-func (this *Histogram_CountInt) GoString() string {
- if this == nil {
- return "nil"
+func (this *Exemplar) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
}
- s := strings.Join([]string{`&cortexpb.Histogram_CountInt{` +
- `CountInt:` + fmt.Sprintf("%#v", this.CountInt) + `}`}, ", ")
- return s
-}
-func (this *Histogram_CountFloat) GoString() string {
- if this == nil {
- return "nil"
+
+ that1, ok := that.(*Exemplar)
+ if !ok {
+ that2, ok := that.(Exemplar)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
}
- s := strings.Join([]string{`&cortexpb.Histogram_CountFloat{` +
- `CountFloat:` + fmt.Sprintf("%#v", this.CountFloat) + `}`}, ", ")
- return s
-}
-func (this *Histogram_ZeroCountInt) GoString() string {
- if this == nil {
- return "nil"
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- s := strings.Join([]string{`&cortexpb.Histogram_ZeroCountInt{` +
- `ZeroCountInt:` + fmt.Sprintf("%#v", this.ZeroCountInt) + `}`}, ", ")
- return s
-}
-func (this *Histogram_ZeroCountFloat) GoString() string {
- if this == nil {
- return "nil"
+ if len(this.Labels) != len(that1.Labels) {
+ return false
}
- s := strings.Join([]string{`&cortexpb.Histogram_ZeroCountFloat{` +
- `ZeroCountFloat:` + fmt.Sprintf("%#v", this.ZeroCountFloat) + `}`}, ", ")
- return s
-}
-func (this *BucketSpan) GoString() string {
- if this == nil {
- return "nil"
+ for i := range this.Labels {
+ if !this.Labels[i].Equal(that1.Labels[i]) {
+ return false
+ }
}
- s := make([]string, 0, 6)
- s = append(s, "&cortexpb.BucketSpan{")
- s = append(s, "Offset: "+fmt.Sprintf("%#v", this.Offset)+",\n")
- s = append(s, "Length: "+fmt.Sprintf("%#v", this.Length)+",\n")
- s = append(s, "}")
- return strings.Join(s, "")
-}
-func valueToGoStringCortex(v interface{}, typ string) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
+ if this.Value != that1.Value {
+ return false
}
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
-}
-func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+ if this.TimestampMs != that1.TimestampMs {
+ return false
}
- return dAtA[:n], nil
-}
-
-func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+ return true
}
+func (this *Histogram) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
-func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.SkipLabelNameValidation {
- i--
- if m.SkipLabelNameValidation {
- dAtA[i] = 1
+ that1, ok := that.(*Histogram)
+ if !ok {
+ that2, ok := that.(Histogram)
+ if ok {
+ that1 = &that2
} else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc0
- }
- if len(m.Metadata) > 0 {
- for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
+ return false
}
}
- if m.Source != 0 {
- i = encodeVarintCortex(dAtA, i, uint64(m.Source))
- i--
- dAtA[i] = 0x10
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- if len(m.Timeseries) > 0 {
- for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
- {
- size := m.Timeseries[iNdEx].Size()
- i -= size
- if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
+ if that1.Count == nil {
+ if this.Count != nil {
+ return false
}
+ } else if this.Count == nil {
+ return false
+ } else if !this.Count.Equal(that1.Count) {
+ return false
}
- return len(dAtA) - i, nil
-}
-
-func (m *WriteResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+ if this.Sum != that1.Sum {
+ return false
}
- return dAtA[:n], nil
-}
-
-func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- return len(dAtA) - i, nil
-}
-
-func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+ if this.Schema != that1.Schema {
+ return false
}
- return dAtA[:n], nil
-}
-
-func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Histograms) > 0 {
- for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
+ if this.ZeroThreshold != that1.ZeroThreshold {
+ return false
+ }
+ if that1.ZeroCount == nil {
+ if this.ZeroCount != nil {
+ return false
}
+ } else if this.ZeroCount == nil {
+ return false
+ } else if !this.ZeroCount.Equal(that1.ZeroCount) {
+ return false
}
- if len(m.Exemplars) > 0 {
- for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
+ if len(this.NegativeSpans) != len(that1.NegativeSpans) {
+ return false
+ }
+ for i := range this.NegativeSpans {
+ if !this.NegativeSpans[i].Equal(&that1.NegativeSpans[i]) {
+ return false
}
}
- if len(m.Samples) > 0 {
- for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
+ if len(this.NegativeDeltas) != len(that1.NegativeDeltas) {
+ return false
+ }
+ for i := range this.NegativeDeltas {
+ if this.NegativeDeltas[i] != that1.NegativeDeltas[i] {
+ return false
}
}
- if len(m.Labels) > 0 {
- for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
- {
- size := m.Labels[iNdEx].Size()
- i -= size
- if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
+ if len(this.NegativeCounts) != len(that1.NegativeCounts) {
+ return false
+ }
+ for i := range this.NegativeCounts {
+ if this.NegativeCounts[i] != that1.NegativeCounts[i] {
+ return false
}
}
- return len(dAtA) - i, nil
-}
-
-func (m *LabelPair) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+ if len(this.PositiveSpans) != len(that1.PositiveSpans) {
+ return false
}
- return dAtA[:n], nil
-}
-
-func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintCortex(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x12
+ for i := range this.PositiveSpans {
+ if !this.PositiveSpans[i].Equal(&that1.PositiveSpans[i]) {
+ return false
+ }
}
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintCortex(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
+ if len(this.PositiveDeltas) != len(that1.PositiveDeltas) {
+ return false
}
- return len(dAtA) - i, nil
-}
-
-func (m *Sample) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+ for i := range this.PositiveDeltas {
+ if this.PositiveDeltas[i] != that1.PositiveDeltas[i] {
+ return false
+ }
}
- return dAtA[:n], nil
-}
-
-func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.TimestampMs != 0 {
- i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs))
- i--
- dAtA[i] = 0x10
+ if len(this.PositiveCounts) != len(that1.PositiveCounts) {
+ return false
}
- if m.Value != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
- i--
- dAtA[i] = 0x9
+ for i := range this.PositiveCounts {
+ if this.PositiveCounts[i] != that1.PositiveCounts[i] {
+ return false
+ }
}
- return len(dAtA) - i, nil
-}
-
-func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+ if this.ResetHint != that1.ResetHint {
+ return false
}
- return dAtA[:n], nil
-}
-
-func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+ if this.TimestampMs != that1.TimestampMs {
+ return false
+ }
+ return true
}
-
-func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Unit) > 0 {
- i -= len(m.Unit)
- copy(dAtA[i:], m.Unit)
- i = encodeVarintCortex(dAtA, i, uint64(len(m.Unit)))
- i--
- dAtA[i] = 0x2a
+func (this *Histogram_CountInt) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
}
- if len(m.Help) > 0 {
- i -= len(m.Help)
- copy(dAtA[i:], m.Help)
- i = encodeVarintCortex(dAtA, i, uint64(len(m.Help)))
- i--
- dAtA[i] = 0x22
+
+ that1, ok := that.(*Histogram_CountInt)
+ if !ok {
+ that2, ok := that.(Histogram_CountInt)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
}
- if len(m.MetricFamilyName) > 0 {
- i -= len(m.MetricFamilyName)
- copy(dAtA[i:], m.MetricFamilyName)
- i = encodeVarintCortex(dAtA, i, uint64(len(m.MetricFamilyName)))
- i--
- dAtA[i] = 0x12
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- if m.Type != 0 {
- i = encodeVarintCortex(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
+ if this.CountInt != that1.CountInt {
+ return false
}
- return len(dAtA) - i, nil
+ return true
}
-
-func (m *Metric) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (this *Histogram_CountFloat) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
}
- return dAtA[:n], nil
-}
-
-func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if len(m.Labels) > 0 {
- for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
- {
- size := m.Labels[iNdEx].Size()
- i -= size
- if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
+ that1, ok := that.(*Histogram_CountFloat)
+ if !ok {
+ that2, ok := that.(Histogram_CountFloat)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
}
}
- return len(dAtA) - i, nil
-}
-
-func (m *Exemplar) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- return dAtA[:n], nil
-}
-
-func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+ if this.CountFloat != that1.CountFloat {
+ return false
+ }
+ return true
}
+func (this *Histogram_ZeroCountInt) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
-func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.TimestampMs != 0 {
- i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs))
- i--
- dAtA[i] = 0x18
+ that1, ok := that.(*Histogram_ZeroCountInt)
+ if !ok {
+ that2, ok := that.(Histogram_ZeroCountInt)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
}
- if m.Value != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
- i--
- dAtA[i] = 0x11
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- if len(m.Labels) > 0 {
- for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
- {
- size := m.Labels[iNdEx].Size()
- i -= size
- if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
+ if this.ZeroCountInt != that1.ZeroCountInt {
+ return false
}
- return len(dAtA) - i, nil
+ return true
}
-
-func (m *Histogram) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
+func (this *Histogram_ZeroCountFloat) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
}
- return dAtA[:n], nil
-}
-func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
+ that1, ok := that.(*Histogram_ZeroCountFloat)
+ if !ok {
+ that2, ok := that.(Histogram_ZeroCountFloat)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
+ }
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
+ }
+ if this.ZeroCountFloat != that1.ZeroCountFloat {
+ return false
+ }
+ return true
}
+func (this *BucketSpan) Equal(that interface{}) bool {
+ if that == nil {
+ return this == nil
+ }
-func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.TimestampMs != 0 {
- i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs))
- i--
- dAtA[i] = 0x78
+ that1, ok := that.(*BucketSpan)
+ if !ok {
+ that2, ok := that.(BucketSpan)
+ if ok {
+ that1 = &that2
+ } else {
+ return false
+ }
}
- if m.ResetHint != 0 {
- i = encodeVarintCortex(dAtA, i, uint64(m.ResetHint))
- i--
- dAtA[i] = 0x70
+ if that1 == nil {
+ return this == nil
+ } else if this == nil {
+ return false
}
- if len(m.PositiveCounts) > 0 {
- for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- {
- f1 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f1))
- }
- i = encodeVarintCortex(dAtA, i, uint64(len(m.PositiveCounts)*8))
- i--
- dAtA[i] = 0x6a
+ if this.Offset != that1.Offset {
+ return false
}
- if len(m.PositiveDeltas) > 0 {
- var j2 int
- dAtA4 := make([]byte, len(m.PositiveDeltas)*10)
- for _, num := range m.PositiveDeltas {
- x3 := (uint64(num) << 1) ^ uint64((num >> 63))
- for x3 >= 1<<7 {
- dAtA4[j2] = uint8(uint64(x3)&0x7f | 0x80)
- j2++
- x3 >>= 7
- }
- dAtA4[j2] = uint8(x3)
- j2++
- }
- i -= j2
- copy(dAtA[i:], dAtA4[:j2])
- i = encodeVarintCortex(dAtA, i, uint64(j2))
- i--
- dAtA[i] = 0x62
+ if this.Length != that1.Length {
+ return false
}
- if len(m.PositiveSpans) > 0 {
- for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
+ return true
+}
+func (this *MessageWithBufRef) GoString() string {
+ if this == nil {
+ return "nil"
}
- if len(m.NegativeCounts) > 0 {
- for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- {
- f5 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f5))
- }
- i = encodeVarintCortex(dAtA, i, uint64(len(m.NegativeCounts)*8))
- i--
- dAtA[i] = 0x52
+ s := make([]string, 0, 4)
+ s = append(s, "&cortexpb.MessageWithBufRef{")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *WriteRequest) GoString() string {
+ if this == nil {
+ return "nil"
}
- if len(m.NegativeDeltas) > 0 {
- var j6 int
- dAtA8 := make([]byte, len(m.NegativeDeltas)*10)
- for _, num := range m.NegativeDeltas {
- x7 := (uint64(num) << 1) ^ uint64((num >> 63))
- for x7 >= 1<<7 {
- dAtA8[j6] = uint8(uint64(x7)&0x7f | 0x80)
- j6++
- x7 >>= 7
- }
- dAtA8[j6] = uint8(x7)
- j6++
+ s := make([]string, 0, 9)
+ s = append(s, "&cortexpb.WriteRequest{")
+ s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n")
+ s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n")
+ if this.Metadata != nil {
+ s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n")
+ }
+ s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n")
+ s = append(s, "MessageWithBufRef: "+fmt.Sprintf("%#v", this.MessageWithBufRef)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *WriteRequestV2) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&cortexpb.WriteRequestV2{")
+ s = append(s, "Symbols: "+fmt.Sprintf("%#v", this.Symbols)+",\n")
+ s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n")
+ s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n")
+ s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n")
+ s = append(s, "MessageWithBufRef: "+fmt.Sprintf("%#v", this.MessageWithBufRef)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *TimeSeriesV2) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 10)
+ s = append(s, "&cortexpb.TimeSeriesV2{")
+ s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n")
+ if this.Samples != nil {
+ vs := make([]*Sample, len(this.Samples))
+ for i := range vs {
+ vs[i] = &this.Samples[i]
}
- i -= j6
- copy(dAtA[i:], dAtA8[:j6])
- i = encodeVarintCortex(dAtA, i, uint64(j6))
- i--
- dAtA[i] = 0x4a
+ s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n")
}
- if len(m.NegativeSpans) > 0 {
- for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintCortex(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
+ if this.Histograms != nil {
+ vs := make([]*Histogram, len(this.Histograms))
+ for i := range vs {
+ vs[i] = &this.Histograms[i]
}
+ s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n")
}
- if m.ZeroCount != nil {
- {
- size := m.ZeroCount.Size()
- i -= size
- if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
+ if this.Exemplars != nil {
+ vs := make([]*ExemplarV2, len(this.Exemplars))
+ for i := range vs {
+ vs[i] = &this.Exemplars[i]
}
+ s = append(s, "Exemplars: "+fmt.Sprintf("%#v", vs)+",\n")
}
- if m.ZeroThreshold != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
- i--
- dAtA[i] = 0x29
+ s = append(s, "Metadata: "+strings.Replace(this.Metadata.GoString(), `&`, ``, 1)+",\n")
+ s = append(s, "CreatedTimestamp: "+fmt.Sprintf("%#v", this.CreatedTimestamp)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *ExemplarV2) GoString() string {
+ if this == nil {
+ return "nil"
}
- if m.Schema != 0 {
- i = encodeVarintCortex(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31))))
- i--
- dAtA[i] = 0x20
+ s := make([]string, 0, 7)
+ s = append(s, "&cortexpb.ExemplarV2{")
+ s = append(s, "LabelsRefs: "+fmt.Sprintf("%#v", this.LabelsRefs)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *MetadataV2) GoString() string {
+ if this == nil {
+ return "nil"
}
- if m.Sum != 0 {
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
- i--
- dAtA[i] = 0x19
+ s := make([]string, 0, 7)
+ s = append(s, "&cortexpb.MetadataV2{")
+ s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+ s = append(s, "HelpRef: "+fmt.Sprintf("%#v", this.HelpRef)+",\n")
+ s = append(s, "UnitRef: "+fmt.Sprintf("%#v", this.UnitRef)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func (this *StreamWriteRequest) GoString() string {
+ if this == nil {
+ return "nil"
}
- if m.Count != nil {
- {
- size := m.Count.Size()
- i -= size
- if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
+ s := make([]string, 0, 7)
+ s = append(s, "&cortexpb.StreamWriteRequest{")
+ s = append(s, "TenantID: "+fmt.Sprintf("%#v", this.TenantID)+",\n")
+ if this.Request != nil {
+ s = append(s, "Request: "+fmt.Sprintf("%#v", this.Request)+",\n")
}
- return len(dAtA) - i, nil
+ s = append(s, "MessageWithBufRef: "+fmt.Sprintf("%#v", this.MessageWithBufRef)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-
-func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+func (this *WriteResponse) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 9)
+ s = append(s, "&cortexpb.WriteResponse{")
+ s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n")
+ s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n")
+ s = append(s, "Samples: "+fmt.Sprintf("%#v", this.Samples)+",\n")
+ s = append(s, "Histograms: "+fmt.Sprintf("%#v", this.Histograms)+",\n")
+ s = append(s, "Exemplars: "+fmt.Sprintf("%#v", this.Exemplars)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-
-func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintCortex(dAtA, i, uint64(m.CountInt))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
+func (this *TimeSeries) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&cortexpb.TimeSeries{")
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+ if this.Samples != nil {
+ vs := make([]*Sample, len(this.Samples))
+ for i := range vs {
+ vs[i] = &this.Samples[i]
+ }
+ s = append(s, "Samples: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ if this.Exemplars != nil {
+ vs := make([]*Exemplar, len(this.Exemplars))
+ for i := range vs {
+ vs[i] = &this.Exemplars[i]
+ }
+ s = append(s, "Exemplars: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ if this.Histograms != nil {
+ vs := make([]*Histogram, len(this.Histograms))
+ for i := range vs {
+ vs[i] = &this.Histograms[i]
+ }
+ s = append(s, "Histograms: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+func (this *LabelPair) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&cortexpb.LabelPair{")
+ s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-
-func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat))))
- i--
- dAtA[i] = 0x11
- return len(dAtA) - i, nil
+func (this *Sample) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&cortexpb.Sample{")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+func (this *MetricMetadata) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 8)
+ s = append(s, "&cortexpb.MetricMetadata{")
+ s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
+ s = append(s, "MetricFamilyName: "+fmt.Sprintf("%#v", this.MetricFamilyName)+",\n")
+ s = append(s, "Help: "+fmt.Sprintf("%#v", this.Help)+",\n")
+ s = append(s, "Unit: "+fmt.Sprintf("%#v", this.Unit)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-
-func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintCortex(dAtA, i, uint64(m.ZeroCountInt))
- i--
- dAtA[i] = 0x30
- return len(dAtA) - i, nil
+func (this *Metric) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 5)
+ s = append(s, "&cortexpb.Metric{")
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+func (this *Exemplar) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 7)
+ s = append(s, "&cortexpb.Exemplar{")
+ s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n")
+ s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+ s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-
-func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i -= 8
- encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat))))
- i--
- dAtA[i] = 0x39
- return len(dAtA) - i, nil
+func (this *Histogram) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 19)
+ s = append(s, "&cortexpb.Histogram{")
+ if this.Count != nil {
+ s = append(s, "Count: "+fmt.Sprintf("%#v", this.Count)+",\n")
+ }
+ s = append(s, "Sum: "+fmt.Sprintf("%#v", this.Sum)+",\n")
+ s = append(s, "Schema: "+fmt.Sprintf("%#v", this.Schema)+",\n")
+ s = append(s, "ZeroThreshold: "+fmt.Sprintf("%#v", this.ZeroThreshold)+",\n")
+ if this.ZeroCount != nil {
+ s = append(s, "ZeroCount: "+fmt.Sprintf("%#v", this.ZeroCount)+",\n")
+ }
+ if this.NegativeSpans != nil {
+ vs := make([]*BucketSpan, len(this.NegativeSpans))
+ for i := range vs {
+ vs[i] = &this.NegativeSpans[i]
+ }
+ s = append(s, "NegativeSpans: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ s = append(s, "NegativeDeltas: "+fmt.Sprintf("%#v", this.NegativeDeltas)+",\n")
+ s = append(s, "NegativeCounts: "+fmt.Sprintf("%#v", this.NegativeCounts)+",\n")
+ if this.PositiveSpans != nil {
+ vs := make([]*BucketSpan, len(this.PositiveSpans))
+ for i := range vs {
+ vs[i] = &this.PositiveSpans[i]
+ }
+ s = append(s, "PositiveSpans: "+fmt.Sprintf("%#v", vs)+",\n")
+ }
+ s = append(s, "PositiveDeltas: "+fmt.Sprintf("%#v", this.PositiveDeltas)+",\n")
+ s = append(s, "PositiveCounts: "+fmt.Sprintf("%#v", this.PositiveCounts)+",\n")
+ s = append(s, "ResetHint: "+fmt.Sprintf("%#v", this.ResetHint)+",\n")
+ s = append(s, "TimestampMs: "+fmt.Sprintf("%#v", this.TimestampMs)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
}
-func (m *BucketSpan) Marshal() (dAtA []byte, err error) {
+func (this *Histogram_CountInt) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&cortexpb.Histogram_CountInt{` +
+ `CountInt:` + fmt.Sprintf("%#v", this.CountInt) + `}`}, ", ")
+ return s
+}
+func (this *Histogram_CountFloat) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&cortexpb.Histogram_CountFloat{` +
+ `CountFloat:` + fmt.Sprintf("%#v", this.CountFloat) + `}`}, ", ")
+ return s
+}
+func (this *Histogram_ZeroCountInt) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&cortexpb.Histogram_ZeroCountInt{` +
+ `ZeroCountInt:` + fmt.Sprintf("%#v", this.ZeroCountInt) + `}`}, ", ")
+ return s
+}
+func (this *Histogram_ZeroCountFloat) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&cortexpb.Histogram_ZeroCountFloat{` +
+ `ZeroCountFloat:` + fmt.Sprintf("%#v", this.ZeroCountFloat) + `}`}, ", ")
+ return s
+}
+func (this *BucketSpan) GoString() string {
+ if this == nil {
+ return "nil"
+ }
+ s := make([]string, 0, 6)
+ s = append(s, "&cortexpb.BucketSpan{")
+ s = append(s, "Offset: "+fmt.Sprintf("%#v", this.Offset)+",\n")
+ s = append(s, "Length: "+fmt.Sprintf("%#v", this.Length)+",\n")
+ s = append(s, "}")
+ return strings.Join(s, "")
+}
+func valueToGoStringCortex(v interface{}, typ string) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func (m *MessageWithBufRef) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
@@ -2210,524 +2489,2606 @@ func (m *BucketSpan) Marshal() (dAtA []byte, err error) {
return dAtA[:n], nil
}
-func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) {
+func (m *MessageWithBufRef) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+func (m *MessageWithBufRef) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
- if m.Length != 0 {
- i = encodeVarintCortex(dAtA, i, uint64(m.Length))
- i--
- dAtA[i] = 0x10
- }
- if m.Offset != 0 {
- i = encodeVarintCortex(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
- i--
- dAtA[i] = 0x8
- }
return len(dAtA) - i, nil
}
-func encodeVarintCortex(dAtA []byte, offset int, v uint64) int {
- offset -= sovCortex(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
+func (m *WriteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- dAtA[offset] = uint8(v)
- return base
+ return dAtA[:n], nil
}
-func (m *WriteRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Timeseries) > 0 {
- for _, e := range m.Timeseries {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
+
+func (m *WriteRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size := m.MessageWithBufRef.Size()
+ i -= size
+ if _, err := m.MessageWithBufRef.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
}
+ i = encodeVarintCortex(dAtA, i, uint64(size))
}
- if m.Source != 0 {
- n += 1 + sovCortex(uint64(m.Source))
+ i--
+ dAtA[i] = 0x3e
+ i--
+ dAtA[i] = 0xca
+ if m.SkipLabelNameValidation {
+ i--
+ if m.SkipLabelNameValidation {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x3e
+ i--
+ dAtA[i] = 0xc0
}
if len(m.Metadata) > 0 {
- for _, e := range m.Metadata {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
+ for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Metadata[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
}
}
- if m.SkipLabelNameValidation {
- n += 3
+ if m.Source != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Source))
+ i--
+ dAtA[i] = 0x10
}
- return n
+ if len(m.Timeseries) > 0 {
+ for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size := m.Timeseries[iNdEx].Size()
+ i -= size
+ if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
}
-func (m *WriteResponse) Size() (n int) {
- if m == nil {
- return 0
+func (m *WriteRequestV2) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- return n
+ return dAtA[:n], nil
}
-func (m *TimeSeries) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *WriteRequestV2) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WriteRequestV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if len(m.Labels) > 0 {
- for _, e := range m.Labels {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
+ {
+ size := m.MessageWithBufRef.Size()
+ i -= size
+ if _, err := m.MessageWithBufRef.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
}
+ i = encodeVarintCortex(dAtA, i, uint64(size))
}
- if len(m.Samples) > 0 {
- for _, e := range m.Samples {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
+ i--
+ dAtA[i] = 0x3e
+ i--
+ dAtA[i] = 0xca
+ if m.SkipLabelNameValidation {
+ i--
+ if m.SkipLabelNameValidation {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
}
+ i--
+ dAtA[i] = 0x3e
+ i--
+ dAtA[i] = 0xc0
}
- if len(m.Exemplars) > 0 {
- for _, e := range m.Exemplars {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
+ if m.Source != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Source))
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.Timeseries) > 0 {
+ for iNdEx := len(m.Timeseries) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size := m.Timeseries[iNdEx].Size()
+ i -= size
+ if _, err := m.Timeseries[iNdEx].MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
}
}
- if len(m.Histograms) > 0 {
- for _, e := range m.Histograms {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
+ if len(m.Symbols) > 0 {
+ for iNdEx := len(m.Symbols) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Symbols[iNdEx])
+ copy(dAtA[i:], m.Symbols[iNdEx])
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.Symbols[iNdEx])))
+ i--
+ dAtA[i] = 0x22
}
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *LabelPair) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovCortex(uint64(l))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovCortex(uint64(l))
+func (m *TimeSeriesV2) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- return n
+ return dAtA[:n], nil
}
-func (m *Sample) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Value != 0 {
- n += 9
- }
- if m.TimestampMs != 0 {
- n += 1 + sovCortex(uint64(m.TimestampMs))
- }
- return n
+func (m *TimeSeriesV2) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *MetricMetadata) Size() (n int) {
- if m == nil {
- return 0
- }
+func (m *TimeSeriesV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if m.Type != 0 {
- n += 1 + sovCortex(uint64(m.Type))
+ if m.CreatedTimestamp != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.CreatedTimestamp))
+ i--
+ dAtA[i] = 0x30
}
- l = len(m.MetricFamilyName)
- if l > 0 {
- n += 1 + l + sovCortex(uint64(l))
+ {
+ size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
}
- l = len(m.Help)
- if l > 0 {
- n += 1 + l + sovCortex(uint64(l))
+ i--
+ dAtA[i] = 0x2a
+ if len(m.Exemplars) > 0 {
+ for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
}
- l = len(m.Unit)
- if l > 0 {
- n += 1 + l + sovCortex(uint64(l))
+ if len(m.Histograms) > 0 {
+ for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
}
- return n
-}
-
-func (m *Metric) Size() (n int) {
- if m == nil {
- return 0
+ if len(m.Samples) > 0 {
+ for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
}
- var l int
- _ = l
- if len(m.Labels) > 0 {
- for _, e := range m.Labels {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
+ if len(m.LabelsRefs) > 0 {
+ dAtA5 := make([]byte, len(m.LabelsRefs)*10)
+ var j4 int
+ for _, num := range m.LabelsRefs {
+ for num >= 1<<7 {
+ dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j4++
+ }
+ dAtA5[j4] = uint8(num)
+ j4++
}
+ i -= j4
+ copy(dAtA[i:], dAtA5[:j4])
+ i = encodeVarintCortex(dAtA, i, uint64(j4))
+ i--
+ dAtA[i] = 0xa
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *Exemplar) Size() (n int) {
- if m == nil {
- return 0
+func (m *ExemplarV2) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- if len(m.Labels) > 0 {
- for _, e := range m.Labels {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
- }
+ return dAtA[:n], nil
+}
+
+func (m *ExemplarV2) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ExemplarV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Timestamp != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Timestamp))
+ i--
+ dAtA[i] = 0x18
}
if m.Value != 0 {
- n += 9
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i--
+ dAtA[i] = 0x11
}
- if m.TimestampMs != 0 {
- n += 1 + sovCortex(uint64(m.TimestampMs))
+ if len(m.LabelsRefs) > 0 {
+ dAtA7 := make([]byte, len(m.LabelsRefs)*10)
+ var j6 int
+ for _, num := range m.LabelsRefs {
+ for num >= 1<<7 {
+ dAtA7[j6] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j6++
+ }
+ dAtA7[j6] = uint8(num)
+ j6++
+ }
+ i -= j6
+ copy(dAtA[i:], dAtA7[:j6])
+ i = encodeVarintCortex(dAtA, i, uint64(j6))
+ i--
+ dAtA[i] = 0xa
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *Histogram) Size() (n int) {
- if m == nil {
- return 0
+func (m *MetadataV2) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
+ return dAtA[:n], nil
+}
+
+func (m *MetadataV2) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MetadataV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- if m.Count != nil {
- n += m.Count.Size()
- }
- if m.Sum != 0 {
- n += 9
- }
- if m.Schema != 0 {
- n += 1 + sozCortex(uint64(m.Schema))
- }
- if m.ZeroThreshold != 0 {
- n += 9
- }
- if m.ZeroCount != nil {
- n += m.ZeroCount.Size()
+ if m.UnitRef != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.UnitRef))
+ i--
+ dAtA[i] = 0x20
}
- if len(m.NegativeSpans) > 0 {
- for _, e := range m.NegativeSpans {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
- }
+ if m.HelpRef != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.HelpRef))
+ i--
+ dAtA[i] = 0x18
}
- if len(m.NegativeDeltas) > 0 {
- l = 0
- for _, e := range m.NegativeDeltas {
- l += sozCortex(uint64(e))
- }
- n += 1 + sovCortex(uint64(l)) + l
+ if m.Type != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
}
- if len(m.NegativeCounts) > 0 {
- n += 1 + sovCortex(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8
+ return len(dAtA) - i, nil
+}
+
+func (m *StreamWriteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- if len(m.PositiveSpans) > 0 {
- for _, e := range m.PositiveSpans {
- l = e.Size()
- n += 1 + l + sovCortex(uint64(l))
+ return dAtA[:n], nil
+}
+
+func (m *StreamWriteRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StreamWriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size := m.MessageWithBufRef.Size()
+ i -= size
+ if _, err := m.MessageWithBufRef.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
}
+ i = encodeVarintCortex(dAtA, i, uint64(size))
}
- if len(m.PositiveDeltas) > 0 {
- l = 0
- for _, e := range m.PositiveDeltas {
- l += sozCortex(uint64(e))
+ i--
+ dAtA[i] = 0x3e
+ i--
+ dAtA[i] = 0xc2
+ if m.Request != nil {
+ {
+ size, err := m.Request.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
}
- n += 1 + sovCortex(uint64(l)) + l
- }
- if len(m.PositiveCounts) > 0 {
- n += 1 + sovCortex(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8
- }
- if m.ResetHint != 0 {
- n += 1 + sovCortex(uint64(m.ResetHint))
+ i--
+ dAtA[i] = 0x12
}
- if m.TimestampMs != 0 {
- n += 1 + sovCortex(uint64(m.TimestampMs))
+ if len(m.TenantID) > 0 {
+ i -= len(m.TenantID)
+ copy(dAtA[i:], m.TenantID)
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.TenantID)))
+ i--
+ dAtA[i] = 0xa
}
- return n
+ return len(dAtA) - i, nil
}
-func (m *Histogram_CountInt) Size() (n int) {
- if m == nil {
- return 0
+func (m *WriteResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- var l int
- _ = l
- n += 1 + sovCortex(uint64(m.CountInt))
- return n
+ return dAtA[:n], nil
}
-func (m *Histogram_CountFloat) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 9
- return n
+
+func (m *WriteResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (m *Histogram_ZeroCountInt) Size() (n int) {
- if m == nil {
- return 0
- }
+
+func (m *WriteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
var l int
_ = l
- n += 1 + sovCortex(uint64(m.ZeroCountInt))
- return n
-}
-func (m *Histogram_ZeroCountFloat) Size() (n int) {
- if m == nil {
- return 0
+ if m.Exemplars != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Exemplars))
+ i--
+ dAtA[i] = 0x28
}
- var l int
- _ = l
- n += 9
- return n
-}
-func (m *BucketSpan) Size() (n int) {
- if m == nil {
- return 0
+ if m.Histograms != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Histograms))
+ i--
+ dAtA[i] = 0x20
}
- var l int
- _ = l
- if m.Offset != 0 {
- n += 1 + sozCortex(uint64(m.Offset))
+ if m.Samples != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Samples))
+ i--
+ dAtA[i] = 0x18
}
- if m.Length != 0 {
- n += 1 + sovCortex(uint64(m.Length))
+ if len(m.Message) > 0 {
+ i -= len(m.Message)
+ copy(dAtA[i:], m.Message)
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.Message)))
+ i--
+ dAtA[i] = 0x12
}
- return n
+ if m.Code != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Code))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
}
-func sovCortex(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
+func (m *TimeSeries) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
}
-func sozCortex(x uint64) (n int) {
- return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+
+func (m *TimeSeries) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (this *WriteRequest) String() string {
- if this == nil {
- return "nil"
+
+func (m *TimeSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Histograms) > 0 {
+ for iNdEx := len(m.Histograms) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Histograms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
}
- repeatedStringForMetadata := "[]*MetricMetadata{"
- for _, f := range this.Metadata {
- repeatedStringForMetadata += strings.Replace(f.String(), "MetricMetadata", "MetricMetadata", 1) + ","
+ if len(m.Exemplars) > 0 {
+ for iNdEx := len(m.Exemplars) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Exemplars[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
}
- repeatedStringForMetadata += "}"
- s := strings.Join([]string{`&WriteRequest{`,
- `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`,
- `Source:` + fmt.Sprintf("%v", this.Source) + `,`,
- `Metadata:` + repeatedStringForMetadata + `,`,
- `SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *WriteResponse) String() string {
- if this == nil {
- return "nil"
+ if len(m.Samples) > 0 {
+ for iNdEx := len(m.Samples) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Samples[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
}
- s := strings.Join([]string{`&WriteResponse{`,
- `}`,
- }, "")
- return s
-}
-func (this *TimeSeries) String() string {
- if this == nil {
- return "nil"
+ if len(m.Labels) > 0 {
+ for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size := m.Labels[iNdEx].Size()
+ i -= size
+ if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
}
- repeatedStringForSamples := "[]Sample{"
- for _, f := range this.Samples {
- repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + ","
+ return len(dAtA) - i, nil
+}
+
+func (m *LabelPair) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- repeatedStringForSamples += "}"
- repeatedStringForExemplars := "[]Exemplar{"
- for _, f := range this.Exemplars {
- repeatedStringForExemplars += strings.Replace(strings.Replace(f.String(), "Exemplar", "Exemplar", 1), `&`, ``, 1) + ","
+ return dAtA[:n], nil
+}
+
+func (m *LabelPair) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LabelPair) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
}
- repeatedStringForExemplars += "}"
- repeatedStringForHistograms := "[]Histogram{"
- for _, f := range this.Histograms {
- repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "Histogram", "Histogram", 1), `&`, ``, 1) + ","
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
}
- repeatedStringForHistograms += "}"
- s := strings.Join([]string{`&TimeSeries{`,
- `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
- `Samples:` + repeatedStringForSamples + `,`,
- `Exemplars:` + repeatedStringForExemplars + `,`,
- `Histograms:` + repeatedStringForHistograms + `,`,
- `}`,
- }, "")
- return s
+ return len(dAtA) - i, nil
}
-func (this *LabelPair) String() string {
- if this == nil {
- return "nil"
+
+func (m *Sample) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- s := strings.Join([]string{`&LabelPair{`,
- `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
- `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
- `}`,
- }, "")
- return s
+ return dAtA[:n], nil
}
-func (this *Sample) String() string {
- if this == nil {
- return "nil"
- }
- s := strings.Join([]string{`&Sample{`,
- `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
- `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`,
- `}`,
- }, "")
- return s
+
+func (m *Sample) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
}
-func (this *MetricMetadata) String() string {
- if this == nil {
- return "nil"
+
+func (m *Sample) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TimestampMs != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs))
+ i--
+ dAtA[i] = 0x10
}
- s := strings.Join([]string{`&MetricMetadata{`,
- `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
- `MetricFamilyName:` + fmt.Sprintf("%v", this.MetricFamilyName) + `,`,
- `Help:` + fmt.Sprintf("%v", this.Help) + `,`,
- `Unit:` + fmt.Sprintf("%v", this.Unit) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *Metric) String() string {
- if this == nil {
- return "nil"
+ if m.Value != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i--
+ dAtA[i] = 0x9
}
- s := strings.Join([]string{`&Metric{`,
- `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
- `}`,
- }, "")
- return s
+ return len(dAtA) - i, nil
}
-func (this *Exemplar) String() string {
- if this == nil {
- return "nil"
+
+func (m *MetricMetadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- s := strings.Join([]string{`&Exemplar{`,
- `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
- `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
- `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`,
- `}`,
- }, "")
- return s
+ return dAtA[:n], nil
}
-func (this *Histogram) String() string {
- if this == nil {
- return "nil"
+
+func (m *MetricMetadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MetricMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Unit) > 0 {
+ i -= len(m.Unit)
+ copy(dAtA[i:], m.Unit)
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.Unit)))
+ i--
+ dAtA[i] = 0x2a
}
- repeatedStringForNegativeSpans := "[]BucketSpan{"
- for _, f := range this.NegativeSpans {
- repeatedStringForNegativeSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + ","
+ if len(m.Help) > 0 {
+ i -= len(m.Help)
+ copy(dAtA[i:], m.Help)
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.Help)))
+ i--
+ dAtA[i] = 0x22
}
- repeatedStringForNegativeSpans += "}"
- repeatedStringForPositiveSpans := "[]BucketSpan{"
- for _, f := range this.PositiveSpans {
- repeatedStringForPositiveSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + ","
+ if len(m.MetricFamilyName) > 0 {
+ i -= len(m.MetricFamilyName)
+ copy(dAtA[i:], m.MetricFamilyName)
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.MetricFamilyName)))
+ i--
+ dAtA[i] = 0x12
}
- repeatedStringForPositiveSpans += "}"
- s := strings.Join([]string{`&Histogram{`,
- `Count:` + fmt.Sprintf("%v", this.Count) + `,`,
- `Sum:` + fmt.Sprintf("%v", this.Sum) + `,`,
- `Schema:` + fmt.Sprintf("%v", this.Schema) + `,`,
- `ZeroThreshold:` + fmt.Sprintf("%v", this.ZeroThreshold) + `,`,
- `ZeroCount:` + fmt.Sprintf("%v", this.ZeroCount) + `,`,
- `NegativeSpans:` + repeatedStringForNegativeSpans + `,`,
- `NegativeDeltas:` + fmt.Sprintf("%v", this.NegativeDeltas) + `,`,
- `NegativeCounts:` + fmt.Sprintf("%v", this.NegativeCounts) + `,`,
- `PositiveSpans:` + repeatedStringForPositiveSpans + `,`,
- `PositiveDeltas:` + fmt.Sprintf("%v", this.PositiveDeltas) + `,`,
- `PositiveCounts:` + fmt.Sprintf("%v", this.PositiveCounts) + `,`,
- `ResetHint:` + fmt.Sprintf("%v", this.ResetHint) + `,`,
- `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`,
- `}`,
- }, "")
- return s
-}
-func (this *Histogram_CountInt) String() string {
- if this == nil {
- return "nil"
+ if m.Type != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
}
- s := strings.Join([]string{`&Histogram_CountInt{`,
- `CountInt:` + fmt.Sprintf("%v", this.CountInt) + `,`,
- `}`,
- }, "")
- return s
+ return len(dAtA) - i, nil
}
-func (this *Histogram_CountFloat) String() string {
- if this == nil {
- return "nil"
+
+func (m *Metric) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- s := strings.Join([]string{`&Histogram_CountFloat{`,
- `CountFloat:` + fmt.Sprintf("%v", this.CountFloat) + `,`,
- `}`,
- }, "")
- return s
+ return dAtA[:n], nil
}
-func (this *Histogram_ZeroCountInt) String() string {
- if this == nil {
- return "nil"
+
+func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metric) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size := m.Labels[iNdEx].Size()
+ i -= size
+ if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
}
- s := strings.Join([]string{`&Histogram_ZeroCountInt{`,
- `ZeroCountInt:` + fmt.Sprintf("%v", this.ZeroCountInt) + `,`,
- `}`,
- }, "")
- return s
+ return len(dAtA) - i, nil
}
-func (this *Histogram_ZeroCountFloat) String() string {
- if this == nil {
- return "nil"
+
+func (m *Exemplar) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
}
- s := strings.Join([]string{`&Histogram_ZeroCountFloat{`,
- `ZeroCountFloat:` + fmt.Sprintf("%v", this.ZeroCountFloat) + `,`,
- `}`,
- }, "")
- return s
+ return dAtA[:n], nil
}
-func (this *BucketSpan) String() string {
- if this == nil {
- return "nil"
+
+func (m *Exemplar) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Exemplar) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TimestampMs != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Value != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+ i--
+ dAtA[i] = 0x11
+ }
+ if len(m.Labels) > 0 {
+ for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size := m.Labels[iNdEx].Size()
+ i -= size
+ if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Histogram) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Histogram) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Histogram) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.TimestampMs != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.TimestampMs))
+ i--
+ dAtA[i] = 0x78
+ }
+ if m.ResetHint != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.ResetHint))
+ i--
+ dAtA[i] = 0x70
+ }
+ if len(m.PositiveCounts) > 0 {
+ for iNdEx := len(m.PositiveCounts) - 1; iNdEx >= 0; iNdEx-- {
+ f10 := math.Float64bits(float64(m.PositiveCounts[iNdEx]))
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f10))
+ }
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.PositiveCounts)*8))
+ i--
+ dAtA[i] = 0x6a
+ }
+ if len(m.PositiveDeltas) > 0 {
+ var j11 int
+ dAtA13 := make([]byte, len(m.PositiveDeltas)*10)
+ for _, num := range m.PositiveDeltas {
+ x12 := (uint64(num) << 1) ^ uint64((num >> 63))
+ for x12 >= 1<<7 {
+ dAtA13[j11] = uint8(uint64(x12)&0x7f | 0x80)
+ j11++
+ x12 >>= 7
+ }
+ dAtA13[j11] = uint8(x12)
+ j11++
+ }
+ i -= j11
+ copy(dAtA[i:], dAtA13[:j11])
+ i = encodeVarintCortex(dAtA, i, uint64(j11))
+ i--
+ dAtA[i] = 0x62
+ }
+ if len(m.PositiveSpans) > 0 {
+ for iNdEx := len(m.PositiveSpans) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.PositiveSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ }
+ if len(m.NegativeCounts) > 0 {
+ for iNdEx := len(m.NegativeCounts) - 1; iNdEx >= 0; iNdEx-- {
+ f14 := math.Float64bits(float64(m.NegativeCounts[iNdEx]))
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(f14))
+ }
+ i = encodeVarintCortex(dAtA, i, uint64(len(m.NegativeCounts)*8))
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.NegativeDeltas) > 0 {
+ var j15 int
+ dAtA17 := make([]byte, len(m.NegativeDeltas)*10)
+ for _, num := range m.NegativeDeltas {
+ x16 := (uint64(num) << 1) ^ uint64((num >> 63))
+ for x16 >= 1<<7 {
+ dAtA17[j15] = uint8(uint64(x16)&0x7f | 0x80)
+ j15++
+ x16 >>= 7
+ }
+ dAtA17[j15] = uint8(x16)
+ j15++
+ }
+ i -= j15
+ copy(dAtA[i:], dAtA17[:j15])
+ i = encodeVarintCortex(dAtA, i, uint64(j15))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.NegativeSpans) > 0 {
+ for iNdEx := len(m.NegativeSpans) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.NegativeSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintCortex(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if m.ZeroCount != nil {
+ {
+ size := m.ZeroCount.Size()
+ i -= size
+ if _, err := m.ZeroCount.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ if m.ZeroThreshold != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroThreshold))))
+ i--
+ dAtA[i] = 0x29
+ }
+ if m.Schema != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64((uint32(m.Schema)<<1)^uint32((m.Schema>>31))))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Sum != 0 {
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Sum))))
+ i--
+ dAtA[i] = 0x19
+ }
+ if m.Count != nil {
+ {
+ size := m.Count.Size()
+ i -= size
+ if _, err := m.Count.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Histogram_CountInt) MarshalTo(dAtA []byte) (int, error) {
+ return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Histogram_CountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i = encodeVarintCortex(dAtA, i, uint64(m.CountInt))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+func (m *Histogram_CountFloat) MarshalTo(dAtA []byte) (int, error) {
+ return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Histogram_CountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.CountFloat))))
+ i--
+ dAtA[i] = 0x11
+ return len(dAtA) - i, nil
+}
+func (m *Histogram_ZeroCountInt) MarshalTo(dAtA []byte) (int, error) {
+ return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Histogram_ZeroCountInt) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i = encodeVarintCortex(dAtA, i, uint64(m.ZeroCountInt))
+ i--
+ dAtA[i] = 0x30
+ return len(dAtA) - i, nil
+}
+func (m *Histogram_ZeroCountFloat) MarshalTo(dAtA []byte) (int, error) {
+ return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Histogram_ZeroCountFloat) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= 8
+ encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ZeroCountFloat))))
+ i--
+ dAtA[i] = 0x39
+ return len(dAtA) - i, nil
+}
+func (m *BucketSpan) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BucketSpan) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BucketSpan) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Length != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64(m.Length))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Offset != 0 {
+ i = encodeVarintCortex(dAtA, i, uint64((uint32(m.Offset)<<1)^uint32((m.Offset>>31))))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintCortex(dAtA []byte, offset int, v uint64) int {
+ offset -= sovCortex(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *MessageWithBufRef) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ return n
+}
+
+func (m *WriteRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Timeseries) > 0 {
+ for _, e := range m.Timeseries {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if m.Source != 0 {
+ n += 1 + sovCortex(uint64(m.Source))
+ }
+ if len(m.Metadata) > 0 {
+ for _, e := range m.Metadata {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if m.SkipLabelNameValidation {
+ n += 3
+ }
+ l = m.MessageWithBufRef.Size()
+ n += 2 + l + sovCortex(uint64(l))
+ return n
+}
+
+func (m *WriteRequestV2) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Symbols) > 0 {
+ for _, s := range m.Symbols {
+ l = len(s)
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if len(m.Timeseries) > 0 {
+ for _, e := range m.Timeseries {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if m.Source != 0 {
+ n += 1 + sovCortex(uint64(m.Source))
+ }
+ if m.SkipLabelNameValidation {
+ n += 3
+ }
+ l = m.MessageWithBufRef.Size()
+ n += 2 + l + sovCortex(uint64(l))
+ return n
+}
+
+func (m *TimeSeriesV2) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.LabelsRefs) > 0 {
+ l = 0
+ for _, e := range m.LabelsRefs {
+ l += sovCortex(uint64(e))
+ }
+ n += 1 + sovCortex(uint64(l)) + l
+ }
+ if len(m.Samples) > 0 {
+ for _, e := range m.Samples {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if len(m.Histograms) > 0 {
+ for _, e := range m.Histograms {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if len(m.Exemplars) > 0 {
+ for _, e := range m.Exemplars {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ l = m.Metadata.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ if m.CreatedTimestamp != 0 {
+ n += 1 + sovCortex(uint64(m.CreatedTimestamp))
+ }
+ return n
+}
+
+func (m *ExemplarV2) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.LabelsRefs) > 0 {
+ l = 0
+ for _, e := range m.LabelsRefs {
+ l += sovCortex(uint64(e))
+ }
+ n += 1 + sovCortex(uint64(l)) + l
+ }
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.Timestamp != 0 {
+ n += 1 + sovCortex(uint64(m.Timestamp))
+ }
+ return n
+}
+
+func (m *MetadataV2) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovCortex(uint64(m.Type))
+ }
+ if m.HelpRef != 0 {
+ n += 1 + sovCortex(uint64(m.HelpRef))
+ }
+ if m.UnitRef != 0 {
+ n += 1 + sovCortex(uint64(m.UnitRef))
+ }
+ return n
+}
+
+func (m *StreamWriteRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.TenantID)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ if m.Request != nil {
+ l = m.Request.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ l = m.MessageWithBufRef.Size()
+ n += 2 + l + sovCortex(uint64(l))
+ return n
+}
+
+func (m *WriteResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Code != 0 {
+ n += 1 + sovCortex(uint64(m.Code))
+ }
+ l = len(m.Message)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ if m.Samples != 0 {
+ n += 1 + sovCortex(uint64(m.Samples))
+ }
+ if m.Histograms != 0 {
+ n += 1 + sovCortex(uint64(m.Histograms))
+ }
+ if m.Exemplars != 0 {
+ n += 1 + sovCortex(uint64(m.Exemplars))
+ }
+ return n
+}
+
+func (m *TimeSeries) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ for _, e := range m.Labels {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if len(m.Samples) > 0 {
+ for _, e := range m.Samples {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if len(m.Exemplars) > 0 {
+ for _, e := range m.Exemplars {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if len(m.Histograms) > 0 {
+ for _, e := range m.Histograms {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *LabelPair) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ return n
+}
+
+func (m *Sample) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.TimestampMs != 0 {
+ n += 1 + sovCortex(uint64(m.TimestampMs))
+ }
+ return n
+}
+
+func (m *MetricMetadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovCortex(uint64(m.Type))
+ }
+ l = len(m.MetricFamilyName)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ l = len(m.Help)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ l = len(m.Unit)
+ if l > 0 {
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ return n
+}
+
+func (m *Metric) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ for _, e := range m.Labels {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *Exemplar) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Labels) > 0 {
+ for _, e := range m.Labels {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if m.Value != 0 {
+ n += 9
+ }
+ if m.TimestampMs != 0 {
+ n += 1 + sovCortex(uint64(m.TimestampMs))
+ }
+ return n
+}
+
+func (m *Histogram) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Count != nil {
+ n += m.Count.Size()
+ }
+ if m.Sum != 0 {
+ n += 9
+ }
+ if m.Schema != 0 {
+ n += 1 + sozCortex(uint64(m.Schema))
+ }
+ if m.ZeroThreshold != 0 {
+ n += 9
+ }
+ if m.ZeroCount != nil {
+ n += m.ZeroCount.Size()
+ }
+ if len(m.NegativeSpans) > 0 {
+ for _, e := range m.NegativeSpans {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if len(m.NegativeDeltas) > 0 {
+ l = 0
+ for _, e := range m.NegativeDeltas {
+ l += sozCortex(uint64(e))
+ }
+ n += 1 + sovCortex(uint64(l)) + l
+ }
+ if len(m.NegativeCounts) > 0 {
+ n += 1 + sovCortex(uint64(len(m.NegativeCounts)*8)) + len(m.NegativeCounts)*8
+ }
+ if len(m.PositiveSpans) > 0 {
+ for _, e := range m.PositiveSpans {
+ l = e.Size()
+ n += 1 + l + sovCortex(uint64(l))
+ }
+ }
+ if len(m.PositiveDeltas) > 0 {
+ l = 0
+ for _, e := range m.PositiveDeltas {
+ l += sozCortex(uint64(e))
+ }
+ n += 1 + sovCortex(uint64(l)) + l
+ }
+ if len(m.PositiveCounts) > 0 {
+ n += 1 + sovCortex(uint64(len(m.PositiveCounts)*8)) + len(m.PositiveCounts)*8
+ }
+ if m.ResetHint != 0 {
+ n += 1 + sovCortex(uint64(m.ResetHint))
+ }
+ if m.TimestampMs != 0 {
+ n += 1 + sovCortex(uint64(m.TimestampMs))
+ }
+ return n
+}
+
+func (m *Histogram_CountInt) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovCortex(uint64(m.CountInt))
+ return n
+}
+func (m *Histogram_CountFloat) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 9
+ return n
+}
+func (m *Histogram_ZeroCountInt) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovCortex(uint64(m.ZeroCountInt))
+ return n
+}
+func (m *Histogram_ZeroCountFloat) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 9
+ return n
+}
+func (m *BucketSpan) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Offset != 0 {
+ n += 1 + sozCortex(uint64(m.Offset))
+ }
+ if m.Length != 0 {
+ n += 1 + sovCortex(uint64(m.Length))
+ }
+ return n
+}
+
+func sovCortex(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozCortex(x uint64) (n int) {
+ return sovCortex(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *MessageWithBufRef) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MessageWithBufRef{`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WriteRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForMetadata := "[]*MetricMetadata{"
+ for _, f := range this.Metadata {
+ repeatedStringForMetadata += strings.Replace(f.String(), "MetricMetadata", "MetricMetadata", 1) + ","
+ }
+ repeatedStringForMetadata += "}"
+ s := strings.Join([]string{`&WriteRequest{`,
+ `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`,
+ `Source:` + fmt.Sprintf("%v", this.Source) + `,`,
+ `Metadata:` + repeatedStringForMetadata + `,`,
+ `SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`,
+ `MessageWithBufRef:` + fmt.Sprintf("%v", this.MessageWithBufRef) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WriteRequestV2) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WriteRequestV2{`,
+ `Symbols:` + fmt.Sprintf("%v", this.Symbols) + `,`,
+ `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`,
+ `Source:` + fmt.Sprintf("%v", this.Source) + `,`,
+ `SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`,
+ `MessageWithBufRef:` + fmt.Sprintf("%v", this.MessageWithBufRef) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TimeSeriesV2) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSamples := "[]Sample{"
+ for _, f := range this.Samples {
+ repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSamples += "}"
+ repeatedStringForHistograms := "[]Histogram{"
+ for _, f := range this.Histograms {
+ repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "Histogram", "Histogram", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForHistograms += "}"
+ repeatedStringForExemplars := "[]ExemplarV2{"
+ for _, f := range this.Exemplars {
+ repeatedStringForExemplars += strings.Replace(strings.Replace(f.String(), "ExemplarV2", "ExemplarV2", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExemplars += "}"
+ s := strings.Join([]string{`&TimeSeriesV2{`,
+ `LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`,
+ `Samples:` + repeatedStringForSamples + `,`,
+ `Histograms:` + repeatedStringForHistograms + `,`,
+ `Exemplars:` + repeatedStringForExemplars + `,`,
+ `Metadata:` + strings.Replace(strings.Replace(this.Metadata.String(), "MetadataV2", "MetadataV2", 1), `&`, ``, 1) + `,`,
+ `CreatedTimestamp:` + fmt.Sprintf("%v", this.CreatedTimestamp) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *ExemplarV2) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&ExemplarV2{`,
+ `LabelsRefs:` + fmt.Sprintf("%v", this.LabelsRefs) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MetadataV2) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MetadataV2{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `HelpRef:` + fmt.Sprintf("%v", this.HelpRef) + `,`,
+ `UnitRef:` + fmt.Sprintf("%v", this.UnitRef) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *StreamWriteRequest) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&StreamWriteRequest{`,
+ `TenantID:` + fmt.Sprintf("%v", this.TenantID) + `,`,
+ `Request:` + strings.Replace(this.Request.String(), "WriteRequest", "WriteRequest", 1) + `,`,
+ `MessageWithBufRef:` + fmt.Sprintf("%v", this.MessageWithBufRef) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *WriteResponse) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&WriteResponse{`,
+ `Code:` + fmt.Sprintf("%v", this.Code) + `,`,
+ `Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+ `Samples:` + fmt.Sprintf("%v", this.Samples) + `,`,
+ `Histograms:` + fmt.Sprintf("%v", this.Histograms) + `,`,
+ `Exemplars:` + fmt.Sprintf("%v", this.Exemplars) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *TimeSeries) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForSamples := "[]Sample{"
+ for _, f := range this.Samples {
+ repeatedStringForSamples += strings.Replace(strings.Replace(f.String(), "Sample", "Sample", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForSamples += "}"
+ repeatedStringForExemplars := "[]Exemplar{"
+ for _, f := range this.Exemplars {
+ repeatedStringForExemplars += strings.Replace(strings.Replace(f.String(), "Exemplar", "Exemplar", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForExemplars += "}"
+ repeatedStringForHistograms := "[]Histogram{"
+ for _, f := range this.Histograms {
+ repeatedStringForHistograms += strings.Replace(strings.Replace(f.String(), "Histogram", "Histogram", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForHistograms += "}"
+ s := strings.Join([]string{`&TimeSeries{`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+ `Samples:` + repeatedStringForSamples + `,`,
+ `Exemplars:` + repeatedStringForExemplars + `,`,
+ `Histograms:` + repeatedStringForHistograms + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *LabelPair) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&LabelPair{`,
+ `Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Sample) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Sample{`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *MetricMetadata) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&MetricMetadata{`,
+ `Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+ `MetricFamilyName:` + fmt.Sprintf("%v", this.MetricFamilyName) + `,`,
+ `Help:` + fmt.Sprintf("%v", this.Help) + `,`,
+ `Unit:` + fmt.Sprintf("%v", this.Unit) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Metric) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Metric{`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Exemplar) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Exemplar{`,
+ `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`,
+ `Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+ `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Histogram) String() string {
+ if this == nil {
+ return "nil"
+ }
+ repeatedStringForNegativeSpans := "[]BucketSpan{"
+ for _, f := range this.NegativeSpans {
+ repeatedStringForNegativeSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForNegativeSpans += "}"
+ repeatedStringForPositiveSpans := "[]BucketSpan{"
+ for _, f := range this.PositiveSpans {
+ repeatedStringForPositiveSpans += strings.Replace(strings.Replace(f.String(), "BucketSpan", "BucketSpan", 1), `&`, ``, 1) + ","
+ }
+ repeatedStringForPositiveSpans += "}"
+ s := strings.Join([]string{`&Histogram{`,
+ `Count:` + fmt.Sprintf("%v", this.Count) + `,`,
+ `Sum:` + fmt.Sprintf("%v", this.Sum) + `,`,
+ `Schema:` + fmt.Sprintf("%v", this.Schema) + `,`,
+ `ZeroThreshold:` + fmt.Sprintf("%v", this.ZeroThreshold) + `,`,
+ `ZeroCount:` + fmt.Sprintf("%v", this.ZeroCount) + `,`,
+ `NegativeSpans:` + repeatedStringForNegativeSpans + `,`,
+ `NegativeDeltas:` + fmt.Sprintf("%v", this.NegativeDeltas) + `,`,
+ `NegativeCounts:` + fmt.Sprintf("%v", this.NegativeCounts) + `,`,
+ `PositiveSpans:` + repeatedStringForPositiveSpans + `,`,
+ `PositiveDeltas:` + fmt.Sprintf("%v", this.PositiveDeltas) + `,`,
+ `PositiveCounts:` + fmt.Sprintf("%v", this.PositiveCounts) + `,`,
+ `ResetHint:` + fmt.Sprintf("%v", this.ResetHint) + `,`,
+ `TimestampMs:` + fmt.Sprintf("%v", this.TimestampMs) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Histogram_CountInt) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Histogram_CountInt{`,
+ `CountInt:` + fmt.Sprintf("%v", this.CountInt) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Histogram_CountFloat) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Histogram_CountFloat{`,
+ `CountFloat:` + fmt.Sprintf("%v", this.CountFloat) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Histogram_ZeroCountInt) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Histogram_ZeroCountInt{`,
+ `ZeroCountInt:` + fmt.Sprintf("%v", this.ZeroCountInt) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *Histogram_ZeroCountFloat) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&Histogram_ZeroCountFloat{`,
+ `ZeroCountFloat:` + fmt.Sprintf("%v", this.ZeroCountFloat) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func (this *BucketSpan) String() string {
+ if this == nil {
+ return "nil"
+ }
+ s := strings.Join([]string{`&BucketSpan{`,
+ `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
+ `Length:` + fmt.Sprintf("%v", this.Length) + `,`,
+ `}`,
+ }, "")
+ return s
+}
+func valueToStringCortex(v interface{}) string {
+ rv := reflect.ValueOf(v)
+ if rv.IsNil() {
+ return "nil"
+ }
+ pv := reflect.Indirect(rv).Interface()
+ return fmt.Sprintf("*%v", pv)
+}
+func (m *MessageWithBufRef) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MessageWithBufRef: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MessageWithBufRef: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCortex(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WriteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Timeseries = append(m.Timeseries, PreallocTimeseries{})
+ if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ m.Source = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Source |= SourceEnum(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Metadata = append(m.Metadata, &MetricMetadata{})
+ if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1000:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SkipLabelNameValidation = bool(v != 0)
+ case 1001:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MessageWithBufRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.MessageWithBufRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCortex(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WriteRequestV2) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WriteRequestV2: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WriteRequestV2: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Symbols", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Symbols = append(m.Symbols, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Timeseries = append(m.Timeseries, PreallocTimeseriesV2{})
+ if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ }
+ m.Source = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Source |= SourceEnum(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 1000:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.SkipLabelNameValidation = bool(v != 0)
+ case 1001:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MessageWithBufRef", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.MessageWithBufRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCortex(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TimeSeriesV2) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TimeSeriesV2: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TimeSeriesV2: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 0 {
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LabelsRefs = append(m.LabelsRefs, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.LabelsRefs) == 0 {
+ m.LabelsRefs = make([]uint32, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LabelsRefs = append(m.LabelsRefs, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Samples = append(m.Samples, Sample{})
+ if err := m.Samples[len(m.Samples)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Histograms = append(m.Histograms, Histogram{})
+ if err := m.Histograms[len(m.Histograms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Exemplars = append(m.Exemplars, ExemplarV2{})
+ if err := m.Exemplars[len(m.Exemplars)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreatedTimestamp", wireType)
+ }
+ m.CreatedTimestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CreatedTimestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCortex(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ExemplarV2) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ExemplarV2: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ExemplarV2: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 0 {
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LabelsRefs = append(m.LabelsRefs, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.LabelsRefs) == 0 {
+ m.LabelsRefs = make([]uint32, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint32
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LabelsRefs = append(m.LabelsRefs, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field LabelsRefs", wireType)
+ }
+ case 2:
+ if wireType != 1 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var v uint64
+ if (iNdEx + 8) > l {
+ return io.ErrUnexpectedEOF
+ }
+ v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+ iNdEx += 8
+ m.Value = float64(math.Float64frombits(v))
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+ }
+ m.Timestamp = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Timestamp |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCortex(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
}
- s := strings.Join([]string{`&BucketSpan{`,
- `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
- `Length:` + fmt.Sprintf("%v", this.Length) + `,`,
- `}`,
- }, "")
- return s
-}
-func valueToStringCortex(v interface{}) string {
- rv := reflect.ValueOf(v)
- if rv.IsNil() {
- return "nil"
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
}
- pv := reflect.Indirect(rv).Interface()
- return fmt.Sprintf("*%v", pv)
+ return nil
}
-func (m *WriteRequest) Unmarshal(dAtA []byte) error {
+func (m *MetadataV2) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
@@ -2750,17 +5111,17 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
- return fmt.Errorf("proto: WriteRequest: wiretype end group for non-group")
+ return fmt.Errorf("proto: MetadataV2: wiretype end group for non-group")
}
if fieldNum <= 0 {
- return fmt.Errorf("proto: WriteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ return fmt.Errorf("proto: MetadataV2: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Timeseries", wireType)
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
- var msglen int
+ m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCortex
@@ -2770,31 +5131,107 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- msglen |= int(b&0x7F) << shift
+ m.Type |= MetadataV2_MetricType(b&0x7F) << shift
if b < 0x80 {
break
}
}
- if msglen < 0 {
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HelpRef", wireType)
+ }
+ m.HelpRef = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.HelpRef |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field UnitRef", wireType)
+ }
+ m.UnitRef = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.UnitRef |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipCortex(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if skippy < 0 {
return ErrInvalidLengthCortex
}
- postIndex := iNdEx + msglen
- if postIndex < 0 {
+ if (iNdEx + skippy) < 0 {
return ErrInvalidLengthCortex
}
- if postIndex > l {
+ if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
- m.Timeseries = append(m.Timeseries, PreallocTimeseries{})
- if err := m.Timeseries[len(m.Timeseries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StreamWriteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
}
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
}
- m.Source = 0
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StreamWriteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StreamWriteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TenantID", wireType)
+ }
+ var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCortex
@@ -2804,14 +5241,27 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- m.Source |= WriteRequest_SourceEnum(b&0x7F) << shift
+ stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
- case 3:
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TenantID = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
@@ -2838,16 +5288,18 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
- m.Metadata = append(m.Metadata, &MetricMetadata{})
- if err := m.Metadata[len(m.Metadata)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ if m.Request == nil {
+ m.Request = &WriteRequest{}
+ }
+ if err := m.Request.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 1000:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType)
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MessageWithBufRef", wireType)
}
- var v int
+ var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowCortex
@@ -2857,12 +5309,25 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
- v |= int(b&0x7F) << shift
+ msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
- m.SkipLabelNameValidation = bool(v != 0)
+ if msglen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.MessageWithBufRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipCortex(dAtA[iNdEx:])
@@ -2916,6 +5381,114 @@ func (m *WriteResponse) Unmarshal(dAtA []byte) error {
return fmt.Errorf("proto: WriteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
+ }
+ m.Code = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Code |= int32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthCortex
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthCortex
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Message = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Samples", wireType)
+ }
+ m.Samples = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Samples |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Histograms", wireType)
+ }
+ m.Histograms = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Histograms |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Exemplars", wireType)
+ }
+ m.Exemplars = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowCortex
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Exemplars |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipCortex(dAtA[iNdEx:])
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto
index cedb17318..7a2ae9754 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/cortex.proto
@@ -5,23 +5,133 @@ package cortexpb;
option go_package = "cortexpb";
import "gogoproto/gogo.proto";
-
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
-message WriteRequest {
+enum SourceEnum {
+ API = 0;
+ RULE = 1;
+}
+
+message MessageWithBufRef {
+ option (gogoproto.typedecl) = false;
+}
+
+message WriteRequest {
repeated TimeSeries timeseries = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseries"];
- enum SourceEnum {
- API = 0;
- RULE = 1;
- }
SourceEnum Source = 2;
repeated MetricMetadata metadata = 3 [(gogoproto.nullable) = true];
bool skip_label_name_validation = 1000; //set intentionally high to keep WriteRequest compatible with upstream Prometheus
+ MessageWithBufRef Ref = 1001 [(gogoproto.embed) = true, (gogoproto.customtype) = "MessageWithBufRef", (gogoproto.nullable) = false];
}
-message WriteResponse {}
+// refer to https://github.com/prometheus/prometheus/blob/v3.5.0/prompb/io/prometheus/write/v2/types.proto
+// The histogram and Sample are shared with PRW1.
+message WriteRequestV2 {
+ repeated string symbols = 4;
+ repeated TimeSeriesV2 timeseries = 5 [(gogoproto.nullable) = false, (gogoproto.customtype) = "PreallocTimeseriesV2"];
+ SourceEnum Source = 6;
+
+ bool skip_label_name_validation = 1000; // set intentionally high to keep WriteRequest compatible with upstream Prometheus
+ MessageWithBufRef Ref = 1001 [(gogoproto.embed) = true, (gogoproto.customtype) = "MessageWithBufRef", (gogoproto.nullable) = false];
+}
+
+message TimeSeriesV2 {
+ repeated uint32 labels_refs = 1;
+ // Timeseries messages can either specify samples or (native) histogram samples
+ // (histogram field), but not both. For a typical sender (real-time metric
+ // streaming), in healthy cases, there will be only one sample or histogram.
+ //
+ // Samples and histograms are sorted by timestamp (older first).
+ repeated Sample samples = 2 [(gogoproto.nullable) = false];
+ repeated Histogram histograms = 3 [(gogoproto.nullable) = false];
+
+ // exemplars represents an optional set of exemplars attached to this series' samples.
+ repeated ExemplarV2 exemplars = 4 [(gogoproto.nullable) = false];
+
+ // metadata represents the metadata associated with the given series' samples.
+ MetadataV2 metadata = 5 [(gogoproto.nullable) = false];
+
+ // created_timestamp represents an optional created timestamp associated with
+ // this series' samples in ms format, typically for counter or histogram type
+ // metrics. Created timestamp represents the time when the counter started
+ // counting (sometimes referred to as start timestamp), which can increase
+ // the accuracy of query results.
+ //
+ // Note that some receivers might require this and in return fail to
+ // ingest such samples within the Request.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ //
+ // Note that the "optional" keyword is omitted due to
+ // https://cloud.google.com/apis/design/design_patterns.md#optional_primitive_fields
+ // Zero value means value not set. If you need to use exactly zero value for
+ // the timestamp, use 1 millisecond before or after.
+ int64 created_timestamp = 6;
+}
+
+// Exemplar is an additional information attached to some series' samples.
+// It is typically used to attach an example trace or request ID associated with
+// the metric changes.
+message ExemplarV2 {
+ // labels_refs is an optional list of label name-value pair references, encoded
+ // as indices to the Request.symbols array. This list's len is always
+ // a multiple of 2, and the underlying labels should be sorted lexicographically.
+ // If the exemplar references a trace it should use the `trace_id` label name, as a best practice.
+ repeated uint32 labels_refs = 1;
+ // value represents an exact example value. This can be useful when the exemplar
+ // is attached to a histogram, which only gives an estimated value through buckets.
+ double value = 2;
+ // timestamp represents the timestamp of the exemplar in ms.
+ //
+ // For Go, see github.com/prometheus/prometheus/model/timestamp/timestamp.go
+ // for conversion from/to time.Time to Prometheus timestamp.
+ int64 timestamp = 3;
+}
+
+// Metadata represents the metadata associated with the given series' samples.
+message MetadataV2 {
+ enum MetricType {
+ METRIC_TYPE_UNSPECIFIED = 0;
+ METRIC_TYPE_COUNTER = 1;
+ METRIC_TYPE_GAUGE = 2;
+ METRIC_TYPE_HISTOGRAM = 3;
+ METRIC_TYPE_GAUGEHISTOGRAM = 4;
+ METRIC_TYPE_SUMMARY = 5;
+ METRIC_TYPE_INFO = 6;
+ METRIC_TYPE_STATESET = 7;
+ }
+
+ MetricType type = 1;
+ // help_ref is a reference to the Request.symbols array representing help
+ // text for the metric. Help is optional, reference should point to an empty string in
+ // such a case.
+ uint32 help_ref = 3;
+ // unit_ref is a reference to the Request.symbols array representing a unit
+ // for the metric. Unit is optional, reference should point to an empty string in
+ // such a case.
+ uint32 unit_ref = 4;
+}
+
+message StreamWriteRequest {
+ string TenantID = 1;
+ WriteRequest Request = 2;
+
+  MessageWithBufRef Ref = 1000 [(gogoproto.embed) = true, (gogoproto.customtype) = "MessageWithBufRef", (gogoproto.nullable) = false]; //set intentionally high to keep StreamWriteRequest compatible with upstream Prometheus
+}
+
+message WriteResponse {
+ int32 code = 1;
+ string message = 2;
+ // Samples represents X-Prometheus-Remote-Write-Written-Samples
+ int64 Samples = 3;
+ // Histograms represents X-Prometheus-Remote-Write-Written-Histograms
+ int64 Histograms = 4;
+ // Exemplars represents X-Prometheus-Remote-Write-Written-Exemplars
+ int64 Exemplars = 5;
+}
message TimeSeries {
repeated LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "LabelAdapter"];
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/extensions.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/extensions.go
index 716fafcc7..e75b45e2a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/extensions.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/extensions.go
@@ -15,7 +15,7 @@ const maxBufferSize = 1024
const signVersion = "v1"
var signerPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return newSigner()
},
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/histograms.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/histograms.go
index 60e7207a1..a05a4812a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/histograms.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/histograms.go
@@ -131,7 +131,7 @@ func FloatHistogramToHistogramProto(timestamp int64, fh *histogram.FloatHistogra
func spansProtoToSpans(s []BucketSpan) []histogram.Span {
spans := make([]histogram.Span, len(s))
- for i := 0; i < len(s); i++ {
+ for i := range s {
spans[i] = histogram.Span{Offset: s[i].Offset, Length: s[i].Length}
}
@@ -140,7 +140,7 @@ func spansProtoToSpans(s []BucketSpan) []histogram.Span {
func spansToSpansProto(s []histogram.Span) []BucketSpan {
spans := make([]BucketSpan, len(s))
- for i := 0; i < len(s); i++ {
+ for i := range s {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
@@ -149,7 +149,7 @@ func spansToSpansProto(s []histogram.Span) []BucketSpan {
func spansPromProtoToSpansProto(s []prompb.BucketSpan) []BucketSpan {
spans := make([]BucketSpan, len(s))
- for i := 0; i < len(s); i++ {
+ for i := range s {
spans[i] = BucketSpan{Offset: s[i].Offset, Length: s[i].Length}
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/signature.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/signature.go
new file mode 100644
index 000000000..a11c5bcd0
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/signature.go
@@ -0,0 +1,50 @@
+package cortexpb
+
+import (
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+// Ref: https://github.com/prometheus/common/blob/main/model/fnv.go
+
+func LabelsToFingerprint(lset labels.Labels) model.Fingerprint {
+ if lset.Len() == 0 {
+ return model.Fingerprint(hashNew())
+ }
+
+ sum := hashNew()
+ lset.Range(func(l labels.Label) {
+ sum = hashAdd(sum, string(l.Name))
+ sum = hashAddByte(sum, model.SeparatorByte)
+ sum = hashAdd(sum, string(l.Value))
+ sum = hashAddByte(sum, model.SeparatorByte)
+ })
+ return model.Fingerprint(sum)
+}
+
+const (
+ offset64 = 14695981039346656037
+ prime64 = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+ return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+ for i := 0; i < len(s); i++ {
+ h ^= uint64(s[i])
+ h *= prime64
+ }
+ return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+ h ^= uint64(b)
+ h *= prime64
+ return h
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/slicesPool.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/slicesPool.go
index e28d51d4f..c0f3a2c7c 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/slicesPool.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/slicesPool.go
@@ -21,10 +21,10 @@ func newSlicePool(pools int) *byteSlicePools {
func (sp *byteSlicePools) init(pools int) {
sp.pools = make([]sync.Pool, pools)
- for i := 0; i < pools; i++ {
+ for i := range pools {
size := int(math.Pow(2, float64(i+minPoolSizePower)))
sp.pools[i] = sync.Pool{
- New: func() interface{} {
+ New: func() any {
buf := make([]byte, 0, size)
return &buf
},
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go
index db7354ffe..4d780bba6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseries.go
@@ -24,13 +24,13 @@ var (
is re-used. But since the slices are far far larger, we come out ahead.
*/
slicePool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return make([]PreallocTimeseries, 0, expectedTimeseries)
},
}
timeSeriesPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &TimeSeries{
Labels: make([]LabelAdapter, 0, expectedLabels),
Samples: make([]Sample, 0, expectedSamplesPerSeries),
@@ -41,7 +41,7 @@ var (
}
writeRequestPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &PreallocWriteRequest{
WriteRequest: WriteRequest{},
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseriesv2.go b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseriesv2.go
new file mode 100644
index 000000000..5a3e15a01
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/cortexpb/timeseriesv2.go
@@ -0,0 +1,131 @@
+package cortexpb
+
+import (
+ "sync"
+)
+
+var (
+ expectedSymbols = 20
+
+ slicePoolV2 = sync.Pool{
+ New: func() any {
+ return make([]PreallocTimeseriesV2, 0, expectedTimeseries)
+ },
+ }
+
+ timeSeriesPoolV2 = sync.Pool{
+ New: func() any {
+ return &TimeSeriesV2{
+ LabelsRefs: make([]uint32, 0, expectedLabels),
+ Samples: make([]Sample, 0, expectedSamplesPerSeries),
+ Histograms: make([]Histogram, 0, expectedHistogramsPerSeries),
+ Exemplars: make([]ExemplarV2, 0, expectedExemplarsPerSeries),
+ Metadata: MetadataV2{},
+ }
+ },
+ }
+
+ writeRequestPoolV2 = sync.Pool{
+ New: func() any {
+ return &PreallocWriteRequestV2{
+ WriteRequestV2: WriteRequestV2{
+ Symbols: make([]string, 0, expectedSymbols),
+ },
+ }
+ },
+ }
+ bytePoolV2 = newSlicePool(20)
+)
+
+// PreallocWriteRequestV2 is a WriteRequestV2 which preallocs slices on Unmarshal.
+type PreallocWriteRequestV2 struct {
+ WriteRequestV2
+ data *[]byte
+}
+
+// Unmarshal implements proto.Message.
+func (p *PreallocWriteRequestV2) Unmarshal(dAtA []byte) error {
+ p.Timeseries = PreallocTimeseriesV2SliceFromPool()
+ return p.WriteRequestV2.Unmarshal(dAtA)
+}
+
+func (p *PreallocWriteRequestV2) Marshal() (dAtA []byte, err error) {
+ size := p.Size()
+ p.data = bytePoolV2.getSlice(size)
+ dAtA = *p.data
+ n, err := p.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+// PreallocTimeseriesV2 is a TimeSeries which preallocs slices on Unmarshal.
+type PreallocTimeseriesV2 struct {
+ *TimeSeriesV2
+}
+
+// Unmarshal implements proto.Message.
+func (p *PreallocTimeseriesV2) Unmarshal(dAtA []byte) error {
+ p.TimeSeriesV2 = TimeseriesV2FromPool()
+ return p.TimeSeriesV2.Unmarshal(dAtA)
+}
+
+func ReuseWriteRequestV2(req *PreallocWriteRequestV2) {
+ if req.data != nil {
+ bytePoolV2.reuseSlice(req.data)
+ req.data = nil
+ }
+ req.Source = 0
+ req.Symbols = req.Symbols[:0]
+ if req.Timeseries != nil {
+ ReuseSliceV2(req.Timeseries)
+ req.Timeseries = nil
+ }
+ writeRequestPoolV2.Put(req)
+}
+
+func PreallocWriteRequestV2FromPool() *PreallocWriteRequestV2 {
+ return writeRequestPoolV2.Get().(*PreallocWriteRequestV2)
+}
+
+// PreallocTimeseriesV2SliceFromPool retrieves a slice of PreallocTimeseriesV2 from a sync.Pool.
+// ReuseSliceV2 should be called once done.
+func PreallocTimeseriesV2SliceFromPool() []PreallocTimeseriesV2 {
+ return slicePoolV2.Get().([]PreallocTimeseriesV2)
+}
+
+// ReuseSliceV2 puts the slice back into a sync.Pool for reuse.
+func ReuseSliceV2(ts []PreallocTimeseriesV2) {
+ for i := range ts {
+ ReuseTimeseriesV2(ts[i].TimeSeriesV2)
+ }
+
+ slicePoolV2.Put(ts[:0]) //nolint:staticcheck //see comment on slicePool for more details
+}
+
+// TimeseriesV2FromPool retrieves a pointer to a TimeSeriesV2 from a sync.Pool.
+// ReuseTimeseriesV2 should be called once done, unless ReuseSliceV2 was called on the slice that contains this TimeSeriesV2.
+func TimeseriesV2FromPool() *TimeSeriesV2 {
+ return timeSeriesPoolV2.Get().(*TimeSeriesV2)
+}
+
+// ReuseTimeseriesV2 puts the timeseriesV2 back into a sync.Pool for reuse.
+func ReuseTimeseriesV2(ts *TimeSeriesV2) {
+ // clear ts labelRef and samples
+ ts.LabelsRefs = ts.LabelsRefs[:0]
+ ts.Samples = ts.Samples[:0]
+
+ // clear exemplar label refs
+ for i := range ts.Exemplars {
+ ts.Exemplars[i].LabelsRefs = ts.Exemplars[i].LabelsRefs[:0]
+ }
+
+ for i := range ts.Histograms {
+ ts.Histograms[i].Reset()
+ }
+
+ ts.Exemplars = ts.Exemplars[:0]
+ ts.Histograms = ts.Histograms[:0]
+ timeSeriesPoolV2.Put(ts)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go
index b437b95d0..eb0ac91ec 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go
@@ -3,8 +3,13 @@ package client
import (
"context"
"flag"
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
"github.com/cortexproject/cortex/pkg/cortexpb"
+ "github.com/cortexproject/cortex/pkg/tenant"
"github.com/cortexproject/cortex/pkg/util/grpcclient"
"github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock"
@@ -12,6 +17,8 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/weaveworks/common/httpgrpc"
+ "github.com/weaveworks/common/user"
"go.uber.org/atomic"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
@@ -31,6 +38,8 @@ var ingesterClientInflightPushRequests = promauto.NewGaugeVec(prometheus.GaugeOp
var errTooManyInflightPushRequests = errors.New("too many inflight push requests in ingester client")
+const INGESTER_CLIENT_STREAM_WORKER_COUNT = 100
+
// ClosableClientConn is grpc.ClientConnInterface with Close function
type ClosableClientConn interface {
grpc.ClientConnInterface
@@ -43,6 +52,15 @@ type HealthAndIngesterClient interface {
grpc_health_v1.HealthClient
Close() error
PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
+ PushStreamConnection(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
+}
+
+type streamWriteJob struct {
+ req *cortexpb.StreamWriteRequest
+ resp *cortexpb.WriteResponse
+ ctx context.Context
+ cancel context.CancelFunc
+ err error
}
type closableHealthAndIngesterClient struct {
@@ -53,6 +71,9 @@ type closableHealthAndIngesterClient struct {
maxInflightPushRequests int64
inflightRequests atomic.Int64
inflightPushRequests *prometheus.GaugeVec
+ streamPushChan chan *streamWriteJob
+ streamCtx context.Context
+ streamCancel context.CancelFunc
}
func (c *closableHealthAndIngesterClient) PushPreAlloc(ctx context.Context, in *cortexpb.PreallocWriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) {
@@ -72,6 +93,38 @@ func (c *closableHealthAndIngesterClient) Push(ctx context.Context, in *cortexpb
})
}
+func (c *closableHealthAndIngesterClient) PushStreamConnection(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error) {
+ return c.handlePushRequest(func() (*cortexpb.WriteResponse, error) {
+ select {
+ case <-c.streamCtx.Done():
+ return nil, errors.Wrap(c.streamCtx.Err(), "ingester client stream connection closed")
+ default:
+ }
+
+ tenantID, err := tenant.TenantID(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ streamReq := &cortexpb.StreamWriteRequest{
+ TenantID: tenantID,
+ Request: in,
+ }
+
+ reqCtx, reqCancel := context.WithCancel(ctx)
+ defer reqCancel()
+
+ job := &streamWriteJob{
+ req: streamReq,
+ ctx: reqCtx,
+ cancel: reqCancel,
+ }
+ c.streamPushChan <- job
+ <-reqCtx.Done()
+ return job.resp, job.err
+ })
+}
+
func (c *closableHealthAndIngesterClient) handlePushRequest(mainFunc func() (*cortexpb.WriteResponse, error)) (*cortexpb.WriteResponse, error) {
currentInflight := c.inflightRequests.Inc()
c.inflightPushRequests.WithLabelValues(c.addr).Set(float64(currentInflight))
@@ -85,8 +138,12 @@ func (c *closableHealthAndIngesterClient) handlePushRequest(mainFunc func() (*co
}
// MakeIngesterClient makes a new IngesterClient
-func MakeIngesterClient(addr string, cfg Config) (HealthAndIngesterClient, error) {
- dialOpts, err := cfg.GRPCClientConfig.DialOption(grpcclient.Instrument(ingesterClientRequestDuration))
+func MakeIngesterClient(addr string, cfg Config, useStreamConnection bool) (HealthAndIngesterClient, error) {
+ unaryClientInterceptor, streamClientInterceptor := grpcclient.Instrument(ingesterClientRequestDuration)
+ if useStreamConnection {
+ unaryClientInterceptor, streamClientInterceptor = grpcclient.InstrumentReusableStream(ingesterClientRequestDuration)
+ }
+ dialOpts, err := cfg.GRPCClientConfig.DialOption(unaryClientInterceptor, streamClientInterceptor)
if err != nil {
return nil, err
}
@@ -94,25 +151,123 @@ func MakeIngesterClient(addr string, cfg Config) (HealthAndIngesterClient, error
if err != nil {
return nil, err
}
- return &closableHealthAndIngesterClient{
+ c := &closableHealthAndIngesterClient{
IngesterClient: NewIngesterClient(conn),
HealthClient: grpc_health_v1.NewHealthClient(conn),
conn: conn,
addr: addr,
maxInflightPushRequests: cfg.MaxInflightPushRequests,
inflightPushRequests: ingesterClientInflightPushRequests,
- }, nil
+ }
+ if useStreamConnection {
+ streamCtx, streamCancel := context.WithCancel(context.Background())
+ err = c.Run(make(chan *streamWriteJob, INGESTER_CLIENT_STREAM_WORKER_COUNT), streamCtx, streamCancel)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return c, nil
}
func (c *closableHealthAndIngesterClient) Close() error {
c.inflightPushRequests.DeleteLabelValues(c.addr)
+
+ if c.streamCancel != nil {
+ c.streamCancel()
+ }
+
+ if c.streamPushChan != nil {
+ drainingLoop:
+ for {
+ select {
+ case job, ok := <-c.streamPushChan:
+ if !ok {
+ break drainingLoop
+ }
+ if job != nil && job.cancel != nil {
+ job.err = errors.New("stream connection ingester client closing")
+ job.cancel()
+ }
+ default:
+ close(c.streamPushChan)
+ break drainingLoop
+ }
+ }
+ }
+
return c.conn.Close()
}
+func (c *closableHealthAndIngesterClient) Run(streamPushChan chan *streamWriteJob, streamCtx context.Context, streamCancel context.CancelFunc) error {
+ c.streamPushChan = streamPushChan
+ c.streamCtx = streamCtx
+ c.streamCancel = streamCancel
+
+ var workerErr error
+ var wg sync.WaitGroup
+ for i := range INGESTER_CLIENT_STREAM_WORKER_COUNT {
+ workerName := fmt.Sprintf("ingester-%s-stream-push-worker-%d", c.addr, i)
+ wg.Add(1)
+ go func() {
+ workerCtx := user.InjectOrgID(streamCtx, workerName)
+ err := c.worker(workerCtx)
+ if err != nil {
+ workerErr = err
+ }
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+ return workerErr
+}
+
+func (c *closableHealthAndIngesterClient) worker(ctx context.Context) error {
+ stream, err := c.PushStream(ctx)
+ if err != nil {
+ return err
+ }
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case job, ok := <-c.streamPushChan:
+ if !ok {
+ return
+ }
+ err = stream.Send(job.req)
+ if err == io.EOF {
+ job.resp = &cortexpb.WriteResponse{}
+ job.cancel()
+ return
+ }
+ if err != nil {
+ job.err = err
+ job.cancel()
+ continue
+ }
+ resp, err := stream.Recv()
+ if err == io.EOF {
+ job.resp = &cortexpb.WriteResponse{}
+ job.cancel()
+ return
+ }
+ job.resp = resp
+ job.err = err
+ if err == nil && job.resp.Code != http.StatusOK {
+ job.err = httpgrpc.Errorf(int(job.resp.Code), "%s", job.resp.Message)
+ }
+ job.cancel()
+ }
+ }
+ }()
+ return nil
+}
+
// Config is the configuration struct for the ingester client
type Config struct {
- GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"`
- MaxInflightPushRequests int64 `yaml:"max_inflight_push_requests"`
+ GRPCClientConfig grpcclient.ConfigWithHealthCheck `yaml:"grpc_client_config"`
+ MaxInflightPushRequests int64 `yaml:"max_inflight_push_requests"`
}
// RegisterFlags registers configuration settings used by the ingester client config.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
index 6e4a81d63..d6f982a1f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/compat.go
@@ -7,6 +7,7 @@ import (
"github.com/prometheus/prometheus/model/labels"
"github.com/prometheus/prometheus/storage"
"github.com/prometheus/prometheus/tsdb/chunkenc"
+ storecache "github.com/thanos-io/thanos/pkg/store/cache"
"github.com/cortexproject/cortex/pkg/cortexpb"
)
@@ -26,8 +27,8 @@ func ToQueryRequest(from, to model.Time, matchers []*labels.Matcher) (*QueryRequ
}
// FromQueryRequest unpacks a QueryRequest proto.
-func FromQueryRequest(req *QueryRequest) (model.Time, model.Time, []*labels.Matcher, error) {
- matchers, err := FromLabelMatchers(req.Matchers)
+func FromQueryRequest(cache storecache.MatchersCache, req *QueryRequest) (model.Time, model.Time, []*labels.Matcher, error) {
+ matchers, err := FromLabelMatchers(cache, req.Matchers)
if err != nil {
return 0, 0, nil, err
}
@@ -55,10 +56,10 @@ func ToExemplarQueryRequest(from, to model.Time, matchers ...[]*labels.Matcher)
}
// FromExemplarQueryRequest unpacks a ExemplarQueryRequest proto.
-func FromExemplarQueryRequest(req *ExemplarQueryRequest) (int64, int64, [][]*labels.Matcher, error) {
+func FromExemplarQueryRequest(cache storecache.MatchersCache, req *ExemplarQueryRequest) (int64, int64, [][]*labels.Matcher, error) {
var result [][]*labels.Matcher
for _, m := range req.Matchers {
- matchers, err := FromLabelMatchers(m.Matchers)
+ matchers, err := FromLabelMatchers(cache, m.Matchers)
if err != nil {
return 0, 0, nil, err
}
@@ -94,25 +95,6 @@ func MatrixFromSeriesSet(set storage.SeriesSet) (model.Matrix, error) {
return m, set.Err()
}
-// ToQueryResponse builds a QueryResponse proto.
-func ToQueryResponse(matrix model.Matrix) *QueryResponse {
- resp := &QueryResponse{}
- for _, ss := range matrix {
- ts := cortexpb.TimeSeries{
- Labels: cortexpb.FromMetricsToLabelAdapters(ss.Metric),
- Samples: make([]cortexpb.Sample, 0, len(ss.Values)),
- }
- for _, s := range ss.Values {
- ts.Samples = append(ts.Samples, cortexpb.Sample{
- Value: float64(s.Value),
- TimestampMs: int64(s.Timestamp),
- })
- }
- resp.Timeseries = append(resp.Timeseries, ts)
- }
- return resp
-}
-
// ToMetricsForLabelMatchersRequest builds a MetricsForLabelMatchersRequest proto
func ToMetricsForLabelMatchersRequest(from, to model.Time, limit int, matchers []*labels.Matcher) (*MetricsForLabelMatchersRequest, error) {
ms, err := toLabelMatchers(matchers)
@@ -175,10 +157,10 @@ func SeriesSetToQueryResponse(s storage.SeriesSet) (*QueryResponse, error) {
}
// FromMetricsForLabelMatchersRequest unpacks a MetricsForLabelMatchersRequest proto
-func FromMetricsForLabelMatchersRequest(req *MetricsForLabelMatchersRequest) (model.Time, model.Time, int, [][]*labels.Matcher, error) {
+func FromMetricsForLabelMatchersRequest(cache storecache.MatchersCache, req *MetricsForLabelMatchersRequest) (model.Time, model.Time, int, [][]*labels.Matcher, error) {
matchersSet := make([][]*labels.Matcher, 0, len(req.MatchersSet))
for _, matchers := range req.MatchersSet {
- matchers, err := FromLabelMatchers(matchers.Matchers)
+ matchers, err := FromLabelMatchers(cache, matchers.Matchers)
if err != nil {
return 0, 0, 0, nil, err
}
@@ -206,12 +188,12 @@ func ToLabelValuesRequest(labelName model.LabelName, from, to model.Time, limit
}
// FromLabelValuesRequest unpacks a LabelValuesRequest proto
-func FromLabelValuesRequest(req *LabelValuesRequest) (string, int64, int64, int, []*labels.Matcher, error) {
+func FromLabelValuesRequest(cache storecache.MatchersCache, req *LabelValuesRequest) (string, int64, int64, int, []*labels.Matcher, error) {
var err error
var matchers []*labels.Matcher
if req.Matchers != nil {
- matchers, err = FromLabelMatchers(req.Matchers.Matchers)
+ matchers, err = FromLabelMatchers(cache, req.Matchers.Matchers)
if err != nil {
return "", 0, 0, 0, nil, err
}
@@ -220,6 +202,36 @@ func FromLabelValuesRequest(req *LabelValuesRequest) (string, int64, int64, int,
return req.LabelName, req.StartTimestampMs, req.EndTimestampMs, int(req.Limit), matchers, nil
}
+// ToLabelNamesRequest builds a LabelNamesRequest proto
+func ToLabelNamesRequest(from, to model.Time, limit int, matchers []*labels.Matcher) (*LabelNamesRequest, error) {
+ ms, err := toLabelMatchers(matchers)
+ if err != nil {
+ return nil, err
+ }
+
+ return &LabelNamesRequest{
+ StartTimestampMs: int64(from),
+ EndTimestampMs: int64(to),
+ Matchers: &LabelMatchers{Matchers: ms},
+ Limit: int64(limit),
+ }, nil
+}
+
+// FromLabelNamesRequest unpacks a LabelNamesRequest proto
+func FromLabelNamesRequest(cache storecache.MatchersCache, req *LabelNamesRequest) (int64, int64, int, []*labels.Matcher, error) {
+ var err error
+ var matchers []*labels.Matcher
+
+ if req.Matchers != nil {
+ matchers, err = FromLabelMatchers(cache, req.Matchers.Matchers)
+ if err != nil {
+ return 0, 0, 0, nil, err
+ }
+ }
+
+ return req.StartTimestampMs, req.EndTimestampMs, int(req.Limit), matchers, nil
+}
+
func toLabelMatchers(matchers []*labels.Matcher) ([]*LabelMatcher, error) {
result := make([]*LabelMatcher, 0, len(matchers))
for _, matcher := range matchers {
@@ -245,27 +257,31 @@ func toLabelMatchers(matchers []*labels.Matcher) ([]*LabelMatcher, error) {
return result, nil
}
-func FromLabelMatchers(matchers []*LabelMatcher) ([]*labels.Matcher, error) {
+func FromLabelMatchers(cache storecache.MatchersCache, matchers []*LabelMatcher) ([]*labels.Matcher, error) {
result := make([]*labels.Matcher, 0, len(matchers))
for _, matcher := range matchers {
- var mtype labels.MatchType
- switch matcher.Type {
- case EQUAL:
- mtype = labels.MatchEqual
- case NOT_EQUAL:
- mtype = labels.MatchNotEqual
- case REGEX_MATCH:
- mtype = labels.MatchRegexp
- case REGEX_NO_MATCH:
- mtype = labels.MatchNotRegexp
- default:
- return nil, fmt.Errorf("invalid matcher type")
- }
- matcher, err := labels.NewMatcher(mtype, matcher.Name, matcher.Value)
+ m, err := cache.GetOrSet(matcher, func() (*labels.Matcher, error) {
+ var mtype labels.MatchType
+ switch matcher.Type {
+ case EQUAL:
+ mtype = labels.MatchEqual
+ case NOT_EQUAL:
+ mtype = labels.MatchNotEqual
+ case REGEX_MATCH:
+ mtype = labels.MatchRegexp
+ case REGEX_NO_MATCH:
+ mtype = labels.MatchNotRegexp
+ default:
+ return nil, fmt.Errorf("invalid matcher type")
+ }
+ return labels.NewMatcher(mtype, matcher.GetName(), matcher.GetValue())
+ })
+
if err != nil {
return nil, err
}
- result = append(result, matcher)
+
+ result = append(result, m)
}
return result, nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex_util.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex_util.go
index b3ba0e2d2..5d463d49a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex_util.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex_util.go
@@ -32,10 +32,7 @@ func SendLabelNamesStream(s Ingester_LabelNamesStreamServer, l *LabelNamesStream
func SendAsBatchToStream(totalItems int, streamBatchSize int, fn func(start, end int) error) error {
for i := 0; i < totalItems; i += streamBatchSize {
- j := i + streamBatchSize
- if j > totalItems {
- j = totalItems
- }
+ j := min(i+streamBatchSize, totalItems)
if err := fn(i, j); err != nil {
return err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go
index 62c0f4393..aa3f0d84f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/custom.go
@@ -3,6 +3,9 @@ package client
import (
"encoding/binary"
+ "github.com/pkg/errors"
+ "github.com/prometheus/prometheus/model/labels"
+
"github.com/cortexproject/cortex/pkg/chunk/encoding"
)
@@ -45,3 +48,20 @@ func (m *QueryStreamResponse) SamplesCount() (count int) {
}
return
}
+
+func (m *LabelMatcher) MatcherType() (labels.MatchType, error) {
+ var t labels.MatchType
+ switch m.Type {
+ case EQUAL:
+ t = labels.MatchEqual
+ case NOT_EQUAL:
+ t = labels.MatchNotEqual
+ case REGEX_MATCH:
+ t = labels.MatchRegexp
+ case REGEX_NO_MATCH:
+ t = labels.MatchNotRegexp
+ default:
+ return 0, errors.Errorf("unrecognized label matcher type %d", m.Type)
+ }
+ return t, nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go
index 594292429..cf23254f8 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.pb.go
@@ -559,9 +559,10 @@ func (m *LabelValuesStreamResponse) GetLabelValues() []string {
}
type LabelNamesRequest struct {
- StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"`
- EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"`
- Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ StartTimestampMs int64 `protobuf:"varint,1,opt,name=start_timestamp_ms,json=startTimestampMs,proto3" json:"start_timestamp_ms,omitempty"`
+ EndTimestampMs int64 `protobuf:"varint,2,opt,name=end_timestamp_ms,json=endTimestampMs,proto3" json:"end_timestamp_ms,omitempty"`
+ Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ Matchers *LabelMatchers `protobuf:"bytes,4,opt,name=matchers,proto3" json:"matchers,omitempty"`
}
func (m *LabelNamesRequest) Reset() { *m = LabelNamesRequest{} }
@@ -617,6 +618,13 @@ func (m *LabelNamesRequest) GetLimit() int64 {
return 0
}
+func (m *LabelNamesRequest) GetMatchers() *LabelMatchers {
+ if m != nil {
+ return m.Matchers
+ }
+ return nil
+}
+
type LabelNamesResponse struct {
LabelNames []string `protobuf:"bytes,1,rep,name=label_names,json=labelNames,proto3" json:"label_names,omitempty"`
}
@@ -744,6 +752,7 @@ type UserStatsResponse struct {
ApiIngestionRate float64 `protobuf:"fixed64,3,opt,name=api_ingestion_rate,json=apiIngestionRate,proto3" json:"api_ingestion_rate,omitempty"`
RuleIngestionRate float64 `protobuf:"fixed64,4,opt,name=rule_ingestion_rate,json=ruleIngestionRate,proto3" json:"rule_ingestion_rate,omitempty"`
ActiveSeries uint64 `protobuf:"varint,5,opt,name=active_series,json=activeSeries,proto3" json:"active_series,omitempty"`
+ LoadedBlocks uint64 `protobuf:"varint,6,opt,name=loaded_blocks,json=loadedBlocks,proto3" json:"loaded_blocks,omitempty"`
}
func (m *UserStatsResponse) Reset() { *m = UserStatsResponse{} }
@@ -813,6 +822,13 @@ func (m *UserStatsResponse) GetActiveSeries() uint64 {
return 0
}
+func (m *UserStatsResponse) GetLoadedBlocks() uint64 {
+ if m != nil {
+ return m.LoadedBlocks
+ }
+ return 0
+}
+
type UserIDStatsResponse struct {
UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
Data *UserStatsResponse `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
@@ -1061,6 +1077,9 @@ func (m *MetricsForLabelMatchersStreamResponse) GetMetric() []*cortexpb.Metric {
}
type MetricsMetadataRequest struct {
+ Limit int64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
+ LimitPerMetric int64 `protobuf:"varint,2,opt,name=limit_per_metric,json=limitPerMetric,proto3" json:"limit_per_metric,omitempty"`
+ Metric string `protobuf:"bytes,3,opt,name=metric,proto3" json:"metric,omitempty"`
}
func (m *MetricsMetadataRequest) Reset() { *m = MetricsMetadataRequest{} }
@@ -1095,6 +1114,27 @@ func (m *MetricsMetadataRequest) XXX_DiscardUnknown() {
var xxx_messageInfo_MetricsMetadataRequest proto.InternalMessageInfo
+func (m *MetricsMetadataRequest) GetLimit() int64 {
+ if m != nil {
+ return m.Limit
+ }
+ return 0
+}
+
+func (m *MetricsMetadataRequest) GetLimitPerMetric() int64 {
+ if m != nil {
+ return m.LimitPerMetric
+ }
+ return 0
+}
+
+func (m *MetricsMetadataRequest) GetMetric() string {
+ if m != nil {
+ return m.Metric
+ }
+ return ""
+}
+
type MetricsMetadataResponse struct {
Metadata []*cortexpb.MetricMetadata `protobuf:"bytes,1,rep,name=metadata,proto3" json:"metadata,omitempty"`
}
@@ -1468,89 +1508,94 @@ func init() {
func init() { proto.RegisterFile("ingester.proto", fileDescriptor_60f6df4f3586b478) }
var fileDescriptor_60f6df4f3586b478 = []byte{
- // 1309 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4b, 0x6f, 0xd4, 0xd6,
- 0x17, 0xb7, 0x33, 0x8f, 0x64, 0xce, 0x4c, 0x86, 0xc9, 0x4d, 0x20, 0x83, 0xf9, 0xe3, 0x80, 0xff,
- 0xa2, 0x8d, 0xda, 0x92, 0x40, 0xda, 0x4a, 0xd0, 0x17, 0x4a, 0x20, 0x40, 0x80, 0x10, 0x70, 0x02,
- 0xad, 0xaa, 0x56, 0x96, 0x33, 0x73, 0x49, 0x5c, 0xfc, 0xc2, 0xbe, 0x46, 0xd0, 0x55, 0xab, 0x7e,
- 0x80, 0x76, 0xd9, 0x6d, 0x77, 0xfd, 0x28, 0x2c, 0x59, 0x74, 0x81, 0xaa, 0x0a, 0x95, 0x41, 0xaa,
- 0xba, 0xa4, 0xdf, 0xa0, 0xf2, 0x7d, 0xf8, 0x15, 0x27, 0x19, 0x24, 0xd2, 0x9d, 0xef, 0x39, 0xbf,
- 0x73, 0xee, 0x79, 0xde, 0x73, 0x0c, 0x6d, 0xcb, 0xdd, 0xc2, 0x21, 0xc1, 0xc1, 0x9c, 0x1f, 0x78,
- 0xc4, 0x43, 0xf5, 0x9e, 0x17, 0x10, 0xfc, 0x48, 0x99, 0xda, 0xf2, 0xb6, 0x3c, 0x4a, 0x9a, 0x8f,
- 0xbf, 0x18, 0x57, 0x39, 0xbf, 0x65, 0x91, 0xed, 0x68, 0x73, 0xae, 0xe7, 0x39, 0xf3, 0x0c, 0xe8,
- 0x07, 0xde, 0x37, 0xb8, 0x47, 0xf8, 0x69, 0xde, 0xbf, 0xbf, 0x25, 0x18, 0x9b, 0xfc, 0x83, 0x89,
- 0x6a, 0x9f, 0x42, 0x53, 0xc7, 0x66, 0x5f, 0xc7, 0x0f, 0x22, 0x1c, 0x12, 0x34, 0x07, 0xa3, 0x0f,
- 0x22, 0x1c, 0x58, 0x38, 0xec, 0xca, 0x27, 0x2a, 0xb3, 0xcd, 0x85, 0xa9, 0x39, 0x0e, 0xbf, 0x1d,
- 0xe1, 0xe0, 0x31, 0x87, 0xe9, 0x02, 0xa4, 0x5d, 0x80, 0x16, 0x13, 0x0f, 0x7d, 0xcf, 0x0d, 0x31,
- 0x9a, 0x87, 0xd1, 0x00, 0x87, 0x91, 0x4d, 0x84, 0xfc, 0xe1, 0x82, 0x3c, 0xc3, 0xe9, 0x02, 0xa5,
- 0x5d, 0x87, 0xf1, 0x1c, 0x07, 0x7d, 0x04, 0x40, 0x2c, 0x07, 0x87, 0x65, 0x46, 0xf8, 0x9b, 0x73,
- 0x1b, 0x96, 0x83, 0xd7, 0x29, 0x6f, 0xa9, 0xfa, 0xe4, 0xf9, 0x8c, 0xa4, 0x67, 0xd0, 0xda, 0xcf,
- 0x32, 0xb4, 0xb2, 0x76, 0xa2, 0xf7, 0x00, 0x85, 0xc4, 0x0c, 0x88, 0x41, 0x41, 0xc4, 0x74, 0x7c,
- 0xc3, 0x89, 0x95, 0xca, 0xb3, 0x15, 0xbd, 0x43, 0x39, 0x1b, 0x82, 0xb1, 0x1a, 0xa2, 0x59, 0xe8,
- 0x60, 0xb7, 0x9f, 0xc7, 0x8e, 0x50, 0x6c, 0x1b, 0xbb, 0xfd, 0x2c, 0xf2, 0x0c, 0x8c, 0x39, 0x26,
- 0xe9, 0x6d, 0xe3, 0x20, 0xec, 0x56, 0xf2, 0x71, 0xba, 0x61, 0x6e, 0x62, 0x7b, 0x95, 0x31, 0xf5,
- 0x04, 0xa5, 0xfd, 0x22, 0xc3, 0xd4, 0xf2, 0x23, 0xec, 0xf8, 0xb6, 0x19, 0xfc, 0x27, 0x26, 0x9e,
- 0xdd, 0x61, 0xe2, 0xe1, 0x32, 0x13, 0xc3, 0x8c, 0x8d, 0x5f, 0xc1, 0x24, 0x35, 0x6d, 0x9d, 0x04,
- 0xd8, 0x74, 0x92, 0x8c, 0x5c, 0x80, 0x66, 0x6f, 0x3b, 0x72, 0xef, 0xe7, 0x52, 0x32, 0x2d, 0x94,
- 0xa5, 0x09, 0xb9, 0x18, 0x83, 0x78, 0x56, 0xb2, 0x12, 0xd7, 0xaa, 0x63, 0x23, 0x9d, 0x8a, 0xb6,
- 0x0e, 0x87, 0x0b, 0x01, 0x78, 0x03, 0x19, 0xff, 0x4d, 0x06, 0x44, 0xdd, 0xb9, 0x6b, 0xda, 0x11,
- 0x0e, 0x45, 0x50, 0x8f, 0x03, 0xd8, 0x31, 0xd5, 0x70, 0x4d, 0x07, 0xd3, 0x60, 0x36, 0xf4, 0x06,
- 0xa5, 0xdc, 0x34, 0x1d, 0xbc, 0x4b, 0xcc, 0x47, 0x5e, 0x23, 0xe6, 0x95, 0x7d, 0x63, 0x5e, 0x3d,
- 0x21, 0x0f, 0x11, 0x73, 0x34, 0x05, 0x35, 0xdb, 0x72, 0x2c, 0xd2, 0xad, 0x51, 0x8d, 0xec, 0xa0,
- 0x9d, 0x83, 0xc9, 0x9c, 0x57, 0x3c, 0x52, 0x27, 0xa1, 0xc5, 0xdc, 0x7a, 0x48, 0xe9, 0x34, 0x56,
- 0x0d, 0xbd, 0x69, 0xa7, 0x50, 0xed, 0x33, 0x38, 0x9a, 0x91, 0x2c, 0x64, 0x72, 0x08, 0xf9, 0xef,
- 0x65, 0x98, 0xb8, 0x21, 0x02, 0x15, 0x1e, 0x74, 0x91, 0x26, 0xde, 0x57, 0xb2, 0xde, 0x7f, 0xc8,
- 0x73, 0xca, 0x4d, 0xe0, 0xc6, 0xcf, 0x40, 0x33, 0xcd, 0xa9, 0xb0, 0x1d, 0x92, 0xa4, 0x86, 0xda,
- 0xc7, 0xd0, 0x4d, 0xc5, 0x0a, 0x9e, 0xef, 0x2b, 0x8c, 0xa0, 0x73, 0x27, 0xc4, 0xc1, 0x3a, 0x31,
- 0x89, 0xf0, 0x5a, 0xfb, 0x43, 0x86, 0x89, 0x0c, 0x91, 0xab, 0x3a, 0x25, 0x1e, 0x67, 0xcb, 0x73,
- 0x8d, 0xc0, 0x24, 0xac, 0xbe, 0x64, 0x7d, 0x3c, 0xa1, 0xea, 0x26, 0xc1, 0x71, 0x09, 0xba, 0x91,
- 0x63, 0xf0, 0xaa, 0x8e, 0xdd, 0xaf, 0xea, 0x0d, 0x37, 0x72, 0x58, 0x29, 0xc7, 0x11, 0x35, 0x7d,
- 0xcb, 0x28, 0x68, 0xaa, 0x50, 0x4d, 0x1d, 0xd3, 0xb7, 0x56, 0x72, 0xca, 0xe6, 0x60, 0x32, 0x88,
- 0x6c, 0x5c, 0x84, 0x57, 0x29, 0x7c, 0x22, 0x66, 0xe5, 0xf1, 0xff, 0x87, 0x71, 0xb3, 0x47, 0xac,
- 0x87, 0x58, 0xdc, 0x5f, 0xa3, 0xf7, 0xb7, 0x18, 0x91, 0x99, 0xa0, 0x7d, 0x0d, 0x93, 0xb1, 0x77,
- 0x2b, 0x97, 0xf2, 0xfe, 0x4d, 0xc3, 0x68, 0x14, 0xe2, 0xc0, 0xb0, 0xfa, 0xbc, 0x71, 0xea, 0xf1,
- 0x71, 0xa5, 0x8f, 0x4e, 0x43, 0xb5, 0x6f, 0x12, 0x93, 0xfa, 0xd2, 0x5c, 0x38, 0x2a, 0x2a, 0x7b,
- 0x47, 0x84, 0x74, 0x0a, 0xd3, 0xae, 0x00, 0x8a, 0x59, 0x61, 0x5e, 0xfb, 0x59, 0xa8, 0x85, 0x31,
- 0x81, 0xf7, 0xf9, 0xb1, 0xac, 0x96, 0x82, 0x25, 0x3a, 0x43, 0x6a, 0x4f, 0x64, 0x50, 0x57, 0x31,
- 0x09, 0xac, 0x5e, 0x78, 0xd9, 0x0b, 0xf2, 0x8d, 0x74, 0xc0, 0xf5, 0x79, 0x0e, 0x5a, 0xa2, 0x53,
- 0x8d, 0x10, 0x93, 0xbd, 0x1f, 0xd2, 0xa6, 0x80, 0xae, 0x63, 0x92, 0x56, 0x76, 0x35, 0x5b, 0xd9,
- 0xd7, 0x61, 0x66, 0x57, 0x4f, 0x78, 0x80, 0x66, 0xa1, 0xee, 0x50, 0x08, 0x8f, 0x50, 0x27, 0x7d,
- 0x09, 0x99, 0xa8, 0xce, 0xf9, 0xda, 0x6d, 0x38, 0xb5, 0x8b, 0xb2, 0x42, 0xf1, 0x0f, 0xaf, 0xb2,
- 0x0b, 0x47, 0xb8, 0xca, 0x55, 0x4c, 0xcc, 0x38, 0x8d, 0xa2, 0x17, 0xd6, 0x60, 0x7a, 0x07, 0x87,
- 0xab, 0xff, 0x00, 0xc6, 0x1c, 0x4e, 0xe3, 0x17, 0x74, 0x8b, 0x17, 0x24, 0x32, 0x09, 0x52, 0xfb,
- 0x47, 0x86, 0x43, 0x85, 0xd9, 0x11, 0x27, 0xe6, 0x5e, 0xe0, 0x39, 0x86, 0x58, 0x7e, 0xd2, 0x1a,
- 0x6c, 0xc7, 0xf4, 0x15, 0x4e, 0x5e, 0xe9, 0x67, 0x8b, 0x74, 0x24, 0x57, 0xa4, 0x2e, 0xd4, 0x69,
- 0x57, 0x8b, 0xa1, 0x37, 0x99, 0x9a, 0x42, 0x43, 0x74, 0xcb, 0xb4, 0x82, 0xa5, 0xc5, 0x78, 0x8e,
- 0xfc, 0xfe, 0x7c, 0xe6, 0xb5, 0xf6, 0x26, 0x26, 0xbf, 0xd8, 0x37, 0x7d, 0x82, 0x03, 0x9d, 0xdf,
- 0x82, 0xde, 0x85, 0x3a, 0x1b, 0x75, 0xdd, 0x2a, 0xbd, 0x6f, 0x5c, 0xd4, 0x46, 0x76, 0x1a, 0x72,
- 0x88, 0xf6, 0xa3, 0x0c, 0x35, 0xe6, 0xe9, 0x41, 0x15, 0xac, 0x02, 0x63, 0xd8, 0xed, 0x79, 0x7d,
- 0xcb, 0xdd, 0xa2, 0x8f, 0x49, 0x4d, 0x4f, 0xce, 0x08, 0xf1, 0xfe, 0x8d, 0x2b, 0xb2, 0xc5, 0x9b,
- 0x74, 0x11, 0xc6, 0x73, 0x95, 0x93, 0xdb, 0x6c, 0xe4, 0xa1, 0x36, 0x1b, 0x03, 0x5a, 0x59, 0x0e,
- 0x3a, 0x05, 0x55, 0xf2, 0xd8, 0x67, 0xaf, 0x62, 0x7b, 0x61, 0x42, 0x48, 0x53, 0xf6, 0xc6, 0x63,
- 0x1f, 0xeb, 0x94, 0x1d, 0x5b, 0x43, 0x87, 0x33, 0x4b, 0x1f, 0xfd, 0x8e, 0x9b, 0x86, 0x4e, 0x26,
- 0x6a, 0x7a, 0x43, 0x67, 0x07, 0xed, 0x07, 0x19, 0xda, 0x69, 0xa5, 0x5c, 0xb6, 0x6c, 0xfc, 0x26,
- 0x0a, 0x45, 0x81, 0xb1, 0x7b, 0x96, 0x8d, 0xa9, 0x0d, 0xec, 0xba, 0xe4, 0x5c, 0x16, 0xa9, 0x77,
- 0xae, 0x41, 0x23, 0x71, 0x01, 0x35, 0xa0, 0xb6, 0x7c, 0xfb, 0xce, 0xe2, 0x8d, 0x8e, 0x84, 0xc6,
- 0xa1, 0x71, 0x73, 0x6d, 0xc3, 0x60, 0x47, 0x19, 0x1d, 0x82, 0xa6, 0xbe, 0x7c, 0x65, 0xf9, 0x0b,
- 0x63, 0x75, 0x71, 0xe3, 0xe2, 0xd5, 0xce, 0x08, 0x42, 0xd0, 0x66, 0x84, 0x9b, 0x6b, 0x9c, 0x56,
- 0x59, 0xf8, 0x6b, 0x14, 0xc6, 0x84, 0x8d, 0xe8, 0x3c, 0x54, 0x6f, 0x45, 0xe1, 0x36, 0x3a, 0x92,
- 0x56, 0xea, 0xe7, 0x81, 0x45, 0x30, 0xef, 0x3c, 0x65, 0x7a, 0x07, 0x9d, 0xf5, 0x9d, 0x26, 0xa1,
- 0x4b, 0xd0, 0xcc, 0x2c, 0x6c, 0xa8, 0x74, 0x57, 0x57, 0x8e, 0xe5, 0xa8, 0xf9, 0xa7, 0x41, 0x93,
- 0xce, 0xc8, 0x68, 0x0d, 0xda, 0x94, 0x25, 0xb6, 0xb3, 0x10, 0xfd, 0x4f, 0x88, 0x94, 0x6d, 0xac,
- 0xca, 0xf1, 0x5d, 0xb8, 0x89, 0x59, 0x57, 0xa1, 0x99, 0xd9, 0x41, 0x90, 0x92, 0x2b, 0xa0, 0xdc,
- 0xa2, 0x96, 0x1a, 0x57, 0xb2, 0xee, 0x68, 0x12, 0xba, 0xcb, 0x97, 0x91, 0xec, 0x36, 0xb3, 0xa7,
- 0xbe, 0x93, 0x25, 0xbc, 0x12, 0x97, 0x97, 0x01, 0xd2, 0x55, 0x01, 0x1d, 0xcd, 0x09, 0x65, 0x17,
- 0x1f, 0x45, 0x29, 0x63, 0x25, 0xe6, 0xad, 0x43, 0xa7, 0xb8, 0x71, 0xec, 0xa5, 0xec, 0xc4, 0x4e,
- 0x56, 0x89, 0x6d, 0x4b, 0xd0, 0x48, 0x46, 0x2a, 0xea, 0x96, 0x4c, 0x59, 0xa6, 0x6c, 0xf7, 0xf9,
- 0xab, 0x49, 0xe8, 0x32, 0xb4, 0x16, 0x6d, 0x7b, 0x18, 0x35, 0x4a, 0x96, 0x13, 0x16, 0xf5, 0xd8,
- 0xc9, 0xab, 0x5f, 0x1c, 0x31, 0xe8, 0xad, 0xa4, 0xb1, 0xf7, 0x1c, 0xcd, 0xca, 0xdb, 0xfb, 0xe2,
- 0x92, 0xdb, 0xbe, 0x85, 0xe3, 0x7b, 0x0e, 0xb4, 0xa1, 0xef, 0x3c, 0xbd, 0x0f, 0xae, 0x24, 0xea,
- 0x1b, 0x70, 0xa8, 0x30, 0xdf, 0x90, 0x5a, 0xd0, 0x52, 0x18, 0x89, 0xca, 0xcc, 0xae, 0x7c, 0xa1,
- 0x77, 0xe9, 0x93, 0xa7, 0x2f, 0x54, 0xe9, 0xd9, 0x0b, 0x55, 0x7a, 0xf5, 0x42, 0x95, 0xbf, 0x1b,
- 0xa8, 0xf2, 0xaf, 0x03, 0x55, 0x7e, 0x32, 0x50, 0xe5, 0xa7, 0x03, 0x55, 0xfe, 0x73, 0xa0, 0xca,
- 0x7f, 0x0f, 0x54, 0xe9, 0xd5, 0x40, 0x95, 0x7f, 0x7a, 0xa9, 0x4a, 0x4f, 0x5f, 0xaa, 0xd2, 0xb3,
- 0x97, 0xaa, 0xf4, 0x65, 0xbd, 0x67, 0x5b, 0xd8, 0x25, 0x9b, 0x75, 0xfa, 0x8b, 0xfe, 0xfe, 0xbf,
- 0x01, 0x00, 0x00, 0xff, 0xff, 0xdd, 0xd4, 0xb0, 0x94, 0x0d, 0x10, 0x00, 0x00,
+ // 1386 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4b, 0x6f, 0x14, 0xc7,
+ 0x13, 0xdf, 0xf6, 0x3e, 0xd8, 0xad, 0x7d, 0xb0, 0x6e, 0x1b, 0xbc, 0x0c, 0x7f, 0xc6, 0x30, 0x88,
+ 0x7f, 0xac, 0x24, 0xac, 0xc1, 0x49, 0x24, 0xc8, 0x0b, 0xd9, 0x60, 0xc0, 0x80, 0x31, 0x8c, 0x0d,
+ 0x89, 0xa2, 0x44, 0xa3, 0xf1, 0x6e, 0x63, 0x4f, 0x98, 0x17, 0x33, 0xbd, 0x08, 0x72, 0x4a, 0x94,
+ 0x0f, 0x90, 0x1c, 0x73, 0xcd, 0x2d, 0xd7, 0x48, 0xf9, 0x10, 0x1c, 0x39, 0xe4, 0x80, 0x72, 0x40,
+ 0x61, 0xb9, 0xe4, 0x48, 0xbe, 0x41, 0x34, 0xdd, 0x3d, 0x4f, 0xcf, 0xda, 0x4b, 0x84, 0x73, 0x9b,
+ 0xae, 0xfa, 0x55, 0x75, 0xd5, 0xaf, 0xab, 0xbb, 0x6a, 0x17, 0x5a, 0x86, 0xbd, 0x45, 0x7c, 0x4a,
+ 0xbc, 0xae, 0xeb, 0x39, 0xd4, 0xc1, 0x95, 0x9e, 0xe3, 0x51, 0xf2, 0x48, 0x9a, 0xde, 0x72, 0xb6,
+ 0x1c, 0x26, 0x9a, 0x0f, 0xbe, 0xb8, 0x56, 0x3a, 0xbf, 0x65, 0xd0, 0xed, 0xc1, 0x66, 0xb7, 0xe7,
+ 0x58, 0xf3, 0x1c, 0xe8, 0x7a, 0xce, 0xd7, 0xa4, 0x47, 0xc5, 0x6a, 0xde, 0xbd, 0xbf, 0x15, 0x2a,
+ 0x36, 0xc5, 0x07, 0x37, 0x55, 0x3e, 0x81, 0xba, 0x4a, 0xf4, 0xbe, 0x4a, 0x1e, 0x0c, 0x88, 0x4f,
+ 0x71, 0x17, 0x0e, 0x3c, 0x18, 0x10, 0xcf, 0x20, 0x7e, 0x07, 0x1d, 0x2f, 0xce, 0xd5, 0x17, 0xa6,
+ 0xbb, 0x02, 0x7e, 0x7b, 0x40, 0xbc, 0xc7, 0x02, 0xa6, 0x86, 0x20, 0xe5, 0x02, 0x34, 0xb8, 0xb9,
+ 0xef, 0x3a, 0xb6, 0x4f, 0xf0, 0x3c, 0x1c, 0xf0, 0x88, 0x3f, 0x30, 0x69, 0x68, 0x7f, 0x28, 0x63,
+ 0xcf, 0x71, 0x6a, 0x88, 0x52, 0xae, 0x43, 0x33, 0xa5, 0xc1, 0x1f, 0x02, 0x50, 0xc3, 0x22, 0x7e,
+ 0x5e, 0x10, 0xee, 0x66, 0x77, 0xc3, 0xb0, 0xc8, 0x3a, 0xd3, 0x2d, 0x95, 0x9e, 0x3c, 0x9f, 0x2d,
+ 0xa8, 0x09, 0xb4, 0xf2, 0x13, 0x82, 0x46, 0x32, 0x4e, 0xfc, 0x2e, 0x60, 0x9f, 0xea, 0x1e, 0xd5,
+ 0x18, 0x88, 0xea, 0x96, 0xab, 0x59, 0x81, 0x53, 0x34, 0x57, 0x54, 0xdb, 0x4c, 0xb3, 0x11, 0x2a,
+ 0x56, 0x7d, 0x3c, 0x07, 0x6d, 0x62, 0xf7, 0xd3, 0xd8, 0x09, 0x86, 0x6d, 0x11, 0xbb, 0x9f, 0x44,
+ 0x9e, 0x81, 0xaa, 0xa5, 0xd3, 0xde, 0x36, 0xf1, 0xfc, 0x4e, 0x31, 0xcd, 0xd3, 0x0d, 0x7d, 0x93,
+ 0x98, 0xab, 0x5c, 0xa9, 0x46, 0x28, 0xe5, 0x67, 0x04, 0xd3, 0xcb, 0x8f, 0x88, 0xe5, 0x9a, 0xba,
+ 0xf7, 0x9f, 0x84, 0x78, 0x76, 0x47, 0x88, 0x87, 0xf2, 0x42, 0xf4, 0x13, 0x31, 0x7e, 0x09, 0x53,
+ 0x2c, 0xb4, 0x75, 0xea, 0x11, 0xdd, 0x8a, 0x4e, 0xe4, 0x02, 0xd4, 0x7b, 0xdb, 0x03, 0xfb, 0x7e,
+ 0xea, 0x48, 0x66, 0x42, 0x67, 0xf1, 0x81, 0x5c, 0x0c, 0x40, 0xe2, 0x54, 0x92, 0x16, 0xd7, 0x4a,
+ 0xd5, 0x89, 0x76, 0x51, 0x59, 0x87, 0x43, 0x19, 0x02, 0xde, 0xc0, 0x89, 0xff, 0x8e, 0x00, 0xb3,
+ 0x74, 0xee, 0xea, 0xe6, 0x80, 0xf8, 0x21, 0xa9, 0xc7, 0x00, 0xcc, 0x40, 0xaa, 0xd9, 0xba, 0x45,
+ 0x18, 0x99, 0x35, 0xb5, 0xc6, 0x24, 0x37, 0x75, 0x8b, 0x8c, 0xe0, 0x7c, 0xe2, 0x35, 0x38, 0x2f,
+ 0xee, 0xc9, 0x79, 0xe9, 0x38, 0x1a, 0x83, 0x73, 0x3c, 0x0d, 0x65, 0xd3, 0xb0, 0x0c, 0xda, 0x29,
+ 0x33, 0x8f, 0x7c, 0xa1, 0x9c, 0x83, 0xa9, 0x54, 0x56, 0x82, 0xa9, 0x13, 0xd0, 0xe0, 0x69, 0x3d,
+ 0x64, 0x72, 0xc6, 0x55, 0x4d, 0xad, 0x9b, 0x31, 0x54, 0xf9, 0x14, 0x8e, 0x24, 0x2c, 0x33, 0x27,
+ 0x39, 0x86, 0xfd, 0x6f, 0x08, 0x26, 0x6f, 0x84, 0x44, 0xf9, 0xfb, 0x5d, 0xa4, 0x51, 0xf6, 0xc5,
+ 0x44, 0xf6, 0xff, 0x82, 0x46, 0xe5, 0x03, 0x51, 0x06, 0x22, 0x6a, 0x91, 0xef, 0x2c, 0xd4, 0xe3,
+ 0x32, 0x08, 0xd3, 0x85, 0xa8, 0x0e, 0x7c, 0xe5, 0x23, 0xe8, 0xc4, 0x66, 0x19, 0xb2, 0xf6, 0x34,
+ 0xc6, 0xd0, 0xbe, 0xe3, 0x13, 0x6f, 0x9d, 0xea, 0x34, 0x24, 0x4a, 0xf9, 0x6e, 0x02, 0x26, 0x13,
+ 0x42, 0xe1, 0xea, 0x54, 0xf8, 0x9e, 0x1b, 0x8e, 0xad, 0x79, 0x3a, 0xe5, 0x25, 0x89, 0xd4, 0x66,
+ 0x24, 0x55, 0x75, 0x4a, 0x82, 0xaa, 0xb5, 0x07, 0x96, 0x26, 0x2e, 0x42, 0xc0, 0x58, 0x49, 0xad,
+ 0xd9, 0x03, 0x8b, 0x57, 0x7f, 0x70, 0x08, 0xba, 0x6b, 0x68, 0x19, 0x4f, 0x45, 0xe6, 0xa9, 0xad,
+ 0xbb, 0xc6, 0x4a, 0xca, 0x59, 0x17, 0xa6, 0xbc, 0x81, 0x49, 0xb2, 0xf0, 0x12, 0x83, 0x4f, 0x06,
+ 0xaa, 0x34, 0xfe, 0x24, 0x34, 0xf5, 0x1e, 0x35, 0x1e, 0x92, 0x70, 0xff, 0x32, 0xdb, 0xbf, 0xc1,
+ 0x85, 0x22, 0x84, 0x93, 0xd0, 0x34, 0x1d, 0xbd, 0x4f, 0xfa, 0xda, 0xa6, 0xe9, 0xf4, 0xee, 0xfb,
+ 0x9d, 0x0a, 0x07, 0x71, 0xe1, 0x12, 0x93, 0x29, 0x5f, 0xc1, 0x54, 0x40, 0xc1, 0xca, 0xa5, 0x34,
+ 0x09, 0x33, 0x70, 0x60, 0xe0, 0x13, 0x4f, 0x33, 0xfa, 0xe2, 0x42, 0x56, 0x82, 0xe5, 0x4a, 0x1f,
+ 0x9f, 0x86, 0x52, 0x5f, 0xa7, 0x3a, 0x4b, 0xb8, 0xbe, 0x70, 0x24, 0x3c, 0xea, 0x1d, 0x34, 0xaa,
+ 0x0c, 0xa6, 0x5c, 0x01, 0x1c, 0xa8, 0xfc, 0xb4, 0xf7, 0xb3, 0x50, 0xf6, 0x03, 0x81, 0x78, 0x3f,
+ 0x8e, 0x26, 0xbd, 0x64, 0x22, 0x51, 0x39, 0x52, 0x79, 0x82, 0x40, 0x5e, 0x25, 0xd4, 0x33, 0x7a,
+ 0xfe, 0x65, 0xc7, 0x4b, 0x57, 0xd6, 0x3e, 0xd7, 0xfd, 0x39, 0x68, 0x84, 0xa5, 0xab, 0xf9, 0x84,
+ 0xee, 0xfe, 0x40, 0xd7, 0x43, 0xe8, 0x3a, 0xa1, 0xf1, 0x8d, 0x29, 0x25, 0xdf, 0x8b, 0xeb, 0x30,
+ 0x3b, 0x32, 0x13, 0x41, 0xd0, 0x1c, 0x54, 0x2c, 0x06, 0x11, 0x0c, 0xb5, 0xe3, 0x17, 0x96, 0x9b,
+ 0xaa, 0x42, 0xaf, 0xdc, 0x86, 0x53, 0x23, 0x9c, 0x65, 0x6e, 0xc8, 0xf8, 0x2e, 0x5d, 0x38, 0x2c,
+ 0x5c, 0xae, 0x12, 0xaa, 0x07, 0xc7, 0x18, 0x32, 0x1c, 0xe5, 0x83, 0x92, 0x2f, 0xc0, 0x1c, 0xb4,
+ 0xd9, 0x87, 0xe6, 0x12, 0x4f, 0x13, 0x7b, 0x08, 0x26, 0x99, 0xfc, 0x16, 0xf1, 0xb8, 0x3f, 0x7c,
+ 0x38, 0x8a, 0xa1, 0xc8, 0x8b, 0x4a, 0xec, 0xb8, 0x06, 0x33, 0x3b, 0x76, 0x14, 0x61, 0xbf, 0x0f,
+ 0x55, 0x4b, 0xc8, 0x44, 0xe0, 0x9d, 0x6c, 0xe0, 0x91, 0x4d, 0x84, 0x54, 0xfe, 0x46, 0x70, 0x30,
+ 0xd3, 0xeb, 0x82, 0x30, 0xef, 0x79, 0x8e, 0xa5, 0x85, 0xc3, 0x5a, 0x5c, 0xdb, 0xad, 0x40, 0xbe,
+ 0x22, 0xc4, 0x2b, 0xfd, 0x64, 0xf1, 0x4f, 0xa4, 0x8a, 0xdf, 0x86, 0x0a, 0x7b, 0x52, 0xc2, 0x26,
+ 0x3d, 0x15, 0x87, 0xc2, 0xa8, 0xbf, 0xa5, 0x1b, 0xde, 0xd2, 0x62, 0xd0, 0xf7, 0xfe, 0x78, 0x3e,
+ 0xfb, 0x5a, 0x73, 0x1e, 0xb7, 0x5f, 0xec, 0xeb, 0x2e, 0x25, 0x9e, 0x2a, 0x76, 0xc1, 0xef, 0x40,
+ 0x85, 0xb7, 0xe6, 0x4e, 0x89, 0xed, 0xd7, 0x0c, 0x6b, 0x2e, 0xd9, 0xbd, 0x05, 0x44, 0xf9, 0x01,
+ 0x41, 0x99, 0x67, 0xba, 0x5f, 0x17, 0x41, 0x82, 0x2a, 0xb1, 0x7b, 0x4e, 0xdf, 0xb0, 0xb7, 0xd8,
+ 0x01, 0x96, 0xd5, 0x68, 0x8d, 0xb1, 0x78, 0x17, 0x82, 0x4a, 0x6f, 0x88, 0xcb, 0xbf, 0x08, 0xcd,
+ 0x54, 0x45, 0xa6, 0x26, 0x31, 0x34, 0xd6, 0x24, 0xa6, 0x41, 0x23, 0xa9, 0xc1, 0xa7, 0xa0, 0x44,
+ 0x1f, 0xbb, 0xfc, 0x49, 0x6e, 0x2d, 0x4c, 0x86, 0xd6, 0x4c, 0xbd, 0xf1, 0xd8, 0x25, 0x2a, 0x53,
+ 0x07, 0xd1, 0xb0, 0x61, 0x82, 0x1f, 0x1f, 0xfb, 0x0e, 0x8a, 0x97, 0x75, 0x52, 0x51, 0x7b, 0x7c,
+ 0xa1, 0x7c, 0x8f, 0xa0, 0x15, 0x57, 0xca, 0x65, 0xc3, 0x24, 0x6f, 0xa2, 0x50, 0x24, 0xa8, 0xde,
+ 0x33, 0x4c, 0xc2, 0x62, 0xe0, 0xdb, 0x45, 0xeb, 0x3c, 0xa6, 0xde, 0xbe, 0x06, 0xb5, 0x28, 0x05,
+ 0x5c, 0x83, 0xf2, 0xf2, 0xed, 0x3b, 0x8b, 0x37, 0xda, 0x05, 0xdc, 0x84, 0xda, 0xcd, 0xb5, 0x0d,
+ 0x8d, 0x2f, 0x11, 0x3e, 0x08, 0x75, 0x75, 0xf9, 0xca, 0xf2, 0xe7, 0xda, 0xea, 0xe2, 0xc6, 0xc5,
+ 0xab, 0xed, 0x09, 0x8c, 0xa1, 0xc5, 0x05, 0x37, 0xd7, 0x84, 0xac, 0xb8, 0xf0, 0x6b, 0x15, 0xaa,
+ 0x61, 0x8c, 0xf8, 0x3c, 0x94, 0x6e, 0x0d, 0xfc, 0x6d, 0x7c, 0x38, 0xae, 0xd4, 0xcf, 0x3c, 0x83,
+ 0x12, 0x71, 0xa3, 0xa5, 0x99, 0x1d, 0x72, 0x7e, 0xef, 0x94, 0x02, 0x5e, 0x01, 0x08, 0x4c, 0xf9,
+ 0x33, 0x82, 0xff, 0x17, 0x03, 0xb9, 0x64, 0x4c, 0x37, 0x73, 0xe8, 0x0c, 0xc2, 0x97, 0xa0, 0x9e,
+ 0x98, 0x55, 0x71, 0xee, 0xcf, 0x14, 0xe9, 0x68, 0x4a, 0x9a, 0x7e, 0xbd, 0x94, 0xc2, 0x19, 0x84,
+ 0xd7, 0xa0, 0xc5, 0x54, 0xe1, 0x60, 0xea, 0x47, 0x41, 0x75, 0xf3, 0x86, 0x75, 0xe9, 0xd8, 0x08,
+ 0x6d, 0x94, 0xe1, 0x55, 0xa8, 0x27, 0xc6, 0x2f, 0x2c, 0xa5, 0x6a, 0x31, 0x35, 0xa3, 0xc6, 0xc1,
+ 0xe5, 0x4c, 0x7a, 0x4a, 0x01, 0xdf, 0x15, 0x73, 0x58, 0x72, 0x90, 0xdb, 0xd5, 0xdf, 0x89, 0x1c,
+ 0x5d, 0x4e, 0xca, 0xcb, 0x00, 0xf1, 0xc8, 0x83, 0x8f, 0xa4, 0x8c, 0x92, 0x33, 0x9f, 0x24, 0xe5,
+ 0xa9, 0xa2, 0xf0, 0xd6, 0xa1, 0x9d, 0x9d, 0x9c, 0x76, 0x73, 0x76, 0x7c, 0xa7, 0x2a, 0x27, 0xb6,
+ 0x25, 0xa8, 0x45, 0x5d, 0x1f, 0x77, 0x72, 0x06, 0x01, 0xee, 0x6c, 0xf4, 0x88, 0xa0, 0x14, 0xf0,
+ 0x65, 0x68, 0x2c, 0x9a, 0xe6, 0x38, 0x6e, 0xa4, 0xa4, 0xc6, 0xcf, 0xfa, 0x31, 0xa3, 0x06, 0x92,
+ 0xed, 0x82, 0xf8, 0xff, 0xd1, 0x1b, 0xb1, 0xeb, 0xf4, 0x20, 0xbd, 0xb5, 0x27, 0x2e, 0xda, 0xed,
+ 0x1b, 0x38, 0xb6, 0x6b, 0xcf, 0x1d, 0x7b, 0xcf, 0xd3, 0x7b, 0xe0, 0x72, 0x58, 0xdf, 0x80, 0x83,
+ 0x99, 0x56, 0x89, 0xe5, 0x8c, 0x97, 0x4c, 0xd7, 0x96, 0x66, 0x47, 0xea, 0x43, 0xbf, 0x4b, 0x1f,
+ 0x3f, 0x7d, 0x21, 0x17, 0x9e, 0xbd, 0x90, 0x0b, 0xaf, 0x5e, 0xc8, 0xe8, 0xdb, 0xa1, 0x8c, 0x7e,
+ 0x19, 0xca, 0xe8, 0xc9, 0x50, 0x46, 0x4f, 0x87, 0x32, 0xfa, 0x73, 0x28, 0xa3, 0xbf, 0x86, 0x72,
+ 0xe1, 0xd5, 0x50, 0x46, 0x3f, 0xbe, 0x94, 0x0b, 0x4f, 0x5f, 0xca, 0x85, 0x67, 0x2f, 0xe5, 0xc2,
+ 0x17, 0x95, 0x9e, 0x69, 0x10, 0x9b, 0x6e, 0x56, 0xd8, 0xbf, 0x13, 0xef, 0xfd, 0x13, 0x00, 0x00,
+ 0xff, 0xff, 0xfe, 0x5e, 0xa0, 0x57, 0x08, 0x11, 0x00, 0x00,
}
func (x MatchType) String() string {
@@ -1897,6 +1942,9 @@ func (this *LabelNamesRequest) Equal(that interface{}) bool {
if this.Limit != that1.Limit {
return false
}
+ if !this.Matchers.Equal(that1.Matchers) {
+ return false
+ }
return true
}
func (this *LabelNamesResponse) Equal(that interface{}) bool {
@@ -2012,6 +2060,9 @@ func (this *UserStatsResponse) Equal(that interface{}) bool {
if this.ActiveSeries != that1.ActiveSeries {
return false
}
+ if this.LoadedBlocks != that1.LoadedBlocks {
+ return false
+ }
return true
}
func (this *UserIDStatsResponse) Equal(that interface{}) bool {
@@ -2185,6 +2236,15 @@ func (this *MetricsMetadataRequest) Equal(that interface{}) bool {
} else if this == nil {
return false
}
+ if this.Limit != that1.Limit {
+ return false
+ }
+ if this.LimitPerMetric != that1.LimitPerMetric {
+ return false
+ }
+ if this.Metric != that1.Metric {
+ return false
+ }
return true
}
func (this *MetricsMetadataResponse) Equal(that interface{}) bool {
@@ -2524,11 +2584,14 @@ func (this *LabelNamesRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 7)
+ s := make([]string, 0, 8)
s = append(s, "&client.LabelNamesRequest{")
s = append(s, "StartTimestampMs: "+fmt.Sprintf("%#v", this.StartTimestampMs)+",\n")
s = append(s, "EndTimestampMs: "+fmt.Sprintf("%#v", this.EndTimestampMs)+",\n")
s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
+ if this.Matchers != nil {
+ s = append(s, "Matchers: "+fmt.Sprintf("%#v", this.Matchers)+",\n")
+ }
s = append(s, "}")
return strings.Join(s, "")
}
@@ -2565,13 +2628,14 @@ func (this *UserStatsResponse) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 9)
+ s := make([]string, 0, 10)
s = append(s, "&client.UserStatsResponse{")
s = append(s, "IngestionRate: "+fmt.Sprintf("%#v", this.IngestionRate)+",\n")
s = append(s, "NumSeries: "+fmt.Sprintf("%#v", this.NumSeries)+",\n")
s = append(s, "ApiIngestionRate: "+fmt.Sprintf("%#v", this.ApiIngestionRate)+",\n")
s = append(s, "RuleIngestionRate: "+fmt.Sprintf("%#v", this.RuleIngestionRate)+",\n")
s = append(s, "ActiveSeries: "+fmt.Sprintf("%#v", this.ActiveSeries)+",\n")
+ s = append(s, "LoadedBlocks: "+fmt.Sprintf("%#v", this.LoadedBlocks)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -2643,8 +2707,11 @@ func (this *MetricsMetadataRequest) GoString() string {
if this == nil {
return "nil"
}
- s := make([]string, 0, 4)
+ s := make([]string, 0, 7)
s = append(s, "&client.MetricsMetadataRequest{")
+ s = append(s, "Limit: "+fmt.Sprintf("%#v", this.Limit)+",\n")
+ s = append(s, "LimitPerMetric: "+fmt.Sprintf("%#v", this.LimitPerMetric)+",\n")
+ s = append(s, "Metric: "+fmt.Sprintf("%#v", this.Metric)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@@ -2751,6 +2818,7 @@ const _ = grpc.SupportPackageIsVersion4
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type IngesterClient interface {
Push(ctx context.Context, in *cortexpb.WriteRequest, opts ...grpc.CallOption) (*cortexpb.WriteResponse, error)
+ PushStream(ctx context.Context, opts ...grpc.CallOption) (Ingester_PushStreamClient, error)
QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error)
QueryExemplars(ctx context.Context, in *ExemplarQueryRequest, opts ...grpc.CallOption) (*ExemplarQueryResponse, error)
LabelValues(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (*LabelValuesResponse, error)
@@ -2781,8 +2849,39 @@ func (c *ingesterClient) Push(ctx context.Context, in *cortexpb.WriteRequest, op
return out, nil
}
+func (c *ingesterClient) PushStream(ctx context.Context, opts ...grpc.CallOption) (Ingester_PushStreamClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/PushStream", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &ingesterPushStreamClient{stream}
+ return x, nil
+}
+
+type Ingester_PushStreamClient interface {
+ Send(*cortexpb.StreamWriteRequest) error
+ Recv() (*cortexpb.WriteResponse, error)
+ grpc.ClientStream
+}
+
+type ingesterPushStreamClient struct {
+ grpc.ClientStream
+}
+
+func (x *ingesterPushStreamClient) Send(m *cortexpb.StreamWriteRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *ingesterPushStreamClient) Recv() (*cortexpb.WriteResponse, error) {
+ m := new(cortexpb.WriteResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func (c *ingesterClient) QueryStream(ctx context.Context, in *QueryRequest, opts ...grpc.CallOption) (Ingester_QueryStreamClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[0], "/cortex.Ingester/QueryStream", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/QueryStream", opts...)
if err != nil {
return nil, err
}
@@ -2832,7 +2931,7 @@ func (c *ingesterClient) LabelValues(ctx context.Context, in *LabelValuesRequest
}
func (c *ingesterClient) LabelValuesStream(ctx context.Context, in *LabelValuesRequest, opts ...grpc.CallOption) (Ingester_LabelValuesStreamClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[1], "/cortex.Ingester/LabelValuesStream", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/LabelValuesStream", opts...)
if err != nil {
return nil, err
}
@@ -2873,7 +2972,7 @@ func (c *ingesterClient) LabelNames(ctx context.Context, in *LabelNamesRequest,
}
func (c *ingesterClient) LabelNamesStream(ctx context.Context, in *LabelNamesRequest, opts ...grpc.CallOption) (Ingester_LabelNamesStreamClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[2], "/cortex.Ingester/LabelNamesStream", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[3], "/cortex.Ingester/LabelNamesStream", opts...)
if err != nil {
return nil, err
}
@@ -2932,7 +3031,7 @@ func (c *ingesterClient) MetricsForLabelMatchers(ctx context.Context, in *Metric
}
func (c *ingesterClient) MetricsForLabelMatchersStream(ctx context.Context, in *MetricsForLabelMatchersRequest, opts ...grpc.CallOption) (Ingester_MetricsForLabelMatchersStreamClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[3], "/cortex.Ingester/MetricsForLabelMatchersStream", opts...)
+ stream, err := c.cc.NewStream(ctx, &_Ingester_serviceDesc.Streams[4], "/cortex.Ingester/MetricsForLabelMatchersStream", opts...)
if err != nil {
return nil, err
}
@@ -2975,6 +3074,7 @@ func (c *ingesterClient) MetricsMetadata(ctx context.Context, in *MetricsMetadat
// IngesterServer is the server API for Ingester service.
type IngesterServer interface {
Push(context.Context, *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error)
+ PushStream(Ingester_PushStreamServer) error
QueryStream(*QueryRequest, Ingester_QueryStreamServer) error
QueryExemplars(context.Context, *ExemplarQueryRequest) (*ExemplarQueryResponse, error)
LabelValues(context.Context, *LabelValuesRequest) (*LabelValuesResponse, error)
@@ -2995,6 +3095,9 @@ type UnimplementedIngesterServer struct {
func (*UnimplementedIngesterServer) Push(ctx context.Context, req *cortexpb.WriteRequest) (*cortexpb.WriteResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Push not implemented")
}
+func (*UnimplementedIngesterServer) PushStream(srv Ingester_PushStreamServer) error {
+ return status.Errorf(codes.Unimplemented, "method PushStream not implemented")
+}
func (*UnimplementedIngesterServer) QueryStream(req *QueryRequest, srv Ingester_QueryStreamServer) error {
return status.Errorf(codes.Unimplemented, "method QueryStream not implemented")
}
@@ -3051,6 +3154,32 @@ func _Ingester_Push_Handler(srv interface{}, ctx context.Context, dec func(inter
return interceptor(ctx, in, info, handler)
}
+func _Ingester_PushStream_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(IngesterServer).PushStream(&ingesterPushStreamServer{stream})
+}
+
+type Ingester_PushStreamServer interface {
+ Send(*cortexpb.WriteResponse) error
+ Recv() (*cortexpb.StreamWriteRequest, error)
+ grpc.ServerStream
+}
+
+type ingesterPushStreamServer struct {
+ grpc.ServerStream
+}
+
+func (x *ingesterPushStreamServer) Send(m *cortexpb.WriteResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *ingesterPushStreamServer) Recv() (*cortexpb.StreamWriteRequest, error) {
+ m := new(cortexpb.StreamWriteRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
func _Ingester_QueryStream_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(QueryRequest)
if err := stream.RecvMsg(m); err != nil {
@@ -3299,6 +3428,12 @@ var _Ingester_serviceDesc = grpc.ServiceDesc{
},
},
Streams: []grpc.StreamDesc{
+ {
+ StreamName: "PushStream",
+ Handler: _Ingester_PushStream_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
{
StreamName: "QueryStream",
Handler: _Ingester_QueryStream_Handler,
@@ -3743,6 +3878,18 @@ func (m *LabelNamesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.Matchers != nil {
+ {
+ size, err := m.Matchers.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintIngester(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
if m.Limit != 0 {
i = encodeVarintIngester(dAtA, i, uint64(m.Limit))
i--
@@ -3868,6 +4015,11 @@ func (m *UserStatsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
+ if m.LoadedBlocks != 0 {
+ i = encodeVarintIngester(dAtA, i, uint64(m.LoadedBlocks))
+ i--
+ dAtA[i] = 0x30
+ }
if m.ActiveSeries != 0 {
i = encodeVarintIngester(dAtA, i, uint64(m.ActiveSeries))
i--
@@ -4124,6 +4276,23 @@ func (m *MetricsMetadataRequest) MarshalToSizedBuffer(dAtA []byte) (int, error)
_ = i
var l int
_ = l
+ if len(m.Metric) > 0 {
+ i -= len(m.Metric)
+ copy(dAtA[i:], m.Metric)
+ i = encodeVarintIngester(dAtA, i, uint64(len(m.Metric)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.LimitPerMetric != 0 {
+ i = encodeVarintIngester(dAtA, i, uint64(m.LimitPerMetric))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Limit != 0 {
+ i = encodeVarintIngester(dAtA, i, uint64(m.Limit))
+ i--
+ dAtA[i] = 0x8
+ }
return len(dAtA) - i, nil
}
@@ -4603,6 +4772,10 @@ func (m *LabelNamesRequest) Size() (n int) {
if m.Limit != 0 {
n += 1 + sovIngester(uint64(m.Limit))
}
+ if m.Matchers != nil {
+ l = m.Matchers.Size()
+ n += 1 + l + sovIngester(uint64(l))
+ }
return n
}
@@ -4666,6 +4839,9 @@ func (m *UserStatsResponse) Size() (n int) {
if m.ActiveSeries != 0 {
n += 1 + sovIngester(uint64(m.ActiveSeries))
}
+ if m.LoadedBlocks != 0 {
+ n += 1 + sovIngester(uint64(m.LoadedBlocks))
+ }
return n
}
@@ -4761,6 +4937,16 @@ func (m *MetricsMetadataRequest) Size() (n int) {
}
var l int
_ = l
+ if m.Limit != 0 {
+ n += 1 + sovIngester(uint64(m.Limit))
+ }
+ if m.LimitPerMetric != 0 {
+ n += 1 + sovIngester(uint64(m.LimitPerMetric))
+ }
+ l = len(m.Metric)
+ if l > 0 {
+ n += 1 + l + sovIngester(uint64(l))
+ }
return n
}
@@ -5047,6 +5233,7 @@ func (this *LabelNamesRequest) String() string {
`StartTimestampMs:` + fmt.Sprintf("%v", this.StartTimestampMs) + `,`,
`EndTimestampMs:` + fmt.Sprintf("%v", this.EndTimestampMs) + `,`,
`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+ `Matchers:` + strings.Replace(this.Matchers.String(), "LabelMatchers", "LabelMatchers", 1) + `,`,
`}`,
}, "")
return s
@@ -5090,6 +5277,7 @@ func (this *UserStatsResponse) String() string {
`ApiIngestionRate:` + fmt.Sprintf("%v", this.ApiIngestionRate) + `,`,
`RuleIngestionRate:` + fmt.Sprintf("%v", this.RuleIngestionRate) + `,`,
`ActiveSeries:` + fmt.Sprintf("%v", this.ActiveSeries) + `,`,
+ `LoadedBlocks:` + fmt.Sprintf("%v", this.LoadedBlocks) + `,`,
`}`,
}, "")
return s
@@ -5173,6 +5361,9 @@ func (this *MetricsMetadataRequest) String() string {
return "nil"
}
s := strings.Join([]string{`&MetricsMetadataRequest{`,
+ `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+ `LimitPerMetric:` + fmt.Sprintf("%v", this.LimitPerMetric) + `,`,
+ `Metric:` + fmt.Sprintf("%v", this.Metric) + `,`,
`}`,
}, "")
return s
@@ -6390,6 +6581,42 @@ func (m *LabelNamesRequest) Unmarshal(dAtA []byte) error {
break
}
}
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Matchers", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIngester
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthIngester
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthIngester
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Matchers == nil {
+ m.Matchers = &LabelMatchers{}
+ }
+ if err := m.Matchers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipIngester(dAtA[iNdEx:])
@@ -6737,6 +6964,25 @@ func (m *UserStatsResponse) Unmarshal(dAtA []byte) error {
break
}
}
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LoadedBlocks", wireType)
+ }
+ m.LoadedBlocks = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIngester
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.LoadedBlocks |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
default:
iNdEx = preIndex
skippy, err := skipIngester(dAtA[iNdEx:])
@@ -7316,6 +7562,76 @@ func (m *MetricsMetadataRequest) Unmarshal(dAtA []byte) error {
return fmt.Errorf("proto: MetricsMetadataRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ }
+ m.Limit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIngester
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Limit |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LimitPerMetric", wireType)
+ }
+ m.LimitPerMetric = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIngester
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.LimitPerMetric |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowIngester
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthIngester
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthIngester
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Metric = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipIngester(dAtA[iNdEx:])
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto
index e9db9685c..38b303967 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/ingester.proto
@@ -13,6 +13,7 @@ option (gogoproto.unmarshaler_all) = true;
service Ingester {
rpc Push(cortexpb.WriteRequest) returns (cortexpb.WriteResponse) {};
+ rpc PushStream(stream cortexpb.StreamWriteRequest) returns (stream cortexpb.WriteResponse) {};
rpc QueryStream(QueryRequest) returns (stream QueryStreamResponse) {};
rpc QueryExemplars(ExemplarQueryRequest) returns (ExemplarQueryResponse) {};
@@ -82,6 +83,7 @@ message LabelNamesRequest {
int64 start_timestamp_ms = 1;
int64 end_timestamp_ms = 2;
int64 limit = 3;
+ LabelMatchers matchers = 4;
}
message LabelNamesResponse {
@@ -100,6 +102,7 @@ message UserStatsResponse {
double api_ingestion_rate = 3;
double rule_ingestion_rate = 4;
uint64 active_series = 5;
+ uint64 loaded_blocks = 6;
}
message UserIDStatsResponse {
@@ -127,6 +130,9 @@ message MetricsForLabelMatchersStreamResponse {
}
message MetricsMetadataRequest {
+ int64 limit = 1;
+ int64 limit_per_metric = 2;
+ string metric = 3;
}
message MetricsMetadataResponse {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/partialdata/partia_data.go b/vendor/github.com/cortexproject/cortex/pkg/querier/partialdata/partia_data.go
new file mode 100644
index 000000000..29968d941
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/querier/partialdata/partia_data.go
@@ -0,0 +1,13 @@
+package partialdata
+
+import (
+ "errors"
+)
+
+type IsCfgEnabledFunc func(userID string) bool
+
+var ErrPartialData = errors.New("query result may contain partial data")
+
+func IsPartialDataError(err error) bool {
+ return errors.Is(err, ErrPartialData)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
index 70491b1a1..fb751e4fa 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go
@@ -271,7 +271,7 @@ heartbeatLoop:
func (l *BasicLifecycler) registerInstance(ctx context.Context) error {
var instanceDesc InstanceDesc
- err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) {
ringDesc := GetOrCreateRingDesc(in)
var exists bool
@@ -392,7 +392,7 @@ func (l *BasicLifecycler) verifyTokens(ctx context.Context) bool {
func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error {
level.Info(l.logger).Log("msg", "unregistering instance from ring", "ring", l.ringName)
- err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) {
if in == nil {
return nil, false, fmt.Errorf("found empty ring when trying to unregister")
}
@@ -418,7 +418,7 @@ func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error {
func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *InstanceDesc) bool) error {
var instanceDesc InstanceDesc
- err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := l.store.CAS(ctx, l.ringKey, func(in any) (out any, retry bool, err error) {
ringDesc := GetOrCreateRingDesc(in)
var ok bool
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go
index 26e3cfa41..ebd23f4f6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go
@@ -70,7 +70,7 @@ func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLife
return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc)
}
- tokensFromFile, err := LoadTokensFromFile(d.tokensPath)
+ tokenFile, err := LoadTokenFile(d.tokensPath)
if err != nil {
if !os.IsNotExist(err) {
level.Error(d.logger).Log("msg", "error loading tokens from file", "err", err)
@@ -78,6 +78,7 @@ func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLife
return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc)
}
+ tokensFromFile := tokenFile.Tokens
// Signal the next delegate that the tokens have been loaded, miming the
// case the instance exist in the ring (which is OK because the lifecycler
@@ -94,7 +95,8 @@ func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLife
func (d *TokensPersistencyDelegate) OnRingInstanceTokens(lifecycler *BasicLifecycler, tokens Tokens) {
if d.tokensPath != "" {
- if err := tokens.StoreToFile(d.tokensPath); err != nil {
+ tokenFile := TokenFile{Tokens: tokens}
+ if err := tokenFile.StoreToFile(d.tokensPath); err != nil {
level.Error(d.logger).Log("msg", "error storing tokens to disk", "path", d.tokensPath, "err", err)
}
}
@@ -139,14 +141,6 @@ func (d *AutoForgetDelegate) OnRingInstanceStopping(lifecycler *BasicLifecycler)
}
func (d *AutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) {
- for id, instance := range ringDesc.Ingesters {
- lastHeartbeat := time.Unix(instance.GetTimestamp(), 0)
-
- if time.Since(lastHeartbeat) > d.forgetPeriod {
- level.Warn(d.logger).Log("msg", "auto-forgetting instance from the ring because it is unhealthy for a long time", "instance", id, "last_heartbeat", lastHeartbeat.String(), "forget_period", d.forgetPeriod)
- ringDesc.RemoveIngester(id)
- }
- }
-
+ AutoForgetFromRing(ringDesc, d.forgetPeriod, d.logger)
d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
index 7f063c20b..da44e1d8a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go
@@ -8,9 +8,14 @@ import (
"go.uber.org/atomic"
"google.golang.org/grpc/status"
+ "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/httpgrpcutil"
)
+var (
+ noOpExecutor = util.NewNoOpExecutor()
+)
+
type batchTracker struct {
rpcsPending atomic.Int32
rpcsFailed atomic.Int32
@@ -66,12 +71,16 @@ func (i *itemTracker) getError() error {
// cleanup() is always called, either on an error before starting the batches or after they all finish.
//
// Not implemented as a method on Ring so we can test separately.
-func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
+func DoBatch(ctx context.Context, op Operation, r ReadRing, e util.AsyncExecutor, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error {
if r.InstancesCount() <= 0 {
cleanup()
return fmt.Errorf("DoBatch: InstancesCount <= 0")
}
+ if e == nil {
+ e = noOpExecutor
+ }
+
expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount()
itemTrackers := make([]itemTracker, len(keys))
instances := make(map[string]instance, r.InstancesCount())
@@ -115,11 +124,11 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb
wg.Add(len(instances))
for _, i := range instances {
- go func(i instance) {
+ e.Submit(func() {
err := callback(i.desc, i.indexes)
tracker.record(i, err)
wg.Done()
- }(i)
+ })
}
// Perform cleanup at the end.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/http.go b/vendor/github.com/cortexproject/cortex/pkg/ring/http.go
index cbef6f3ce..023b716fe 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/http.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/http.go
@@ -94,7 +94,7 @@ func init() {
}
func (r *Ring) forget(ctx context.Context, id string) error {
- unregister := func(in interface{}) (out interface{}, retry bool, err error) {
+ unregister := func(in any) (out any, retry bool, err error) {
if in == nil {
return nil, false, fmt.Errorf("found empty ring when trying to unregister")
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go
index eae1ee251..163e48a6f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go
@@ -39,11 +39,11 @@ var inmemoryStoreInit sync.Once
var inmemoryStore Client
// StoreConfig is a configuration used for building single store client, either
-// Consul, Etcd, Memberlist or MultiClient. It was extracted from Config to keep
+// Consul, DynamoDB, Etcd, Memberlist or MultiClient. It was extracted from Config to keep
// single-client config separate from final client-config (with all the wrappers)
type StoreConfig struct {
- DynamoDB dynamodb.Config `yaml:"dynamodb"`
Consul consul.Config `yaml:"consul"`
+ DynamoDB dynamodb.Config `yaml:"dynamodb"`
Etcd etcd.Config `yaml:"etcd"`
Multi MultiConfig `yaml:"multi"`
@@ -81,7 +81,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(flagsPrefix, defaultPrefix string, f
flagsPrefix = "ring."
}
f.StringVar(&cfg.Prefix, flagsPrefix+"prefix", defaultPrefix, "The prefix for the keys in the store. Should end with a /.")
- f.StringVar(&cfg.Store, flagsPrefix+"store", "consul", "Backend storage to use for the ring. Supported values are: consul, etcd, inmemory, memberlist, multi.")
+ f.StringVar(&cfg.Store, flagsPrefix+"store", "consul", "Backend storage to use for the ring. Supported values are: consul, dynamodb, etcd, inmemory, memberlist, multi.")
}
// Client is a high-level client for key-value stores (such as Etcd and
@@ -95,7 +95,7 @@ type Client interface {
// Get a specific key. Will use a codec to deserialise key to appropriate type.
// If the key does not exist, Get will return nil and no error.
- Get(ctx context.Context, key string) (interface{}, error)
+ Get(ctx context.Context, key string) (any, error)
// Delete a specific key. Deletions are best-effort and no error will
// be returned if the key does not exist.
@@ -108,19 +108,19 @@ type Client interface {
// with new value etc. Guarantees that only a single concurrent CAS
// succeeds. Callback can return nil to indicate it is happy with existing
// value.
- CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error
+ CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error
// WatchKey calls f whenever the value stored under key changes.
- WatchKey(ctx context.Context, key string, f func(interface{}) bool)
+ WatchKey(ctx context.Context, key string, f func(any) bool)
// WatchPrefix calls f whenever any value stored under prefix changes.
- WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool)
+ WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool)
// LastUpdateTime returns the time a key was last sync by the kv store
LastUpdateTime(key string) time.Time
}
-// NewClient creates a new Client (consul, etcd or inmemory) based on the config,
+// NewClient creates a new Client based on the config,
// encodes and decodes data for storage using the codec.
func NewClient(cfg Config, codec codec.Codec, reg prometheus.Registerer, logger log.Logger) (Client, error) {
if cfg.Mock != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/clonable.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/clonable.go
index c3df74c62..5b0eb38c8 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/clonable.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/clonable.go
@@ -2,5 +2,5 @@ package codec
type Clonable interface {
// Clone should return a deep copy of the state.
- Clone() interface{}
+ Clone() any
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go
index d701bbe20..9c88473e5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/codec.go
@@ -10,11 +10,11 @@ import (
// Codec allows KV clients to serialise and deserialise values.
type Codec interface {
- Decode([]byte) (interface{}, error)
- Encode(interface{}) ([]byte, error)
+ Decode([]byte) (any, error)
+ Encode(any) ([]byte, error)
- DecodeMultiKey(map[string][]byte) (interface{}, error)
- EncodeMultiKey(interface{}) (map[string][]byte, error)
+ DecodeMultiKey(map[string][]byte) (any, error)
+ EncodeMultiKey(any) (map[string][]byte, error)
// CodecID is a short identifier to communicate what codec should be used to decode the value.
// Once in use, this should be stable to avoid confusing other clients.
@@ -36,12 +36,12 @@ func (p Proto) CodecID() string {
}
// Decode implements Codec
-func (p Proto) Decode(bytes []byte) (interface{}, error) {
+func (p Proto) Decode(bytes []byte) (any, error) {
return p.decode(bytes, p.factory())
}
// DecodeMultiKey implements Codec
-func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) {
+func (p Proto) DecodeMultiKey(data map[string][]byte) (any, error) {
msg := p.factory()
// Don't even try
out, ok := msg.(MultiKey)
@@ -50,7 +50,7 @@ func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) {
}
if len(data) > 0 {
- res := make(map[string]interface{}, len(data))
+ res := make(map[string]any, len(data))
for key, bytes := range data {
decoded, err := p.decode(bytes, out.GetItemFactory())
if err != nil {
@@ -64,7 +64,7 @@ func (p Proto) DecodeMultiKey(data map[string][]byte) (interface{}, error) {
return out, nil
}
-func (p Proto) decode(bytes []byte, out proto.Message) (interface{}, error) {
+func (p Proto) decode(bytes []byte, out proto.Message) (any, error) {
bytes, err := snappy.Decode(nil, bytes)
if err != nil {
return nil, err
@@ -76,7 +76,7 @@ func (p Proto) decode(bytes []byte, out proto.Message) (interface{}, error) {
}
// Encode implements Codec
-func (p Proto) Encode(msg interface{}) ([]byte, error) {
+func (p Proto) Encode(msg any) ([]byte, error) {
bytes, err := proto.Marshal(msg.(proto.Message))
if err != nil {
return nil, err
@@ -85,7 +85,7 @@ func (p Proto) Encode(msg interface{}) ([]byte, error) {
}
// EncodeMultiKey implements Codec
-func (p Proto) EncodeMultiKey(msg interface{}) (map[string][]byte, error) {
+func (p Proto) EncodeMultiKey(msg any) (map[string][]byte, error) {
// Don't even try
r, ok := msg.(MultiKey)
if !ok || r == nil {
@@ -112,19 +112,19 @@ func (String) CodecID() string {
}
// Decode implements Codec.
-func (String) Decode(bytes []byte) (interface{}, error) {
+func (String) Decode(bytes []byte) (any, error) {
return string(bytes), nil
}
// Encode implements Codec.
-func (String) Encode(msg interface{}) ([]byte, error) {
+func (String) Encode(msg any) ([]byte, error) {
return []byte(msg.(string)), nil
}
-func (String) EncodeMultiKey(msg interface{}) (map[string][]byte, error) {
+func (String) EncodeMultiKey(msg any) (map[string][]byte, error) {
return nil, errors.New("String codec does not support EncodeMultiKey")
}
-func (String) DecodeMultiKey(map[string][]byte) (interface{}, error) {
+func (String) DecodeMultiKey(map[string][]byte) (any, error) {
return nil, errors.New("String codec does not support DecodeMultiKey")
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/multikey.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/multikey.go
index bd8802c4a..b2e9f12ab 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/multikey.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/codec/multikey.go
@@ -9,11 +9,11 @@ type MultiKey interface {
// SplitByID Split interface in array of key and value. THe key is a unique identifier of an instance in the ring. The value is
// interface with its data. The interface resultant need to be a proto.Message
- SplitByID() map[string]interface{}
+ SplitByID() map[string]any
// JoinIds update the current interface to add receiving key value information. The key is an unique identifier for an instance.
// The value is the information for that instance.
- JoinIds(in map[string]interface{})
+ JoinIds(in map[string]any)
// GetItemFactory method to be used for deserilaize the value information from an instance
GetItemFactory() proto.Message
@@ -21,5 +21,5 @@ type MultiKey interface {
// FindDifference returns the difference between two Multikeys. The returns are an interface which also implements Multikey
// with an array of keys which were changed, and an array of strings which are unique identifiers deleted. An error is
// returned when that does not implement the correct codec
- FindDifference(that MultiKey) (interface{}, []string, error)
+ FindDifference(that MultiKey) (any, []string, error)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
index ab1c9da22..7e86bd8ae 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go
@@ -20,6 +20,7 @@ import (
"github.com/cortexproject/cortex/pkg/ring/kv/codec"
"github.com/cortexproject/cortex/pkg/util/backoff"
"github.com/cortexproject/cortex/pkg/util/flagext"
+ cortextls "github.com/cortexproject/cortex/pkg/util/tls"
)
const (
@@ -29,9 +30,6 @@ const (
var (
writeOptions = &consul.WriteOptions{}
- // ErrNotFound is returned by ConsulClient.Get.
- ErrNotFound = fmt.Errorf("Not found")
-
backoffConfig = backoff.Config{
MinBackoff: 1 * time.Second,
MaxBackoff: 1 * time.Minute,
@@ -40,12 +38,14 @@ var (
// Config to create a ConsulClient
type Config struct {
- Host string `yaml:"host"`
- ACLToken flagext.Secret `yaml:"acl_token"`
- HTTPClientTimeout time.Duration `yaml:"http_client_timeout"`
- ConsistentReads bool `yaml:"consistent_reads"`
- WatchKeyRateLimit float64 `yaml:"watch_rate_limit"` // Zero disables rate limit
- WatchKeyBurstSize int `yaml:"watch_burst_size"` // Burst when doing rate-limit, defaults to 1
+ Host string `yaml:"host"`
+ ACLToken flagext.Secret `yaml:"acl_token"`
+ HTTPClientTimeout time.Duration `yaml:"http_client_timeout"`
+ ConsistentReads bool `yaml:"consistent_reads"`
+ WatchKeyRateLimit float64 `yaml:"watch_rate_limit"` // Zero disables rate limit
+ WatchKeyBurstSize int `yaml:"watch_burst_size"` // Burst when doing rate-limit, defaults to 1
+ EnableTLS bool `yaml:"tls_enabled"`
+ TLS cortextls.ClientConfig `yaml:",inline"`
// Used in tests only.
MaxCasRetries int `yaml:"-"`
@@ -74,24 +74,62 @@ type Client struct {
func (cfg *Config) RegisterFlags(f *flag.FlagSet, prefix string) {
f.StringVar(&cfg.Host, prefix+"consul.hostname", "localhost:8500", "Hostname and port of Consul.")
f.Var(&cfg.ACLToken, prefix+"consul.acl-token", "ACL Token used to interact with Consul.")
- f.DurationVar(&cfg.HTTPClientTimeout, prefix+"consul.client-timeout", 2*longPollDuration, "HTTP timeout when talking to Consul")
+ f.DurationVar(&cfg.HTTPClientTimeout, prefix+"consul.client-timeout", 2*longPollDuration, "HTTP timeout when talking to Consul.")
f.BoolVar(&cfg.ConsistentReads, prefix+"consul.consistent-reads", false, "Enable consistent reads to Consul.")
f.Float64Var(&cfg.WatchKeyRateLimit, prefix+"consul.watch-rate-limit", 1, "Rate limit when watching key or prefix in Consul, in requests per second. 0 disables the rate limit.")
f.IntVar(&cfg.WatchKeyBurstSize, prefix+"consul.watch-burst-size", 1, "Burst size used in rate limit. Values less than 1 are treated as 1.")
+ f.BoolVar(&cfg.EnableTLS, prefix+"consul.tls-enabled", false, "Enable TLS.")
+ cfg.TLS.RegisterFlagsWithPrefix(prefix+"consul", f)
}
-// NewClient returns a new Client.
-func NewClient(cfg Config, codec codec.Codec, logger log.Logger, registerer prometheus.Registerer) (*Client, error) {
- client, err := consul.NewClient(&consul.Config{
+func (cfg *Config) GetTLS() *consul.TLSConfig {
+ return &consul.TLSConfig{
+ Address: cfg.TLS.ServerName,
+ CertFile: cfg.TLS.CertPath,
+ KeyFile: cfg.TLS.KeyPath,
+ CAFile: cfg.TLS.CAPath,
+ InsecureSkipVerify: cfg.TLS.InsecureSkipVerify,
+ }
+}
+
+func getConsulConfig(cfg Config) (*consul.Config, error) {
+ scheme := "http"
+ transport := cleanhttp.DefaultPooledTransport()
+
+ config := &consul.Config{
Address: cfg.Host,
Token: cfg.ACLToken.Value,
- Scheme: "http",
- HttpClient: &http.Client{
- Transport: cleanhttp.DefaultPooledTransport(),
- // See https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/
- Timeout: cfg.HTTPClientTimeout,
- },
- })
+ }
+
+ if cfg.EnableTLS {
+ tlsConfig := cfg.GetTLS()
+ tlsClientConfig, err := consul.SetupTLSConfig(tlsConfig)
+ if err != nil {
+ return nil, err
+ }
+ transport.TLSClientConfig = tlsClientConfig
+ scheme = "https"
+ config.TLSConfig = *tlsConfig
+ }
+
+ config.Scheme = scheme
+ config.HttpClient = &http.Client{
+ Transport: transport,
+ // See https://blog.cloudflare.com/the-complete-guide-to-golang-net-http-timeouts/
+ Timeout: cfg.HTTPClientTimeout,
+ }
+
+ return config, nil
+}
+
+// NewClient returns a new Client.
+func NewClient(cfg Config, codec codec.Codec, logger log.Logger, registerer prometheus.Registerer) (*Client, error) {
+ config, err := getConsulConfig(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := consul.NewClient(config)
if err != nil {
return nil, err
}
@@ -108,7 +146,7 @@ func NewClient(cfg Config, codec codec.Codec, logger log.Logger, registerer prom
}
// Put is mostly here for testing.
-func (c *Client) Put(ctx context.Context, key string, value interface{}) error {
+func (c *Client) Put(ctx context.Context, key string, value any) error {
bytes, err := c.codec.Encode(value)
if err != nil {
return err
@@ -125,13 +163,13 @@ func (c *Client) Put(ctx context.Context, key string, value interface{}) error {
// CAS atomically modifies a value in a callback.
// If value doesn't exist you'll get nil as an argument to your callback.
-func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
return instrument.CollectedRequest(ctx, "CAS loop", c.consulMetrics.consulRequestDuration, instrument.ErrorCode, func(ctx context.Context) error {
return c.cas(ctx, key, f)
})
}
-func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (c *Client) cas(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
retries := c.cfg.MaxCasRetries
if retries == 0 {
retries = 10
@@ -155,7 +193,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou
level.Error(c.logger).Log("msg", "error getting key", "key", key, "err", err)
continue
}
- var intermediate interface{}
+ var intermediate any
if kvp != nil {
out, err := c.codec.Decode(kvp.Value)
if err != nil {
@@ -209,7 +247,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou
// value. To construct the deserialised value, a factory function should be
// supplied which generates an empty struct for WatchKey to deserialise
// into. This function blocks until the context is cancelled or f returns false.
-func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) {
var (
backoff = backoff.New(ctx, backoffConfig)
index = uint64(0)
@@ -270,7 +308,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b
// WatchPrefix will watch a given prefix in Consul for new keys and changes to existing keys under that prefix.
// When the value under said key changes, the f callback is called with the deserialised value.
// Values in Consul are assumed to be JSON. This function blocks until the context is cancelled.
-func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
var (
backoff = backoff.New(ctx, backoffConfig)
index = uint64(0)
@@ -349,7 +387,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) {
}
// Get implements kv.Get.
-func (c *Client) Get(ctx context.Context, key string) (interface{}, error) {
+func (c *Client) Get(ctx context.Context, key string) (any, error) {
options := &consul.QueryOptions{
AllowStale: !c.cfg.ConsistentReads,
RequireConsistent: c.cfg.ConsistentReads,
@@ -396,9 +434,6 @@ func (c *Client) createRateLimiter() *rate.Limiter {
// burst is ignored when limit = rate.Inf
return rate.NewLimiter(rate.Inf, 0)
}
- burst := c.cfg.WatchKeyBurstSize
- if burst < 1 {
- burst = 1
- }
+ burst := max(c.cfg.WatchKeyBurstSize, 1)
return rate.NewLimiter(rate.Limit(c.cfg.WatchKeyRateLimit), burst)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/client.go
index 75fef517b..9c3e45b65 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/client.go
@@ -26,6 +26,7 @@ type Config struct {
TTL time.Duration `yaml:"ttl"`
PullerSyncTime time.Duration `yaml:"puller_sync_time"`
MaxCasRetries int `yaml:"max_cas_retries"`
+ Timeout time.Duration `yaml:"timeout"`
}
type Client struct {
@@ -53,6 +54,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet, prefix string) {
f.DurationVar(&cfg.TTL, prefix+"dynamodb.ttl-time", 0, "Time to expire items on dynamodb.")
f.DurationVar(&cfg.PullerSyncTime, prefix+"dynamodb.puller-sync-time", 60*time.Second, "Time to refresh local ring with information on dynamodb.")
f.IntVar(&cfg.MaxCasRetries, prefix+"dynamodb.max-cas-retries", maxCasRetries, "Maximum number of retries for DDB KV CAS.")
+ f.DurationVar(&cfg.Timeout, prefix+"dynamodb.timeout", 2*time.Minute, "Timeout of dynamoDbClient requests. Default is 2m.")
}
func NewClient(cfg Config, cc codec.Codec, logger log.Logger, registerer prometheus.Registerer) (*Client, error) {
@@ -69,8 +71,13 @@ func NewClient(cfg Config, cc codec.Codec, logger log.Logger, registerer prometh
MaxRetries: cfg.MaxCasRetries,
}
+ var kv dynamoDbClient
+ kv = dynamodbInstrumentation{kv: dynamoDB, ddbMetrics: ddbMetrics}
+ if cfg.Timeout > 0 {
+ kv = newDynamodbKVWithTimeout(kv, cfg.Timeout)
+ }
c := &Client{
- kv: dynamodbInstrumentation{kv: dynamoDB, ddbMetrics: ddbMetrics},
+ kv: kv,
codec: cc,
logger: ddbLog(logger),
ddbMetrics: ddbMetrics,
@@ -91,7 +98,7 @@ func (c *Client) List(ctx context.Context, key string) ([]string, error) {
return resp, err
}
-func (c *Client) Get(ctx context.Context, key string) (interface{}, error) {
+func (c *Client) Get(ctx context.Context, key string) (any, error) {
resp, _, err := c.kv.Query(ctx, dynamodbKey{primaryKey: key}, false)
if err != nil {
level.Warn(c.logger).Log("msg", "error Get", "key", key, "err", err)
@@ -128,7 +135,7 @@ func (c *Client) Delete(ctx context.Context, key string) error {
return err
}
-func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
bo := backoff.New(ctx, c.backoffConfig)
for bo.Ongoing() {
c.ddbMetrics.dynamodbCasAttempts.Inc()
@@ -178,9 +185,16 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
continue
}
- putRequests := map[dynamodbKey][]byte{}
+ putRequests := map[dynamodbKey]dynamodbItem{}
for childKey, bytes := range buf {
- putRequests[dynamodbKey{primaryKey: key, sortKey: childKey}] = bytes
+ version := int64(0)
+ if ddbItem, ok := resp[childKey]; ok {
+ version = ddbItem.version
+ }
+ putRequests[dynamodbKey{primaryKey: key, sortKey: childKey}] = dynamodbItem{
+ data: bytes,
+ version: version,
+ }
}
deleteRequests := make([]dynamodbKey, 0, len(toDelete))
@@ -189,9 +203,13 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
}
if len(putRequests) > 0 || len(deleteRequests) > 0 {
- err = c.kv.Batch(ctx, putRequests, deleteRequests)
+ retry, err := c.kv.Batch(ctx, putRequests, deleteRequests)
if err != nil {
- return err
+ if !retry {
+ return err
+ }
+ bo.Wait()
+ continue
}
c.updateStaleData(key, r, time.Now().UTC())
return nil
@@ -199,6 +217,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
if len(putRequests) == 0 && len(deleteRequests) == 0 {
// no change detected, retry
+ level.Warn(c.logger).Log("msg", "no change detected in ring, retry CAS")
bo.Wait()
continue
}
@@ -210,7 +229,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
return err
}
-func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) {
bo := backoff.New(ctx, c.backoffConfig)
for bo.Ongoing() {
@@ -252,7 +271,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b
}
}
-func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
bo := backoff.New(ctx, c.backoffConfig)
for bo.Ongoing() {
@@ -265,8 +284,8 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string,
continue
}
- for key, bytes := range out {
- decoded, err := c.codec.Decode(bytes)
+ for key, ddbItem := range out {
+ decoded, err := c.codec.Decode(ddbItem.data)
if err != nil {
level.Error(c.logger).Log("msg", "error decoding key", "key", key, "err", err)
continue
@@ -285,8 +304,12 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string,
}
}
-func (c *Client) decodeMultikey(data map[string][]byte) (codec.MultiKey, error) {
- res, err := c.codec.DecodeMultiKey(data)
+func (c *Client) decodeMultikey(data map[string]dynamodbItem) (codec.MultiKey, error) {
+ multiKeyData := make(map[string][]byte, len(data))
+ for key, ddbItem := range data {
+ multiKeyData[key] = ddbItem.data
+ }
+ res, err := c.codec.DecodeMultiKey(multiKeyData)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/dynamodb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/dynamodb.go
index 1e783189a..9bc4e99e3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/dynamodb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/dynamodb.go
@@ -2,15 +2,16 @@ package dynamodb
import (
"context"
+ "errors"
"fmt"
"math"
"strconv"
"time"
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/session"
- "github.com/aws/aws-sdk-go/service/dynamodb"
- "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/config"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb"
+ "github.com/aws/aws-sdk-go-v2/service/dynamodb/types"
"github.com/go-kit/log"
)
@@ -27,24 +28,37 @@ type dynamodbKey struct {
type dynamoDbClient interface {
List(ctx context.Context, key dynamodbKey) ([]string, float64, error)
- Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error)
+ Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error)
Delete(ctx context.Context, key dynamodbKey) error
Put(ctx context.Context, key dynamodbKey, data []byte) error
- Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error
+ Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (bool, error)
+}
+
+type dynamoDBAPI interface {
+ dynamodb.QueryAPIClient
+ DeleteItem(ctx context.Context, params *dynamodb.DeleteItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.DeleteItemOutput, error)
+ PutItem(ctx context.Context, params *dynamodb.PutItemInput, optFns ...func(*dynamodb.Options)) (*dynamodb.PutItemOutput, error)
+ TransactWriteItems(ctx context.Context, params *dynamodb.TransactWriteItemsInput, optFns ...func(*dynamodb.Options)) (*dynamodb.TransactWriteItemsOutput, error)
}
type dynamodbKV struct {
- ddbClient dynamodbiface.DynamoDBAPI
+ ddbClient dynamoDBAPI
logger log.Logger
tableName *string
ttlValue time.Duration
}
+type dynamodbItem struct {
+ data []byte
+ version int64
+}
+
var (
primaryKey = "RingKey"
sortKey = "InstanceKey"
contentData = "Data"
timeToLive = "ttl"
+ version = "version"
)
func newDynamodbKV(cfg Config, logger log.Logger) (dynamodbKV, error) {
@@ -52,37 +66,37 @@ func newDynamodbKV(cfg Config, logger log.Logger) (dynamodbKV, error) {
return dynamodbKV{}, err
}
- sess, err := session.NewSession()
- if err != nil {
- return dynamodbKV{}, err
- }
+ awsConfig := []func(*config.LoadOptions) error{}
- awsCfg := aws.NewConfig()
if len(cfg.Region) > 0 {
- awsCfg = awsCfg.WithRegion(cfg.Region)
+ awsConfig = append(awsConfig, config.WithRegion(cfg.Region))
+ }
+
+ awsCfg, err := config.LoadDefaultConfig(
+ context.Background(),
+ awsConfig...,
+ )
+ if err != nil {
+ return dynamodbKV{}, err
}
- dynamoDB := dynamodb.New(sess, awsCfg)
+ dynamoDB := dynamodb.NewFromConfig(awsCfg)
- ddbKV := &dynamodbKV{
+ return dynamodbKV{
ddbClient: dynamoDB,
logger: logger,
tableName: aws.String(cfg.TableName),
ttlValue: cfg.TTL,
- }
-
- return *ddbKV, nil
+ }, nil
}
func validateConfigInput(cfg Config) error {
if len(cfg.TableName) < 3 {
return fmt.Errorf("invalid dynamodb table name: %s", cfg.TableName)
}
-
return nil
}
-// for testing
func (kv dynamodbKV) getTTL() time.Duration {
return kv.ttlValue
}
@@ -90,67 +104,90 @@ func (kv dynamodbKV) getTTL() time.Duration {
func (kv dynamodbKV) List(ctx context.Context, key dynamodbKey) ([]string, float64, error) {
var keys []string
var totalCapacity float64
+
input := &dynamodb.QueryInput{
TableName: kv.tableName,
- ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
- KeyConditions: map[string]*dynamodb.Condition{
+ ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
+ KeyConditions: map[string]types.Condition{
primaryKey: {
- ComparisonOperator: aws.String("EQ"),
- AttributeValueList: []*dynamodb.AttributeValue{
- {
- S: aws.String(key.primaryKey),
- },
+ ComparisonOperator: types.ComparisonOperatorEq,
+ AttributeValueList: []types.AttributeValue{
+ &types.AttributeValueMemberS{Value: key.primaryKey},
},
},
},
- AttributesToGet: []*string{aws.String(sortKey)},
+ AttributesToGet: []string{sortKey},
}
- err := kv.ddbClient.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool {
- totalCapacity += getCapacityUnits(output.ConsumedCapacity)
- for _, item := range output.Items {
- keys = append(keys, item[sortKey].String())
+ paginator := dynamodb.NewQueryPaginator(kv.ddbClient, input)
+
+ for paginator.HasMorePages() {
+ page, err := paginator.NextPage(ctx)
+ if err != nil {
+ return nil, totalCapacity, err
+ }
+ totalCapacity += getCapacityUnits(page.ConsumedCapacity)
+ for _, item := range page.Items {
+ if v, ok := item[sortKey].(*types.AttributeValueMemberS); ok {
+ keys = append(keys, v.Value)
+ }
}
- return true
- })
- if err != nil {
- return nil, totalCapacity, err
}
return keys, totalCapacity, nil
}
-func (kv dynamodbKV) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) {
- keys := make(map[string][]byte)
+func (kv dynamodbKV) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error) {
+ keys := make(map[string]dynamodbItem)
var totalCapacity float64
- co := dynamodb.ComparisonOperatorEq
+
+ co := types.ComparisonOperatorEq
if isPrefix {
- co = dynamodb.ComparisonOperatorBeginsWith
+ co = types.ComparisonOperatorBeginsWith
}
+
input := &dynamodb.QueryInput{
TableName: kv.tableName,
- ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
- KeyConditions: map[string]*dynamodb.Condition{
+ ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
+ KeyConditions: map[string]types.Condition{
primaryKey: {
- ComparisonOperator: aws.String(co),
- AttributeValueList: []*dynamodb.AttributeValue{
- {
- S: aws.String(key.primaryKey),
- },
+ ComparisonOperator: co,
+ AttributeValueList: []types.AttributeValue{
+ &types.AttributeValueMemberS{Value: key.primaryKey},
},
},
},
}
- err := kv.ddbClient.QueryPagesWithContext(ctx, input, func(output *dynamodb.QueryOutput, _ bool) bool {
- totalCapacity += getCapacityUnits(output.ConsumedCapacity)
- for _, item := range output.Items {
- keys[*item[sortKey].S] = item[contentData].B
+ paginator := dynamodb.NewQueryPaginator(kv.ddbClient, input)
+
+ for paginator.HasMorePages() {
+ page, err := paginator.NextPage(ctx)
+ if err != nil {
+ return nil, totalCapacity, err
+ }
+ totalCapacity += getCapacityUnits(page.ConsumedCapacity)
+
+ for _, item := range page.Items {
+ itemVersion := int64(0)
+ if v, ok := item[version].(*types.AttributeValueMemberN); ok {
+ parsedVersion, err := strconv.ParseInt(v.Value, 10, 0)
+ if err != nil {
+ kv.logger.Log("msg", "failed to parse item version", "version", v.Value, "err", err)
+ } else {
+ itemVersion = parsedVersion
+ }
+ }
+
+ if d, ok := item[contentData].(*types.AttributeValueMemberB); ok {
+ if s, ok := item[sortKey].(*types.AttributeValueMemberS); ok {
+ keys[s.Value] = dynamodbItem{
+ data: d.Value,
+ version: itemVersion,
+ }
+ }
+ }
}
- return true
- })
- if err != nil {
- return nil, totalCapacity, err
}
return keys, totalCapacity, nil
@@ -159,51 +196,50 @@ func (kv dynamodbKV) Query(ctx context.Context, key dynamodbKey, isPrefix bool)
func (kv dynamodbKV) Delete(ctx context.Context, key dynamodbKey) (float64, error) {
input := &dynamodb.DeleteItemInput{
TableName: kv.tableName,
- ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
+ ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
Key: generateItemKey(key),
}
- totalCapacity := float64(0)
- output, err := kv.ddbClient.DeleteItemWithContext(ctx, input)
- if err != nil {
- totalCapacity = getCapacityUnits(output.ConsumedCapacity)
- }
+ output, err := kv.ddbClient.DeleteItem(ctx, input)
+ totalCapacity := getCapacityUnits(output.ConsumedCapacity)
return totalCapacity, err
}
func (kv dynamodbKV) Put(ctx context.Context, key dynamodbKey, data []byte) (float64, error) {
input := &dynamodb.PutItemInput{
TableName: kv.tableName,
- ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
- Item: kv.generatePutItemRequest(key, data),
- }
- totalCapacity := float64(0)
- output, err := kv.ddbClient.PutItemWithContext(ctx, input)
- if err != nil {
- totalCapacity = getCapacityUnits(output.ConsumedCapacity)
+ ReturnConsumedCapacity: types.ReturnConsumedCapacityTotal,
+ Item: kv.generatePutItemRequest(key, dynamodbItem{data: data}),
}
+ output, err := kv.ddbClient.PutItem(ctx, input)
+ totalCapacity := getCapacityUnits(output.ConsumedCapacity)
return totalCapacity, err
}
-func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) (float64, error) {
+func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (float64, bool, error) {
totalCapacity := float64(0)
writeRequestSize := len(put) + len(delete)
if writeRequestSize == 0 {
- return totalCapacity, nil
+ return totalCapacity, false, nil
}
- writeRequestsSlices := make([][]*dynamodb.WriteRequest, int(math.Ceil(float64(writeRequestSize)/float64(DdbBatchSizeLimit))))
- for i := 0; i < len(writeRequestsSlices); i++ {
- writeRequestsSlices[i] = make([]*dynamodb.WriteRequest, 0, DdbBatchSizeLimit)
+ writeRequestsSlices := make([][]types.TransactWriteItem, int(math.Ceil(float64(writeRequestSize)/float64(DdbBatchSizeLimit))))
+ for i := range writeRequestsSlices {
+ writeRequestsSlices[i] = make([]types.TransactWriteItem, 0, DdbBatchSizeLimit)
}
currIdx := 0
- for key, data := range put {
- item := kv.generatePutItemRequest(key, data)
- writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], &dynamodb.WriteRequest{
- PutRequest: &dynamodb.PutRequest{
- Item: item,
+ for key, ddbItem := range put {
+ item := kv.generatePutItemRequest(key, ddbItem)
+ ddbPut := &types.Put{
+ TableName: kv.tableName,
+ Item: item,
+ ConditionExpression: aws.String("attribute_not_exists(version) OR version = :v"),
+ ExpressionAttributeValues: map[string]types.AttributeValue{
+ ":v": &types.AttributeValueMemberN{Value: strconv.FormatInt(ddbItem.version, 10)},
},
- })
+ }
+
+ writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], types.TransactWriteItem{Put: ddbPut})
if len(writeRequestsSlices[currIdx]) == DdbBatchSizeLimit {
currIdx++
}
@@ -211,70 +247,103 @@ func (kv dynamodbKV) Batch(ctx context.Context, put map[dynamodbKey][]byte, dele
for _, key := range delete {
item := generateItemKey(key)
- writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], &dynamodb.WriteRequest{
- DeleteRequest: &dynamodb.DeleteRequest{
- Key: item,
- },
- })
+ ddbDelete := &types.Delete{
+ TableName: kv.tableName,
+ Key: item,
+ }
+ writeRequestsSlices[currIdx] = append(writeRequestsSlices[currIdx], types.TransactWriteItem{Delete: ddbDelete})
if len(writeRequestsSlices[currIdx]) == DdbBatchSizeLimit {
currIdx++
}
}
for _, slice := range writeRequestsSlices {
- input := &dynamodb.BatchWriteItemInput{
- ReturnConsumedCapacity: aws.String(dynamodb.ReturnConsumedCapacityTotal),
- RequestItems: map[string][]*dynamodb.WriteRequest{
- *kv.tableName: slice,
- },
+ if len(slice) == 0 {
+ continue
}
-
- resp, err := kv.ddbClient.BatchWriteItemWithContext(ctx, input)
+ resp, err := kv.ddbClient.TransactWriteItems(ctx, &dynamodb.TransactWriteItemsInput{
+ TransactItems: slice,
+ })
if err != nil {
- return totalCapacity, err
+ var checkFailed *types.ConditionalCheckFailedException
+ isCheckFailed := errors.As(err, &checkFailed)
+ if isCheckFailed {
+ kv.logger.Log("msg", "conditional check failed on DynamoDB Batch", "err", err)
+ }
+ return totalCapacity, isCheckFailed, err
}
for _, consumedCapacity := range resp.ConsumedCapacity {
- totalCapacity += getCapacityUnits(consumedCapacity)
- }
-
- if resp.UnprocessedItems != nil && len(resp.UnprocessedItems) > 0 {
- return totalCapacity, fmt.Errorf("error processing batch request for %s requests", resp.UnprocessedItems)
+ totalCapacity += getCapacityUnits(&consumedCapacity)
}
}
- return totalCapacity, nil
+ return totalCapacity, false, nil
}
-func (kv dynamodbKV) generatePutItemRequest(key dynamodbKey, data []byte) map[string]*dynamodb.AttributeValue {
+func (kv dynamodbKV) generatePutItemRequest(key dynamodbKey, ddbItem dynamodbItem) map[string]types.AttributeValue {
item := generateItemKey(key)
- item[contentData] = &dynamodb.AttributeValue{
- B: data,
- }
+ item[contentData] = &types.AttributeValueMemberB{Value: ddbItem.data}
+ item[version] = &types.AttributeValueMemberN{Value: strconv.FormatInt(ddbItem.version+1, 10)}
+
if kv.getTTL() > 0 {
- item[timeToLive] = &dynamodb.AttributeValue{
- N: aws.String(strconv.FormatInt(time.Now().UTC().Add(kv.getTTL()).Unix(), 10)),
+ item[timeToLive] = &types.AttributeValueMemberN{
+ Value: strconv.FormatInt(time.Now().UTC().Add(kv.getTTL()).Unix(), 10),
}
}
return item
}
-func generateItemKey(key dynamodbKey) map[string]*dynamodb.AttributeValue {
- resp := map[string]*dynamodb.AttributeValue{
- primaryKey: {
- S: aws.String(key.primaryKey),
- },
+type dynamodbKVWithTimeout struct {
+ ddbClient dynamoDbClient
+ timeout time.Duration
+}
+
+func newDynamodbKVWithTimeout(client dynamoDbClient, timeout time.Duration) *dynamodbKVWithTimeout {
+ return &dynamodbKVWithTimeout{ddbClient: client, timeout: timeout}
+}
+
+func (d *dynamodbKVWithTimeout) List(ctx context.Context, key dynamodbKey) ([]string, float64, error) {
+ ctx, cancel := context.WithTimeout(ctx, d.timeout)
+ defer cancel()
+ return d.ddbClient.List(ctx, key)
+}
+
+func (d *dynamodbKVWithTimeout) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error) {
+ ctx, cancel := context.WithTimeout(ctx, d.timeout)
+ defer cancel()
+ return d.ddbClient.Query(ctx, key, isPrefix)
+}
+
+func (d *dynamodbKVWithTimeout) Delete(ctx context.Context, key dynamodbKey) error {
+ ctx, cancel := context.WithTimeout(ctx, d.timeout)
+ defer cancel()
+ return d.ddbClient.Delete(ctx, key)
+}
+
+func (d *dynamodbKVWithTimeout) Put(ctx context.Context, key dynamodbKey, data []byte) error {
+ ctx, cancel := context.WithTimeout(ctx, d.timeout)
+ defer cancel()
+ return d.ddbClient.Put(ctx, key, data)
+}
+
+func (d *dynamodbKVWithTimeout) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (bool, error) {
+ ctx, cancel := context.WithTimeout(ctx, d.timeout)
+ defer cancel()
+ return d.ddbClient.Batch(ctx, put, delete)
+}
+
+func generateItemKey(key dynamodbKey) map[string]types.AttributeValue {
+ resp := map[string]types.AttributeValue{
+ primaryKey: &types.AttributeValueMemberS{Value: key.primaryKey},
}
if len(key.sortKey) > 0 {
- resp[sortKey] = &dynamodb.AttributeValue{
- S: aws.String(key.sortKey),
- }
+ resp[sortKey] = &types.AttributeValueMemberS{Value: key.sortKey}
}
-
return resp
}
-func getCapacityUnits(cap *dynamodb.ConsumedCapacity) float64 {
+func getCapacityUnits(cap *types.ConsumedCapacity) float64 {
if cap != nil && cap.CapacityUnits != nil {
return *cap.CapacityUnits
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/metrics.go
index fc5e35a9e..1d0f051da 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/dynamodb/metrics.go
@@ -17,9 +17,10 @@ type dynamodbInstrumentation struct {
}
type dynamodbMetrics struct {
- dynamodbRequestDuration *instrument.HistogramCollector
- dynamodbUsageMetrics *prometheus.CounterVec
- dynamodbCasAttempts prometheus.Counter
+ dynamodbRequestDuration *instrument.HistogramCollector
+ dynamodbUsageMetrics *prometheus.CounterVec
+ dynamodbCasAttempts prometheus.Counter
+ dynamodbConditionalCheckFailures prometheus.Counter
}
func newDynamoDbMetrics(registerer prometheus.Registerer) *dynamodbMetrics {
@@ -39,10 +40,16 @@ func newDynamoDbMetrics(registerer prometheus.Registerer) *dynamodbMetrics {
Help: "DynamoDB KV Store Attempted CAS operations",
})
+ dynamodbConditionalCheckFailures := promauto.With(registerer).NewCounter(prometheus.CounterOpts{
+ Name: "dynamodb_kv_conditional_check_failed_total",
+ Help: "Total number of DynamoDB conditional check failures",
+ })
+
dynamodbMetrics := dynamodbMetrics{
- dynamodbRequestDuration: dynamodbRequestDurationCollector,
- dynamodbUsageMetrics: dynamodbUsageMetrics,
- dynamodbCasAttempts: dynamodbCasAttempts,
+ dynamodbRequestDuration: dynamodbRequestDurationCollector,
+ dynamodbUsageMetrics: dynamodbUsageMetrics,
+ dynamodbCasAttempts: dynamodbCasAttempts,
+ dynamodbConditionalCheckFailures: dynamodbConditionalCheckFailures,
}
return &dynamodbMetrics
}
@@ -59,8 +66,8 @@ func (d dynamodbInstrumentation) List(ctx context.Context, key dynamodbKey) ([]s
return resp, totalCapacity, err
}
-func (d dynamodbInstrumentation) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string][]byte, float64, error) {
- var resp map[string][]byte
+func (d dynamodbInstrumentation) Query(ctx context.Context, key dynamodbKey, isPrefix bool) (map[string]dynamodbItem, float64, error) {
+ var resp map[string]dynamodbItem
var totalCapacity float64
err := instrument.CollectedRequest(ctx, "Query", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error {
var err error
@@ -87,12 +94,19 @@ func (d dynamodbInstrumentation) Put(ctx context.Context, key dynamodbKey, data
})
}
-func (d dynamodbInstrumentation) Batch(ctx context.Context, put map[dynamodbKey][]byte, delete []dynamodbKey) error {
- return instrument.CollectedRequest(ctx, "Batch", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error {
- totalCapacity, err := d.kv.Batch(ctx, put, delete)
+func (d dynamodbInstrumentation) Batch(ctx context.Context, put map[dynamodbKey]dynamodbItem, delete []dynamodbKey) (bool, error) {
+ retry := false
+ err := instrument.CollectedRequest(ctx, "Batch", d.ddbMetrics.dynamodbRequestDuration, errorCode, func(ctx context.Context) error {
+ var err error
+ totalCapacity, shouldRetry, err := d.kv.Batch(ctx, put, delete)
+ retry = shouldRetry
+ if retry {
+ d.ddbMetrics.dynamodbConditionalCheckFailures.Inc()
+ }
d.ddbMetrics.dynamodbUsageMetrics.WithLabelValues("Batch").Add(totalCapacity)
return err
})
+ return retry, err
}
// errorCode converts an error into an error code string.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go
index ca7dcf050..1152bff5f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go
@@ -122,7 +122,7 @@ func New(cfg Config, codec codec.Codec, logger log.Logger) (*Client, error) {
}
// CAS implements kv.Client.
-func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
var revision int64
var lastErr error
@@ -137,7 +137,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
continue
}
- var intermediate interface{}
+ var intermediate any
if len(resp.Kvs) > 0 {
intermediate, err = c.codec.Decode(resp.Kvs[0].Value)
if err != nil {
@@ -195,7 +195,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
}
// WatchKey implements kv.Client.
-func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) {
backoff := backoff.New(ctx, backoff.Config{
MinBackoff: 1 * time.Second,
MaxBackoff: 1 * time.Minute,
@@ -236,7 +236,7 @@ outer:
}
// WatchPrefix implements kv.Client.
-func (c *Client) WatchPrefix(ctx context.Context, key string, f func(string, interface{}) bool) {
+func (c *Client) WatchPrefix(ctx context.Context, key string, f func(string, any) bool) {
backoff := backoff.New(ctx, backoff.Config{
MinBackoff: 1 * time.Second,
MaxBackoff: 1 * time.Minute,
@@ -298,7 +298,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) {
}
// Get implements kv.Client.
-func (c *Client) Get(ctx context.Context, key string) (interface{}, error) {
+func (c *Client) Get(ctx context.Context, key string) (any, error) {
opsCtx, cancel := c.opsContext(ctx)
defer cancel()
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go
index 6657b73a5..d567c2e5e 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go
@@ -2,6 +2,7 @@ package memberlist
import (
"fmt"
+ "slices"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
@@ -28,13 +29,7 @@ func (r ringBroadcast) Invalidates(old memberlist.Broadcast) bool {
// and this broadcast has resulted in a newer ring update, we can invalidate the old value
for _, oldName := range oldb.content {
- found := false
- for _, newName := range r.content {
- if oldName == newName {
- found = true
- break
- }
- }
+ found := slices.Contains(r.content, oldName)
if !found {
return false
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/dnsprovider.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/dnsprovider.go
index b51a5d055..2f98a91c9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/dnsprovider.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/dnsprovider.go
@@ -8,7 +8,7 @@ import (
type DNSProvider interface {
// Resolve stores a list of provided addresses or their DNS records if requested.
// Implementations may have specific ways of interpreting addresses.
- Resolve(ctx context.Context, addrs []string) error
+ Resolve(ctx context.Context, addrs []string, flushOld bool) error
// Addresses returns the latest addresses present in the DNSProvider.
Addresses() []string
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go
index c3350e584..7e0b1acb0 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go
@@ -224,7 +224,7 @@ func viewKey(w http.ResponseWriter, store map[string]valueDesc, key string, form
formatValue(w, store[key].value, format)
}
-func formatValue(w http.ResponseWriter, val interface{}, format string) {
+func formatValue(w http.ResponseWriter, val any, format string) {
w.WriteHeader(200)
w.Header().Add("content-type", "text/plain")
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go
index 59a828e48..206157c28 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go
@@ -60,7 +60,7 @@ func (c *Client) List(ctx context.Context, prefix string) ([]string, error) {
}
// Get is part of kv.Client interface.
-func (c *Client) Get(ctx context.Context, key string) (interface{}, error) {
+func (c *Client) Get(ctx context.Context, key string) (any, error) {
err := c.awaitKVRunningOrStopping(ctx)
if err != nil {
return nil, err
@@ -75,7 +75,7 @@ func (c *Client) Delete(ctx context.Context, key string) error {
}
// CAS is part of kv.Client interface
-func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (c *Client) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
err := c.awaitKVRunningOrStopping(ctx)
if err != nil {
return err
@@ -85,7 +85,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou
}
// WatchKey is part of kv.Client interface.
-func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (c *Client) WatchKey(ctx context.Context, key string, f func(any) bool) {
err := c.awaitKVRunningOrStopping(ctx)
if err != nil {
return
@@ -96,7 +96,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b
// WatchPrefix calls f whenever any value stored under prefix changes.
// Part of kv.Client interface.
-func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
err := c.awaitKVRunningOrStopping(ctx)
if err != nil {
return
@@ -596,7 +596,7 @@ func (m *KV) discoverMembers(ctx context.Context, members []string) []string {
}
}
- err := m.provider.Resolve(ctx, resolve)
+ err := m.provider.Resolve(ctx, resolve, true)
if err != nil {
level.Error(m.logger).Log("msg", "failed to resolve members", "addrs", strings.Join(resolve, ","))
}
@@ -658,13 +658,13 @@ func (m *KV) List(prefix string) []string {
// Get returns current value associated with given key.
// No communication with other nodes in the cluster is done here.
-func (m *KV) Get(key string, codec codec.Codec) (interface{}, error) {
+func (m *KV) Get(key string, codec codec.Codec) (any, error) {
val, _, err := m.get(key, codec)
return val, err
}
// Returns current value with removed tombstones.
-func (m *KV) get(key string, codec codec.Codec) (out interface{}, version uint, err error) {
+func (m *KV) get(key string, codec codec.Codec) (out any, version uint, err error) {
m.storeMu.Lock()
v := m.store[key].Clone()
m.storeMu.Unlock()
@@ -682,7 +682,7 @@ func (m *KV) get(key string, codec codec.Codec) (out interface{}, version uint,
// latest value. Notifications that arrive while 'f' is running are coalesced into one subsequent 'f' call.
//
// Watching ends when 'f' returns false, context is done, or this client is shut down.
-func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func(interface{}) bool) {
+func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func(any) bool) {
// keep one extra notification, to avoid missing notification if we're busy running the function
w := make(chan string, 1)
@@ -729,7 +729,7 @@ func (m *KV) WatchKey(ctx context.Context, key string, codec codec.Codec, f func
// some notifications may be lost.
//
// Watching ends when 'f' returns false, context is done, or this client is shut down.
-func (m *KV) WatchPrefix(ctx context.Context, prefix string, codec codec.Codec, f func(string, interface{}) bool) {
+func (m *KV) WatchPrefix(ctx context.Context, prefix string, codec codec.Codec, f func(string, any) bool) {
// we use bigger buffer here, since keys are interesting and we don't want to lose them.
w := make(chan string, 16)
@@ -828,7 +828,7 @@ func (m *KV) notifyWatchers(key string) {
// KV store, and change is broadcast to cluster peers. Merge function is called with CAS flag on, so that it can
// detect removals. If Merge doesn't result in any change (returns nil), then operation fails and is retried again.
// After too many failed retries, this method returns error.
-func (m *KV) CAS(ctx context.Context, key string, codec codec.Codec, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m *KV) CAS(ctx context.Context, key string, codec codec.Codec, f func(in any) (out any, retry bool, err error)) error {
var lastError error
outer:
@@ -885,7 +885,7 @@ outer:
// returns change, error (or nil, if CAS succeeded), and whether to retry or not.
// returns errNoChangeDetected if merge failed to detect change in f's output.
-func (m *KV) trySingleCas(key string, codec codec.Codec, f func(in interface{}) (out interface{}, retry bool, err error)) (Mergeable, uint, bool, error) {
+func (m *KV) trySingleCas(key string, codec codec.Codec, f func(in any) (out any, retry bool, err error)) (Mergeable, uint, bool, error) {
val, ver, err := m.get(key, codec)
if err != nil {
return nil, 0, false, fmt.Errorf("failed to get value: %v", err)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go
index 6ccb469b6..4574216b9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go
@@ -30,7 +30,7 @@ func newMemberlistLoggerAdapter(logger log.Logger, logTimestamp bool) io.Writer
func (a loggerAdapter) Write(p []byte) (int, error) {
result := subexps(p)
- keyvals := []interface{}{}
+ keyvals := []any{}
var timestamp string
if date, ok := result["date"]; ok && date != "" {
timestamp = date
@@ -71,7 +71,7 @@ func (a loggerAdapter) Write(p []byte) (int, error) {
if msg, ok := result["msg"]; ok {
keyvals = append(keyvals, "msg", msg)
}
- if err := a.Logger.Log(keyvals...); err != nil {
+ if err := a.Log(keyvals...); err != nil {
return 0, err
}
return len(p), nil
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go
index 4dfd23a11..759140de8 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go
@@ -3,9 +3,9 @@ package memberlist
import (
"time"
- armonmetrics "github.com/armon/go-metrics"
- armonprometheus "github.com/armon/go-metrics/prometheus"
"github.com/go-kit/log/level"
+ armonmetrics "github.com/hashicorp/go-metrics"
+ armonprometheus "github.com/hashicorp/go-metrics/prometheus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go
index 38ec3b59c..30ed8ff4a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go
@@ -71,8 +71,8 @@ func (m metrics) List(ctx context.Context, prefix string) ([]string, error) {
return result, err
}
-func (m metrics) Get(ctx context.Context, key string) (interface{}, error) {
- var result interface{}
+func (m metrics) Get(ctx context.Context, key string) (any, error) {
+ var result any
err := instrument.CollectedRequest(ctx, "GET", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
var err error
result, err = m.c.Get(ctx, key)
@@ -88,20 +88,20 @@ func (m metrics) Delete(ctx context.Context, key string) error {
return err
}
-func (m metrics) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m metrics) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
return instrument.CollectedRequest(ctx, "CAS", m.requestDuration, getCasErrorCode, func(ctx context.Context) error {
return m.c.CAS(ctx, key, f)
})
}
-func (m metrics) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (m metrics) WatchKey(ctx context.Context, key string, f func(any) bool) {
_ = instrument.CollectedRequest(ctx, "WatchKey", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
m.c.WatchKey(ctx, key, f)
return nil
})
}
-func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (m metrics) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
_ = instrument.CollectedRequest(ctx, "WatchPrefix", m.requestDuration, instrument.ErrorCode, func(ctx context.Context) error {
m.c.WatchPrefix(ctx, prefix, f)
return nil
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go
index cbe23106a..f889be60d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go
@@ -21,7 +21,7 @@ func (m mockClient) List(ctx context.Context, prefix string) ([]string, error) {
return []string{}, nil
}
-func (m mockClient) Get(ctx context.Context, key string) (interface{}, error) {
+func (m mockClient) Get(ctx context.Context, key string) (any, error) {
return "", nil
}
@@ -29,14 +29,14 @@ func (m mockClient) Delete(ctx context.Context, key string) error {
return nil
}
-func (m mockClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m mockClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
return nil
}
-func (m mockClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (m mockClient) WatchKey(ctx context.Context, key string, f func(any) bool) {
}
-func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (m mockClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
}
func (m mockClient) LastUpdateTime(key string) time.Time {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go
index e4ac994d7..98c2a04b6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go
@@ -290,7 +290,7 @@ func (m *MultiClient) List(ctx context.Context, prefix string) ([]string, error)
}
// Get is a part of kv.Client interface.
-func (m *MultiClient) Get(ctx context.Context, key string) (interface{}, error) {
+func (m *MultiClient) Get(ctx context.Context, key string) (any, error) {
_, kv := m.getPrimaryClient()
return kv.client.Get(ctx, key)
}
@@ -302,11 +302,11 @@ func (m *MultiClient) Delete(ctx context.Context, key string) error {
}
// CAS is a part of kv.Client interface.
-func (m *MultiClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (m *MultiClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
_, kv := m.getPrimaryClient()
- updatedValue := interface{}(nil)
- err := kv.client.CAS(ctx, key, func(in interface{}) (interface{}, bool, error) {
+ updatedValue := any(nil)
+ err := kv.client.CAS(ctx, key, func(in any) (any, bool, error) {
out, retry, err := f(in)
updatedValue = out
return out, retry, err
@@ -320,7 +320,7 @@ func (m *MultiClient) CAS(ctx context.Context, key string, f func(in interface{}
}
// WatchKey is a part of kv.Client interface.
-func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(any) bool) {
_ = m.runWithPrimaryClient(ctx, func(newCtx context.Context, primary kvclient) error {
primary.client.WatchKey(newCtx, key, f)
return newCtx.Err()
@@ -328,7 +328,7 @@ func (m *MultiClient) WatchKey(ctx context.Context, key string, f func(interface
}
// WatchPrefix is a part of kv.Client interface.
-func (m *MultiClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
+func (m *MultiClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
_ = m.runWithPrimaryClient(ctx, func(newCtx context.Context, primary kvclient) error {
primary.client.WatchPrefix(newCtx, prefix, f)
return newCtx.Err()
@@ -340,7 +340,7 @@ func (m *MultiClient) LastUpdateTime(key string) time.Time {
return kv.client.LastUpdateTime(key)
}
-func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, key string, newValue interface{}) {
+func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, key string, newValue any) {
if m.mirrorTimeout > 0 {
var cfn context.CancelFunc
ctx, cfn = context.WithTimeout(ctx, m.mirrorTimeout)
@@ -354,7 +354,7 @@ func (m *MultiClient) writeToSecondary(ctx context.Context, primary kvclient, ke
}
m.mirrorWritesCounter.Inc()
- err := kvc.client.CAS(ctx, key, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := kvc.client.CAS(ctx, key, func(in any) (out any, retry bool, err error) {
// try once
return newValue, false, nil
})
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go
index aba9b7a09..d9406b4ff 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/prefix.go
@@ -37,24 +37,24 @@ func (c *prefixedKVClient) List(ctx context.Context, prefix string) ([]string, e
// CAS atomically modifies a value in a callback. If the value doesn't exist,
// you'll get 'nil' as an argument to your callback.
-func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error {
+func (c *prefixedKVClient) CAS(ctx context.Context, key string, f func(in any) (out any, retry bool, err error)) error {
return c.client.CAS(ctx, c.prefix+key, f)
}
// WatchKey watches a key.
-func (c *prefixedKVClient) WatchKey(ctx context.Context, key string, f func(interface{}) bool) {
+func (c *prefixedKVClient) WatchKey(ctx context.Context, key string, f func(any) bool) {
c.client.WatchKey(ctx, c.prefix+key, f)
}
// WatchPrefix watches a prefix. For a prefix client it appends the prefix argument to the clients prefix.
-func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f func(string, interface{}) bool) {
- c.client.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i interface{}) bool {
+func (c *prefixedKVClient) WatchPrefix(ctx context.Context, prefix string, f func(string, any) bool) {
+ c.client.WatchPrefix(ctx, fmt.Sprintf("%s%s", c.prefix, prefix), func(k string, i any) bool {
return f(strings.TrimPrefix(k, c.prefix), i)
})
}
// Get looks up a given object from its key.
-func (c *prefixedKVClient) Get(ctx context.Context, key string) (interface{}, error) {
+func (c *prefixedKVClient) Get(ctx context.Context, key string) (any, error) {
return c.client.Get(ctx, c.prefix+key)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
index d4f1e5735..6038de227 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go
@@ -27,6 +27,17 @@ var (
errInvalidTokensGeneratorStrategy = errors.New("invalid token generator strategy")
)
+type LifecyclerDelegate interface {
+ // OnRingInstanceHeartbeat is called while the instance is updating its heartbeat
+ // in the ring.
+ OnRingInstanceHeartbeat(lifecycler *Lifecycler, ringDesc *Desc)
+}
+
+type DefaultLifecyclerDelegate struct{}
+
+func (d DefaultLifecyclerDelegate) OnRingInstanceHeartbeat(lifecycler *Lifecycler, ringDesc *Desc) {
+}
+
// LifecyclerConfig is the config to build a Lifecycler.
type LifecyclerConfig struct {
RingConfig Config `yaml:"ring"`
@@ -108,6 +119,7 @@ type Lifecycler struct {
cfg LifecyclerConfig
flushTransferer FlushTransferer
KVStore kv.Client
+ delegate LifecyclerDelegate
actorChan chan func()
autojoinChan chan struct{}
@@ -130,7 +142,7 @@ type Lifecycler struct {
// goes away and comes back empty. The state changes during lifecycle of instance.
stateMtx sync.RWMutex
state InstanceState
- tokens Tokens
+ tokenFile *TokenFile
registeredAt time.Time
// Controls the ready-reporting
@@ -150,6 +162,22 @@ type Lifecycler struct {
tg TokenGenerator
}
+func NewLifecyclerWithDelegate(
+ cfg LifecyclerConfig,
+ flushTransferer FlushTransferer,
+ ringName, ringKey string,
+ autoJoinOnStartup, flushOnShutdown bool,
+ logger log.Logger,
+ reg prometheus.Registerer,
+ delegate LifecyclerDelegate,
+) (*Lifecycler, error) {
+ l, err := NewLifecycler(cfg, flushTransferer, ringName, ringKey, autoJoinOnStartup, flushOnShutdown, logger, reg)
+ if l != nil {
+ l.delegate = delegate
+ }
+ return l, err
+}
+
// NewLifecycler creates new Lifecycler. It must be started via StartAsync.
func NewLifecycler(
cfg LifecyclerConfig,
@@ -205,9 +233,11 @@ func NewLifecycler(
actorChan: make(chan func()),
autojoinChan: make(chan struct{}, 1),
state: PENDING,
+ tokenFile: &TokenFile{PreviousState: ACTIVE},
lifecyclerMetrics: NewLifecyclerMetrics(ringName, reg),
logger: logger,
tg: tg,
+ delegate: &DefaultLifecyclerDelegate{},
}
l.lifecyclerMetrics.tokensToOwn.Set(float64(cfg.NumTokens))
@@ -301,6 +331,7 @@ func (i *Lifecycler) GetState() InstanceState {
func (i *Lifecycler) setState(state InstanceState) {
i.stateMtx.Lock()
defer i.stateMtx.Unlock()
+ level.Info(i.logger).Log("msg", "set state", "old_state", i.state, "new_state", state)
i.state = state
}
@@ -334,7 +365,7 @@ func (i *Lifecycler) ChangeState(ctx context.Context, state InstanceState) error
func (i *Lifecycler) getTokens() Tokens {
i.stateMtx.RLock()
defer i.stateMtx.RUnlock()
- return i.tokens
+ return i.tokenFile.Tokens
}
func (i *Lifecycler) setTokens(tokens Tokens) {
@@ -343,14 +374,54 @@ func (i *Lifecycler) setTokens(tokens Tokens) {
i.stateMtx.Lock()
defer i.stateMtx.Unlock()
- i.tokens = tokens
+ i.tokenFile.Tokens = tokens
if i.cfg.TokensFilePath != "" {
- if err := i.tokens.StoreToFile(i.cfg.TokensFilePath); err != nil {
+ if err := i.tokenFile.StoreToFile(i.cfg.TokensFilePath); err != nil {
level.Error(i.logger).Log("msg", "error storing tokens to disk", "path", i.cfg.TokensFilePath, "err", err)
}
}
}
+func (i *Lifecycler) getPreviousState() InstanceState {
+ i.stateMtx.RLock()
+ defer i.stateMtx.RUnlock()
+ return i.tokenFile.PreviousState
+}
+
+func (i *Lifecycler) setPreviousState(state InstanceState) {
+ i.stateMtx.Lock()
+ defer i.stateMtx.Unlock()
+
+ if !(state == ACTIVE || state == READONLY) { //nolint:staticcheck
+ level.Error(i.logger).Log("msg", "cannot store unsupported state to disk", "new_state", state, "old_state", i.tokenFile.PreviousState)
+ return
+ }
+
+ i.tokenFile.PreviousState = state
+ if i.cfg.TokensFilePath != "" {
+ if err := i.tokenFile.StoreToFile(i.cfg.TokensFilePath); err != nil {
+ level.Error(i.logger).Log("msg", "error storing state to disk", "path", i.cfg.TokensFilePath, "err", err)
+ } else {
+ level.Info(i.logger).Log("msg", "saved state to disk", "state", state, "path", i.cfg.TokensFilePath)
+ }
+ }
+}
+
+func (i *Lifecycler) loadTokenFile() (*TokenFile, error) {
+
+ t, err := LoadTokenFile(i.cfg.TokensFilePath)
+ if err != nil {
+ return nil, err
+ }
+
+ i.stateMtx.Lock()
+ defer i.stateMtx.Unlock()
+
+ i.tokenFile = t
+ level.Info(i.logger).Log("msg", "loaded token file", "state", i.tokenFile.PreviousState, "num_tokens", len(i.tokenFile.Tokens), "path", i.cfg.TokensFilePath)
+ return i.tokenFile, nil
+}
+
func (i *Lifecycler) getRegisteredAt() time.Time {
i.stateMtx.RLock()
defer i.stateMtx.RUnlock()
@@ -375,10 +446,10 @@ func (i *Lifecycler) ClaimTokensFor(ctx context.Context, ingesterID string) erro
fn := func() {
var tokens Tokens
- claimTokens := func(in interface{}) (out interface{}, retry bool, err error) {
+ claimTokens := func(in any) (out any, retry bool, err error) {
ringDesc, ok := in.(*Desc)
if !ok || ringDesc == nil {
- return nil, false, fmt.Errorf("Cannot claim tokens in an empty ring")
+ return nil, false, fmt.Errorf("cannot claim tokens in an empty ring")
}
tokens = ringDesc.ClaimTokens(ingesterID, i.ID)
@@ -449,7 +520,8 @@ func (i *Lifecycler) loop(ctx context.Context) error {
joined := false
// First, see if we exist in the cluster, update our state to match if we do,
// and add ourselves (without tokens) if we don't.
- if err := i.initRing(context.Background()); err != nil {
+ addedInRing, err := i.initRing(context.Background())
+ if err != nil {
return errors.Wrapf(err, "failed to join the ring %s", i.RingName)
}
@@ -462,18 +534,23 @@ func (i *Lifecycler) loop(ctx context.Context) error {
}
var heartbeatTickerChan <-chan time.Time
- if uint64(i.cfg.HeartbeatPeriod) > 0 {
- heartbeatTicker := time.NewTicker(i.cfg.HeartbeatPeriod)
- heartbeatTicker.Stop()
- // We are jittering for at least half of the time and max the time of the heartbeat.
- // If we jitter too soon, we can have problems of concurrency with autoJoin leaving the instance on ACTIVE without tokens
- time.AfterFunc(time.Duration(uint64(i.cfg.HeartbeatPeriod/2)+uint64(mathrand.Int63())%uint64(i.cfg.HeartbeatPeriod/2)), func() {
- i.heartbeat()
- heartbeatTicker.Reset(i.cfg.HeartbeatPeriod)
- })
- defer heartbeatTicker.Stop()
-
- heartbeatTickerChan = heartbeatTicker.C
+ startHeartbeat := func() {
+ if uint64(i.cfg.HeartbeatPeriod) > 0 {
+ heartbeatTicker := time.NewTicker(i.cfg.HeartbeatPeriod)
+ heartbeatTicker.Stop()
+ // We are jittering for at least half of the time and max the time of the heartbeat.
+ // If we jitter too soon, we can have problems of concurrency with autoJoin leaving the instance on ACTIVE without tokens
+ time.AfterFunc(time.Duration(uint64(i.cfg.HeartbeatPeriod/2)+uint64(mathrand.Int63())%uint64(i.cfg.HeartbeatPeriod/2)), func() {
+ i.heartbeat(ctx)
+ heartbeatTicker.Reset(i.cfg.HeartbeatPeriod)
+ })
+ defer heartbeatTicker.Stop()
+
+ heartbeatTickerChan = heartbeatTicker.C
+ }
+ }
+ if addedInRing {
+ startHeartbeat()
}
for {
@@ -494,17 +571,21 @@ func (i *Lifecycler) loop(ctx context.Context) error {
if i.cfg.ObservePeriod > 0 {
// let's observe the ring. By using JOINING state, this ingester will be ignored by LEAVING
// ingesters, but we also signal that it is not fully functional yet.
- if err := i.autoJoin(context.Background(), JOINING); err != nil {
+ if err := i.autoJoin(context.Background(), JOINING, addedInRing); err != nil {
return errors.Wrapf(err, "failed to pick tokens in the KV store, ring: %s", i.RingName)
}
level.Info(i.logger).Log("msg", "observing tokens before going ACTIVE", "ring", i.RingName)
observeChan = time.After(i.cfg.ObservePeriod)
} else {
- if err := i.autoJoin(context.Background(), ACTIVE); err != nil {
- return errors.Wrapf(err, "failed to pick tokens in the KV store, ring: %s", i.RingName)
+ if err := i.autoJoin(context.Background(), i.getPreviousState(), addedInRing); err != nil {
+ return errors.Wrapf(err, "failed to pick tokens in the KV store, ring: %s, state: %s", i.RingName, i.getPreviousState())
}
}
+
+ if !addedInRing {
+ startHeartbeat()
+ }
}
case <-observeChan:
@@ -519,9 +600,13 @@ func (i *Lifecycler) loop(ctx context.Context) error {
if i.verifyTokens(context.Background()) {
level.Info(i.logger).Log("msg", "token verification successful", "ring", i.RingName)
- err := i.changeState(context.Background(), ACTIVE)
+ err := i.changeState(context.Background(), i.getPreviousState())
if err != nil {
- level.Error(i.logger).Log("msg", "failed to set state to ACTIVE", "ring", i.RingName, "err", err)
+ level.Error(i.logger).Log("msg", "failed to set state", "ring", i.RingName, "state", i.getPreviousState(), "err", err)
+ }
+
+ if !addedInRing {
+ startHeartbeat()
}
} else {
level.Info(i.logger).Log("msg", "token verification failed, observing", "ring", i.RingName)
@@ -530,7 +615,7 @@ func (i *Lifecycler) loop(ctx context.Context) error {
}
case <-heartbeatTickerChan:
- i.heartbeat()
+ i.heartbeat(ctx)
case f := <-i.actorChan:
f()
@@ -541,9 +626,11 @@ func (i *Lifecycler) loop(ctx context.Context) error {
}
}
-func (i *Lifecycler) heartbeat() {
+func (i *Lifecycler) heartbeat(ctx context.Context) {
i.lifecyclerMetrics.consulHeartbeats.Inc()
- if err := i.updateConsul(context.Background()); err != nil {
+ ctx, cancel := context.WithTimeout(ctx, i.cfg.HeartbeatPeriod)
+ defer cancel()
+ if err := i.updateConsul(ctx); err != nil {
level.Error(i.logger).Log("msg", "failed to write to the KV store, sleeping", "ring", i.RingName, "err", err)
}
}
@@ -562,10 +649,21 @@ func (i *Lifecycler) stopping(runningError error) error {
heartbeatTickerStop, heartbeatTickerChan := newDisableableTicker(i.cfg.HeartbeatPeriod)
defer heartbeatTickerStop()
- // Mark ourselved as Leaving so no more samples are send to us.
- err := i.changeState(context.Background(), LEAVING)
- if err != nil {
- level.Error(i.logger).Log("msg", "failed to set state to LEAVING", "ring", i.RingName, "err", err)
+ // save current state into file
+ if i.cfg.TokensFilePath != "" {
+ currentState := i.GetState()
+ i.setPreviousState(currentState)
+ }
+
+ // We dont need to mark us as leaving if READONLY. There is not request sent to us.
+ // Also important to avoid this change so we dont have resharding(for querier) happen when READONLY restart as we extended shard on READONLY but not on LEAVING
+ // Query also keeps calling pods on LEAVING or JOINING not causing any difference if left on READONLY
+ if i.GetState() != READONLY {
+ // Mark ourselved as Leaving so no more samples are send to us.
+ err := i.changeState(context.Background(), LEAVING)
+ if err != nil {
+ level.Error(i.logger).Log("msg", "failed to set state to LEAVING", "ring", i.RingName, "err", err)
+ }
}
// Do the transferring / flushing on a background goroutine so we can continue
@@ -603,23 +701,28 @@ heartbeatLoop:
// initRing is the first thing we do when we start. It:
// - add an ingester entry to the ring
// - copies out our state and tokens if they exist
-func (i *Lifecycler) initRing(ctx context.Context) error {
+func (i *Lifecycler) initRing(ctx context.Context) (bool, error) {
var (
ringDesc *Desc
tokensFromFile Tokens
err error
)
+ addedInRing := true
if i.cfg.TokensFilePath != "" {
- tokensFromFile, err = LoadTokensFromFile(i.cfg.TokensFilePath)
+ tokenFile, err := i.loadTokenFile()
if err != nil && !os.IsNotExist(err) {
- level.Error(i.logger).Log("msg", "error loading tokens from file", "err", err)
+ level.Error(i.logger).Log("msg", "error loading tokens and previous state from file", "err", err)
+ }
+
+ if tokenFile != nil {
+ tokensFromFile = tokenFile.Tokens
}
} else {
level.Info(i.logger).Log("msg", "not loading tokens from file, tokens file path is empty")
}
- err = i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ err = i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
if in == nil {
ringDesc = NewDesc()
} else {
@@ -637,11 +740,16 @@ func (i *Lifecycler) initRing(ctx context.Context) error {
if len(tokensFromFile) > 0 {
level.Info(i.logger).Log("msg", "adding tokens from file", "num_tokens", len(tokensFromFile))
if len(tokensFromFile) >= i.cfg.NumTokens && i.autoJoinOnStartup {
- i.setState(ACTIVE)
+ i.setState(i.getPreviousState())
+ state := i.GetState()
+ ringDesc.AddIngester(i.ID, i.Addr, i.Zone, tokensFromFile, state, registeredAt)
+ level.Info(i.logger).Log("msg", "auto join on startup, adding with token and state", "ring", i.RingName, "state", state)
+ return ringDesc, true, nil
}
- ringDesc.AddIngester(i.ID, i.Addr, i.Zone, tokensFromFile, i.GetState(), registeredAt)
i.setTokens(tokensFromFile)
- return ringDesc, true, nil
+ // Do not return ring to CAS call since instance has not been added to ring yet.
+ addedInRing = false
+ return nil, true, nil
}
// Either we are a new ingester, or consul must have restarted
@@ -667,11 +775,11 @@ func (i *Lifecycler) initRing(ctx context.Context) error {
// If the ingester failed to clean its ring entry up in can leave its state in LEAVING
// OR unregister_on_shutdown=false
- // if autoJoinOnStartup, move it into ACTIVE to ensure the ingester joins the ring.
- // else set to PENDING
+ // if autoJoinOnStartup, move it into previous state based on token file (default: ACTIVE)
+ // to ensure the ingester joins the ring. else set to PENDING
if instanceDesc.State == LEAVING && len(instanceDesc.Tokens) != 0 {
if i.autoJoinOnStartup {
- instanceDesc.State = ACTIVE
+ instanceDesc.State = i.getPreviousState()
} else {
instanceDesc.State = PENDING
}
@@ -684,6 +792,9 @@ func (i *Lifecycler) initRing(ctx context.Context) error {
level.Info(i.logger).Log("msg", "existing entry found in ring", "state", i.GetState(), "tokens", len(tokens), "ring", i.RingName)
+ // Update the address if it has changed
+ instanceDesc.Addr = i.Addr
+
// Update the ring if the instance has been changed and the heartbeat is disabled.
// We dont need to update KV here when heartbeat is enabled as this info will eventually be update on KV
// on the next heartbeat
@@ -703,14 +814,14 @@ func (i *Lifecycler) initRing(ctx context.Context) error {
i.updateCounters(ringDesc)
}
- return err
+ return addedInRing, err
}
func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) {
if ratio > 1 {
ratio = 1
}
- err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
if in == nil {
return in, false, nil
}
@@ -726,7 +837,7 @@ func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) {
ringTokens, _ := ringDesc.TokensFor(i.ID)
// Removing random tokens
- for i := 0; i < tokensToBeRenewed; i++ {
+ for range tokensToBeRenewed {
if len(ringTokens) == 0 {
break
}
@@ -758,7 +869,7 @@ func (i *Lifecycler) RenewTokens(ratio float64, ctx context.Context) {
func (i *Lifecycler) verifyTokens(ctx context.Context) bool {
result := false
- err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
var ringDesc *Desc
if in == nil {
ringDesc = NewDesc()
@@ -809,7 +920,7 @@ func (i *Lifecycler) compareTokens(fromRing Tokens) bool {
return false
}
- for i := 0; i < len(tokens); i++ {
+ for i := range tokens {
if tokens[i] != fromRing[i] {
return false
}
@@ -818,10 +929,10 @@ func (i *Lifecycler) compareTokens(fromRing Tokens) bool {
}
// autoJoin selects random tokens & moves state to targetState
-func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState) error {
+func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState, alreadyInRing bool) error {
var ringDesc *Desc
- err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
if in == nil {
ringDesc = NewDesc()
} else {
@@ -833,11 +944,16 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState) er
// At this point, we should not have any tokens, and we should be in PENDING state.
// Need to make sure we didn't change the num of tokens configured
myTokens, _ := ringDesc.TokensFor(i.ID)
+ if !alreadyInRing {
+ myTokens = i.getTokens()
+ }
needTokens := i.cfg.NumTokens - len(myTokens)
if needTokens == 0 && myTokens.Equals(i.getTokens()) {
// Tokens have been verified. No need to change them.
- ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt())
+ state := i.GetState()
+ ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), state, i.getRegisteredAt())
+ level.Info(i.logger).Log("msg", "auto joined with existing tokens", "ring", i.RingName, "state", state)
return ringDesc, true, nil
}
@@ -851,7 +967,9 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState) er
sort.Sort(myTokens)
i.setTokens(myTokens)
- ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt())
+ state := i.GetState()
+ ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), state, i.getRegisteredAt())
+ level.Info(i.logger).Log("msg", "auto joined with new tokens", "ring", i.RingName, "state", state)
return ringDesc, true, nil
})
@@ -869,7 +987,7 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState InstanceState) er
func (i *Lifecycler) updateConsul(ctx context.Context) error {
var ringDesc *Desc
- err := i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ err := i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
if in == nil {
ringDesc = NewDesc()
} else {
@@ -889,6 +1007,7 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error {
instanceDesc.RegisteredTimestamp = i.getRegisteredAt().Unix()
ringDesc.Ingesters[i.ID] = instanceDesc
}
+ i.delegate.OnRingInstanceHeartbeat(i, ringDesc)
return ringDesc, true, nil
})
@@ -906,16 +1025,28 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error {
func (i *Lifecycler) changeState(ctx context.Context, state InstanceState) error {
currState := i.GetState()
// Only the following state transitions can be triggered externally
- if !((currState == PENDING && state == JOINING) || // triggered by TransferChunks at the beginning
- (currState == JOINING && state == PENDING) || // triggered by TransferChunks on failure
- (currState == JOINING && state == ACTIVE) || // triggered by TransferChunks on success
+ //nolint:staticcheck
+ if !((currState == PENDING && state == JOINING) ||
+ (currState == JOINING && state == PENDING) ||
+ (currState == JOINING && state == ACTIVE) ||
+ (currState == JOINING && state == READONLY) ||
(currState == PENDING && state == ACTIVE) || // triggered by autoJoin
- (currState == ACTIVE && state == LEAVING)) { // triggered by shutdown
- return fmt.Errorf("Changing instance state from %v -> %v is disallowed", currState, state)
+ (currState == PENDING && state == READONLY) || // triggered by autoJoin
+ (currState == ACTIVE && state == LEAVING) || // triggered by shutdown
+ (currState == ACTIVE && state == READONLY) || // triggered by ingester mode
+ (currState == READONLY && state == ACTIVE) || // triggered by ingester mode
+ (currState == READONLY && state == LEAVING)) { // triggered by shutdown
+ return fmt.Errorf("changing instance state from %v -> %v is disallowed", currState, state)
}
level.Info(i.logger).Log("msg", "changing instance state from", "old_state", currState, "new_state", state, "ring", i.RingName)
i.setState(state)
+
+	// The instance is rejoining the ring. It should reset its registered time.
+ if currState == READONLY && state == ACTIVE {
+ registeredAt := time.Now()
+ i.setRegisteredAt(registeredAt)
+ }
return i.updateConsul(ctx)
}
@@ -990,7 +1121,7 @@ func (i *Lifecycler) processShutdown(ctx context.Context) {
func (i *Lifecycler) unregister(ctx context.Context) error {
level.Debug(i.logger).Log("msg", "unregistering instance from ring", "ring", i.RingName)
- return i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
+ return i.KVStore.CAS(ctx, i.RingKey, func(in any) (out any, retry bool, err error) {
if in == nil {
return nil, false, fmt.Errorf("found empty ring when trying to unregister")
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler_delegates.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler_delegates.go
new file mode 100644
index 000000000..854fd8544
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler_delegates.go
@@ -0,0 +1,28 @@
+package ring
+
+import (
+ "time"
+
+ "github.com/go-kit/log"
+)
+
+// LifecyclerAutoForgetDelegate automatically removes an instance from the ring if the last
+// heartbeat is older than a configured period.
+type LifecyclerAutoForgetDelegate struct {
+ next LifecyclerDelegate
+ logger log.Logger
+ forgetPeriod time.Duration
+}
+
+func NewLifecyclerAutoForgetDelegate(forgetPeriod time.Duration, next LifecyclerDelegate, logger log.Logger) *LifecyclerAutoForgetDelegate {
+ return &LifecyclerAutoForgetDelegate{
+ next: next,
+ logger: logger,
+ forgetPeriod: forgetPeriod,
+ }
+}
+
+func (d *LifecyclerAutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *Lifecycler, ringDesc *Desc) {
+ AutoForgetFromRing(ringDesc, d.forgetPeriod, d.logger)
+ d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/mock.go
new file mode 100644
index 000000000..c29af17de
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/mock.go
@@ -0,0 +1,79 @@
+package ring
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/stretchr/testify/mock"
+)
+
+type RingMock struct {
+ mock.Mock
+}
+
+func (r *RingMock) GetInstanceIdByAddr(addr string) (string, error) {
+ return "", nil
+}
+
+func (r *RingMock) Collect(ch chan<- prometheus.Metric) {}
+
+func (r *RingMock) Describe(ch chan<- *prometheus.Desc) {}
+
+func (r *RingMock) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts []string, bufZones map[string]int) (ReplicationSet, error) {
+ args := r.Called(key, op, bufDescs, bufHosts, bufZones)
+ return args.Get(0).(ReplicationSet), args.Error(1)
+}
+
+func (r *RingMock) GetAllHealthy(op Operation) (ReplicationSet, error) {
+ args := r.Called(op)
+ return args.Get(0).(ReplicationSet), args.Error(1)
+}
+
+func (r *RingMock) GetInstanceDescsForOperation(op Operation) (map[string]InstanceDesc, error) {
+ args := r.Called(op)
+ return args.Get(0).(map[string]InstanceDesc), args.Error(1)
+}
+
+func (r *RingMock) GetAllInstanceDescs(op Operation) ([]InstanceDesc, []InstanceDesc, error) {
+ args := r.Called(op)
+ return args.Get(0).([]InstanceDesc), make([]InstanceDesc, 0), args.Error(1)
+}
+
+func (r *RingMock) GetReplicationSetForOperation(op Operation) (ReplicationSet, error) {
+ args := r.Called(op)
+ return args.Get(0).(ReplicationSet), args.Error(1)
+}
+
+func (r *RingMock) ReplicationFactor() int {
+ return 0
+}
+
+func (r *RingMock) InstancesCount() int {
+ return 0
+}
+
+func (r *RingMock) ShuffleShard(identifier string, size int) ReadRing {
+ args := r.Called(identifier, size)
+ return args.Get(0).(ReadRing)
+}
+
+func (r *RingMock) ShuffleShardWithZoneStability(identifier string, size int) ReadRing {
+ args := r.Called(identifier, size)
+ return args.Get(0).(ReadRing)
+}
+
+func (r *RingMock) GetInstanceState(instanceID string) (InstanceState, error) {
+ args := r.Called(instanceID)
+ return args.Get(0).(InstanceState), args.Error(1)
+}
+
+func (r *RingMock) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) ReadRing {
+ args := r.Called(identifier, size, lookbackPeriod, now)
+ return args.Get(0).(ReadRing)
+}
+
+func (r *RingMock) HasInstance(instanceID string) bool {
+ return true
+}
+
+func (r *RingMock) CleanupShuffleShardCache(identifier string) {}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go
index 503ab63e6..82d0f9ccb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go
@@ -3,6 +3,7 @@ package ring
import (
"container/heap"
"fmt"
+ "maps"
"sort"
"sync"
"time"
@@ -153,7 +154,7 @@ func (i *InstanceDesc) IsReady(storageLastUpdated time.Time, heartbeatTimeout ti
if !i.IsHeartbeatHealthy(heartbeatTimeout, storageLastUpdated) {
return fmt.Errorf("instance %s past heartbeat timeout", i.Addr)
}
- if i.State != ACTIVE {
+ if i.State != ACTIVE && i.State != READONLY {
return fmt.Errorf("instance %s in state %v", i.Addr, i.State)
}
return nil
@@ -355,7 +356,7 @@ func tokensEqual(lhs, rhs []uint32) bool {
if len(lhs) != len(rhs) {
return false
}
- for i := 0; i < len(lhs); i++ {
+ for i := range lhs {
if lhs[i] != rhs[i] {
return false
}
@@ -363,7 +364,7 @@ func tokensEqual(lhs, rhs []uint32) bool {
return true
}
-var tokenMapPool = sync.Pool{New: func() interface{} { return make(map[uint32]struct{}) }}
+var tokenMapPool = sync.Pool{New: func() any { return make(map[uint32]struct{}) }}
func conflictingTokensExist(normalizedIngesters map[string]InstanceDesc) bool {
tokensMap := tokenMapPool.Get().(map[uint32]struct{})
@@ -472,14 +473,21 @@ func (d *Desc) RemoveTombstones(limit time.Time) (total, removed int) {
}
// Clone returns a deep copy of the ring state.
-func (d *Desc) Clone() interface{} {
+func (d *Desc) Clone() any {
return proto.Clone(d).(*Desc)
}
func (d *Desc) getTokensInfo() map[uint32]instanceInfo {
out := map[uint32]instanceInfo{}
- for instanceID, instance := range d.Ingesters {
+ instanceIDs := []string{}
+ for key := range d.Ingesters {
+ instanceIDs = append(instanceIDs, key)
+ }
+ sort.Strings(instanceIDs)
+
+ for _, instanceID := range instanceIDs {
+ instance := d.Ingesters[instanceID]
info := instanceInfo{
InstanceID: instanceID,
Zone: instance.Zone,
@@ -543,10 +551,11 @@ type CompareResult int
const (
Equal CompareResult = iota // Both rings contain same exact instances.
EqualButStatesAndTimestamps // Both rings contain the same instances with the same data except states and timestamps (may differ).
+ EqualButReadOnly // Both rings contain the same instances but Write ring can change due to ReadOnly update
Different // Rings have different set of instances, or their information don't match.
)
-// RingCompare compares this ring against another one and returns one of Equal, EqualButStatesAndTimestamps or Different.
+// RingCompare compares this ring against another one and returns one of Equal, EqualButStatesAndTimestamps, EqualButReadOnly or Different.
func (d *Desc) RingCompare(o *Desc) CompareResult {
if d == nil {
if o == nil || len(o.Ingesters) == 0 {
@@ -566,6 +575,7 @@ func (d *Desc) RingCompare(o *Desc) CompareResult {
}
equalStatesAndTimestamps := true
+ equalReadOnly := true
for name, ing := range d.Ingesters {
oing, ok := o.Ingesters[name]
@@ -600,17 +610,24 @@ func (d *Desc) RingCompare(o *Desc) CompareResult {
}
if ing.State != oing.State {
- equalStatesAndTimestamps = false
+ if ing.State == READONLY || oing.State == READONLY {
+ equalReadOnly = false
+ } else {
+ equalStatesAndTimestamps = false
+ }
}
}
- if equalStatesAndTimestamps {
- return Equal
+ if !equalReadOnly {
+ return EqualButReadOnly
}
- return EqualButStatesAndTimestamps
+ if !equalStatesAndTimestamps {
+ return EqualButStatesAndTimestamps
+ }
+ return Equal
}
-func GetOrCreateRingDesc(d interface{}) *Desc {
+func GetOrCreateRingDesc(d any) *Desc {
if d == nil {
return NewDesc()
}
@@ -633,11 +650,11 @@ func (h TokensHeap) Less(i, j int) bool {
return h[i][0] < h[j][0]
}
-func (h *TokensHeap) Push(x interface{}) {
+func (h *TokensHeap) Push(x any) {
*h = append(*h, x.([]uint32))
}
-func (h *TokensHeap) Pop() interface{} {
+func (h *TokensHeap) Pop() any {
old := *h
n := len(old)
x := old[n-1]
@@ -693,8 +710,8 @@ func MergeTokensByZone(zones map[string][][]uint32) map[string][]uint32 {
return out
}
-func (d *Desc) SplitByID() map[string]interface{} {
- out := make(map[string]interface{}, len(d.Ingesters))
+func (d *Desc) SplitByID() map[string]any {
+ out := make(map[string]any, len(d.Ingesters))
for key := range d.Ingesters {
in := d.Ingesters[key]
out[key] = &in
@@ -702,7 +719,7 @@ func (d *Desc) SplitByID() map[string]interface{} {
return out
}
-func (d *Desc) JoinIds(in map[string]interface{}) {
+func (d *Desc) JoinIds(in map[string]any) {
for key, value := range in {
d.Ingesters[key] = *(value.(*InstanceDesc))
}
@@ -712,7 +729,7 @@ func (d *Desc) GetItemFactory() proto.Message {
return &InstanceDesc{}
}
-func (d *Desc) FindDifference(o codec.MultiKey) (interface{}, []string, error) {
+func (d *Desc) FindDifference(o codec.MultiKey) (any, []string, error) {
out, ok := o.(*Desc)
if !ok {
// This method only deals with non-nil rings.
@@ -738,9 +755,7 @@ func (d *Desc) FindDifference(o codec.MultiKey) (interface{}, []string, error) {
//If existent data is empty
if d == nil {
- for key, value := range out.Ingesters {
- toUpdated.Ingesters[key] = value
- }
+ maps.Copy(toUpdated.Ingesters, out.Ingesters)
return toUpdated, toDelete, nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
index 0182207fd..c534d919b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go
@@ -2,8 +2,12 @@ package ring
import (
"context"
+ "fmt"
"sort"
"time"
+
+ "github.com/cortexproject/cortex/pkg/querier/partialdata"
+ "github.com/cortexproject/cortex/pkg/util/validation"
)
// ReplicationSet describes the instances to talk to for a given key, and how
@@ -23,9 +27,9 @@ type ReplicationSet struct {
// Do function f in parallel for all replicas in the set, erroring is we exceed
// MaxErrors and returning early otherwise. zoneResultsQuorum allows only include
// results from zones that already reach quorum to improve performance.
-func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, zoneResultsQuorum bool, f func(context.Context, *InstanceDesc) (interface{}, error)) ([]interface{}, error) {
+func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, zoneResultsQuorum bool, partialDataEnabled bool, f func(context.Context, *InstanceDesc) (any, error)) ([]any, error) {
type instanceResult struct {
- res interface{}
+ res any
err error
instance *InstanceDesc
}
@@ -68,12 +72,16 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, zoneResults
}(i, &r.Instances[i])
}
- for !tracker.succeeded() {
+ for !tracker.succeeded() && !tracker.finished() {
select {
case res := <-ch:
tracker.done(res.instance, res.res, res.err)
if res.err != nil {
- if tracker.failed() {
+ if tracker.failed() && (!partialDataEnabled || tracker.failedCompletely()) {
+ return nil, res.err
+ }
+
+ if validation.IsLimitError(res.err) {
return nil, res.err
}
@@ -88,6 +96,14 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, zoneResults
}
}
+ if partialDataEnabled && tracker.failed() {
+ finalErr := partialdata.ErrPartialData
+ for _, partialErr := range tracker.getErrors() {
+ finalErr = fmt.Errorf("%w: %w", finalErr, partialErr)
+ }
+ return tracker.getResults(), finalErr
+ }
+
return tracker.getResults(), nil
}
@@ -164,7 +180,7 @@ func hasReplicationSetChangedExcluding(before, after ReplicationSet, exclude fun
sort.Sort(ByAddr(beforeInstances))
sort.Sort(ByAddr(afterInstances))
- for i := 0; i < len(beforeInstances); i++ {
+ for i := range beforeInstances {
b := beforeInstances[i]
a := afterInstances[i]
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go
index dd2290974..0ea465cfd 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go
@@ -1,10 +1,17 @@
package ring
+import (
+ "fmt"
+)
+
type replicationSetResultTracker interface {
// Signals an instance has done the execution, either successful (no error)
// or failed (with error). If successful, result will be recorded and can
// be accessed via getResults.
- done(instance *InstanceDesc, result interface{}, err error)
+ done(instance *InstanceDesc, result any, err error)
+
+ // Returns true if all instances are done executing
+ finished() bool
// Returns true if the minimum number of successful results have been received.
succeeded() bool
@@ -12,8 +19,14 @@ type replicationSetResultTracker interface {
// Returns true if the maximum number of failed executions have been reached.
failed() bool
+ // Returns true if executions failed in all instances or all zones.
+ failedCompletely() bool
+
// Returns recorded results.
- getResults() []interface{}
+ getResults() []any
+
+ // Returns errors
+ getErrors() []error
}
type defaultResultTracker struct {
@@ -21,7 +34,9 @@ type defaultResultTracker struct {
numSucceeded int
numErrors int
maxErrors int
- results []interface{}
+ results []any
+ numInstances int
+ errors []error
}
func newDefaultResultTracker(instances []InstanceDesc, maxErrors int) *defaultResultTracker {
@@ -30,19 +45,26 @@ func newDefaultResultTracker(instances []InstanceDesc, maxErrors int) *defaultRe
numSucceeded: 0,
numErrors: 0,
maxErrors: maxErrors,
- results: make([]interface{}, 0, len(instances)),
+ errors: make([]error, 0, len(instances)),
+ results: make([]any, 0, len(instances)),
+ numInstances: len(instances),
}
}
-func (t *defaultResultTracker) done(_ *InstanceDesc, result interface{}, err error) {
+func (t *defaultResultTracker) done(instance *InstanceDesc, result any, err error) {
if err == nil {
t.numSucceeded++
t.results = append(t.results, result)
} else {
+ t.errors = append(t.errors, fmt.Errorf("(%s) %w", instance.GetAddr(), err))
t.numErrors++
}
}
+func (t *defaultResultTracker) finished() bool {
+ return t.numSucceeded+t.numErrors == t.numInstances
+}
+
func (t *defaultResultTracker) succeeded() bool {
return t.numSucceeded >= t.minSucceeded
}
@@ -51,10 +73,18 @@ func (t *defaultResultTracker) failed() bool {
return t.numErrors > t.maxErrors
}
-func (t *defaultResultTracker) getResults() []interface{} {
+func (t *defaultResultTracker) failedCompletely() bool {
+ return t.numInstances == t.numErrors
+}
+
+func (t *defaultResultTracker) getResults() []any {
return t.results
}
+func (t *defaultResultTracker) getErrors() []error {
+ return t.errors
+}
+
// zoneAwareResultTracker tracks the results per zone.
// All instances in a zone must succeed in order for the zone to succeed.
type zoneAwareResultTracker struct {
@@ -62,9 +92,12 @@ type zoneAwareResultTracker struct {
failuresByZone map[string]int
minSuccessfulZones int
maxUnavailableZones int
- resultsPerZone map[string][]interface{}
+ resultsPerZone map[string][]any
numInstances int
zoneResultsQuorum bool
+ zoneCount int
+ doneCount int
+ errors []error
}
func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int, zoneResultsQuorum bool) *zoneAwareResultTracker {
@@ -74,30 +107,38 @@ func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int
maxUnavailableZones: maxUnavailableZones,
numInstances: len(instances),
zoneResultsQuorum: zoneResultsQuorum,
+ errors: make([]error, 0, len(instances)),
}
for _, instance := range instances {
t.waitingByZone[instance.Zone]++
}
t.minSuccessfulZones = len(t.waitingByZone) - maxUnavailableZones
- t.resultsPerZone = make(map[string][]interface{}, len(t.waitingByZone))
+ t.resultsPerZone = make(map[string][]any, len(t.waitingByZone))
+ t.zoneCount = len(t.waitingByZone)
return t
}
-func (t *zoneAwareResultTracker) done(instance *InstanceDesc, result interface{}, err error) {
+func (t *zoneAwareResultTracker) done(instance *InstanceDesc, result any, err error) {
if err != nil {
t.failuresByZone[instance.Zone]++
+ t.errors = append(t.errors, fmt.Errorf("(%s, %s) %w", instance.GetAddr(), instance.GetZone(), err))
} else {
if _, ok := t.resultsPerZone[instance.Zone]; !ok {
// If it is the first result in the zone, then total number of instances
// in this zone should be number of waiting required.
- t.resultsPerZone[instance.Zone] = make([]interface{}, 0, t.waitingByZone[instance.Zone])
+ t.resultsPerZone[instance.Zone] = make([]any, 0, t.waitingByZone[instance.Zone])
}
t.resultsPerZone[instance.Zone] = append(t.resultsPerZone[instance.Zone], result)
}
t.waitingByZone[instance.Zone]--
+ t.doneCount++
+}
+
+func (t *zoneAwareResultTracker) finished() bool {
+ return t.doneCount == t.numInstances
}
func (t *zoneAwareResultTracker) succeeded() bool {
@@ -119,8 +160,15 @@ func (t *zoneAwareResultTracker) failed() bool {
return failedZones > t.maxUnavailableZones
}
-func (t *zoneAwareResultTracker) getResults() []interface{} {
- results := make([]interface{}, 0, t.numInstances)
+func (t *zoneAwareResultTracker) failedCompletely() bool {
+ failedZones := len(t.failuresByZone)
+ allZonesFailed := failedZones == t.zoneCount
+ atLeastHalfOfFleetFailed := len(t.errors) >= t.numInstances/2
+ return allZonesFailed || (t.failed() && atLeastHalfOfFleetFailed)
+}
+
+func (t *zoneAwareResultTracker) getResults() []any {
+ results := make([]any, 0, t.numInstances)
if t.zoneResultsQuorum {
for zone, waiting := range t.waitingByZone {
// No need to check failuresByZone since tracker
@@ -136,3 +184,7 @@ func (t *zoneAwareResultTracker) getResults() []interface{} {
}
return results
}
+
+func (t *zoneAwareResultTracker) getErrors() []error {
+ return t.errors
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
index 38b1d4848..6b121adea 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go
@@ -8,6 +8,7 @@ import (
"fmt"
"math"
"math/rand"
+ "slices"
"sync"
"time"
@@ -19,7 +20,6 @@ import (
"github.com/cortexproject/cortex/pkg/ring/kv"
shardUtil "github.com/cortexproject/cortex/pkg/ring/shard"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
"github.com/cortexproject/cortex/pkg/util/services"
)
@@ -106,13 +106,17 @@ var (
})
// WriteNoExtend is like Write, but with no replicaset extension.
- WriteNoExtend = NewOp([]InstanceState{ACTIVE}, nil)
+ WriteNoExtend = NewOp([]InstanceState{ACTIVE}, func(s InstanceState) bool {
+ // We want to skip instances that are READONLY. So we will increase the size of replication
+ // for the key
+ return s == READONLY
+ })
- // Read operation that extends the replica set if an instance is not ACTIVE, LEAVING OR JOINING
- Read = NewOp([]InstanceState{ACTIVE, PENDING, LEAVING, JOINING}, func(s InstanceState) bool {
+	// Read operation that extends the replica set if an instance is not ACTIVE, LEAVING, JOINING OR READONLY
+ Read = NewOp([]InstanceState{ACTIVE, PENDING, LEAVING, JOINING, READONLY}, func(s InstanceState) bool {
// To match Write with extended replica set we have to also increase the
// size of the replica set for Read, but we can read from LEAVING ingesters.
- return s != ACTIVE && s != LEAVING && s != JOINING
+ return s != ACTIVE && s != LEAVING && s != JOINING && s != READONLY
})
// Reporting is a special value for inquiring about health.
@@ -197,7 +201,8 @@ type Ring struct {
// List of zones for which there's at least 1 instance in the ring. This list is guaranteed
// to be sorted alphabetically.
- ringZones []string
+ ringZones []string
+ previousRingZones []string
// Cache of shuffle-sharded subrings per identifier. Invalidated when topology changes.
// If set to nil, no caching is done (used by tests, and subrings).
@@ -258,7 +263,7 @@ func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client
Name: "ring_members",
Help: "Number of members in the ring",
ConstLabels: map[string]string{"name": name}},
- []string{"state"}),
+ []string{"state", "zone"}),
totalTokensGauge: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "ring_tokens_total",
Help: "Number of tokens in the ring",
@@ -302,7 +307,7 @@ func (r *Ring) loop(ctx context.Context) error {
r.updateRingMetrics(Different)
r.mtx.Unlock()
- r.KVClient.WatchKey(ctx, r.key, func(value interface{}) bool {
+ r.KVClient.WatchKey(ctx, r.key, func(value any) bool {
if value == nil {
level.Info(r.logger).Log("msg", "ring doesn't exist in KV store yet")
return true
@@ -322,19 +327,23 @@ func (r *Ring) updateRingState(ringDesc *Desc) {
// Filter out all instances belonging to excluded zones.
if len(r.cfg.ExcludedZones) > 0 {
for instanceID, instance := range ringDesc.Ingesters {
- if util.StringsContain(r.cfg.ExcludedZones, instance.Zone) {
+ if slices.Contains(r.cfg.ExcludedZones, instance.Zone) {
delete(ringDesc.Ingesters, instanceID)
}
}
}
rc := prevRing.RingCompare(ringDesc)
- if rc == Equal || rc == EqualButStatesAndTimestamps {
+ if rc == Equal || rc == EqualButStatesAndTimestamps || rc == EqualButReadOnly {
// No need to update tokens or zones. Only states and timestamps
// have changed. (If Equal, nothing has changed, but that doesn't happen
// when watching the ring for updates).
r.mtx.Lock()
r.ringDesc = ringDesc
+ if rc == EqualButReadOnly && r.shuffledSubringCache != nil {
+ // Invalidate all cached subrings.
+ r.shuffledSubringCache = make(map[subringCacheKey]*Ring)
+ }
r.updateRingMetrics(rc)
r.mtx.Unlock()
return
@@ -354,6 +363,7 @@ func (r *Ring) updateRingState(ringDesc *Desc) {
r.ringTokensByZone = ringTokensByZone
r.ringInstanceByToken = ringInstanceByToken
r.ringInstanceIdByAddr = ringInstanceByAddr
+ r.previousRingZones = r.ringZones
r.ringZones = ringZones
r.lastTopologyChange = now
if r.shuffledSubringCache != nil {
@@ -401,7 +411,7 @@ func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts [
}
// We want n *distinct* instances.
- if util.StringsContain(distinctHosts, info.InstanceID) {
+ if slices.Contains(distinctHosts, info.InstanceID) {
continue
}
@@ -579,10 +589,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro
} else {
// Calculate the number of required instances;
// ensure we always require at least RF-1 when RF=3.
- numRequired := len(r.ringDesc.Ingesters)
- if numRequired < r.cfg.ReplicationFactor {
- numRequired = r.cfg.ReplicationFactor
- }
+ numRequired := max(len(r.ringDesc.Ingesters), r.cfg.ReplicationFactor)
// We can tolerate this many failures
numRequired -= r.cfg.ReplicationFactor / 2
@@ -657,12 +664,19 @@ func (r *Ring) updateRingMetrics(compareResult CompareResult) {
return
}
- numByState := map[string]int{}
+ numByStateByZone := map[string]map[string]int{}
oldestTimestampByState := map[string]int64{}
// Initialized to zero so we emit zero-metrics (instead of not emitting anything)
- for _, s := range []string{unhealthy, ACTIVE.String(), LEAVING.String(), PENDING.String(), JOINING.String()} {
- numByState[s] = 0
+ for _, s := range []string{unhealthy, ACTIVE.String(), LEAVING.String(), PENDING.String(), JOINING.String(), READONLY.String()} {
+ numByStateByZone[s] = map[string]int{}
+		// make sure removed zones get a zero value
+ for _, zone := range r.previousRingZones {
+ numByStateByZone[s][zone] = 0
+ }
+ for _, zone := range r.ringZones {
+ numByStateByZone[s][zone] = 0
+ }
oldestTimestampByState[s] = 0
}
@@ -671,14 +685,19 @@ func (r *Ring) updateRingMetrics(compareResult CompareResult) {
if !r.IsHealthy(&instance, Reporting, r.KVClient.LastUpdateTime(r.key)) {
s = unhealthy
}
- numByState[s]++
+ if _, ok := numByStateByZone[s]; !ok {
+ numByStateByZone[s] = map[string]int{}
+ }
+ numByStateByZone[s][instance.Zone]++
if oldestTimestampByState[s] == 0 || instance.Timestamp < oldestTimestampByState[s] {
oldestTimestampByState[s] = instance.Timestamp
}
}
- for state, count := range numByState {
- r.numMembersGaugeVec.WithLabelValues(state).Set(float64(count))
+ for state, zones := range numByStateByZone {
+ for zone, count := range zones {
+ r.numMembersGaugeVec.WithLabelValues(state, zone).Set(float64(count))
+ }
}
for state, timestamp := range oldestTimestampByState {
r.oldestTimestampGaugeVec.WithLabelValues(state).Set(float64(timestamp))
@@ -848,7 +867,9 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur
// If the lookback is enabled and this instance has been registered within the lookback period
// then we should include it in the subring but continuing selecting instances.
- if lookbackPeriod > 0 && instance.RegisteredTimestamp >= lookbackUntil {
+	// If an instance is in READONLY state we should always extend; the write path will filter it out in GetRing.
+	// The read path should extend so it also covers the new ingester that took over the writes.
+ if (lookbackPeriod > 0 && instance.RegisteredTimestamp >= lookbackUntil) || instance.State == READONLY {
continue
}
@@ -995,7 +1016,7 @@ func NewOp(healthyStates []InstanceState, shouldExtendReplicaSet func(s Instance
}
if shouldExtendReplicaSet != nil {
- for _, s := range []InstanceState{ACTIVE, LEAVING, PENDING, JOINING, LEFT} {
+ for _, s := range []InstanceState{ACTIVE, LEAVING, PENDING, JOINING, LEFT, READONLY} {
if shouldExtendReplicaSet(s) {
op |= (0x10000 << s)
}
@@ -1019,3 +1040,14 @@ func (op Operation) ShouldExtendReplicaSetOnState(s InstanceState) bool {
// All states are healthy, no states extend replica set.
var allStatesRingOperation = Operation(0x0000ffff)
+
+func AutoForgetFromRing(ringDesc *Desc, forgetPeriod time.Duration, logger log.Logger) {
+ for id, instance := range ringDesc.Ingesters {
+ lastHeartbeat := time.Unix(instance.GetTimestamp(), 0)
+
+ if time.Since(lastHeartbeat) > forgetPeriod {
+ level.Warn(logger).Log("msg", "auto-forgetting instance from the ring because it is unhealthy for a long time", "instance", id, "last_heartbeat", lastHeartbeat.String(), "forget_period", forgetPeriod)
+ ringDesc.RemoveIngester(id)
+ }
+ }
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
index bba74142c..87a4c59b5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go
@@ -36,7 +36,8 @@ const (
JOINING InstanceState = 3
// This state is only used by gossiping code to distribute information about
// instances that have been removed from the ring. Ring users should not use it directly.
- LEFT InstanceState = 4
+ LEFT InstanceState = 4
+ READONLY InstanceState = 5
)
var InstanceState_name = map[int32]string{
@@ -45,14 +46,16 @@ var InstanceState_name = map[int32]string{
2: "PENDING",
3: "JOINING",
4: "LEFT",
+ 5: "READONLY",
}
var InstanceState_value = map[string]int32{
- "ACTIVE": 0,
- "LEAVING": 1,
- "PENDING": 2,
- "JOINING": 3,
- "LEFT": 4,
+ "ACTIVE": 0,
+ "LEAVING": 1,
+ "PENDING": 2,
+ "JOINING": 3,
+ "LEFT": 4,
+ "READONLY": 5,
}
func (InstanceState) EnumDescriptor() ([]byte, []int) {
@@ -210,33 +213,34 @@ func init() {
func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) }
var fileDescriptor_26381ed67e202a6e = []byte{
- // 409 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x8a, 0xd3, 0x50,
- 0x18, 0x85, 0xef, 0x9f, 0xdc, 0x64, 0xd2, 0xbf, 0xce, 0x10, 0xee, 0x0c, 0x12, 0x07, 0xb9, 0x86,
- 0x59, 0x45, 0x17, 0x15, 0xab, 0x0b, 0x11, 0x5c, 0xcc, 0x38, 0x51, 0x12, 0x4a, 0x1d, 0x62, 0x99,
- 0xad, 0xc4, 0xe9, 0x25, 0x84, 0xb1, 0x49, 0x49, 0xae, 0x42, 0x5d, 0xf9, 0x08, 0xbe, 0x80, 0x7b,
- 0x1f, 0xa5, 0xcb, 0xae, 0xa4, 0x2b, 0xb1, 0xe9, 0xc6, 0x65, 0x1f, 0x41, 0x6e, 0xd2, 0x92, 0xe9,
- 0xee, 0x9c, 0xff, 0x9c, 0x9c, 0x2f, 0x81, 0x20, 0x16, 0x69, 0x96, 0xf4, 0xa6, 0x45, 0x2e, 0x73,
- 0x46, 0x95, 0x3e, 0x3d, 0x49, 0xf2, 0x24, 0xaf, 0x0f, 0x4f, 0x95, 0x6a, 0xb2, 0xb3, 0x9f, 0x80,
- 0xf4, 0x52, 0x94, 0x37, 0xec, 0x35, 0x76, 0xd2, 0x2c, 0x11, 0xa5, 0x14, 0x45, 0xe9, 0x80, 0xab,
- 0x7b, 0xdd, 0xfe, 0x83, 0x5e, 0x3d, 0xa2, 0xe2, 0x5e, 0xb0, 0xcb, 0xfc, 0x4c, 0x16, 0xb3, 0x0b,
- 0x3a, 0xff, 0xf3, 0x88, 0x44, 0xed, 0x13, 0xa7, 0x57, 0x78, 0xb4, 0x5f, 0x61, 0x36, 0xea, 0xb7,
- 0x62, 0xe6, 0x80, 0x0b, 0x5e, 0x27, 0x52, 0x92, 0x79, 0x68, 0x7c, 0x8d, 0x3f, 0x7f, 0x11, 0x8e,
- 0xe6, 0x82, 0xd7, 0xed, 0xb3, 0x66, 0x3e, 0xc8, 0x4a, 0x19, 0x67, 0x37, 0x42, 0x61, 0xa2, 0xa6,
- 0xf0, 0x4a, 0x7b, 0x09, 0x21, 0xb5, 0x34, 0x5b, 0x3f, 0xfb, 0x0d, 0x78, 0xef, 0x6e, 0x83, 0x31,
- 0xa4, 0xf1, 0x78, 0x5c, 0x6c, 0x77, 0x6b, 0xcd, 0x1e, 0x62, 0x47, 0xa6, 0x13, 0x51, 0xca, 0x78,
- 0x32, 0xad, 0xc7, 0xf5, 0xa8, 0x3d, 0xb0, 0xc7, 0x68, 0x94, 0x32, 0x96, 0xc2, 0xd1, 0x5d, 0xf0,
- 0x8e, 0xfa, 0xc7, 0xfb, 0xd8, 0x0f, 0x2a, 0x8a, 0x9a, 0x06, 0xbb, 0x8f, 0xa6, 0xcc, 0x6f, 0x45,
- 0x56, 0x3a, 0xa6, 0xab, 0x7b, 0x87, 0xd1, 0xd6, 0x29, 0xe8, 0xb7, 0x3c, 0x13, 0xce, 0x41, 0x03,
- 0x55, 0x9a, 0x3d, 0xc3, 0x93, 0x42, 0x24, 0xa9, 0xfa, 0x62, 0x31, 0xfe, 0xd8, 0xf2, 0xad, 0x9a,
- 0x7f, 0xdc, 0x66, 0xa3, 0x5d, 0x14, 0x52, 0x8b, 0xda, 0x46, 0x48, 0x2d, 0xc3, 0x36, 0x9f, 0x0c,
- 0xf0, 0x70, 0xef, 0x15, 0x18, 0xa2, 0x79, 0xfe, 0x66, 0x14, 0x5c, 0xfb, 0x36, 0x61, 0x5d, 0x3c,
- 0x18, 0xf8, 0xe7, 0xd7, 0xc1, 0xf0, 0x9d, 0x0d, 0xca, 0x5c, 0xf9, 0xc3, 0x4b, 0x65, 0x34, 0x65,
- 0xc2, 0xf7, 0xc1, 0x50, 0x19, 0x9d, 0x59, 0x48, 0x07, 0xfe, 0xdb, 0x91, 0x4d, 0x2f, 0x5e, 0x2c,
- 0x56, 0x9c, 0x2c, 0x57, 0x9c, 0x6c, 0x56, 0x1c, 0xbe, 0x57, 0x1c, 0x7e, 0x55, 0x1c, 0xe6, 0x15,
- 0x87, 0x45, 0xc5, 0xe1, 0x6f, 0xc5, 0xe1, 0x5f, 0xc5, 0xc9, 0xa6, 0xe2, 0xf0, 0x63, 0xcd, 0xc9,
- 0x62, 0xcd, 0xc9, 0x72, 0xcd, 0xc9, 0x27, 0xb3, 0xfe, 0x07, 0x9e, 0xff, 0x0f, 0x00, 0x00, 0xff,
- 0xff, 0xd3, 0x1c, 0x09, 0x3a, 0x2d, 0x02, 0x00, 0x00,
+ // 423 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x41, 0x6b, 0xd4, 0x40,
+ 0x1c, 0xc5, 0xe7, 0xbf, 0x99, 0xa4, 0xd9, 0xff, 0xb6, 0x65, 0x98, 0x16, 0x89, 0x45, 0xc6, 0xd0,
+ 0x53, 0xf4, 0xb0, 0xe2, 0xea, 0x41, 0x04, 0x0f, 0x5b, 0x37, 0x4a, 0xc2, 0x92, 0x96, 0xb8, 0x14,
+ 0xf4, 0x22, 0xb1, 0x3b, 0x84, 0x50, 0x9b, 0x94, 0x64, 0x14, 0xea, 0xc9, 0x8f, 0xe0, 0x17, 0xf0,
+ 0xee, 0x47, 0xe9, 0x71, 0x4f, 0xd2, 0x93, 0xb8, 0xd9, 0x8b, 0xc7, 0x7e, 0x04, 0x99, 0xa4, 0x25,
+ 0xdd, 0xdb, 0x7b, 0xff, 0xf7, 0xf2, 0x7e, 0x09, 0x04, 0xb1, 0xcc, 0xf2, 0x74, 0x78, 0x5e, 0x16,
+ 0xaa, 0xe0, 0x54, 0xeb, 0xbd, 0xdd, 0xb4, 0x48, 0x8b, 0xe6, 0xf0, 0x44, 0xab, 0x36, 0xdb, 0xff,
+ 0x09, 0x48, 0x27, 0xb2, 0x3a, 0xe1, 0xaf, 0xb0, 0x9f, 0xe5, 0xa9, 0xac, 0x94, 0x2c, 0x2b, 0x07,
+ 0x5c, 0xc3, 0x1b, 0x8c, 0xee, 0x0f, 0x9b, 0x11, 0x1d, 0x0f, 0x83, 0xdb, 0xcc, 0xcf, 0x55, 0x79,
+ 0x71, 0x40, 0x2f, 0xff, 0x3c, 0x24, 0x71, 0xf7, 0xc4, 0xde, 0x11, 0x6e, 0xaf, 0x57, 0x38, 0x43,
+ 0xe3, 0x54, 0x5e, 0x38, 0xe0, 0x82, 0xd7, 0x8f, 0xb5, 0xe4, 0x1e, 0x9a, 0x5f, 0x93, 0xcf, 0x5f,
+ 0xa4, 0xd3, 0x73, 0xc1, 0x1b, 0x8c, 0x78, 0x3b, 0x1f, 0xe4, 0x95, 0x4a, 0xf2, 0x13, 0xa9, 0x31,
+ 0x71, 0x5b, 0x78, 0xd9, 0x7b, 0x01, 0x21, 0xb5, 0x7b, 0xcc, 0xd8, 0xff, 0x0d, 0xb8, 0x79, 0xb7,
+ 0xc1, 0x39, 0xd2, 0x64, 0x3e, 0x2f, 0x6f, 0x76, 0x1b, 0xcd, 0x1f, 0x60, 0x5f, 0x65, 0x67, 0xb2,
+ 0x52, 0xc9, 0xd9, 0x79, 0x33, 0x6e, 0xc4, 0xdd, 0x81, 0x3f, 0x42, 0xb3, 0x52, 0x89, 0x92, 0x8e,
+ 0xe1, 0x82, 0xb7, 0x3d, 0xda, 0x59, 0xc7, 0xbe, 0xd3, 0x51, 0xdc, 0x36, 0xf8, 0x3d, 0xb4, 0x54,
+ 0x71, 0x2a, 0xf3, 0xca, 0xb1, 0x5c, 0xc3, 0xdb, 0x8a, 0x6f, 0x9c, 0x86, 0x7e, 0x2b, 0x72, 0xe9,
+ 0x6c, 0xb4, 0x50, 0xad, 0xf9, 0x53, 0xdc, 0x2d, 0x65, 0x9a, 0xe9, 0x2f, 0x96, 0xf3, 0x8f, 0x1d,
+ 0xdf, 0x6e, 0xf8, 0x3b, 0x5d, 0x36, 0xbb, 0x8d, 0x42, 0x6a, 0x53, 0x66, 0x86, 0xd4, 0x36, 0x99,
+ 0xf5, 0xf8, 0x03, 0x6e, 0xad, 0xbd, 0x02, 0x47, 0xb4, 0xc6, 0xaf, 0x67, 0xc1, 0xb1, 0xcf, 0x08,
+ 0x1f, 0xe0, 0xc6, 0xd4, 0x1f, 0x1f, 0x07, 0xd1, 0x5b, 0x06, 0xda, 0x1c, 0xf9, 0xd1, 0x44, 0x9b,
+ 0x9e, 0x36, 0xe1, 0x61, 0x10, 0x69, 0x63, 0x70, 0x1b, 0xe9, 0xd4, 0x7f, 0x33, 0x63, 0x94, 0x6f,
+ 0xa2, 0x1d, 0xfb, 0xe3, 0xc9, 0x61, 0x34, 0x7d, 0xcf, 0xcc, 0x83, 0xe7, 0x8b, 0xa5, 0x20, 0x57,
+ 0x4b, 0x41, 0xae, 0x97, 0x02, 0xbe, 0xd7, 0x02, 0x7e, 0xd5, 0x02, 0x2e, 0x6b, 0x01, 0x8b, 0x5a,
+ 0xc0, 0xdf, 0x5a, 0xc0, 0xbf, 0x5a, 0x90, 0xeb, 0x5a, 0xc0, 0x8f, 0x95, 0x20, 0x8b, 0x95, 0x20,
+ 0x57, 0x2b, 0x41, 0x3e, 0x59, 0xcd, 0x1f, 0xf1, 0xec, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x75,
+ 0x1d, 0x75, 0xff, 0x3b, 0x02, 0x00, 0x00,
}
func (x InstanceState) String() string {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
index 5dfeea8fa..44a2cad27 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
@@ -51,4 +51,6 @@ enum InstanceState {
// This state is only used by gossiping code to distribute information about
// instances that have been removed from the ring. Ring users should not use it directly.
LEFT = 4;
+
+ READONLY= 5;
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/token_file.go b/vendor/github.com/cortexproject/cortex/pkg/ring/token_file.go
new file mode 100644
index 000000000..328c0fa13
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/token_file.go
@@ -0,0 +1,79 @@
+package ring
+
+import (
+ "encoding/json"
+ "errors"
+ "os"
+ "sort"
+)
+
+type TokenFile struct {
+ PreviousState InstanceState `json:"previousState,omitempty"`
+ Tokens Tokens `json:"tokens"`
+}
+
+// StoreToFile stores the tokens in the given directory.
+func (l TokenFile) StoreToFile(tokenFilePath string) error {
+ if tokenFilePath == "" {
+ return errors.New("path is empty")
+ }
+
+ // If any operations failed further in the function, we keep the temporary
+ // file hanging around for debugging.
+ f, err := os.Create(tokenFilePath + ".tmp")
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ // If the file was not closed, then there must already be an error, hence ignore
+ // the error (if any) from f.Close(). If the file was already closed, then
+ // we would ignore the error in that case too.
+ _ = f.Close()
+ }()
+
+ b, err := json.Marshal(l)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Write(b); err != nil {
+ return err
+ }
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+
+ // Tokens successfully written, replace the temporary file with the actual file path.
+ return os.Rename(f.Name(), tokenFilePath)
+}
+
+func LoadTokenFile(tokenFilePath string) (*TokenFile, error) {
+ b, err := os.ReadFile(tokenFilePath)
+ if err != nil {
+ return nil, err
+ }
+ t := TokenFile{}
+ err = json.Unmarshal(b, &t)
+
+ // Tokens may have been written to file by an older version which
+ // doesn't guarantee sorted tokens, so we enforce sorting here.
+ if !sort.IsSorted(t.Tokens) {
+ sort.Sort(t.Tokens)
+ }
+
+ return &t, err
+}
+
+func (p InstanceState) MarshalJSON() ([]byte, error) {
+ ss := InstanceState_name[int32(p)]
+ return json.Marshal(ss)
+}
+func (p *InstanceState) UnmarshalJSON(data []byte) error {
+ res := ""
+ if err := json.Unmarshal(data, &res); err != nil {
+ return err
+ }
+ *p = InstanceState(InstanceState_value[res])
+ return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/token_generator.go b/vendor/github.com/cortexproject/cortex/pkg/ring/token_generator.go
index 27cb3987d..59f3db23a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/token_generator.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/token_generator.go
@@ -4,6 +4,7 @@ import (
"container/heap"
"math"
"math/rand"
+ "slices"
"sort"
"strings"
"time"
@@ -59,9 +60,7 @@ func (g *RandomTokenGenerator) GenerateTokens(ring *Desc, _, _ string, numTokens
}
// Ensure returned tokens are sorted.
- sort.Slice(tokens, func(i, j int) bool {
- return tokens[i] < tokens[j]
- })
+ slices.Sort(tokens)
return tokens
}
@@ -136,7 +135,11 @@ func (g *MinimizeSpreadTokenGenerator) GenerateTokens(ring *Desc, id, zone strin
for i := 1; i <= len(zonalTokens); i++ {
index := i % len(zonalTokens)
if tokenInstanceId, ok := usedTokens[zonalTokens[index]]; ok && tokenInstanceId != id {
- instanceDistance := tokensPerInstanceWithDistance[tokenInstanceId]
+ instanceDistance, ok := tokensPerInstanceWithDistance[tokenInstanceId]
+ if !ok {
+ continue // Same token is shared to an ingester in different zone, skip
+ }
+
instanceDistance.tokens = append(instanceDistance.tokens, &tokenDistanceEntry{
token: zonalTokens[index],
prev: zonalTokens[i-1],
@@ -231,9 +234,7 @@ func (g *MinimizeSpreadTokenGenerator) GenerateTokens(ring *Desc, id, zone strin
}
}
- sort.Slice(r, func(i, j int) bool {
- return r[i] < r[j]
- })
+ slices.Sort(r)
return r
}
@@ -287,7 +288,7 @@ func tokenDistance(from, to uint32) int64 {
}
func findFirst(n int, f func(int) bool) int {
- for i := 0; i < n; i++ {
+ for i := range n {
if f(i) {
return i
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/tokens.go b/vendor/github.com/cortexproject/cortex/pkg/ring/tokens.go
index cf4999ff5..48accde1b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/tokens.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/tokens.go
@@ -1,9 +1,6 @@
package ring
import (
- "encoding/json"
- "errors"
- "os"
"sort"
)
@@ -24,7 +21,7 @@ func (t Tokens) Equals(other Tokens) bool {
sort.Sort(mine)
sort.Sort(other)
- for i := 0; i < len(mine); i++ {
+ for i := range mine {
if mine[i] != other[i] {
return false
}
@@ -32,76 +29,3 @@ func (t Tokens) Equals(other Tokens) bool {
return true
}
-
-// StoreToFile stores the tokens in the given directory.
-func (t Tokens) StoreToFile(tokenFilePath string) error {
- if tokenFilePath == "" {
- return errors.New("path is empty")
- }
-
- // If any operations failed further in the function, we keep the temporary
- // file hanging around for debugging.
- f, err := os.Create(tokenFilePath + ".tmp")
- if err != nil {
- return err
- }
-
- defer func() {
- // If the file was not closed, then there must already be an error, hence ignore
- // the error (if any) from f.Close(). If the file was already closed, then
- // we would ignore the error in that case too.
- _ = f.Close()
- }()
-
- b, err := t.Marshal()
- if err != nil {
- return err
- }
- if _, err = f.Write(b); err != nil {
- return err
- }
-
- if err := f.Close(); err != nil {
- return err
- }
-
- // Tokens successfully written, replace the temporary file with the actual file path.
- return os.Rename(f.Name(), tokenFilePath)
-}
-
-// LoadTokensFromFile loads tokens from given file path.
-func LoadTokensFromFile(tokenFilePath string) (Tokens, error) {
- b, err := os.ReadFile(tokenFilePath)
- if err != nil {
- return nil, err
- }
- var t Tokens
- err = t.Unmarshal(b)
-
- // Tokens may have been written to file by an older version which
- // doesn't guarantee sorted tokens, so we enforce sorting here.
- if !sort.IsSorted(t) {
- sort.Sort(t)
- }
-
- return t, err
-}
-
-// Marshal encodes the tokens into JSON.
-func (t Tokens) Marshal() ([]byte, error) {
- return json.Marshal(tokensJSON{Tokens: t})
-}
-
-// Unmarshal reads the tokens from JSON byte stream.
-func (t *Tokens) Unmarshal(b []byte) error {
- tj := tokensJSON{}
- if err := json.Unmarshal(b, &tj); err != nil {
- return err
- }
- *t = tj.Tokens
- return nil
-}
-
-type tokensJSON struct {
- Tokens []uint32 `json:"tokens"`
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
index e05fc32a0..66a176c05 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
@@ -177,7 +177,7 @@ func getFirstAddressOf(names []string, logger log.Logger) (string, error) {
return ipAddr, nil
}
if ipAddr == "" {
- return "", fmt.Errorf("No address found for %s", names)
+ return "", fmt.Errorf("no address found for %s", names)
}
if strings.HasPrefix(ipAddr, `169.254.`) {
level.Warn(logger).Log("msg", "using automatic private ip", "address", ipAddr)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/bucket_client.go
index ad26463eb..d01a27281 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/azure/bucket_client.go
@@ -1,15 +1,16 @@
package azure
import (
+ "net/http"
+
"github.com/go-kit/log"
"github.com/prometheus/common/model"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/exthttp"
"github.com/thanos-io/objstore/providers/azure"
- yaml "gopkg.in/yaml.v2"
)
-func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) {
+func NewBucketClient(cfg Config, hedgedRoundTripper func(rt http.RoundTripper) http.RoundTripper, name string, logger log.Logger) (objstore.Bucket, error) {
bucketConfig := azure.Config{
StorageAccountName: cfg.StorageAccountName,
StorageAccountKey: cfg.StorageAccountKey.Value,
@@ -30,12 +31,5 @@ func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucke
},
}
- // Thanos currently doesn't support passing the config as is, but expects a YAML,
- // so we're going to serialize it.
- serialized, err := yaml.Marshal(bucketConfig)
- if err != nil {
- return nil, err
- }
-
- return azure.NewBucket(logger, serialized, name)
+ return azure.NewBucketWithConfig(logger, bucketConfig, name, hedgedRoundTripper)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/bucket_util.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/bucket_util.go
index 4228b468d..c068d0869 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/bucket_util.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/bucket_util.go
@@ -7,27 +7,45 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/thanos-io/objstore"
+ "go.uber.org/atomic"
+
+ "github.com/cortexproject/cortex/pkg/util/concurrency"
)
// DeletePrefix removes all objects with given prefix, recursively.
// It returns number of deleted objects.
// If deletion of any object fails, it returns error and stops.
-func DeletePrefix(ctx context.Context, bkt objstore.Bucket, prefix string, logger log.Logger) (int, error) {
- result := 0
- err := bkt.Iter(ctx, prefix, func(name string) error {
- if strings.HasSuffix(name, objstore.DirDelim) {
- deleted, err := DeletePrefix(ctx, bkt, name, logger)
- result += deleted
- return err
- }
+func DeletePrefix(ctx context.Context, bkt objstore.Bucket, prefix string, logger log.Logger, maxConcurrency int) (int, error) {
+ keys, err := ListPrefixes(ctx, bkt, prefix, logger)
+ if err != nil {
+ return 0, err
+ }
+ result := atomic.NewInt32(0)
+ err = concurrency.ForEach(ctx, concurrency.CreateJobsFromStrings(keys), maxConcurrency, func(ctx context.Context, key any) error {
+ name := key.(string)
if err := bkt.Delete(ctx, name); err != nil {
return err
}
- result++
+ result.Inc()
level.Debug(logger).Log("msg", "deleted file", "file", name)
return nil
})
- return result, err
+ return int(result.Load()), err
+}
+
+func ListPrefixes(ctx context.Context, bkt objstore.Bucket, prefix string, logger log.Logger) ([]string, error) {
+ var keys []string
+ err := bkt.Iter(ctx, prefix, func(name string) error {
+ if strings.HasSuffix(name, objstore.DirDelim) {
+ moreKeys, err := ListPrefixes(ctx, bkt, name, logger)
+ keys = append(keys, moreKeys...)
+ return err
+ }
+
+ keys = append(keys, name)
+ return nil
+ })
+ return keys, err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client.go
index 7d120cd6d..e13a49593 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client.go
@@ -5,6 +5,8 @@ import (
"errors"
"flag"
"fmt"
+ "net/http"
+ "slices"
"strings"
"github.com/go-kit/log"
@@ -17,7 +19,6 @@ import (
"github.com/cortexproject/cortex/pkg/storage/bucket/gcs"
"github.com/cortexproject/cortex/pkg/storage/bucket/s3"
"github.com/cortexproject/cortex/pkg/storage/bucket/swift"
- "github.com/cortexproject/cortex/pkg/util"
)
const (
@@ -89,7 +90,7 @@ func (cfg *Config) RegisterFlagsWithPrefixAndBackend(prefix string, f *flag.Flag
}
func (cfg *Config) Validate() error {
- if !util.StringsContain(cfg.supportedBackends(), cfg.Backend) {
+ if !slices.Contains(cfg.supportedBackends(), cfg.Backend) {
return ErrUnsupportedStorageBackend
}
@@ -103,17 +104,17 @@ func (cfg *Config) Validate() error {
}
// NewClient creates a new bucket client based on the configured backend
-func NewClient(ctx context.Context, cfg Config, name string, logger log.Logger, reg prometheus.Registerer) (bucket objstore.InstrumentedBucket, err error) {
+func NewClient(ctx context.Context, cfg Config, hedgedRoundTripper func(rt http.RoundTripper) http.RoundTripper, name string, logger log.Logger, reg prometheus.Registerer) (bucket objstore.InstrumentedBucket, err error) {
var client objstore.Bucket
switch cfg.Backend {
case S3:
- client, err = s3.NewBucketClient(cfg.S3, name, logger)
+ client, err = s3.NewBucketClient(cfg.S3, hedgedRoundTripper, name, logger)
case GCS:
- client, err = gcs.NewBucketClient(ctx, cfg.GCS, name, logger)
+ client, err = gcs.NewBucketClient(ctx, cfg.GCS, hedgedRoundTripper, name, logger)
case Azure:
- client, err = azure.NewBucketClient(cfg.Azure, name, logger)
+ client, err = azure.NewBucketClient(cfg.Azure, hedgedRoundTripper, name, logger)
case Swift:
- client, err = swift.NewBucketClient(cfg.Swift, name, logger)
+ client, err = swift.NewBucketClient(cfg.Swift, hedgedRoundTripper, name, logger)
case Filesystem:
client, err = filesystem.NewBucketClient(cfg.Filesystem)
default:
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go
index e503a027e..d641067ae 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go
@@ -5,6 +5,7 @@ import (
"context"
"errors"
"io"
+ "strings"
"sync"
"time"
@@ -23,6 +24,10 @@ type ClientMock struct {
uploaded sync.Map
}
+func (m *ClientMock) Provider() objstore.ObjProvider {
+ return objstore.FILESYSTEM
+}
+
func (m *ClientMock) WithExpectedErrs(objstore.IsOpFailureExpectedFunc) objstore.Bucket {
return m
}
@@ -32,16 +37,21 @@ func (m *ClientMock) ReaderWithExpectedErrs(objstore.IsOpFailureExpectedFunc) ob
}
// Upload mocks objstore.Bucket.Upload()
-func (m *ClientMock) Upload(ctx context.Context, name string, r io.Reader) error {
+func (m *ClientMock) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error {
if _, ok := m.uploaded.Load(name); ok {
m.uploaded.Store(name, true)
}
- args := m.Called(ctx, name, r)
- return args.Error(0)
+ if len(opts) > 0 {
+ args := m.Called(ctx, name, r, opts)
+ return args.Error(0)
+ } else {
+ args := m.Called(ctx, name, r)
+ return args.Error(0)
+ }
}
func (m *ClientMock) MockUpload(name string, err error) {
- m.On("Upload", mock.Anything, name, mock.Anything).Return(err)
+ m.On("Upload", mock.Anything, name, mock.Anything, mock.Anything).Return(err)
}
// Delete mocks objstore.Bucket.Delete()
@@ -57,12 +67,58 @@ func (m *ClientMock) Name() string {
return "mock"
}
+func (m *ClientMock) IterWithAttributes(ctx context.Context, dir string, f func(attrs objstore.IterObjectAttributes) error, options ...objstore.IterOption) error {
+ args := m.Called(ctx, dir, f, options)
+ return args.Error(0)
+}
+
+func (m *ClientMock) SupportedIterOptions() []objstore.IterOptionType {
+ args := m.Called()
+ return args.Get(0).([]objstore.IterOptionType)
+}
+
// Iter mocks objstore.Bucket.Iter()
func (m *ClientMock) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
args := m.Called(ctx, dir, f, options)
return args.Error(0)
}
+func (m *ClientMock) MockIterWithAttributes(prefix string, objects []string, err error, cb func()) {
+ m.On("IterWithAttributes", mock.Anything, prefix, mock.Anything, mock.Anything).Return(err).Run(func(args mock.Arguments) {
+ f := args.Get(2).(func(attrs objstore.IterObjectAttributes) error)
+ opts := args.Get(3).([]objstore.IterOption)
+
+ // Determine if recursive flag is passed
+ params := objstore.ApplyIterOptions(opts...)
+ recursive := params.Recursive
+
+ for _, o := range objects {
+ // Check if object is under current prefix
+ if !strings.HasPrefix(o, prefix) {
+ continue
+ }
+
+ // Extract the remaining path after prefix
+ suffix := strings.TrimPrefix(o, prefix)
+
+ // If not recursive and there's a slash in the remaining path, skip it
+ if !recursive && strings.Contains(suffix, "/") {
+ continue
+ }
+
+ attrs := objstore.IterObjectAttributes{
+ Name: o,
+ }
+ if cb != nil {
+ cb()
+ }
+ if err := f(attrs); err != nil {
+ break
+ }
+ }
+ })
+}
+
// MockIter is a convenient method to mock Iter()
func (m *ClientMock) MockIter(prefix string, objects []string, err error) {
m.MockIterWithCallback(prefix, objects, err, nil)
@@ -71,6 +127,7 @@ func (m *ClientMock) MockIter(prefix string, objects []string, err error) {
// MockIterWithCallback is a convenient method to mock Iter() and get a callback called when the Iter
// API is called.
func (m *ClientMock) MockIterWithCallback(prefix string, objects []string, err error, cb func()) {
+ m.MockIterWithAttributes(prefix, objects, err, cb)
m.On("Iter", mock.Anything, prefix, mock.Anything, mock.Anything).Return(err).Run(func(args mock.Arguments) {
if cb != nil {
cb()
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/bucket_client.go
index 01a65d922..d5a08f465 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/gcs/bucket_client.go
@@ -2,26 +2,19 @@ package gcs
import (
"context"
+ "net/http"
"github.com/go-kit/log"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/providers/gcs"
- yaml "gopkg.in/yaml.v2"
)
// NewBucketClient creates a new GCS bucket client
-func NewBucketClient(ctx context.Context, cfg Config, name string, logger log.Logger) (objstore.Bucket, error) {
+func NewBucketClient(ctx context.Context, cfg Config, hedgedRoundTripper func(rt http.RoundTripper) http.RoundTripper, name string, logger log.Logger) (objstore.Bucket, error) {
bucketConfig := gcs.Config{
Bucket: cfg.BucketName,
ServiceAccount: cfg.ServiceAccount.Value,
}
- // Thanos currently doesn't support passing the config as is, but expects a YAML,
- // so we're going to serialize it.
- serialized, err := yaml.Marshal(bucketConfig)
- if err != nil {
- return nil, err
- }
-
- return gcs.NewBucket(ctx, logger, serialized, name)
+ return gcs.NewBucketWithConfig(ctx, logger, bucketConfig, name, hedgedRoundTripper)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/hedged_request.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/hedged_request.go
new file mode 100644
index 000000000..e0e3ba77d
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/hedged_request.go
@@ -0,0 +1,43 @@
+package bucket
+
+import (
+ "errors"
+ "flag"
+ "net/http"
+
+ "github.com/thanos-io/thanos/pkg/exthttp"
+)
+
+var (
+ errInvalidQuantile = errors.New("invalid hedged request quantile, it must be between 0 and 1")
+)
+
+type HedgedRequestConfig struct {
+ Enabled bool `yaml:"enabled"`
+ MaxRequests uint `yaml:"max_requests"`
+ Quantile float64 `yaml:"quantile"`
+}
+
+func (cfg *HedgedRequestConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
+ f.BoolVar(&cfg.Enabled, prefix+"hedged-request.enabled", false, "If true, hedged requests are applied to object store calls. It can help with reducing tail latency.")
+ f.UintVar(&cfg.MaxRequests, prefix+"hedged-request.max-requests", 3, "Maximum number of hedged requests allowed for each initial request. A high number can reduce latency but increase internal calls.")
+ f.Float64Var(&cfg.Quantile, prefix+"hedged-request.quantile", 0.9, "It is used to calculate a latency threshold to trigger hedged requests. For example, additional requests are triggered when the initial request response time exceeds the 90th percentile.")
+}
+
+func (cfg *HedgedRequestConfig) GetHedgedRoundTripper() func(rt http.RoundTripper) http.RoundTripper {
+ return exthttp.CreateHedgedTransportWithConfig(exthttp.CustomBucketConfig{
+ HedgingConfig: exthttp.HedgingConfig{
+ Enabled: cfg.Enabled,
+ UpTo: cfg.MaxRequests,
+ Quantile: cfg.Quantile,
+ },
+ })
+}
+
+func (cfg *HedgedRequestConfig) Validate() error {
+ if cfg.Quantile > 1 || cfg.Quantile < 0 {
+ return errInvalidQuantile
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/prefixed_bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/prefixed_bucket_client.go
index f6606e654..1f979df31 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/prefixed_bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/prefixed_bucket_client.go
@@ -31,8 +31,8 @@ func (b *PrefixedBucketClient) Close() error {
}
// Upload the contents of the reader as an object into the bucket.
-func (b *PrefixedBucketClient) Upload(ctx context.Context, name string, r io.Reader) (err error) {
- err = b.bucket.Upload(ctx, b.fullName(name), r)
+func (b *PrefixedBucketClient) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) (err error) {
+ err = b.bucket.Upload(ctx, b.fullName(name), r, opts...)
return
}
@@ -44,6 +44,20 @@ func (b *PrefixedBucketClient) Delete(ctx context.Context, name string) error {
// Name returns the bucket name for the provider.
func (b *PrefixedBucketClient) Name() string { return b.bucket.Name() }
+// IterWithAttributes calls f for each entry in the given directory (not recursive.). The argument to f is the object attributes
+// including the prefix of the inspected directory. The configured prefix will be stripped
+// before supplied function is applied.
+func (b *PrefixedBucketClient) IterWithAttributes(ctx context.Context, dir string, f func(attrs objstore.IterObjectAttributes) error, options ...objstore.IterOption) error {
+ return b.bucket.IterWithAttributes(ctx, b.fullName(dir), func(attrs objstore.IterObjectAttributes) error {
+ attrs.Name = strings.TrimPrefix(attrs.Name, b.prefix+objstore.DirDelim)
+ return f(attrs)
+ }, options...)
+}
+
+func (b *PrefixedBucketClient) SupportedIterOptions() []objstore.IterOptionType {
+ return b.bucket.SupportedIterOptions()
+}
+
// Iter calls f for each entry in the given directory (not recursive.). The argument to f is the full
// object name including the prefix of the inspected directory. The configured prefix will be stripped
// before supplied function is applied.
@@ -100,3 +114,7 @@ func (b *PrefixedBucketClient) WithExpectedErrs(fn objstore.IsOpFailureExpectedF
}
return b
}
+
+func (b *PrefixedBucketClient) Provider() objstore.ObjProvider {
+ return b.bucket.Provider()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go
index 7a72e6513..8d3ed4a63 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go
@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
+ "net/http"
"time"
"github.com/go-kit/log"
@@ -21,13 +22,13 @@ var defaultRetryMinBackoff = 5 * time.Second
var defaultRetryMaxBackoff = 1 * time.Minute
// NewBucketClient creates a new S3 bucket client
-func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) {
+func NewBucketClient(cfg Config, hedgedRoundTripper func(rt http.RoundTripper) http.RoundTripper, name string, logger log.Logger) (objstore.Bucket, error) {
s3Cfg, err := newS3Config(cfg)
if err != nil {
return nil, err
}
- bucket, err := s3.NewBucketWithConfig(logger, s3Cfg, name)
+ bucket, err := s3.NewBucketWithConfig(logger, s3Cfg, name, hedgedRoundTripper)
if err != nil {
return nil, err
}
@@ -47,7 +48,7 @@ func NewBucketReaderClient(cfg Config, name string, logger log.Logger) (objstore
return nil, err
}
- bucket, err := s3.NewBucketWithConfig(logger, s3Cfg, name)
+ bucket, err := s3.NewBucketWithConfig(logger, s3Cfg, name, nil)
if err != nil {
return nil, err
}
@@ -82,14 +83,15 @@ func newS3Config(cfg Config) (s3.Config, error) {
}
return s3.Config{
- Bucket: cfg.BucketName,
- Endpoint: cfg.Endpoint,
- Region: cfg.Region,
- AccessKey: cfg.AccessKeyID,
- SecretKey: cfg.SecretAccessKey.Value,
- Insecure: cfg.Insecure,
- SSEConfig: sseCfg,
- SendContentMd5: cfg.SendContentMd5,
+ Bucket: cfg.BucketName,
+ Endpoint: cfg.Endpoint,
+ Region: cfg.Region,
+ DisableDualstack: cfg.DisableDualstack,
+ AccessKey: cfg.AccessKeyID,
+ SecretKey: cfg.SecretAccessKey.Value,
+ Insecure: cfg.Insecure,
+ SSEConfig: sseCfg,
+ SendContentMd5: cfg.SendContentMd5,
HTTPConfig: s3.HTTPConfig{
IdleConnTimeout: model.Duration(cfg.HTTP.IdleConnTimeout),
ResponseHeaderTimeout: model.Duration(cfg.HTTP.ResponseHeaderTimeout),
@@ -102,9 +104,10 @@ func newS3Config(cfg Config) (s3.Config, error) {
Transport: cfg.HTTP.Transport,
},
// Enforce signature version 2 if CLI flag is set
- SignatureV2: cfg.SignatureVersion == SignatureVersionV2,
- BucketLookupType: bucketLookupType,
- AWSSDKAuth: cfg.AccessKeyID == "",
+ ListObjectsVersion: cfg.ListObjectsVersion,
+ SignatureV2: cfg.SignatureVersion == SignatureVersionV2,
+ BucketLookupType: bucketLookupType,
+ AWSSDKAuth: cfg.AccessKeyID == "",
}, nil
}
@@ -116,6 +119,10 @@ type BucketWithRetries struct {
retryMaxBackoff time.Duration
}
+func (b *BucketWithRetries) Provider() objstore.ObjProvider {
+ return b.bucket.Provider()
+}
+
func (b *BucketWithRetries) retry(ctx context.Context, f func() error, operationInfo string) error {
var lastErr error
retries := backoff.New(ctx, backoff.Config{
@@ -148,6 +155,16 @@ func (b *BucketWithRetries) Name() string {
return b.bucket.Name()
}
+func (b *BucketWithRetries) IterWithAttributes(ctx context.Context, dir string, f func(attrs objstore.IterObjectAttributes) error, options ...objstore.IterOption) error {
+ return b.retry(ctx, func() error {
+ return b.bucket.IterWithAttributes(ctx, dir, f, options...)
+ }, fmt.Sprintf("IterWithAttributes %s", dir))
+}
+
+func (b *BucketWithRetries) SupportedIterOptions() []objstore.IterOptionType {
+ return b.bucket.SupportedIterOptions()
+}
+
func (b *BucketWithRetries) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
return b.retry(ctx, func() error {
return b.bucket.Iter(ctx, dir, f, options...)
@@ -178,12 +195,12 @@ func (b *BucketWithRetries) Exists(ctx context.Context, name string) (exists boo
return
}
-func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader) error {
+func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader, uploadOpts ...objstore.ObjectUploadOption) error {
rs, ok := r.(io.ReadSeeker)
if !ok {
// Skip retry if incoming Reader is not seekable to avoid
// loading entire content into memory
- err := b.bucket.Upload(ctx, name, r)
+ err := b.bucket.Upload(ctx, name, r, uploadOpts...)
if err != nil {
level.Warn(b.logger).Log("msg", "skip upload retry as reader is not seekable", "file", name, "err", err)
}
@@ -193,7 +210,7 @@ func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader
if _, err := rs.Seek(0, io.SeekStart); err != nil {
return err
}
- return b.bucket.Upload(ctx, name, rs)
+ return b.bucket.Upload(ctx, name, rs, uploadOpts...)
}, fmt.Sprintf("Upload %s", name))
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go
index e13807913..f5778f343 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go
@@ -5,6 +5,7 @@ import (
"flag"
"fmt"
"net/http"
+ "slices"
"strings"
"github.com/minio/minio-go/v7/pkg/encrypt"
@@ -12,7 +13,6 @@ import (
"github.com/thanos-io/objstore/providers/s3"
bucket_http "github.com/cortexproject/cortex/pkg/storage/bucket/http"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
)
@@ -31,16 +31,21 @@ const (
BucketAutoLookup = "auto"
BucketVirtualHostLookup = "virtual-hosted"
BucketPathLookup = "path"
+
+ ListObjectsVersionV1 = "v1"
+ ListObjectsVersionV2 = "v2"
)
var (
supportedSignatureVersions = []string{SignatureVersionV4, SignatureVersionV2}
supportedSSETypes = []string{SSEKMS, SSES3}
supportedBucketLookupTypes = []string{BucketAutoLookup, BucketVirtualHostLookup, BucketPathLookup}
+ supportedListObjectsVersion = []string{ListObjectsVersionV1, ListObjectsVersionV2}
errUnsupportedSignatureVersion = errors.New("unsupported signature version")
errUnsupportedSSEType = errors.New("unsupported S3 SSE type")
errInvalidSSEContext = errors.New("invalid S3 SSE encryption context")
errInvalidBucketLookupType = errors.New("invalid bucket lookup type")
+ errInvalidListObjectsVersion = errors.New("invalid list object version")
)
// HTTPConfig stores the http.Transport configuration for the s3 minio client.
@@ -58,15 +63,17 @@ func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
// Config holds the config options for an S3 backend
type Config struct {
- Endpoint string `yaml:"endpoint"`
- Region string `yaml:"region"`
- BucketName string `yaml:"bucket_name"`
- SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
- AccessKeyID string `yaml:"access_key_id"`
- Insecure bool `yaml:"insecure"`
- SignatureVersion string `yaml:"signature_version"`
- BucketLookupType string `yaml:"bucket_lookup_type"`
- SendContentMd5 bool `yaml:"send_content_md5"`
+ Endpoint string `yaml:"endpoint"`
+ Region string `yaml:"region"`
+ BucketName string `yaml:"bucket_name"`
+ DisableDualstack bool `yaml:"disable_dualstack"`
+ SecretAccessKey flagext.Secret `yaml:"secret_access_key"`
+ AccessKeyID string `yaml:"access_key_id"`
+ Insecure bool `yaml:"insecure"`
+ SignatureVersion string `yaml:"signature_version"`
+ BucketLookupType string `yaml:"bucket_lookup_type"`
+ SendContentMd5 bool `yaml:"send_content_md5"`
+ ListObjectsVersion string `yaml:"list_objects_version"`
SSE SSEConfig `yaml:"sse"`
HTTP HTTPConfig `yaml:"http"`
@@ -83,23 +90,30 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.Var(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "S3 secret access key")
f.StringVar(&cfg.BucketName, prefix+"s3.bucket-name", "", "S3 bucket name")
f.StringVar(&cfg.Region, prefix+"s3.region", "", "S3 region. If unset, the client will issue a S3 GetBucketLocation API call to autodetect it.")
+ f.BoolVar(&cfg.DisableDualstack, prefix+"s3.disable-dualstack", false, "If enabled, S3 endpoint will use the non-dualstack variant.")
f.StringVar(&cfg.Endpoint, prefix+"s3.endpoint", "", "The S3 bucket endpoint. It could be an AWS S3 endpoint listed at https://docs.aws.amazon.com/general/latest/gr/s3.html or the address of an S3-compatible service in hostname:port format.")
f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "If enabled, use http:// for the S3 endpoint instead of https://. This could be useful in local dev/test environments while using an S3-compatible backend storage, like Minio.")
f.StringVar(&cfg.SignatureVersion, prefix+"s3.signature-version", SignatureVersionV4, fmt.Sprintf("The signature version to use for authenticating against S3. Supported values are: %s.", strings.Join(supportedSignatureVersions, ", ")))
f.StringVar(&cfg.BucketLookupType, prefix+"s3.bucket-lookup-type", BucketAutoLookup, fmt.Sprintf("The s3 bucket lookup style. Supported values are: %s.", strings.Join(supportedBucketLookupTypes, ", ")))
f.BoolVar(&cfg.SendContentMd5, prefix+"s3.send-content-md5", true, "If true, attach MD5 checksum when upload objects and S3 uses MD5 checksum algorithm to verify the provided digest. If false, use CRC32C algorithm instead.")
+ f.StringVar(&cfg.ListObjectsVersion, prefix+"s3.list-objects-version", "", fmt.Sprintf("The list api version. Supported values are: %s, and ''.", strings.Join(supportedListObjectsVersion, ", ")))
cfg.SSE.RegisterFlagsWithPrefix(prefix+"s3.sse.", f)
cfg.HTTP.RegisterFlagsWithPrefix(prefix, f)
}
// Validate config and returns error on failure
func (cfg *Config) Validate() error {
- if !util.StringsContain(supportedSignatureVersions, cfg.SignatureVersion) {
+ if !slices.Contains(supportedSignatureVersions, cfg.SignatureVersion) {
return errUnsupportedSignatureVersion
}
- if !util.StringsContain(supportedBucketLookupTypes, cfg.BucketLookupType) {
+ if !slices.Contains(supportedBucketLookupTypes, cfg.BucketLookupType) {
return errInvalidBucketLookupType
}
+ if cfg.ListObjectsVersion != "" {
+ if !slices.Contains(supportedListObjectsVersion, cfg.ListObjectsVersion) {
+ return errInvalidListObjectsVersion
+ }
+ }
if err := cfg.SSE.Validate(); err != nil {
return err
@@ -141,7 +155,7 @@ func (cfg *SSEConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
}
func (cfg *SSEConfig) Validate() error {
- if cfg.Type != "" && !util.StringsContain(supportedSSETypes, cfg.Type) {
+ if cfg.Type != "" && !slices.Contains(supportedSSETypes, cfg.Type) {
return errUnsupportedSSEType
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/sse_bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/sse_bucket_client.go
index b88e25e39..1f645ab65 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/sse_bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/sse_bucket_client.go
@@ -51,7 +51,7 @@ func (b *SSEBucketClient) Close() error {
}
// Upload the contents of the reader as an object into the bucket.
-func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader) error {
+func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error {
if sse, err := b.getCustomS3SSEConfig(); err != nil {
return err
} else if sse != nil {
@@ -60,7 +60,11 @@ func (b *SSEBucketClient) Upload(ctx context.Context, name string, r io.Reader)
ctx = s3.ContextWithSSEConfig(ctx, sse)
}
- return b.bucket.Upload(ctx, name, r)
+ return b.bucket.Upload(ctx, name, r, opts...)
+}
+
+func (b *SSEBucketClient) Provider() objstore.ObjProvider {
+ return b.bucket.Provider()
}
// Delete implements objstore.Bucket.
@@ -98,6 +102,14 @@ func (b *SSEBucketClient) getCustomS3SSEConfig() (encrypt.ServerSide, error) {
return sse, nil
}
+func (b *SSEBucketClient) IterWithAttributes(ctx context.Context, dir string, f func(attrs objstore.IterObjectAttributes) error, options ...objstore.IterOption) error {
+ return b.bucket.IterWithAttributes(ctx, dir, f, options...)
+}
+
+func (b *SSEBucketClient) SupportedIterOptions() []objstore.IterOptionType {
+ return b.bucket.SupportedIterOptions()
+}
+
// Iter implements objstore.Bucket.
func (b *SSEBucketClient) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
return b.bucket.Iter(ctx, dir, f, options...)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go
index 1a83cddd0..cd76cabfb 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go
@@ -1,46 +1,43 @@
package swift
import (
+ "net/http"
+
"github.com/go-kit/log"
"github.com/prometheus/common/model"
"github.com/thanos-io/objstore"
"github.com/thanos-io/objstore/providers/swift"
- yaml "gopkg.in/yaml.v2"
)
// NewBucketClient creates a new Swift bucket client
-func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) {
+func NewBucketClient(cfg Config, hedgedRoundTripper func(rt http.RoundTripper) http.RoundTripper, _ string, logger log.Logger) (objstore.Bucket, error) {
bucketConfig := swift.Config{
- AuthVersion: cfg.AuthVersion,
- AuthUrl: cfg.AuthURL,
- Username: cfg.Username,
- UserDomainName: cfg.UserDomainName,
- UserDomainID: cfg.UserDomainID,
- UserId: cfg.UserID,
- Password: cfg.Password,
- DomainId: cfg.DomainID,
- DomainName: cfg.DomainName,
- ProjectID: cfg.ProjectID,
- ProjectName: cfg.ProjectName,
- ProjectDomainID: cfg.ProjectDomainID,
- ProjectDomainName: cfg.ProjectDomainName,
- RegionName: cfg.RegionName,
- ContainerName: cfg.ContainerName,
- Retries: cfg.MaxRetries,
- ConnectTimeout: model.Duration(cfg.ConnectTimeout),
- Timeout: model.Duration(cfg.RequestTimeout),
+ AuthVersion: cfg.AuthVersion,
+ AuthUrl: cfg.AuthURL,
+ ApplicationCredentialID: cfg.ApplicationCredentialID,
+ ApplicationCredentialName: cfg.ApplicationCredentialName,
+ ApplicationCredentialSecret: cfg.ApplicationCredentialSecret,
+ Username: cfg.Username,
+ UserDomainName: cfg.UserDomainName,
+ UserDomainID: cfg.UserDomainID,
+ UserId: cfg.UserID,
+ Password: cfg.Password,
+ DomainId: cfg.DomainID,
+ DomainName: cfg.DomainName,
+ ProjectID: cfg.ProjectID,
+ ProjectName: cfg.ProjectName,
+ ProjectDomainID: cfg.ProjectDomainID,
+ ProjectDomainName: cfg.ProjectDomainName,
+ RegionName: cfg.RegionName,
+ ContainerName: cfg.ContainerName,
+ Retries: cfg.MaxRetries,
+ ConnectTimeout: model.Duration(cfg.ConnectTimeout),
+ Timeout: model.Duration(cfg.RequestTimeout),
// Hard-coded defaults.
ChunkSize: swift.DefaultConfig.ChunkSize,
UseDynamicLargeObjects: false,
}
- // Thanos currently doesn't support passing the config as is, but expects a YAML,
- // so we're going to serialize it.
- serialized, err := yaml.Marshal(bucketConfig)
- if err != nil {
- return nil, err
- }
-
- return swift.NewContainer(logger, serialized)
+ return swift.NewContainerFromConfig(logger, &bucketConfig, false, hedgedRoundTripper)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go
index 783621f88..110da5030 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go
@@ -7,24 +7,27 @@ import (
// Config holds the config options for Swift backend
type Config struct {
- AuthVersion int `yaml:"auth_version"`
- AuthURL string `yaml:"auth_url"`
- Username string `yaml:"username"`
- UserDomainName string `yaml:"user_domain_name"`
- UserDomainID string `yaml:"user_domain_id"`
- UserID string `yaml:"user_id"`
- Password string `yaml:"password"`
- DomainID string `yaml:"domain_id"`
- DomainName string `yaml:"domain_name"`
- ProjectID string `yaml:"project_id"`
- ProjectName string `yaml:"project_name"`
- ProjectDomainID string `yaml:"project_domain_id"`
- ProjectDomainName string `yaml:"project_domain_name"`
- RegionName string `yaml:"region_name"`
- ContainerName string `yaml:"container_name"`
- MaxRetries int `yaml:"max_retries"`
- ConnectTimeout time.Duration `yaml:"connect_timeout"`
- RequestTimeout time.Duration `yaml:"request_timeout"`
+ AuthVersion int `yaml:"auth_version"`
+ AuthURL string `yaml:"auth_url"`
+ ApplicationCredentialID string `yaml:"application_credential_id"`
+ ApplicationCredentialName string `yaml:"application_credential_name"`
+ ApplicationCredentialSecret string `yaml:"application_credential_secret"`
+ Username string `yaml:"username"`
+ UserDomainName string `yaml:"user_domain_name"`
+ UserDomainID string `yaml:"user_domain_id"`
+ UserID string `yaml:"user_id"`
+ Password string `yaml:"password"`
+ DomainID string `yaml:"domain_id"`
+ DomainName string `yaml:"domain_name"`
+ ProjectID string `yaml:"project_id"`
+ ProjectName string `yaml:"project_name"`
+ ProjectDomainID string `yaml:"project_domain_id"`
+ ProjectDomainName string `yaml:"project_domain_name"`
+ RegionName string `yaml:"region_name"`
+ ContainerName string `yaml:"container_name"`
+ MaxRetries int `yaml:"max_retries"`
+ ConnectTimeout time.Duration `yaml:"connect_timeout"`
+ RequestTimeout time.Duration `yaml:"request_timeout"`
}
// RegisterFlags registers the flags for Swift storage
@@ -47,6 +50,9 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
f.StringVar(&cfg.ProjectName, prefix+"swift.project-name", "", "OpenStack Swift project name (v2,v3 auth only).")
f.StringVar(&cfg.ProjectDomainID, prefix+"swift.project-domain-id", "", "ID of the OpenStack Swift project's domain (v3 auth only), only needed if it differs the from user domain.")
f.StringVar(&cfg.ProjectDomainName, prefix+"swift.project-domain-name", "", "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.")
+ f.StringVar(&cfg.ApplicationCredentialID, prefix+"swift.application-credential-id", "", "OpenStack Swift application credential ID.")
+ f.StringVar(&cfg.ApplicationCredentialName, prefix+"swift.application-credential-name", "", "OpenStack Swift application credential name.")
+ f.StringVar(&cfg.ApplicationCredentialSecret, prefix+"swift.application-credential-secret", "", "OpenStack Swift application credential secret.")
f.StringVar(&cfg.RegionName, prefix+"swift.region-name", "", "OpenStack Swift Region to use (v2,v3 auth only).")
f.StringVar(&cfg.ContainerName, prefix+"swift.container-name", "", "Name of the OpenStack Swift container to put chunks in.")
f.IntVar(&cfg.MaxRetries, prefix+"swift.max-retries", 3, "Max retries on requests error.")
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/parquet/converter_marker.go b/vendor/github.com/cortexproject/cortex/pkg/storage/parquet/converter_marker.go
new file mode 100644
index 000000000..f53b4f537
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/parquet/converter_marker.go
@@ -0,0 +1,66 @@
+package parquet
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "path"
+
+ "github.com/efficientgo/core/errors"
+ "github.com/go-kit/log"
+ "github.com/oklog/ulid/v2"
+ "github.com/thanos-io/objstore"
+ "github.com/thanos-io/thanos/pkg/runutil"
+
+ "github.com/cortexproject/cortex/pkg/storage/tsdb"
+)
+
+const (
+ ConverterMarkerPrefix = "parquet-markers"
+ ConverterMarkerFileName = "parquet-converter-mark.json"
+ CurrentVersion = 1
+)
+
+type ConverterMark struct {
+ Version int `json:"version"`
+}
+
+func ReadConverterMark(ctx context.Context, id ulid.ULID, userBkt objstore.InstrumentedBucket, logger log.Logger) (*ConverterMark, error) {
+ markerPath := path.Join(id.String(), ConverterMarkerFileName)
+ reader, err := userBkt.WithExpectedErrs(tsdb.IsOneOfTheExpectedErrors(userBkt.IsAccessDeniedErr, userBkt.IsObjNotFoundErr)).Get(ctx, markerPath)
+ if err != nil {
+ if userBkt.IsObjNotFoundErr(err) || userBkt.IsAccessDeniedErr(err) {
+ return &ConverterMark{}, nil
+ }
+
+ return &ConverterMark{}, err
+ }
+ defer runutil.CloseWithLogOnErr(logger, reader, "close parquet converter marker file reader")
+
+ metaContent, err := io.ReadAll(reader)
+ if err != nil {
+ return nil, errors.Wrapf(err, "read file: %s", ConverterMarkerFileName)
+ }
+
+ marker := ConverterMark{}
+ err = json.Unmarshal(metaContent, &marker)
+ return &marker, err
+}
+
+func WriteConverterMark(ctx context.Context, id ulid.ULID, userBkt objstore.Bucket) error {
+ marker := ConverterMark{
+ Version: CurrentVersion,
+ }
+ markerPath := path.Join(id.String(), ConverterMarkerFileName)
+ b, err := json.Marshal(marker)
+ if err != nil {
+ return err
+ }
+ return userBkt.Upload(ctx, markerPath, bytes.NewReader(b))
+}
+
+// ConverterMarkMeta is used in Bucket Index. It might not be the same as ConverterMark.
+type ConverterMarkMeta struct {
+ Version int `json:"version"`
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/parquet/util.go b/vendor/github.com/cortexproject/cortex/pkg/storage/parquet/util.go
new file mode 100644
index 000000000..1e08ab301
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/parquet/util.go
@@ -0,0 +1,38 @@
+package parquet
+
+func ShouldConvertBlockToParquet(mint, maxt int64, timeRanges []int64) bool {
+ // We assume timeRanges[0] is the TSDB block duration (2h), and we don't convert them.
+ return getBlockTimeRange(mint, maxt, timeRanges) > timeRanges[0]
+}
+
+func getBlockTimeRange(mint, maxt int64, timeRanges []int64) int64 {
+ timeRange := int64(0)
+ // fallback logic to guess block time range based
+ // on MaxTime and MinTime
+ blockRange := maxt - mint
+ for _, tr := range timeRanges {
+ rangeStart := getRangeStart(mint, tr)
+ rangeEnd := rangeStart + tr
+ if tr >= blockRange && rangeEnd >= maxt {
+ timeRange = tr
+ break
+ }
+ }
+	// If the block range is too big and cannot fit any configured time range, just fall back to the final time range.
+ // This might not be accurate but should be good enough to decide if we want to convert the block to Parquet.
+ // For this to work, at least 2 block ranges are required.
+ if len(timeRanges) > 0 && timeRange == int64(0) {
+ return timeRanges[len(timeRanges)-1]
+ }
+ return timeRange
+}
+
+func getRangeStart(mint, tr int64) int64 {
+ // Compute start of aligned time range of size tr closest to the current block's start.
+ // This code has been copied from TSDB.
+ if mint >= 0 {
+ return tr * (mint / tr)
+ }
+
+ return tr * ((mint - tr + 1) / tr)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go
index 5a9d3470f..f942b7009 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/block_ids_fetcher.go
@@ -5,7 +5,7 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/oklog/ulid"
+ "github.com/oklog/ulid/v2"
"github.com/pkg/errors"
"github.com/thanos-io/objstore"
"github.com/thanos-io/thanos/pkg/block"
@@ -33,20 +33,20 @@ func NewBlockLister(logger log.Logger, bkt objstore.Bucket, userID string, cfgPr
}
}
-func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<- ulid.ULID) (partialBlocks map[ulid.ULID]bool, err error) {
+func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, activeBlocks chan<- block.ActiveBlockFetchData) (partialBlocks map[ulid.ULID]bool, err error) {
// Fetch the bucket index.
idx, err := ReadIndex(ctx, f.bkt, f.userID, f.cfgProvider, f.logger)
if errors.Is(err, ErrIndexNotFound) {
// This is a legit case happening when the first blocks of a tenant have recently been uploaded by ingesters
// and their bucket index has not been created yet.
// Fallback to BaseBlockIDsFetcher.
- return f.baseLister.GetActiveAndPartialBlockIDs(ctx, ch)
+ return f.baseLister.GetActiveAndPartialBlockIDs(ctx, activeBlocks)
}
if errors.Is(err, ErrIndexCorrupted) {
// In case a single tenant bucket index is corrupted, we want to return empty active blocks and parital blocks, so skipping this compaction cycle
level.Error(f.logger).Log("msg", "corrupted bucket index found", "user", f.userID, "err", err)
// Fallback to BaseBlockIDsFetcher.
- return f.baseLister.GetActiveAndPartialBlockIDs(ctx, ch)
+ return f.baseLister.GetActiveAndPartialBlockIDs(ctx, activeBlocks)
}
if errors.Is(err, bucket.ErrCustomerManagedKeyAccessDenied) {
@@ -73,7 +73,7 @@ func (f *BlockLister) GetActiveAndPartialBlockIDs(ctx context.Context, ch chan<-
select {
case <-ctx.Done():
return nil, ctx.Err()
- case ch <- b.ID:
+ case activeBlocks <- block.ActiveBlockFetchData{ULID: b.ID}:
}
}
return nil, nil
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go
index 8ef150155..75a5ba3f4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go
@@ -3,14 +3,16 @@ package bucketindex
import (
"fmt"
"path/filepath"
+ "slices"
"strings"
"time"
- "github.com/oklog/ulid"
+ "github.com/oklog/ulid/v2"
"github.com/prometheus/prometheus/tsdb"
"github.com/thanos-io/thanos/pkg/block"
"github.com/thanos-io/thanos/pkg/block/metadata"
+ "github.com/cortexproject/cortex/pkg/storage/parquet"
cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
"github.com/cortexproject/cortex/pkg/util"
)
@@ -51,19 +53,46 @@ func (idx *Index) GetUpdatedAt() time.Time {
func (idx *Index) RemoveBlock(id ulid.ULID) {
for i := 0; i < len(idx.Blocks); i++ {
if idx.Blocks[i].ID == id {
- idx.Blocks = append(idx.Blocks[:i], idx.Blocks[i+1:]...)
+ idx.Blocks = slices.Delete(idx.Blocks, i, i+1)
break
}
}
for i := 0; i < len(idx.BlockDeletionMarks); i++ {
if idx.BlockDeletionMarks[i].ID == id {
- idx.BlockDeletionMarks = append(idx.BlockDeletionMarks[:i], idx.BlockDeletionMarks[i+1:]...)
+ idx.BlockDeletionMarks = slices.Delete(idx.BlockDeletionMarks, i, i+1)
break
}
}
}
+func (idx *Index) IsEmpty() bool {
+ return len(idx.Blocks) == 0 && len(idx.BlockDeletionMarks) == 0
+}
+
+// ParquetBlocks returns all blocks that are available in Parquet format.
+func (idx *Index) ParquetBlocks() []*Block {
+ blocks := make([]*Block, 0, len(idx.Blocks))
+ for _, b := range idx.Blocks {
+ if b.Parquet != nil {
+ blocks = append(blocks, b)
+ }
+ }
+ return blocks
+}
+
+// NonParquetBlocks returns all blocks that are not available in Parquet format.
+func (idx *Index) NonParquetBlocks() []*Block {
+ blocks := make([]*Block, 0, len(idx.Blocks))
+ for _, b := range idx.Blocks {
+ if b.Parquet != nil {
+ continue
+ }
+ blocks = append(blocks, b)
+ }
+ return blocks
+}
+
// Block holds the information about a block in the index.
type Block struct {
// Block ID.
@@ -87,6 +116,9 @@ type Block struct {
// UploadedAt is a unix timestamp (seconds precision) of when the block has been completed to be uploaded
// to the storage.
UploadedAt int64 `json:"uploaded_at"`
+
+ // Parquet metadata if exists. If doesn't exist it will be nil.
+ Parquet *parquet.ConverterMarkMeta `json:"parquet,omitempty"`
}
// Within returns whether the block contains samples within the provided range.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go
index e47aaf057..922bd2f59 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go
@@ -7,7 +7,7 @@ import (
"path/filepath"
"strings"
- "github.com/oklog/ulid"
+ "github.com/oklog/ulid/v2"
"github.com/pkg/errors"
tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
"github.com/thanos-io/objstore"
@@ -15,6 +15,7 @@ import (
"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/cortexproject/cortex/pkg/storage/bucket"
+ "github.com/cortexproject/cortex/pkg/storage/parquet"
)
const (
@@ -23,8 +24,9 @@ const (
var (
MarkersMap = map[string]func(ulid.ULID) string{
- metadata.DeletionMarkFilename: BlockDeletionMarkFilepath,
- metadata.NoCompactMarkFilename: NoCompactMarkFilenameMarkFilepath,
+ metadata.DeletionMarkFilename: BlockDeletionMarkFilepath,
+ metadata.NoCompactMarkFilename: NoCompactMarkFilenameMarkFilepath,
+ parquet.ConverterMarkerFileName: ConverterMarkFilePath,
}
)
@@ -40,6 +42,10 @@ func NoCompactMarkFilenameMarkFilepath(blockID ulid.ULID) string {
return fmt.Sprintf("%s/%s-%s", MarkersPathname, blockID.String(), metadata.NoCompactMarkFilename)
}
+func ConverterMarkFilePath(blockID ulid.ULID) string {
+ return fmt.Sprintf("%s/%s-%s", parquet.ConverterMarkerPrefix, blockID.String(), parquet.ConverterMarkerFileName)
+}
+
// IsBlockDeletionMarkFilename returns whether the input filename matches the expected pattern
// of block deletion markers stored in the markers location.
func IsBlockDeletionMarkFilename(name string) (ulid.ULID, bool) {
@@ -76,6 +82,24 @@ func IsBlockNoCompactMarkFilename(name string) (ulid.ULID, bool) {
return id, err == nil
}
+// IsBlockParquetConverterMarkFilename returns whether the input filename matches the expected pattern
+// of block parquet converter markers stored in the markers location.
+func IsBlockParquetConverterMarkFilename(name string) (ulid.ULID, bool) {
+ parts := strings.SplitN(name, "-", 2)
+ if len(parts) != 2 {
+ return ulid.ULID{}, false
+ }
+
+ // Ensure the 2nd part matches the parquet converter mark filename.
+ if parts[1] != parquet.ConverterMarkerFileName {
+ return ulid.ULID{}, false
+ }
+
+ // Ensure the 1st part is a valid block ID.
+ id, err := ulid.Parse(filepath.Base(parts[0]))
+ return id, err == nil
+}
+
// MigrateBlockDeletionMarksToGlobalLocation list all tenant's blocks and, for each of them, look for
// a deletion mark in the block location. Found deletion marks are copied to the global markers location.
// The migration continues on error and returns once all blocks have been checked.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go
index c0dbb6e76..1773db2a6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go
@@ -24,11 +24,15 @@ func BucketWithGlobalMarkers(b objstore.InstrumentedBucket) objstore.Instrumente
}
}
+func (b *globalMarkersBucket) Provider() objstore.ObjProvider {
+ return b.parent.Provider()
+}
+
// Upload implements objstore.Bucket.
-func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Reader) error {
+func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Reader, opts ...objstore.ObjectUploadOption) error {
globalMarkPath, ok := b.isMark(name)
if !ok {
- return b.parent.Upload(ctx, name, r)
+ return b.parent.Upload(ctx, name, r, opts...)
}
// Read the marker.
@@ -38,12 +42,12 @@ func (b *globalMarkersBucket) Upload(ctx context.Context, name string, r io.Read
}
// Upload it to the global marker's location.
- if err := b.parent.Upload(ctx, globalMarkPath, bytes.NewReader(body)); err != nil {
+ if err := b.parent.Upload(ctx, globalMarkPath, bytes.NewReader(body), opts...); err != nil {
return err
}
// Upload it to the original location too.
- return b.parent.Upload(ctx, name, bytes.NewReader(body))
+ return b.parent.Upload(ctx, name, bytes.NewReader(body), opts...)
}
// Delete implements objstore.Bucket.
@@ -75,6 +79,16 @@ func (b *globalMarkersBucket) Close() error {
return b.parent.Close()
}
+// IterWithAttributes implements objstore.Bucket.
+func (b *globalMarkersBucket) IterWithAttributes(ctx context.Context, dir string, f func(attrs objstore.IterObjectAttributes) error, options ...objstore.IterOption) error {
+ return b.parent.IterWithAttributes(ctx, dir, f, options...)
+}
+
+// SupportedIterOptions implements objstore.Bucket.
+func (b *globalMarkersBucket) SupportedIterOptions() []objstore.IterOptionType {
+ return b.parent.SupportedIterOptions()
+}
+
// Iter implements objstore.Bucket.
func (b *globalMarkersBucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
return b.parent.Iter(ctx, dir, f, options...)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/updater.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/updater.go
index cee3e6e3b..fd7efcba4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/updater.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/updater.go
@@ -9,12 +9,13 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/oklog/ulid"
+ "github.com/oklog/ulid/v2"
"github.com/pkg/errors"
"github.com/thanos-io/objstore"
"github.com/thanos-io/thanos/pkg/block"
"github.com/thanos-io/thanos/pkg/block/metadata"
+ "github.com/cortexproject/cortex/pkg/storage/parquet"
"github.com/cortexproject/cortex/pkg/storage/tsdb"
"github.com/cortexproject/cortex/pkg/storage/bucket"
@@ -33,8 +34,9 @@ var (
// Updater is responsible to generate an update in-memory bucket index.
type Updater struct {
- bkt objstore.InstrumentedBucket
- logger log.Logger
+ bkt objstore.InstrumentedBucket
+ logger log.Logger
+ parquetEnabled bool
}
func NewUpdater(bkt objstore.Bucket, userID string, cfgProvider bucket.TenantConfigProvider, logger log.Logger) *Updater {
@@ -44,11 +46,18 @@ func NewUpdater(bkt objstore.Bucket, userID string, cfgProvider bucket.TenantCon
}
}
+func (w *Updater) EnableParquet() *Updater {
+ w.parquetEnabled = true
+ return w
+}
+
// UpdateIndex generates the bucket index and returns it, without storing it to the storage.
// If the old index is not passed in input, then the bucket index will be generated from scratch.
func (w *Updater) UpdateIndex(ctx context.Context, old *Index) (*Index, map[ulid.ULID]error, int64, error) {
- var oldBlocks []*Block
- var oldBlockDeletionMarks []*BlockDeletionMark
+ var (
+ oldBlocks []*Block
+ oldBlockDeletionMarks []*BlockDeletionMark
+ )
// Read the old index, if provided.
if old != nil {
@@ -65,6 +74,11 @@ func (w *Updater) UpdateIndex(ctx context.Context, old *Index) (*Index, map[ulid
if err != nil {
return nil, nil, 0, err
}
+ if w.parquetEnabled {
+ if err := w.updateParquetBlocks(ctx, blocks); err != nil {
+ return nil, nil, 0, err
+ }
+ }
return &Index{
Version: IndexVersion1,
@@ -180,6 +194,23 @@ func (w *Updater) updateBlockIndexEntry(ctx context.Context, id ulid.ULID) (*Blo
return block, nil
}
+func (w *Updater) updateParquetBlockIndexEntry(ctx context.Context, id ulid.ULID, block *Block) error {
+ marker, err := parquet.ReadConverterMark(ctx, id, w.bkt, w.logger)
+ if err != nil {
+ return errors.Wrapf(err, "read parquet converter marker file: %v", path.Join(id.String(), parquet.ConverterMarkerFileName))
+ }
+ // Could be not found or access denied.
+ // Just treat it as no parquet block available.
+ if marker == nil || marker.Version == 0 {
+ return nil
+ }
+
+ block.Parquet = &parquet.ConverterMarkMeta{
+ Version: marker.Version,
+ }
+ return nil
+}
+
func (w *Updater) updateBlockMarks(ctx context.Context, old []*BlockDeletionMark) ([]*BlockDeletionMark, map[ulid.ULID]struct{}, int64, error) {
out := make([]*BlockDeletionMark, 0, len(old))
deletedBlocks := map[ulid.ULID]struct{}{}
@@ -249,3 +280,31 @@ func (w *Updater) updateBlockDeletionMarkIndexEntry(ctx context.Context, id ulid
return BlockDeletionMarkFromThanosMarker(&m), nil
}
+
+func (w *Updater) updateParquetBlocks(ctx context.Context, blocks []*Block) error {
+ discoveredParquetBlocks := map[ulid.ULID]struct{}{}
+
+ // Find all parquet markers in the storage.
+ if err := w.bkt.Iter(ctx, parquet.ConverterMarkerPrefix+"/", func(name string) error {
+ if blockID, ok := IsBlockParquetConverterMarkFilename(path.Base(name)); ok {
+ discoveredParquetBlocks[blockID] = struct{}{}
+ }
+
+ return nil
+ }); err != nil {
+ return errors.Wrap(err, "list block parquet converter marks")
+ }
+
+ // Check if parquet mark has been uploaded or deleted for the block.
+ for _, m := range blocks {
+ if _, ok := discoveredParquetBlocks[m.ID]; ok {
+ if err := w.updateParquetBlockIndexEntry(ctx, m.ID, m); err != nil {
+ return err
+ }
+ } else if m.Parquet != nil {
+ // Converter marker removed. Reset parquet field.
+ m.Parquet = nil
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/cached_chunks_querier.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/cached_chunks_querier.go
new file mode 100644
index 000000000..ab3b11c4f
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/cached_chunks_querier.go
@@ -0,0 +1,128 @@
+package tsdb
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/oklog/ulid/v2"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/storage"
+ prom_tsdb "github.com/prometheus/prometheus/tsdb"
+ tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
+ "github.com/prometheus/prometheus/tsdb/tombstones"
+ "github.com/prometheus/prometheus/util/annotations"
+)
+
+/*
+ This file is basically a copy from https://github.com/prometheus/prometheus/blob/e2e01c1cffbfc4f26f5e9fe6138af87d7ff16122/tsdb/querier.go
+ with the difference that the PostingsForMatchers function is called from the Postings Cache
+*/
+
+type blockBaseQuerier struct {
+ blockID ulid.ULID
+ index prom_tsdb.IndexReader
+ chunks prom_tsdb.ChunkReader
+ tombstones tombstones.Reader
+
+ closed bool
+
+ mint, maxt int64
+}
+
+func newBlockBaseQuerier(b prom_tsdb.BlockReader, mint, maxt int64) (*blockBaseQuerier, error) {
+ indexr, err := b.Index()
+ if err != nil {
+ return nil, fmt.Errorf("open index reader: %w", err)
+ }
+ chunkr, err := b.Chunks()
+ if err != nil {
+ indexr.Close()
+ return nil, fmt.Errorf("open chunk reader: %w", err)
+ }
+ tombsr, err := b.Tombstones()
+ if err != nil {
+ indexr.Close()
+ chunkr.Close()
+ return nil, fmt.Errorf("open tombstone reader: %w", err)
+ }
+
+ if tombsr == nil {
+ tombsr = tombstones.NewMemTombstones()
+ }
+ return &blockBaseQuerier{
+ blockID: b.Meta().ULID,
+ mint: mint,
+ maxt: maxt,
+ index: indexr,
+ chunks: chunkr,
+ tombstones: tombsr,
+ }, nil
+}
+
+func (q *blockBaseQuerier) LabelValues(ctx context.Context, name string, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ res, err := q.index.SortedLabelValues(ctx, name, hints, matchers...)
+ return res, nil, err
+}
+
+func (q *blockBaseQuerier) LabelNames(ctx context.Context, hints *storage.LabelHints, matchers ...*labels.Matcher) ([]string, annotations.Annotations, error) {
+ res, err := q.index.LabelNames(ctx, matchers...)
+ return res, nil, err
+}
+
+func (q *blockBaseQuerier) Close() error {
+ if q.closed {
+ return errors.New("block querier already closed")
+ }
+
+ errs := tsdb_errors.NewMulti(
+ q.index.Close(),
+ q.chunks.Close(),
+ q.tombstones.Close(),
+ )
+ q.closed = true
+ return errs.Err()
+}
+
+type cachedBlockChunkQuerier struct {
+ *blockBaseQuerier
+
+ cache ExpandedPostingsCache
+}
+
+func NewCachedBlockChunkQuerier(cache ExpandedPostingsCache, b prom_tsdb.BlockReader, mint, maxt int64) (storage.ChunkQuerier, error) {
+ q, err := newBlockBaseQuerier(b, mint, maxt)
+ if err != nil {
+ return nil, err
+ }
+ return &cachedBlockChunkQuerier{blockBaseQuerier: q, cache: cache}, nil
+}
+
+func (q *cachedBlockChunkQuerier) Select(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms ...*labels.Matcher) storage.ChunkSeriesSet {
+ return selectChunkSeriesSet(ctx, sortSeries, hints, ms, q.blockID, q.index, q.chunks, q.tombstones, q.mint, q.maxt, q.cache)
+}
+
+func selectChunkSeriesSet(ctx context.Context, sortSeries bool, hints *storage.SelectHints, ms []*labels.Matcher,
+ blockID ulid.ULID, index prom_tsdb.IndexReader, chunks prom_tsdb.ChunkReader, tombstones tombstones.Reader, mint, maxt int64,
+ cache ExpandedPostingsCache,
+) storage.ChunkSeriesSet {
+ disableTrimming := false
+ sharded := hints != nil && hints.ShardCount > 0
+
+ if hints != nil {
+ mint = hints.Start
+ maxt = hints.End
+ disableTrimming = hints.DisableTrimming
+ }
+ p, err := cache.PostingsForMatchers(ctx, blockID, index, ms...)
+ if err != nil {
+ return storage.ErrChunkSeriesSet(err)
+ }
+ if sharded {
+ p = index.ShardedPostings(p, hints.ShardIndex, hints.ShardCount)
+ }
+ if sortSeries {
+ p = index.SortedPostings(p)
+ }
+ return prom_tsdb.NewBlockChunkSeriesSet(blockID, index, chunks, tombstones, p, mint, maxt, disableTrimming)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go
index cbd9efee5..404438033 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go
@@ -5,12 +5,14 @@ import (
"fmt"
"path/filepath"
"regexp"
+ "slices"
"strings"
"time"
+ "github.com/alecthomas/units"
"github.com/go-kit/log"
"github.com/golang/snappy"
- "github.com/oklog/ulid"
+ "github.com/oklog/ulid/v2"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/thanos-io/objstore"
@@ -18,36 +20,75 @@ import (
"github.com/thanos-io/thanos/pkg/block/metadata"
"github.com/thanos-io/thanos/pkg/cache"
"github.com/thanos-io/thanos/pkg/cacheutil"
+ "github.com/thanos-io/thanos/pkg/model"
storecache "github.com/thanos-io/thanos/pkg/store/cache"
)
+var (
+ supportedBucketCacheBackends = []string{CacheBackendInMemory, CacheBackendMemcached, CacheBackendRedis}
+
+ errUnsupportedBucketCacheBackend = errors.New("unsupported cache backend")
+ errDuplicatedBucketCacheBackend = errors.New("duplicated cache backend")
+)
+
const (
CacheBackendMemcached = "memcached"
CacheBackendRedis = "redis"
+ CacheBackendInMemory = "inmemory"
)
-type CacheBackend struct {
- Backend string `yaml:"backend"`
- Memcached MemcachedClientConfig `yaml:"memcached"`
- Redis RedisClientConfig `yaml:"redis"`
+type BucketCacheBackend struct {
+ Backend string `yaml:"backend"`
+ InMemory InMemoryBucketCacheConfig `yaml:"inmemory"`
+ Memcached MemcachedClientConfig `yaml:"memcached"`
+ Redis RedisClientConfig `yaml:"redis"`
+ MultiLevel MultiLevelBucketCacheConfig `yaml:"multilevel"`
}
// Validate the config.
-func (cfg *CacheBackend) Validate() error {
- switch cfg.Backend {
- case CacheBackendMemcached:
- return cfg.Memcached.Validate()
- case CacheBackendRedis:
- return cfg.Redis.Validate()
- case "":
- default:
- return fmt.Errorf("unsupported cache backend: %s", cfg.Backend)
+func (cfg *BucketCacheBackend) Validate() error {
+ if cfg.Backend == "" {
+ return nil
+ }
+
+ splitBackends := strings.Split(cfg.Backend, ",")
+ configuredBackends := map[string]struct{}{}
+
+ if len(splitBackends) > 1 {
+ if err := cfg.MultiLevel.Validate(); err != nil {
+ return err
+ }
+ }
+
+ for _, backend := range splitBackends {
+ if !slices.Contains(supportedBucketCacheBackends, backend) {
+ return errUnsupportedBucketCacheBackend
+ }
+
+ if _, ok := configuredBackends[backend]; ok {
+ return errDuplicatedBucketCacheBackend
+ }
+
+ switch backend {
+ case CacheBackendMemcached:
+ if err := cfg.Memcached.Validate(); err != nil {
+ return err
+ }
+ case CacheBackendRedis:
+ if err := cfg.Redis.Validate(); err != nil {
+ return err
+ }
+ case CacheBackendInMemory:
+ }
+
+ configuredBackends[backend] = struct{}{}
}
+
return nil
}
type ChunksCacheConfig struct {
- CacheBackend `yaml:",inline"`
+ BucketCacheBackend `yaml:",inline"`
SubrangeSize int64 `yaml:"subrange_size"`
MaxGetRangeRequests int `yaml:"max_get_range_requests"`
@@ -56,42 +97,74 @@ type ChunksCacheConfig struct {
}
func (cfg *ChunksCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
- f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("Backend for chunks cache, if not empty. Supported values: %s.", CacheBackendMemcached))
+ f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("The chunks cache backend type. Single or Multiple cache backend can be provided. "+
+ "Supported values in single cache: %s, %s, %s, and '' (disable). "+
+ "Supported values in multi level cache: a comma-separated list of (%s)", CacheBackendMemcached, CacheBackendRedis, CacheBackendInMemory, strings.Join(supportedBucketCacheBackends, ", ")))
cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.")
cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.")
+ cfg.InMemory.RegisterFlagsWithPrefix(f, prefix+"inmemory.", "chunks")
+ cfg.MultiLevel.RegisterFlagsWithPrefix(f, prefix+"multilevel.")
f.Int64Var(&cfg.SubrangeSize, prefix+"subrange-size", 16000, "Size of each subrange that bucket object is split into for better caching.")
f.IntVar(&cfg.MaxGetRangeRequests, prefix+"max-get-range-requests", 3, "Maximum number of sub-GetRange requests that a single GetRange request can be split into when fetching chunks. Zero or negative value = unlimited number of sub-requests.")
f.DurationVar(&cfg.AttributesTTL, prefix+"attributes-ttl", 168*time.Hour, "TTL for caching object attributes for chunks.")
f.DurationVar(&cfg.SubrangeTTL, prefix+"subrange-ttl", 24*time.Hour, "TTL for caching individual chunks subranges.")
+
+ // In the multi level chunk cache, backfill TTL follows subrange TTL
+ cfg.MultiLevel.BackFillTTL = cfg.SubrangeTTL
}
func (cfg *ChunksCacheConfig) Validate() error {
- return cfg.CacheBackend.Validate()
+ return cfg.BucketCacheBackend.Validate()
}
-type MetadataCacheConfig struct {
- CacheBackend `yaml:",inline"`
+type InMemoryBucketCacheConfig struct {
+ MaxSizeBytes uint64 `yaml:"max_size_bytes"`
+}
+
+func (cfg *InMemoryBucketCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string, item string) {
+ f.Uint64Var(&cfg.MaxSizeBytes, prefix+"max-size-bytes", uint64(1*units.Gibibyte), fmt.Sprintf("Maximum size in bytes of in-memory %s cache used (shared between all tenants).", item))
+}
+
+func (cfg *InMemoryBucketCacheConfig) toInMemoryCacheConfig() cache.InMemoryCacheConfig {
+ maxCacheSize := model.Bytes(cfg.MaxSizeBytes)
+
+ // Calculate the max item size.
+ maxItemSize := min(defaultMaxItemSize, maxCacheSize)
+
+ return cache.InMemoryCacheConfig{
+ MaxSize: maxCacheSize,
+ MaxItemSize: maxItemSize,
+ }
+}
- TenantsListTTL time.Duration `yaml:"tenants_list_ttl"`
- TenantBlocksListTTL time.Duration `yaml:"tenant_blocks_list_ttl"`
- ChunksListTTL time.Duration `yaml:"chunks_list_ttl"`
- MetafileExistsTTL time.Duration `yaml:"metafile_exists_ttl"`
- MetafileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"`
- MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"`
- MetafileMaxSize int `yaml:"metafile_max_size_bytes"`
- MetafileAttributesTTL time.Duration `yaml:"metafile_attributes_ttl"`
- BlockIndexAttributesTTL time.Duration `yaml:"block_index_attributes_ttl"`
- BucketIndexContentTTL time.Duration `yaml:"bucket_index_content_ttl"`
- BucketIndexMaxSize int `yaml:"bucket_index_max_size_bytes"`
+type MetadataCacheConfig struct {
+ BucketCacheBackend `yaml:",inline"`
+
+ TenantsListTTL time.Duration `yaml:"tenants_list_ttl"`
+ TenantBlocksListTTL time.Duration `yaml:"tenant_blocks_list_ttl"`
+ ChunksListTTL time.Duration `yaml:"chunks_list_ttl"`
+ MetafileExistsTTL time.Duration `yaml:"metafile_exists_ttl"`
+ MetafileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"`
+ MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"`
+ MetafileMaxSize int `yaml:"metafile_max_size_bytes"`
+ MetafileAttributesTTL time.Duration `yaml:"metafile_attributes_ttl"`
+ BlockIndexAttributesTTL time.Duration `yaml:"block_index_attributes_ttl"`
+ BucketIndexContentTTL time.Duration `yaml:"bucket_index_content_ttl"`
+ BucketIndexMaxSize int `yaml:"bucket_index_max_size_bytes"`
+ PartitionedGroupsListTTL time.Duration `yaml:"partitioned_groups_list_ttl"`
}
func (cfg *MetadataCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
- f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("Backend for metadata cache, if not empty. Supported values: %s.", CacheBackendMemcached))
+ f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("The metadata cache backend type. Single or Multiple cache backend can be provided. "+
+ "Supported values in single cache: %s, %s, %s, and '' (disable). "+
+ "Supported values in multi level cache: a comma-separated list of (%s)", CacheBackendMemcached, CacheBackendRedis, CacheBackendInMemory, strings.Join(supportedBucketCacheBackends, ", ")))
cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.")
cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.")
+ cfg.InMemory.RegisterFlagsWithPrefix(f, prefix+"inmemory.", "metadata")
+ cfg.MultiLevel.RegisterFlagsWithPrefix(f, prefix+"multilevel.")
f.DurationVar(&cfg.TenantsListTTL, prefix+"tenants-list-ttl", 15*time.Minute, "How long to cache list of tenants in the bucket.")
f.DurationVar(&cfg.TenantBlocksListTTL, prefix+"tenant-blocks-list-ttl", 5*time.Minute, "How long to cache list of blocks for each tenant.")
@@ -104,17 +177,50 @@ func (cfg *MetadataCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix
f.DurationVar(&cfg.BlockIndexAttributesTTL, prefix+"block-index-attributes-ttl", 168*time.Hour, "How long to cache attributes of the block index.")
f.DurationVar(&cfg.BucketIndexContentTTL, prefix+"bucket-index-content-ttl", 5*time.Minute, "How long to cache content of the bucket index.")
f.IntVar(&cfg.BucketIndexMaxSize, prefix+"bucket-index-max-size-bytes", 1*1024*1024, "Maximum size of bucket index content to cache in bytes. Caching will be skipped if the content exceeds this size. This is useful to avoid network round trip for large content if the configured caching backend has an hard limit on cached items size (in this case, you should set this limit to the same limit in the caching backend).")
+ f.DurationVar(&cfg.PartitionedGroupsListTTL, prefix+"partitioned-groups-list-ttl", 0, "How long to cache list of partitioned groups for a user. 0 disables caching.")
}
func (cfg *MetadataCacheConfig) Validate() error {
- return cfg.CacheBackend.Validate()
+ return cfg.BucketCacheBackend.Validate()
+}
+
+type ParquetLabelsCacheConfig struct {
+ BucketCacheBackend `yaml:",inline"`
+
+ SubrangeSize int64 `yaml:"subrange_size"`
+ MaxGetRangeRequests int `yaml:"max_get_range_requests"`
+ AttributesTTL time.Duration `yaml:"attributes_ttl"`
+ SubrangeTTL time.Duration `yaml:"subrange_ttl"`
+}
+
+func (cfg *ParquetLabelsCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
+ f.StringVar(&cfg.Backend, prefix+"backend", "", fmt.Sprintf("The parquet labels cache backend type. Single or Multiple cache backend can be provided. "+
+ "Supported values in single cache: %s, %s, %s, and '' (disable). "+
+ "Supported values in multi level cache: a comma-separated list of (%s)", CacheBackendMemcached, CacheBackendRedis, CacheBackendInMemory, strings.Join(supportedBucketCacheBackends, ", ")))
+
+ cfg.Memcached.RegisterFlagsWithPrefix(f, prefix+"memcached.")
+ cfg.Redis.RegisterFlagsWithPrefix(f, prefix+"redis.")
+ cfg.InMemory.RegisterFlagsWithPrefix(f, prefix+"inmemory.", "parquet-labels")
+ cfg.MultiLevel.RegisterFlagsWithPrefix(f, prefix+"multilevel.")
+
+ f.Int64Var(&cfg.SubrangeSize, prefix+"subrange-size", 16000, "Size of each subrange that bucket object is split into for better caching.")
+ f.IntVar(&cfg.MaxGetRangeRequests, prefix+"max-get-range-requests", 3, "Maximum number of sub-GetRange requests that a single GetRange request can be split into when fetching parquet labels file. Zero or negative value = unlimited number of sub-requests.")
+ f.DurationVar(&cfg.AttributesTTL, prefix+"attributes-ttl", 168*time.Hour, "TTL for caching object attributes for parquet labels file.")
+ f.DurationVar(&cfg.SubrangeTTL, prefix+"subrange-ttl", 24*time.Hour, "TTL for caching individual subranges.")
+
+ // In the multi level parquet labels cache, backfill TTL follows subrange TTL
+ cfg.MultiLevel.BackFillTTL = cfg.SubrangeTTL
}
-func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig MetadataCacheConfig, matchers Matchers, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer) (objstore.InstrumentedBucket, error) {
+func (cfg *ParquetLabelsCacheConfig) Validate() error {
+ return cfg.BucketCacheBackend.Validate()
+}
+
+func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig MetadataCacheConfig, parquetLabelsConfig ParquetLabelsCacheConfig, matchers Matchers, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer) (objstore.InstrumentedBucket, error) {
cfg := cache.NewCachingBucketConfig()
cachingConfigured := false
- chunksCache, err := createCache("chunks-cache", &chunksConfig.CacheBackend, logger, reg)
+ chunksCache, err := createBucketCache("chunks-cache", &chunksConfig.BucketCacheBackend, logger, reg)
if err != nil {
return nil, errors.Wrapf(err, "chunks-cache")
}
@@ -122,9 +228,10 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata
cachingConfigured = true
chunksCache = cache.NewTracingCache(chunksCache)
cfg.CacheGetRange("chunks", chunksCache, matchers.GetChunksMatcher(), chunksConfig.SubrangeSize, chunksConfig.AttributesTTL, chunksConfig.SubrangeTTL, chunksConfig.MaxGetRangeRequests)
+ cfg.CacheGetRange("parquet-chunks", chunksCache, matchers.GetParquetChunksMatcher(), chunksConfig.SubrangeSize, chunksConfig.AttributesTTL, chunksConfig.SubrangeTTL, chunksConfig.MaxGetRangeRequests)
}
- metadataCache, err := createCache("metadata-cache", &metadataConfig.CacheBackend, logger, reg)
+ metadataCache, err := createBucketCache("metadata-cache", &metadataConfig.BucketCacheBackend, logger, reg)
if err != nil {
return nil, errors.Wrapf(err, "metadata-cache")
}
@@ -144,6 +251,16 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata
cfg.CacheIter("chunks-iter", metadataCache, matchers.GetChunksIterMatcher(), metadataConfig.ChunksListTTL, codec, "")
}
+ parquetLabelsCache, err := createBucketCache("parquet-labels-cache", &parquetLabelsConfig.BucketCacheBackend, logger, reg)
+ if err != nil {
+ return nil, errors.Wrapf(err, "parquet-labels-cache")
+ }
+ if parquetLabelsCache != nil {
+ cachingConfigured = true
+ parquetLabelsCache = cache.NewTracingCache(parquetLabelsCache)
+ cfg.CacheGetRange("parquet-labels", parquetLabelsCache, matchers.GetParquetLabelsMatcher(), parquetLabelsConfig.SubrangeSize, parquetLabelsConfig.AttributesTTL, parquetLabelsConfig.SubrangeTTL, parquetLabelsConfig.MaxGetRangeRequests)
+ }
+
if !cachingConfigured {
// No caching is configured.
return bkt, nil
@@ -152,30 +269,88 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata
return storecache.NewCachingBucket(bkt, cfg, logger, reg)
}
-func createCache(cacheName string, cacheBackend *CacheBackend, logger log.Logger, reg prometheus.Registerer) (cache.Cache, error) {
- switch cacheBackend.Backend {
- case "":
- // No caching.
- return nil, nil
+func CreateCachingBucketForCompactor(metadataConfig MetadataCacheConfig, cleaner bool, bkt objstore.InstrumentedBucket, logger log.Logger, reg prometheus.Registerer) (objstore.InstrumentedBucket, error) {
+ matchers := NewMatchers()
+ // Do not cache block deletion marker for compactor
+ matchers.SetMetaFileMatcher(func(name string) bool {
+ return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+TenantDeletionMarkFile)
+ })
+ cfg := cache.NewCachingBucketConfig()
+ cachingConfigured := false
- case CacheBackendMemcached:
- var client cacheutil.MemcachedClient
- client, err := cacheutil.NewMemcachedClientWithConfig(logger, cacheName, cacheBackend.Memcached.ToMemcachedClientConfig(), reg)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to create memcached client")
- }
- return cache.NewMemcachedCache(cacheName, logger, client, reg), nil
+ metadataCache, err := createBucketCache("metadata-cache", &metadataConfig.BucketCacheBackend, logger, reg)
+ if err != nil {
+ return nil, errors.Wrapf(err, "metadata-cache")
+ }
+ if metadataCache != nil {
+ cachingConfigured = true
+ metadataCache = cache.NewTracingCache(metadataCache)
- case CacheBackendRedis:
- redisCache, err := cacheutil.NewRedisClientWithConfig(logger, cacheName, cacheBackend.Redis.ToRedisClientConfig(), reg)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to create redis client")
+ codec := snappyIterCodec{storecache.JSONIterCodec{}}
+ cfg.CacheIter("tenants-iter", metadataCache, matchers.GetTenantsIterMatcher(), metadataConfig.TenantsListTTL, codec, "")
+ cfg.CacheAttributes("metafile", metadataCache, matchers.GetMetafileMatcher(), metadataConfig.MetafileAttributesTTL)
+
+ // Don't cache bucket index get and tenant blocks iter if it is cleaner.
+ if !cleaner {
+ cfg.CacheExists("metafile", metadataCache, matchers.GetMetafileMatcher(), metadataConfig.MetafileExistsTTL, metadataConfig.MetafileDoesntExistTTL)
+ cfg.CacheGet("metafile", metadataCache, matchers.GetMetafileMatcher(), metadataConfig.MetafileMaxSize, metadataConfig.MetafileContentTTL, metadataConfig.MetafileExistsTTL, metadataConfig.MetafileDoesntExistTTL)
+ cfg.CacheGet("bucket-index", metadataCache, matchers.GetBucketIndexMatcher(), metadataConfig.BucketIndexMaxSize, metadataConfig.BucketIndexContentTTL /* do not cache exist / not exist: */, 0, 0)
+ cfg.CacheIter("tenant-blocks-iter", metadataCache, matchers.GetTenantBlocksIterMatcher(), metadataConfig.TenantBlocksListTTL, codec, "")
+ } else {
+ // Cache only GET for metadata and don't cache exists and not exists.
+ cfg.CacheGet("metafile", metadataCache, matchers.GetMetafileMatcher(), metadataConfig.MetafileMaxSize, metadataConfig.MetafileContentTTL, 0, 0)
+
+ if metadataConfig.PartitionedGroupsListTTL > 0 {
+ // Avoid double iter when running cleanActiveUser and emitUserMetrics
+ cfg.CacheIter("partitioned-groups-iter", metadataCache, matchers.GetPartitionedGroupsIterMatcher(), metadataConfig.PartitionedGroupsListTTL, codec, "")
+ }
}
- return cache.NewRedisCache(cacheName, logger, redisCache, reg), nil
+ }
- default:
- return nil, errors.Errorf("unsupported cache type for cache %s: %s", cacheName, cacheBackend.Backend)
+ if !cachingConfigured {
+ // No caching is configured.
+ return bkt, nil
}
+
+ return storecache.NewCachingBucket(bkt, cfg, logger, reg)
+}
+
+func createBucketCache(cacheName string, cacheBackend *BucketCacheBackend, logger log.Logger, reg prometheus.Registerer) (cache.Cache, error) {
+ if cacheBackend.Backend == "" {
+ // No caching.
+ return nil, nil
+ }
+
+ splitBackends := strings.Split(cacheBackend.Backend, ",")
+ var (
+ caches []cache.Cache
+ )
+
+ for _, backend := range splitBackends {
+ switch backend {
+ case CacheBackendInMemory:
+ inMemoryCache, err := cache.NewInMemoryCacheWithConfig(cacheName, logger, reg, cacheBackend.InMemory.toInMemoryCacheConfig())
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create in-memory cache")
+ }
+ caches = append(caches, inMemoryCache)
+ case CacheBackendMemcached:
+ var client cacheutil.MemcachedClient
+ client, err := cacheutil.NewMemcachedClientWithConfig(logger, cacheName, cacheBackend.Memcached.ToMemcachedClientConfig(), reg)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create memcached client")
+ }
+ caches = append(caches, cache.NewMemcachedCache(cacheName, logger, client, reg))
+ case CacheBackendRedis:
+ redisCache, err := cacheutil.NewRedisClientWithConfig(logger, cacheName, cacheBackend.Redis.ToRedisClientConfig(), reg)
+ if err != nil {
+ return nil, errors.Wrapf(err, "failed to create redis client")
+ }
+ caches = append(caches, cache.NewRedisCache(cacheName, logger, redisCache, reg))
+ }
+ }
+
+ return newMultiLevelBucketCache(cacheName, cacheBackend.MultiLevel, reg, caches...), nil
}
type Matchers struct {
@@ -185,12 +360,15 @@ type Matchers struct {
func NewMatchers() Matchers {
matcherMap := make(map[string]func(string) bool)
matcherMap["chunks"] = isTSDBChunkFile
+ matcherMap["parquet-chunks"] = isParquetChunkFile
+ matcherMap["parquet-labels"] = isParquetLabelsFile
matcherMap["metafile"] = isMetaFile
matcherMap["block-index"] = isBlockIndexFile
matcherMap["bucket-index"] = isBucketIndexFiles
matcherMap["tenants-iter"] = isTenantsDir
matcherMap["tenant-blocks-iter"] = isTenantBlocksDir
matcherMap["chunks-iter"] = isChunksDir
+ matcherMap["partitioned-groups-iter"] = isPartitionedGroupsDir
return Matchers{
matcherMap: matcherMap,
}
@@ -204,6 +382,14 @@ func (m *Matchers) SetChunksMatcher(f func(string) bool) {
m.matcherMap["chunks"] = f
}
+func (m *Matchers) SetParquetChunksMatcher(f func(string) bool) {
+ m.matcherMap["parquet-chunks"] = f
+}
+
+func (m *Matchers) SetParquetLabelsMatcher(f func(string) bool) {
+ m.matcherMap["parquet-labels"] = f
+}
+
func (m *Matchers) SetBlockIndexMatcher(f func(string) bool) {
m.matcherMap["block-index"] = f
}
@@ -224,10 +410,22 @@ func (m *Matchers) SetChunksIterMatcher(f func(string) bool) {
m.matcherMap["chunks-iter"] = f
}
+func (m *Matchers) SetPartitionedGroupsIterMatcher(f func(string) bool) {
+ m.matcherMap["partitioned-groups-iter"] = f
+}
+
func (m *Matchers) GetChunksMatcher() func(string) bool {
return m.matcherMap["chunks"]
}
+func (m *Matchers) GetParquetChunksMatcher() func(string) bool {
+ return m.matcherMap["parquet-chunks"]
+}
+
+func (m *Matchers) GetParquetLabelsMatcher() func(string) bool {
+ return m.matcherMap["parquet-labels"]
+}
+
func (m *Matchers) GetMetafileMatcher() func(string) bool {
return m.matcherMap["metafile"]
}
@@ -252,10 +450,18 @@ func (m *Matchers) GetChunksIterMatcher() func(string) bool {
return m.matcherMap["chunks-iter"]
}
+func (m *Matchers) GetPartitionedGroupsIterMatcher() func(string) bool {
+ return m.matcherMap["partitioned-groups-iter"]
+}
+
var chunksMatcher = regexp.MustCompile(`^.*/chunks/\d+$`)
func isTSDBChunkFile(name string) bool { return chunksMatcher.MatchString(name) }
+func isParquetChunkFile(name string) bool { return strings.HasSuffix(name, "chunks.parquet") }
+
+func isParquetLabelsFile(name string) bool { return strings.HasSuffix(name, "labels.parquet") }
+
func isMetaFile(name string) bool {
return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename) || strings.HasSuffix(name, "/"+TenantDeletionMarkFile)
}
@@ -289,6 +495,10 @@ func isChunksDir(name string) bool {
return strings.HasSuffix(name, "/chunks")
}
+func isPartitionedGroupsDir(name string) bool {
+ return strings.HasSuffix(name, "/partitioned-groups")
+}
+
type snappyIterCodec struct {
cache.IterCodec
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
index bd3099dba..b51ad077b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go
@@ -4,6 +4,7 @@ import (
"flag"
"fmt"
"path/filepath"
+ "slices"
"strings"
"time"
@@ -15,7 +16,8 @@ import (
"github.com/thanos-io/thanos/pkg/store"
"github.com/cortexproject/cortex/pkg/storage/bucket"
- "github.com/cortexproject/cortex/pkg/util"
+ "github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
)
const (
@@ -30,6 +32,9 @@ const (
// How often are open TSDBs checked for being idle and closed.
DefaultCloseIdleTSDBInterval = 5 * time.Minute
+ // How often expired items are cleaned from the PostingsCache
+ ExpandedCachingExpireInterval = 5 * time.Minute
+
// How often to check for tenant deletion mark.
DeletionMarkCheckInterval = 1 * time.Hour
@@ -42,27 +47,30 @@ const (
// Validation errors
var (
- errInvalidShipConcurrency = errors.New("invalid TSDB ship concurrency")
- errInvalidOpeningConcurrency = errors.New("invalid TSDB opening concurrency")
- errInvalidCompactionInterval = errors.New("invalid TSDB compaction interval")
- errInvalidCompactionConcurrency = errors.New("invalid TSDB compaction concurrency")
- errInvalidWALSegmentSizeBytes = errors.New("invalid TSDB WAL segment size bytes")
- errInvalidStripeSize = errors.New("invalid TSDB stripe size")
- errInvalidOutOfOrderCapMax = errors.New("invalid TSDB OOO chunks capacity (in samples)")
- errEmptyBlockranges = errors.New("empty block ranges for TSDB")
-
- ErrInvalidBucketIndexBlockDiscoveryStrategy = errors.New("bucket index block discovery strategy can only be enabled when bucket index is enabled")
- ErrBlockDiscoveryStrategy = errors.New("invalid block discovery strategy")
- ErrInvalidTokenBucketBytesLimiterMode = errors.New("invalid token bucket bytes limiter mode")
+ errInvalidShipConcurrency = errors.New("invalid TSDB ship concurrency")
+ errInvalidOpeningConcurrency = errors.New("invalid TSDB opening concurrency")
+ errInvalidCompactionInterval = errors.New("invalid TSDB compaction interval")
+ errInvalidCompactionConcurrency = errors.New("invalid TSDB compaction concurrency")
+ errInvalidWALSegmentSizeBytes = errors.New("invalid TSDB WAL segment size bytes")
+ errInvalidStripeSize = errors.New("invalid TSDB stripe size")
+ errInvalidOutOfOrderCapMax = errors.New("invalid TSDB OOO chunks capacity (in samples)")
+ errEmptyBlockranges = errors.New("empty block ranges for TSDB")
+ errUnSupportedWALCompressionType = errors.New("unsupported WAL compression type, valid types are (zstd, snappy and '')")
+
+ ErrInvalidBucketIndexBlockDiscoveryStrategy = errors.New("bucket index block discovery strategy can only be enabled when bucket index is enabled")
+ ErrBlockDiscoveryStrategy = errors.New("invalid block discovery strategy")
+ ErrInvalidTokenBucketBytesLimiterMode = errors.New("invalid token bucket bytes limiter mode")
+ ErrInvalidLazyExpandedPostingGroupMaxKeySeriesRatio = errors.New("lazy expanded posting group max key series ratio needs to be equal or greater than 0")
)
// BlocksStorageConfig holds the config information for the blocks storage.
//
//nolint:revive
type BlocksStorageConfig struct {
- Bucket bucket.Config `yaml:",inline"`
- BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket."`
- TSDB TSDBConfig `yaml:"tsdb"`
+ Bucket bucket.Config `yaml:",inline"`
+ BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket."`
+ TSDB TSDBConfig `yaml:"tsdb"`
+ UsersScanner UsersScannerConfig `yaml:"users_scanner"`
}
// DurationList is the block ranges for a tsdb
@@ -102,11 +110,12 @@ func (d *DurationList) ToMilliseconds() []int64 {
return values
}
-// RegisterFlags registers the TSDB flags
+// RegisterFlags registers the block storage flags
func (cfg *BlocksStorageConfig) RegisterFlags(f *flag.FlagSet) {
cfg.Bucket.RegisterFlagsWithPrefix("blocks-storage.", f)
cfg.BucketStore.RegisterFlags(f)
cfg.TSDB.RegisterFlags(f)
+ cfg.UsersScanner.RegisterFlagsWithPrefix("blocks-storage", f)
}
// Validate the config.
@@ -119,6 +128,10 @@ func (cfg *BlocksStorageConfig) Validate() error {
return err
}
+ if err := cfg.UsersScanner.Validate(); err != nil {
+ return err
+ }
+
return cfg.BucketStore.Validate()
}
@@ -136,7 +149,7 @@ type TSDBConfig struct {
HeadCompactionIdleTimeout time.Duration `yaml:"head_compaction_idle_timeout"`
HeadChunksWriteBufferSize int `yaml:"head_chunks_write_buffer_size_bytes"`
StripeSize int `yaml:"stripe_size"`
- WALCompressionEnabled bool `yaml:"wal_compression_enabled"`
+ WALCompressionType string `yaml:"wal_compression_type"`
WALSegmentSizeBytes int `yaml:"wal_segment_size_bytes"`
FlushBlocksOnShutdown bool `yaml:"flush_blocks_on_shutdown"`
CloseIdleTSDBTimeout time.Duration `yaml:"close_idle_tsdb_timeout"`
@@ -153,6 +166,9 @@ type TSDBConfig struct {
// How often to check for idle TSDBs for closing. DefaultCloseIdleTSDBInterval is not suitable for testing, so tests can override.
CloseIdleTSDBInterval time.Duration `yaml:"-"`
+ // How often expired items are cleaned from the PostingsCache. ExpandedCachingExpireInterval is not suitable for testing, so tests can override.
+ ExpandedCachingExpireInterval time.Duration `yaml:"-"`
+
// Positive value enables experimental support for exemplars. 0 or less to disable.
MaxExemplars int `yaml:"max_exemplars"`
@@ -162,8 +178,8 @@ type TSDBConfig struct {
// OutOfOrderCapMax is maximum capacity for OOO chunks (in samples).
OutOfOrderCapMax int64 `yaml:"out_of_order_cap_max"`
- // Enable native histogram ingestion.
- EnableNativeHistograms bool `yaml:"enable_native_histograms"`
+ // Posting Cache Configuration for TSDB
+ PostingsCache TSDBPostingsCacheConfig `yaml:"expanded_postings_cache" doc:"description=[EXPERIMENTAL] If enabled, ingesters will cache expanded postings when querying blocks. Caching can be configured separately for the head and compacted blocks."`
}
// RegisterFlags registers the TSDBConfig flags.
@@ -183,7 +199,7 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
f.DurationVar(&cfg.HeadCompactionIdleTimeout, "blocks-storage.tsdb.head-compaction-idle-timeout", 1*time.Hour, "If TSDB head is idle for this duration, it is compacted. Note that up to 25% jitter is added to the value to avoid ingesters compacting concurrently. 0 means disabled.")
f.IntVar(&cfg.HeadChunksWriteBufferSize, "blocks-storage.tsdb.head-chunks-write-buffer-size-bytes", chunks.DefaultWriteBufferSize, "The write buffer size used by the head chunks mapper. Lower values reduce memory utilisation on clusters with a large number of tenants at the cost of increased disk I/O operations.")
f.IntVar(&cfg.StripeSize, "blocks-storage.tsdb.stripe-size", 16384, "The number of shards of series to use in TSDB (must be a power of 2). Reducing this will decrease memory footprint, but can negatively impact performance.")
- f.BoolVar(&cfg.WALCompressionEnabled, "blocks-storage.tsdb.wal-compression-enabled", false, "True to enable TSDB WAL compression.")
+ f.StringVar(&cfg.WALCompressionType, "blocks-storage.tsdb.wal-compression-type", "", "TSDB WAL type. Supported values are: 'snappy', 'zstd' and '' (disable compression)")
f.IntVar(&cfg.WALSegmentSizeBytes, "blocks-storage.tsdb.wal-segment-size-bytes", wlog.DefaultSegmentSize, "TSDB WAL segments files max size (bytes).")
f.BoolVar(&cfg.FlushBlocksOnShutdown, "blocks-storage.tsdb.flush-blocks-on-shutdown", false, "True to flush blocks to storage on shutdown. If false, incomplete blocks will be reused after restart.")
f.DurationVar(&cfg.CloseIdleTSDBTimeout, "blocks-storage.tsdb.close-idle-tsdb-timeout", 0, "If TSDB has not received any data for this duration, and all blocks from TSDB have been shipped, TSDB is closed and deleted from local disk. If set to positive value, this value should be equal or higher than -querier.query-ingesters-within flag to make sure that TSDB is not closed prematurely, which could cause partial query results. 0 or negative value disables closing of idle TSDB.")
@@ -191,7 +207,10 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) {
f.IntVar(&cfg.MaxExemplars, "blocks-storage.tsdb.max-exemplars", 0, "Deprecated, use maxExemplars in limits instead. If the MaxExemplars value in limits is set to zero, cortex will fallback on this value. This setting enables support for exemplars in TSDB and sets the maximum number that will be stored. 0 or less means disabled.")
f.BoolVar(&cfg.MemorySnapshotOnShutdown, "blocks-storage.tsdb.memory-snapshot-on-shutdown", false, "True to enable snapshotting of in-memory TSDB data on disk when shutting down.")
f.Int64Var(&cfg.OutOfOrderCapMax, "blocks-storage.tsdb.out-of-order-cap-max", tsdb.DefaultOutOfOrderCapMax, "[EXPERIMENTAL] Configures the maximum number of samples per chunk that can be out-of-order.")
- f.BoolVar(&cfg.EnableNativeHistograms, "blocks-storage.tsdb.enable-native-histograms", false, "[EXPERIMENTAL] True to enable native histogram.")
+
+ flagext.DeprecatedFlag(f, "blocks-storage.tsdb.wal-compression-enabled", "Deprecated (use blocks-storage.tsdb.wal-compression-type instead): True to enable TSDB WAL compression.", util_log.Logger)
+
+ cfg.PostingsCache.RegisterFlagsWithPrefix("blocks-storage.", f)
}
// Validate the config.
@@ -232,6 +251,13 @@ func (cfg *TSDBConfig) Validate() error {
return errInvalidOutOfOrderCapMax
}
+ switch cfg.WALCompressionType {
+ case "snappy", "zstd", "":
+ // valid
+ default:
+ return errUnSupportedWALCompressionType
+ }
+
return nil
}
@@ -248,21 +274,24 @@ func (cfg *TSDBConfig) IsBlocksShippingEnabled() bool {
// BucketStoreConfig holds the config information for Bucket Stores used by the querier and store-gateway.
type BucketStoreConfig struct {
- SyncDir string `yaml:"sync_dir"`
- SyncInterval time.Duration `yaml:"sync_interval"`
- MaxConcurrent int `yaml:"max_concurrent"`
- MaxInflightRequests int `yaml:"max_inflight_requests"`
- TenantSyncConcurrency int `yaml:"tenant_sync_concurrency"`
- BlockSyncConcurrency int `yaml:"block_sync_concurrency"`
- MetaSyncConcurrency int `yaml:"meta_sync_concurrency"`
- ConsistencyDelay time.Duration `yaml:"consistency_delay"`
- IndexCache IndexCacheConfig `yaml:"index_cache"`
- ChunksCache ChunksCacheConfig `yaml:"chunks_cache"`
- MetadataCache MetadataCacheConfig `yaml:"metadata_cache"`
- IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"`
- IgnoreBlocksWithin time.Duration `yaml:"ignore_blocks_within"`
- BucketIndex BucketIndexConfig `yaml:"bucket_index"`
- BlockDiscoveryStrategy string `yaml:"block_discovery_strategy"`
+ SyncDir string `yaml:"sync_dir"`
+ SyncInterval time.Duration `yaml:"sync_interval"`
+ MaxConcurrent int `yaml:"max_concurrent"`
+ MaxInflightRequests int `yaml:"max_inflight_requests"`
+ TenantSyncConcurrency int `yaml:"tenant_sync_concurrency"`
+ BlockSyncConcurrency int `yaml:"block_sync_concurrency"`
+ MetaSyncConcurrency int `yaml:"meta_sync_concurrency"`
+ ConsistencyDelay time.Duration `yaml:"consistency_delay"`
+ IndexCache IndexCacheConfig `yaml:"index_cache"`
+ ChunksCache ChunksCacheConfig `yaml:"chunks_cache"`
+ MetadataCache MetadataCacheConfig `yaml:"metadata_cache"`
+ ParquetLabelsCache ParquetLabelsCacheConfig `yaml:"parquet_labels_cache"`
+ MatchersCacheMaxItems int `yaml:"matchers_cache_max_items"`
+ IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"`
+ IgnoreBlocksWithin time.Duration `yaml:"ignore_blocks_within"`
+ IgnoreBlocksBefore time.Duration `yaml:"ignore_blocks_before"`
+ BucketIndex BucketIndexConfig `yaml:"bucket_index"`
+ BlockDiscoveryStrategy string `yaml:"block_discovery_strategy"`
// Chunk pool.
MaxChunkPoolBytes uint64 `yaml:"max_chunk_pool_bytes"`
@@ -276,6 +305,9 @@ type BucketStoreConfig struct {
// Controls whether lazy expanded posting optimization is enabled or not.
LazyExpandedPostingsEnabled bool `yaml:"lazy_expanded_postings_enabled"`
+ // Controls whether expanded posting group is marked as lazy or not depending on number of keys to fetch.
+ LazyExpandedPostingGroupMaxKeySeriesRatio float64 `yaml:"lazy_expanded_posting_group_max_key_series_ratio"`
+
// Controls the partitioner, used to aggregate multiple GET object API requests.
// The config option is hidden until experimental.
PartitionerMaxGapBytes uint64 `yaml:"partitioner_max_gap_bytes" doc:"hidden"`
@@ -317,6 +349,7 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) {
cfg.IndexCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.index-cache.")
cfg.ChunksCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.chunks-cache.")
cfg.MetadataCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.metadata-cache.")
+ cfg.ParquetLabelsCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.parquet-labels-cache.")
cfg.BucketIndex.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.bucket-index.")
f.StringVar(&cfg.SyncDir, "blocks-storage.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.")
@@ -334,6 +367,7 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) {
"The idea of ignore-deletion-marks-delay is to ignore blocks that are marked for deletion with some delay. This ensures store can still serve blocks that are meant to be deleted but do not have a replacement yet. "+
"Default is 6h, half of the default value for -compactor.deletion-delay.")
f.DurationVar(&cfg.IgnoreBlocksWithin, "blocks-storage.bucket-store.ignore-blocks-within", 0, "The blocks created since `now() - ignore_blocks_within` will not be synced. This should be used together with `-querier.query-store-after` to filter out the blocks that are too new to be queried. A reasonable value for this flag would be `-querier.query-store-after - blocks-storage.bucket-store.bucket-index.max-stale-period` to give some buffer. 0 to disable.")
+ f.DurationVar(&cfg.IgnoreBlocksBefore, "blocks-storage.bucket-store.ignore-blocks-before", 0, "The blocks created before `now() - ignore_blocks_before` will not be synced. 0 to disable.")
f.IntVar(&cfg.PostingOffsetsInMemSampling, "blocks-storage.bucket-store.posting-offsets-in-mem-sampling", store.DefaultPostingOffsetInMemorySampling, "Controls what is the ratio of postings offsets that the store will hold in memory.")
f.BoolVar(&cfg.IndexHeaderLazyLoadingEnabled, "blocks-storage.bucket-store.index-header-lazy-loading-enabled", false, "If enabled, store-gateway will lazily memory-map an index-header only once required by a query.")
f.DurationVar(&cfg.IndexHeaderLazyLoadingIdleTimeout, "blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout", 20*time.Minute, "If index-header lazy loading is enabled and this setting is > 0, the store-gateway will release memory-mapped index-headers after 'idle timeout' inactivity.")
@@ -341,6 +375,7 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) {
f.Uint64Var(&cfg.EstimatedMaxSeriesSizeBytes, "blocks-storage.bucket-store.estimated-max-series-size-bytes", store.EstimatedMaxSeriesSize, "Estimated max series size in bytes. Setting a large value might result in over fetching data while a small value might result in data refetch. Default value is 64KB.")
f.Uint64Var(&cfg.EstimatedMaxChunkSizeBytes, "blocks-storage.bucket-store.estimated-max-chunk-size-bytes", store.EstimatedMaxChunkSize, "Estimated max chunk size in bytes. Setting a large value might result in over fetching data while a small value might result in data refetch. Default value is 16KiB.")
f.BoolVar(&cfg.LazyExpandedPostingsEnabled, "blocks-storage.bucket-store.lazy-expanded-postings-enabled", false, "If true, Store Gateway will estimate postings size and try to lazily expand postings if it downloads less data than expanding all postings.")
+ f.Float64Var(&cfg.LazyExpandedPostingGroupMaxKeySeriesRatio, "blocks-storage.bucket-store.lazy-expanded-posting-group-max-key-series-ratio", 100, "Mark posting group as lazy if it fetches more keys than R * max series the query should fetch. With R set to 100, a posting group which fetches 100K keys will be marked as lazy if the current query only fetches 1000 series. This config is only valid if lazy expanded posting is enabled. 0 disables the limit.")
f.IntVar(&cfg.SeriesBatchSize, "blocks-storage.bucket-store.series-batch-size", store.SeriesBatchSize, "Controls how many series to fetch per batch in Store Gateway. Default value is 10000.")
f.StringVar(&cfg.BlockDiscoveryStrategy, "blocks-storage.bucket-store.block-discovery-strategy", string(ConcurrentDiscovery), "One of "+strings.Join(supportedBlockDiscoveryStrategies, ", ")+". When set to concurrent, stores will concurrently issue one call per directory to discover active blocks in the bucket. The recursive strategy iterates through all objects in the bucket, recursively traversing into each directory. This avoids N+1 calls at the expense of having slower bucket iterations. bucket_index strategy can be used in Compactor only and utilizes the existing bucket index to fetch block IDs to sync. This avoids iterating the bucket but can be impacted by delays of cleaner creating bucket index.")
f.StringVar(&cfg.TokenBucketBytesLimiter.Mode, "blocks-storage.bucket-store.token-bucket-bytes-limiter.mode", string(TokenBucketBytesLimiterDisabled), fmt.Sprintf("Token bucket bytes limiter mode. Supported values are: %s", strings.Join(supportedTokenBucketBytesLimiterModes, ", ")))
@@ -353,6 +388,7 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) {
f.Float64Var(&cfg.TokenBucketBytesLimiter.TouchedSeriesTokenFactor, "blocks-storage.bucket-store.token-bucket-bytes-limiter.touched-series-token-factor", 25, "Multiplication factor used for touched series token")
f.Float64Var(&cfg.TokenBucketBytesLimiter.FetchedChunksTokenFactor, "blocks-storage.bucket-store.token-bucket-bytes-limiter.fetched-chunks-token-factor", 0, "Multiplication factor used for fetched chunks token")
f.Float64Var(&cfg.TokenBucketBytesLimiter.TouchedChunksTokenFactor, "blocks-storage.bucket-store.token-bucket-bytes-limiter.touched-chunks-token-factor", 1, "Multiplication factor used for touched chunks token")
+ f.IntVar(&cfg.MatchersCacheMaxItems, "blocks-storage.bucket-store.matchers-cache-max-items", 0, "Maximum number of entries in the regex matchers cache. 0 to disable.")
}
// Validate the config.
@@ -369,12 +405,19 @@ func (cfg *BucketStoreConfig) Validate() error {
if err != nil {
return errors.Wrap(err, "metadata-cache configuration")
}
- if !util.StringsContain(supportedBlockDiscoveryStrategies, cfg.BlockDiscoveryStrategy) {
+ err = cfg.ParquetLabelsCache.Validate()
+ if err != nil {
+ return errors.Wrap(err, "parquet-labels-cache configuration")
+ }
+ if !slices.Contains(supportedBlockDiscoveryStrategies, cfg.BlockDiscoveryStrategy) {
return ErrInvalidBucketIndexBlockDiscoveryStrategy
}
- if !util.StringsContain(supportedTokenBucketBytesLimiterModes, cfg.TokenBucketBytesLimiter.Mode) {
+ if !slices.Contains(supportedTokenBucketBytesLimiterModes, cfg.TokenBucketBytesLimiter.Mode) {
return ErrInvalidTokenBucketBytesLimiterMode
}
+ if cfg.LazyExpandedPostingGroupMaxKeySeriesRatio < 0 {
+ return ErrInvalidLazyExpandedPostingGroupMaxKeySeriesRatio
+ }
return nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/expanded_postings_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/expanded_postings_cache.go
new file mode 100644
index 000000000..ded95d975
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/expanded_postings_cache.go
@@ -0,0 +1,514 @@
+package tsdb
+
+import (
+ "container/list"
+ "context"
+ "flag"
+ "slices"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/oklog/ulid/v2"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/prometheus/prometheus/tsdb"
+ "github.com/prometheus/prometheus/tsdb/index"
+ "github.com/segmentio/fasthash/fnv1a"
+
+ "github.com/cortexproject/cortex/pkg/util/extract"
+ logutil "github.com/cortexproject/cortex/pkg/util/log"
+)
+
+var (
+ rangeHeadULID = ulid.MustParse("0000000000XXXXXXXRANGEHEAD")
+ headULID = ulid.MustParse("0000000000XXXXXXXXXXXXHEAD")
+)
+
+const (
+ // size of the seed array. Each seed is a 64-bit int (8 bytes),
+ // totaling 16MB
+ seedArraySize = 2 * 1024 * 1024
+
+ numOfSeedsStripes = 512
+)
+
+type ExpandedPostingsCacheMetrics struct {
+ CacheRequests *prometheus.CounterVec
+ CacheHits *prometheus.CounterVec
+ CacheEvicts *prometheus.CounterVec
+ CacheMiss *prometheus.CounterVec
+ NonCacheableQueries *prometheus.CounterVec
+}
+
+func NewPostingCacheMetrics(r prometheus.Registerer) *ExpandedPostingsCacheMetrics {
+ return &ExpandedPostingsCacheMetrics{
+ CacheRequests: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_ingester_expanded_postings_cache_requests_total",
+ Help: "Total number of requests to the cache.",
+ }, []string{"cache"}),
+ CacheHits: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_ingester_expanded_postings_cache_hits_total",
+ Help: "Total number of hit requests to the cache.",
+ }, []string{"cache"}),
+ CacheMiss: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_ingester_expanded_postings_cache_miss_total",
+ Help: "Total number of miss requests to the cache.",
+ }, []string{"cache", "reason"}),
+ CacheEvicts: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_ingester_expanded_postings_cache_evicts_total",
+ Help: "Total number of evictions in the cache, excluding items that got evicted due to TTL.",
+ }, []string{"cache", "reason"}),
+ NonCacheableQueries: promauto.With(r).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_ingester_expanded_postings_non_cacheable_queries_total",
+ Help: "Total number of non cacheable queries.",
+ }, []string{"cache"}),
+ }
+}
+
+type TSDBPostingsCacheConfig struct {
+ Head PostingsCacheConfig `yaml:"head" doc:"description=If enabled, ingesters will cache expanded postings for the head block. Only queries with an equal matcher for metric __name__ are cached."`
+ Blocks PostingsCacheConfig `yaml:"blocks" doc:"description=If enabled, ingesters will cache expanded postings for the compacted blocks. The cache is shared between all blocks."`
+
+ // The configurations below are used only for testing purpose
+ PostingsForMatchers func(ctx context.Context, ix tsdb.IndexReader, ms ...*labels.Matcher) (index.Postings, error) `yaml:"-"`
+ SeedSize int `yaml:"-"`
+ timeNow func() time.Time `yaml:"-"`
+}
+
+type PostingsCacheConfig struct {
+ Enabled bool `yaml:"enabled"`
+ MaxBytes int64 `yaml:"max_bytes"`
+ Ttl time.Duration `yaml:"ttl"`
+}
+
+func (cfg *TSDBPostingsCacheConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ cfg.Head.RegisterFlagsWithPrefix(prefix, "head", f)
+ cfg.Blocks.RegisterFlagsWithPrefix(prefix, "block", f)
+}
+
+// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet
+func (cfg *PostingsCacheConfig) RegisterFlagsWithPrefix(prefix, block string, f *flag.FlagSet) {
+ f.Int64Var(&cfg.MaxBytes, prefix+"expanded_postings_cache."+block+".max-bytes", 10*1024*1024, "Max bytes for postings cache")
+ f.DurationVar(&cfg.Ttl, prefix+"expanded_postings_cache."+block+".ttl", 10*time.Minute, "TTL for postings cache")
+ f.BoolVar(&cfg.Enabled, prefix+"expanded_postings_cache."+block+".enabled", false, "Whether the postings cache is enabled or not")
+}
+
+type ExpandedPostingsCacheFactory struct {
+ seedByHash *seedByHash
+ cfg TSDBPostingsCacheConfig
+}
+
+func NewExpandedPostingsCacheFactory(cfg TSDBPostingsCacheConfig) *ExpandedPostingsCacheFactory {
+ if cfg.Head.Enabled || cfg.Blocks.Enabled {
+ if cfg.SeedSize == 0 {
+ cfg.SeedSize = seedArraySize
+ }
+ logutil.WarnExperimentalUse("expanded postings cache")
+ return &ExpandedPostingsCacheFactory{
+ cfg: cfg,
+ seedByHash: newSeedByHash(cfg.SeedSize),
+ }
+ }
+
+ return nil
+}
+
+func (f *ExpandedPostingsCacheFactory) NewExpandedPostingsCache(userId string, metrics *ExpandedPostingsCacheMetrics) ExpandedPostingsCache {
+ return newBlocksPostingsForMatchersCache(userId, f.cfg, metrics, f.seedByHash)
+}
+
+type ExpandedPostingsCache interface {
+ PostingsForMatchers(ctx context.Context, blockID ulid.ULID, ix tsdb.IndexReader, ms ...*labels.Matcher) (index.Postings, error)
+ ExpireSeries(metric labels.Labels)
+ PurgeExpiredItems()
+ Clear()
+ Size() int
+}
+
+type blocksPostingsForMatchersCache struct {
+ userId string
+
+ headCache *fifoCache[[]storage.SeriesRef]
+ blocksCache *fifoCache[[]storage.SeriesRef]
+ postingsForMatchersFunc func(ctx context.Context, ix tsdb.IndexReader, ms ...*labels.Matcher) (index.Postings, error)
+ timeNow func() time.Time
+
+ metrics *ExpandedPostingsCacheMetrics
+ seedByHash *seedByHash
+}
+
+func (c *blocksPostingsForMatchersCache) Clear() {
+ c.headCache.clear()
+ c.blocksCache.clear()
+}
+
+func newBlocksPostingsForMatchersCache(userId string, cfg TSDBPostingsCacheConfig, metrics *ExpandedPostingsCacheMetrics, seedByHash *seedByHash) ExpandedPostingsCache {
+ if cfg.PostingsForMatchers == nil {
+ cfg.PostingsForMatchers = tsdb.PostingsForMatchers
+ }
+
+ if cfg.timeNow == nil {
+ cfg.timeNow = time.Now
+ }
+
+ return &blocksPostingsForMatchersCache{
+ headCache: newFifoCache[[]storage.SeriesRef](cfg.Head, "head", metrics, cfg.timeNow),
+ blocksCache: newFifoCache[[]storage.SeriesRef](cfg.Blocks, "block", metrics, cfg.timeNow),
+ postingsForMatchersFunc: cfg.PostingsForMatchers,
+ timeNow: cfg.timeNow,
+ metrics: metrics,
+ seedByHash: seedByHash,
+ userId: userId,
+ }
+}
+
+func (c *blocksPostingsForMatchersCache) ExpireSeries(metric labels.Labels) {
+ metricName, err := extract.MetricNameFromLabels(metric)
+ if err != nil {
+ return
+ }
+ c.seedByHash.incrementSeed(c.userId, metricName)
+}
+
+func (c *blocksPostingsForMatchersCache) PurgeExpiredItems() {
+ c.headCache.expire()
+ c.blocksCache.expire()
+}
+
+func (c *blocksPostingsForMatchersCache) Size() int {
+ return c.headCache.size() + c.blocksCache.size()
+}
+
+func (c *blocksPostingsForMatchersCache) PostingsForMatchers(ctx context.Context, blockID ulid.ULID, ix tsdb.IndexReader, ms ...*labels.Matcher) (index.Postings, error) {
+ return c.fetchPostings(blockID, ix, ms...)(ctx)
+}
+
+func (c *blocksPostingsForMatchersCache) fetchPostings(blockID ulid.ULID, ix tsdb.IndexReader, ms ...*labels.Matcher) func(context.Context) (index.Postings, error) {
+ var seed string
+ cache := c.blocksCache
+
+ // If it is a head block, let's add the seed to the cache key so we can
+ // invalidate the cache when new series are created for this metric name
+ if isHeadBlock(blockID) {
+ cache = c.headCache
+ if cache.cfg.Enabled {
+ metricName, ok := metricNameFromMatcher(ms)
+ // Let's not cache the head if we don't find an equal matcher for the label __name__
+ if !ok {
+ c.metrics.NonCacheableQueries.WithLabelValues(cache.name).Inc()
+ return func(ctx context.Context) (index.Postings, error) {
+ return tsdb.PostingsForMatchers(ctx, ix, ms...)
+ }
+ }
+
+ seed = c.getSeedForMetricName(metricName)
+ }
+ }
+
+ // Let's bypass cache if not enabled
+ if !cache.cfg.Enabled {
+ return func(ctx context.Context) (index.Postings, error) {
+ return tsdb.PostingsForMatchers(ctx, ix, ms...)
+ }
+ }
+
+ c.metrics.CacheRequests.WithLabelValues(cache.name).Inc()
+
+ fetch := func() ([]storage.SeriesRef, int64, error) {
+ // Use context.Background() as this promise is maybe shared across calls
+ postings, err := c.postingsForMatchersFunc(context.Background(), ix, ms...)
+
+ if err == nil {
+ ids, err := index.ExpandPostings(postings)
+ return ids, int64(len(ids) * 8), err
+ }
+
+ return nil, 0, err
+ }
+
+ key := cacheKey(seed, blockID, ms...)
+ promise, loaded := cache.getPromiseForKey(key, fetch)
+ if loaded {
+ c.metrics.CacheHits.WithLabelValues(cache.name).Inc()
+ }
+
+ return c.result(promise)
+}
+
+func (c *blocksPostingsForMatchersCache) result(ce *cacheEntryPromise[[]storage.SeriesRef]) func(ctx context.Context) (index.Postings, error) {
+ return func(ctx context.Context) (index.Postings, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-ce.done:
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ return index.NewListPostings(ce.v), ce.err
+ }
+ }
+}
+
+func (c *blocksPostingsForMatchersCache) getSeedForMetricName(metricName string) string {
+ return c.seedByHash.getSeed(c.userId, metricName)
+}
+
+func cacheKey(seed string, blockID ulid.ULID, ms ...*labels.Matcher) string {
+ slices.SortFunc(ms, func(i, j *labels.Matcher) int {
+ if i.Type != j.Type {
+ return int(i.Type - j.Type)
+ }
+ if i.Name != j.Name {
+ return strings.Compare(i.Name, j.Name)
+ }
+ if i.Value != j.Value {
+ return strings.Compare(i.Value, j.Value)
+ }
+ return 0
+ })
+
+ const (
+ typeLen = 2
+ sepLen = 1
+ )
+
+ size := len(seed) + len(blockID.String()) + 2*sepLen
+ for _, m := range ms {
+ size += len(m.Name) + len(m.Value) + typeLen + sepLen
+ }
+ sb := strings.Builder{}
+ sb.Grow(size)
+ sb.WriteString(seed)
+ sb.WriteByte('|')
+ sb.WriteString(blockID.String())
+ sb.WriteByte('|')
+ for _, m := range ms {
+ sb.WriteString(m.Name)
+ sb.WriteString(m.Type.String())
+ sb.WriteString(m.Value)
+ sb.WriteByte('|')
+ }
+ key := sb.String()
+ return key
+}
+
+func isHeadBlock(blockID ulid.ULID) bool {
+ return blockID == rangeHeadULID || blockID == headULID
+}
+
+func metricNameFromMatcher(ms []*labels.Matcher) (string, bool) {
+ for _, m := range ms {
+ if m.Name == labels.MetricName && m.Type == labels.MatchEqual {
+ return m.Value, true
+ }
+ }
+
+ return "", false
+}
+
+type seedByHash struct {
+ strippedLock []sync.RWMutex
+ seedByHash []int
+}
+
+func newSeedByHash(size int) *seedByHash {
+ return &seedByHash{
+ seedByHash: make([]int, size),
+ strippedLock: make([]sync.RWMutex, numOfSeedsStripes),
+ }
+}
+
+func (s *seedByHash) getSeed(userId string, v string) string {
+ h := memHashString(userId, v)
+ i := h % uint64(len(s.seedByHash))
+ l := i % uint64(len(s.strippedLock))
+ s.strippedLock[l].RLock()
+ defer s.strippedLock[l].RUnlock()
+ return strconv.Itoa(s.seedByHash[i])
+}
+
+func (s *seedByHash) incrementSeed(userId string, v string) {
+ h := memHashString(userId, v)
+ i := h % uint64(len(s.seedByHash))
+ l := i % uint64(len(s.strippedLock))
+ s.strippedLock[l].Lock()
+ defer s.strippedLock[l].Unlock()
+ s.seedByHash[i]++
+}
+
+type fifoCache[V any] struct {
+ cfg PostingsCacheConfig
+ cachedValues *sync.Map
+ timeNow func() time.Time
+ name string
+ metrics ExpandedPostingsCacheMetrics
+
+ // Fields from here should be locked
+ cachedMtx sync.RWMutex
+ cached *list.List
+ cachedBytes int64
+}
+
+func newFifoCache[V any](cfg PostingsCacheConfig, name string, metrics *ExpandedPostingsCacheMetrics, timeNow func() time.Time) *fifoCache[V] {
+ return &fifoCache[V]{
+ cachedValues: new(sync.Map),
+ cached: list.New(),
+ cfg: cfg,
+ timeNow: timeNow,
+ name: name,
+ metrics: *metrics,
+ }
+}
+
+func (c *fifoCache[V]) clear() {
+ c.cachedMtx.Lock()
+ defer c.cachedMtx.Unlock()
+ c.cached = list.New()
+ c.cachedBytes = 0
+ c.cachedValues = new(sync.Map)
+}
+
+func (c *fifoCache[V]) expire() {
+ if c.cfg.Ttl <= 0 {
+ return
+ }
+ c.cachedMtx.RLock()
+ if _, r := c.shouldEvictHead(); !r {
+ c.cachedMtx.RUnlock()
+ return
+ }
+ c.cachedMtx.RUnlock()
+ c.cachedMtx.Lock()
+ defer c.cachedMtx.Unlock()
+ for reason, r := c.shouldEvictHead(); r; reason, r = c.shouldEvictHead() {
+ c.metrics.CacheEvicts.WithLabelValues(c.name, reason).Inc()
+ c.evictHead()
+ }
+}
+
+func (c *fifoCache[V]) size() int {
+ c.cachedMtx.RLock()
+ defer c.cachedMtx.RUnlock()
+ return c.cached.Len()
+}
+
+func (c *fifoCache[V]) getPromiseForKey(k string, fetch func() (V, int64, error)) (*cacheEntryPromise[V], bool) {
+ r := &cacheEntryPromise[V]{
+ done: make(chan struct{}),
+ }
+ defer close(r.done)
+
+ if !c.cfg.Enabled {
+ r.v, _, r.err = fetch()
+ return r, false
+ }
+
+ loaded, ok := c.cachedValues.LoadOrStore(k, r)
+
+ if !ok {
+ c.metrics.CacheMiss.WithLabelValues(c.name, "miss").Inc()
+ r.v, r.sizeBytes, r.err = fetch()
+ r.sizeBytes += int64(len(k))
+ r.ts = c.timeNow()
+ c.created(k, r.sizeBytes)
+ c.expire()
+ }
+
+ if ok {
+ // If the promise is already in the cache, let's wait for it to fetch the data.
+ <-loaded.(*cacheEntryPromise[V]).done
+
+ // If it is cached but expired, let's try to replace the cache value.
+ if loaded.(*cacheEntryPromise[V]).isExpired(c.cfg.Ttl, c.timeNow()) && c.cachedValues.CompareAndSwap(k, loaded, r) {
+ c.metrics.CacheMiss.WithLabelValues(c.name, "expired").Inc()
+ r.v, r.sizeBytes, r.err = fetch()
+ r.sizeBytes += int64(len(k))
+ c.updateSize(loaded.(*cacheEntryPromise[V]).sizeBytes, r.sizeBytes)
+ loaded = r
+ r.ts = c.timeNow()
+ ok = false
+ }
+ }
+
+ return loaded.(*cacheEntryPromise[V]), ok
+}
+
+func (c *fifoCache[V]) contains(k string) bool {
+ _, ok := c.cachedValues.Load(k)
+ return ok
+}
+
+func (c *fifoCache[V]) shouldEvictHead() (string, bool) {
+ h := c.cached.Front()
+ if h == nil {
+ return "", false
+ }
+
+ if c.cachedBytes > c.cfg.MaxBytes {
+ return "full", true
+ }
+
+ key := h.Value.(string)
+
+ if l, ok := c.cachedValues.Load(key); ok {
+ if l.(*cacheEntryPromise[V]).isExpired(c.cfg.Ttl, c.timeNow()) {
+ return "expired", true
+ }
+ }
+
+ return "", false
+}
+
+func (c *fifoCache[V]) evictHead() {
+ front := c.cached.Front()
+ c.cached.Remove(front)
+ oldestKey := front.Value.(string)
+ if oldest, loaded := c.cachedValues.LoadAndDelete(oldestKey); loaded {
+ c.cachedBytes -= oldest.(*cacheEntryPromise[V]).sizeBytes
+ }
+}
+
+func (c *fifoCache[V]) created(key string, sizeBytes int64) {
+ if c.cfg.Ttl <= 0 {
+ c.cachedValues.Delete(key)
+ return
+ }
+ c.cachedMtx.Lock()
+ defer c.cachedMtx.Unlock()
+ c.cached.PushBack(key)
+ c.cachedBytes += sizeBytes
+}
+
+func (c *fifoCache[V]) updateSize(oldSize, newSizeBytes int64) {
+ if oldSize == newSizeBytes {
+ return
+ }
+
+ c.cachedMtx.Lock()
+ defer c.cachedMtx.Unlock()
+ c.cachedBytes += newSizeBytes - oldSize
+}
+
+type cacheEntryPromise[V any] struct {
+ ts time.Time
+ sizeBytes int64
+
+ done chan struct{}
+ v V
+ err error
+}
+
+func (ce *cacheEntryPromise[V]) isExpired(ttl time.Duration, now time.Time) bool {
+ ts := ce.ts
+ r := now.Sub(ts)
+ return r >= ttl
+}
+
+func memHashString(userId, v string) uint64 {
+ h := fnv1a.HashString64(userId)
+ return fnv1a.AddString64(h, v)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go
index 24dcd0941..7c1011f74 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/index_cache.go
@@ -3,6 +3,7 @@ package tsdb
import (
"flag"
"fmt"
+ "slices"
"strings"
"time"
@@ -14,7 +15,6 @@ import (
"github.com/thanos-io/thanos/pkg/model"
storecache "github.com/thanos-io/thanos/pkg/store/cache"
- "github.com/cortexproject/cortex/pkg/util"
"github.com/cortexproject/cortex/pkg/util/flagext"
)
@@ -84,7 +84,7 @@ func (cfg *IndexCacheConfig) Validate() error {
}
for _, backend := range splitBackends {
- if !util.StringsContain(supportedIndexCacheBackends, backend) {
+ if !slices.Contains(supportedIndexCacheBackends, backend) {
return errUnsupportedIndexCacheBackend
}
@@ -92,15 +92,16 @@ func (cfg *IndexCacheConfig) Validate() error {
return errors.WithMessagef(errDuplicatedIndexCacheBackend, "duplicated backend: %v", backend)
}
- if backend == IndexCacheBackendMemcached {
+ switch backend {
+ case IndexCacheBackendMemcached:
if err := cfg.Memcached.Validate(); err != nil {
return err
}
- } else if backend == IndexCacheBackendRedis {
+ case IndexCacheBackendRedis:
if err := cfg.Redis.Validate(); err != nil {
return err
}
- } else {
+ default:
if err := cfg.InMemory.Validate(); err != nil {
return err
}
@@ -132,7 +133,7 @@ func (cfg *MultiLevelIndexCacheConfig) Validate() error {
}
func (cfg *MultiLevelIndexCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
- f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 50, "The maximum number of concurrent asynchronous operations can occur when backfilling cache items.")
+ f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 3, "The maximum number of concurrent asynchronous operations can occur when backfilling cache items.")
f.IntVar(&cfg.MaxAsyncBufferSize, prefix+"max-async-buffer-size", 10000, "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.")
f.IntVar(&cfg.MaxBackfillItems, prefix+"max-backfill-items", 10000, "The maximum number of items to backfill per asynchronous operation.")
}
@@ -208,7 +209,7 @@ func NewIndexCache(cfg IndexCacheConfig, logger log.Logger, registerer prometheu
case IndexCacheBackendInMemory:
c, err := newInMemoryIndexCache(cfg.InMemory, logger, iReg)
if err != nil {
- return c, err
+ return nil, err
}
caches = append(caches, c)
enabledItems = append(enabledItems, cfg.InMemory.EnabledItems)
@@ -248,10 +249,7 @@ func newInMemoryIndexCache(cfg InMemoryIndexCacheConfig, logger log.Logger, regi
maxCacheSize := model.Bytes(cfg.MaxSizeBytes)
// Calculate the max item size.
- maxItemSize := defaultMaxItemSize
- if maxItemSize > maxCacheSize {
- maxItemSize = maxCacheSize
- }
+ maxItemSize := min(defaultMaxItemSize, maxCacheSize)
return NewInMemoryIndexCacheWithConfig(logger, nil, registerer, storecache.InMemoryIndexCacheConfig{
MaxSize: maxCacheSize,
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/inmemory_index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/inmemory_index_cache.go
index 95e10803c..ec8a8d8b1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/inmemory_index_cache.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/inmemory_index_cache.go
@@ -7,7 +7,7 @@ import (
"github.com/VictoriaMetrics/fastcache"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
- "github.com/oklog/ulid"
+ "github.com/oklog/ulid/v2"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/matchers_cache_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/matchers_cache_metrics.go
new file mode 100644
index 000000000..ea999a304
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/matchers_cache_metrics.go
@@ -0,0 +1,79 @@
+package tsdb
+
+import (
+ "fmt"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/prometheus/client_golang/prometheus"
+
+ "github.com/cortexproject/cortex/pkg/util"
+)
+
+type MatcherCacheMetrics struct {
+ r *prometheus.Registry
+ logger log.Logger
+
+ requestsTotal *prometheus.Desc
+ hitsTotal *prometheus.Desc
+ numItems *prometheus.Desc
+ maxItems *prometheus.Desc
+ evicted *prometheus.Desc
+}
+
+func NewMatchCacheMetrics(prefix string, r *prometheus.Registry, l log.Logger) *MatcherCacheMetrics {
+ m := &MatcherCacheMetrics{
+ r: r,
+ logger: l,
+ requestsTotal: prometheus.NewDesc(
+ fmt.Sprintf("%v_matchers_cache_requests_total", prefix),
+ "Total number of cache requests for series matchers",
+ nil, nil),
+ hitsTotal: prometheus.NewDesc(
+ fmt.Sprintf("%v_matchers_cache_hits_total", prefix),
+ "Total number of cache hits for series matchers",
+ nil, nil),
+ numItems: prometheus.NewDesc(
+ fmt.Sprintf("%v_matchers_cache_items", prefix),
+ "Total number of cached items",
+ nil, nil),
+ maxItems: prometheus.NewDesc(
+ fmt.Sprintf("%v_matchers_cache_max_items", prefix),
+ "Maximum number of items that can be cached",
+ nil, nil),
+ evicted: prometheus.NewDesc(
+ fmt.Sprintf("%v_matchers_cache_evicted_total", prefix),
+ "Total number of items evicted from the cache",
+ nil, nil),
+ }
+ return m
+}
+
+func (m *MatcherCacheMetrics) Describe(out chan<- *prometheus.Desc) {
+ out <- m.requestsTotal
+ out <- m.hitsTotal
+ out <- m.numItems
+ out <- m.maxItems
+ out <- m.evicted
+}
+
+func (m *MatcherCacheMetrics) Collect(out chan<- prometheus.Metric) {
+ gm, err := m.r.Gather()
+ if err != nil {
+ level.Warn(m.logger).Log("msg", "failed to gather metrics from registry", "err", err)
+ return
+ }
+
+ mfm, err := util.NewMetricFamilyMap(gm)
+
+ if err != nil {
+ level.Warn(m.logger).Log("msg", "failed to create metric family map", "err", err)
+ return
+ }
+
+ out <- prometheus.MustNewConstMetric(m.requestsTotal, prometheus.CounterValue, mfm.SumCounters("thanos_matchers_cache_requests_total"))
+ out <- prometheus.MustNewConstMetric(m.hitsTotal, prometheus.CounterValue, mfm.SumCounters("thanos_matchers_cache_hits_total"))
+ out <- prometheus.MustNewConstMetric(m.numItems, prometheus.GaugeValue, mfm.SumGauges("thanos_matchers_cache_items"))
+ out <- prometheus.MustNewConstMetric(m.maxItems, prometheus.GaugeValue, mfm.SumGauges("thanos_matchers_cache_max_items"))
+ out <- prometheus.MustNewConstMetric(m.evicted, prometheus.CounterValue, mfm.SumCounters("thanos_matchers_cache_evicted_total"))
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/memcache_client_config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/memcache_client_config.go
index 2a04bcbcf..239ea228b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/memcache_client_config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/memcache_client_config.go
@@ -26,7 +26,7 @@ func (cfg *MemcachedClientConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefi
f.StringVar(&cfg.Addresses, prefix+"addresses", "", "Comma separated list of memcached addresses. Supported prefixes are: dns+ (looked up as an A/AAAA query), dnssrv+ (looked up as a SRV query, dnssrvnoa+ (looked up as a SRV query, with no A/AAAA lookup made after that).")
f.DurationVar(&cfg.Timeout, prefix+"timeout", 100*time.Millisecond, "The socket read/write timeout.")
f.IntVar(&cfg.MaxIdleConnections, prefix+"max-idle-connections", 16, "The maximum number of idle connections that will be maintained per address.")
- f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 50, "The maximum number of concurrent asynchronous operations can occur.")
+ f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 3, "The maximum number of concurrent asynchronous operations can occur.")
f.IntVar(&cfg.MaxAsyncBufferSize, prefix+"max-async-buffer-size", 10000, "The maximum number of enqueued asynchronous operations allowed.")
f.IntVar(&cfg.MaxGetMultiConcurrency, prefix+"max-get-multi-concurrency", 100, "The maximum number of concurrent connections running get operations. If set to 0, concurrency is unlimited.")
f.IntVar(&cfg.MaxGetMultiBatchSize, prefix+"max-get-multi-batch-size", 0, "The maximum number of keys a single underlying get operation should run. If more keys are specified, internally keys are split into multiple batches and fetched concurrently, honoring the max concurrency. If set to 0, the max batch size is unlimited.")
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/meta_extensions.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/meta_extensions.go
new file mode 100644
index 000000000..b6b8a7acf
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/meta_extensions.go
@@ -0,0 +1,71 @@
+package tsdb
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/thanos-io/thanos/pkg/block/metadata"
+)
+
+type CortexMetaExtensions struct {
+ PartitionInfo *PartitionInfo `json:"partition_info,omitempty"`
+ TimeRange int64 `json:"time_range,omitempty"`
+}
+
+type PartitionInfo struct {
+ PartitionedGroupID uint32 `json:"partitioned_group_id"`
+ PartitionCount int `json:"partition_count"`
+ PartitionID int `json:"partition_id"`
+ PartitionedGroupCreationTime int64 `json:"partitioned_group_creation_time"`
+}
+
+var (
+ DefaultPartitionInfo = PartitionInfo{
+ PartitionedGroupID: 0,
+ PartitionID: 0,
+ PartitionCount: 1,
+ PartitionedGroupCreationTime: 0,
+ }
+)
+
+func (c *CortexMetaExtensions) TimeRangeStr() string {
+ return strconv.FormatInt(c.TimeRange, 10)
+}
+
+func ConvertToCortexMetaExtensions(extensions any) (*CortexMetaExtensions, error) {
+ defaultPartitionInfo := DefaultPartitionInfo
+ cortexExtensions, err := metadata.ConvertExtensions(extensions, &CortexMetaExtensions{
+ PartitionInfo: &defaultPartitionInfo,
+ })
+
+ if err != nil {
+ return nil, err
+ }
+ if cortexExtensions == nil {
+ return nil, nil
+ }
+ converted, ok := cortexExtensions.(*CortexMetaExtensions)
+ if !ok {
+ return nil, fmt.Errorf("unable to convert extensions to CortexMetaExtensions")
+ }
+ return converted, nil
+}
+
+func ConvertToPartitionInfo(extensions any) (*PartitionInfo, error) {
+ cortexExtensions, err := ConvertToCortexMetaExtensions(extensions)
+ if err != nil {
+ return nil, err
+ }
+ if cortexExtensions == nil {
+ return nil, nil
+ }
+ return cortexExtensions.PartitionInfo, nil
+}
+
+func GetCortexMetaExtensionsFromMeta(meta metadata.Meta) (*CortexMetaExtensions, error) {
+ return ConvertToCortexMetaExtensions(meta.Thanos.Extensions)
+}
+
+func GetPartitionInfo(meta metadata.Meta) (*PartitionInfo, error) {
+ return ConvertToPartitionInfo(meta.Thanos.Extensions)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_bucket_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_bucket_cache.go
new file mode 100644
index 000000000..f9e2b4fbf
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_bucket_cache.go
@@ -0,0 +1,176 @@
+package tsdb
+
+import (
+ "context"
+ "errors"
+ "flag"
+ "fmt"
+ "maps"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/thanos-io/thanos/pkg/cache"
+ "github.com/thanos-io/thanos/pkg/cacheutil"
+)
+
+type multiLevelBucketCache struct {
+ name string
+ caches []cache.Cache
+
+ backfillProcessor *cacheutil.AsyncOperationProcessor
+ fetchLatency *prometheus.HistogramVec
+ backFillLatency *prometheus.HistogramVec
+ storeDroppedItems prometheus.Counter
+ backfillDroppedItems prometheus.Counter
+ maxBackfillItems int
+ backfillTTL time.Duration
+}
+
+type MultiLevelBucketCacheConfig struct {
+ MaxAsyncConcurrency int `yaml:"max_async_concurrency"`
+ MaxAsyncBufferSize int `yaml:"max_async_buffer_size"`
+ MaxBackfillItems int `yaml:"max_backfill_items"`
+
+ BackFillTTL time.Duration `yaml:"-"`
+}
+
+func (cfg *MultiLevelBucketCacheConfig) Validate() error {
+ if cfg.MaxAsyncBufferSize <= 0 {
+ return errInvalidMaxAsyncBufferSize
+ }
+ if cfg.MaxAsyncConcurrency <= 0 {
+ return errInvalidMaxAsyncConcurrency
+ }
+ if cfg.MaxBackfillItems <= 0 {
+ return errInvalidMaxBackfillItems
+ }
+ return nil
+}
+
+func (cfg *MultiLevelBucketCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) {
+ f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 3, "The maximum number of concurrent asynchronous operations can occur when backfilling cache items.")
+ f.IntVar(&cfg.MaxAsyncBufferSize, prefix+"max-async-buffer-size", 10000, "The maximum number of enqueued asynchronous operations allowed when backfilling cache items.")
+ f.IntVar(&cfg.MaxBackfillItems, prefix+"max-backfill-items", 10000, "The maximum number of items to backfill per asynchronous operation.")
+}
+
+func newMultiLevelBucketCache(name string, cfg MultiLevelBucketCacheConfig, reg prometheus.Registerer, c ...cache.Cache) cache.Cache {
+ if len(c) == 1 {
+ return c[0]
+ }
+
+ itemName := ""
+ metricHelpText := ""
+ switch name {
+ case "chunks-cache":
+ itemName = "chunks_cache"
+ metricHelpText = "chunks cache"
+ case "metadata-cache":
+ itemName = "metadata_cache"
+ metricHelpText = "metadata cache"
+ case "parquet-labels-cache":
+ itemName = "parquet_labels_cache"
+ metricHelpText = "parquet labels cache"
+ default:
+ itemName = name
+ }
+
+ return &multiLevelBucketCache{
+ name: name,
+ caches: c,
+ backfillProcessor: cacheutil.NewAsyncOperationProcessor(cfg.MaxAsyncBufferSize, cfg.MaxAsyncConcurrency),
+ fetchLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+ Name: fmt.Sprintf("cortex_store_multilevel_%s_fetch_duration_seconds", itemName),
+ Help: fmt.Sprintf("Histogram to track latency to fetch items from multi level %s", metricHelpText),
+ Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90},
+ }, nil),
+ backFillLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+ Name: fmt.Sprintf("cortex_store_multilevel_%s_backfill_duration_seconds", itemName),
+ Help: fmt.Sprintf("Histogram to track latency to backfill items from multi level %s", metricHelpText),
+ Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90},
+ }, nil),
+ storeDroppedItems: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Name: fmt.Sprintf("cortex_store_multilevel_%s_backfill_dropped_items_total", itemName),
+ Help: fmt.Sprintf("Total number of items dropped due to async buffer full when backfilling multilevel %s", metricHelpText),
+ }),
+ backfillDroppedItems: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Name: fmt.Sprintf("cortex_store_multilevel_%s_store_dropped_items_total", itemName),
+ Help: fmt.Sprintf("Total number of items dropped due to async buffer full when storing multilevel %s", metricHelpText),
+ }),
+ maxBackfillItems: cfg.MaxBackfillItems,
+ backfillTTL: cfg.BackFillTTL,
+ }
+}
+
+func (m *multiLevelBucketCache) Store(data map[string][]byte, ttl time.Duration) {
+ for _, c := range m.caches {
+ if err := m.backfillProcessor.EnqueueAsync(func() {
+ c.Store(data, ttl)
+ }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
+ m.storeDroppedItems.Inc()
+ }
+ }
+}
+
+func (m *multiLevelBucketCache) Fetch(ctx context.Context, keys []string) map[string][]byte {
+ timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues())
+ defer timer.ObserveDuration()
+
+ missingKeys := keys
+ hits := map[string][]byte{}
+ backfillItems := make([]map[string][]byte, len(m.caches)-1)
+
+ for i, c := range m.caches {
+ if i < len(m.caches)-1 {
+ backfillItems[i] = map[string][]byte{}
+ }
+ if ctx.Err() != nil {
+ return nil
+ }
+ if data := c.Fetch(ctx, missingKeys); len(data) > 0 {
+ maps.Copy(hits, data)
+
+ if i > 0 && len(hits) > 0 {
+			// let's fetch only the missing keys
+ m := missingKeys[:0]
+ for _, key := range missingKeys {
+ if _, ok := hits[key]; !ok {
+ m = append(m, key)
+ }
+ }
+
+ missingKeys = m
+
+ maps.Copy(backfillItems[i-1], hits)
+ }
+
+ if len(hits) == len(keys) {
+ // fetch done
+ break
+ }
+ }
+ }
+
+ defer func() {
+ backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues())
+ defer backFillTimer.ObserveDuration()
+
+ for i, values := range backfillItems {
+ if len(values) == 0 {
+ continue
+ }
+
+ if err := m.backfillProcessor.EnqueueAsync(func() {
+ m.caches[i].Store(values, m.backfillTTL)
+ }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
+ m.backfillDroppedItems.Inc()
+ }
+ }
+ }()
+
+ return hits
+}
+
+func (m *multiLevelBucketCache) Name() string {
+ return m.name
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_cache.go
deleted file mode 100644
index 5d41ac118..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_cache.go
+++ /dev/null
@@ -1,257 +0,0 @@
-package tsdb
-
-import (
- "context"
- "errors"
-
- "github.com/oklog/ulid"
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promauto"
- "github.com/prometheus/prometheus/model/labels"
- "github.com/prometheus/prometheus/storage"
- "github.com/thanos-io/thanos/pkg/cacheutil"
- storecache "github.com/thanos-io/thanos/pkg/store/cache"
- "golang.org/x/exp/slices"
-)
-
-type multiLevelCache struct {
- postingsCaches, seriesCaches, expandedPostingCaches []storecache.IndexCache
-
- fetchLatency *prometheus.HistogramVec
- backFillLatency *prometheus.HistogramVec
- backfillProcessor *cacheutil.AsyncOperationProcessor
- backfillDroppedItems map[string]prometheus.Counter
- storeDroppedItems map[string]prometheus.Counter
-
- maxBackfillItems int
-}
-
-func (m *multiLevelCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) {
- for _, c := range m.postingsCaches {
- cache := c
- if err := m.backfillProcessor.EnqueueAsync(func() {
- cache.StorePostings(blockID, l, v, tenant)
- }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
- m.storeDroppedItems[storecache.CacheTypePostings].Inc()
- }
- }
-}
-
-func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) {
- timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypePostings))
- defer timer.ObserveDuration()
-
- misses = keys
- hits = map[labels.Label][]byte{}
- backfillItems := make([]map[labels.Label][]byte, len(m.postingsCaches)-1)
- for i, c := range m.postingsCaches {
- if i < len(m.postingsCaches)-1 {
- backfillItems[i] = map[labels.Label][]byte{}
- }
- if ctx.Err() != nil {
- return
- }
- h, mi := c.FetchMultiPostings(ctx, blockID, misses, tenant)
- misses = mi
-
- for label, bytes := range h {
- hits[label] = bytes
- }
-
- if i > 0 {
- backfillItems[i-1] = h
- }
-
- if len(misses) == 0 {
- break
- }
- }
-
- defer func() {
- backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypePostings))
- defer backFillTimer.ObserveDuration()
- for i, values := range backfillItems {
- i := i
- values := values
- if len(values) == 0 {
- continue
- }
- if err := m.backfillProcessor.EnqueueAsync(func() {
- cnt := 0
- for lbl, b := range values {
- m.postingsCaches[i].StorePostings(blockID, lbl, b, tenant)
- cnt++
- if cnt == m.maxBackfillItems {
- m.backfillDroppedItems[storecache.CacheTypePostings].Add(float64(len(values) - cnt))
- return
- }
- }
- }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
- m.backfillDroppedItems[storecache.CacheTypePostings].Add(float64(len(values)))
- }
- }
- }()
-
- return hits, misses
-}
-
-func (m *multiLevelCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) {
- for _, c := range m.expandedPostingCaches {
- cache := c
- if err := m.backfillProcessor.EnqueueAsync(func() {
- cache.StoreExpandedPostings(blockID, matchers, v, tenant)
- }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
- m.storeDroppedItems[storecache.CacheTypeExpandedPostings].Inc()
- }
- }
-}
-
-func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) {
- timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypeExpandedPostings))
- defer timer.ObserveDuration()
-
- for i, c := range m.expandedPostingCaches {
- if ctx.Err() != nil {
- return nil, false
- }
- if d, h := c.FetchExpandedPostings(ctx, blockID, matchers, tenant); h {
- if i > 0 {
- backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypeExpandedPostings))
- if err := m.backfillProcessor.EnqueueAsync(func() {
- m.expandedPostingCaches[i-1].StoreExpandedPostings(blockID, matchers, d, tenant)
- }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
- m.backfillDroppedItems[storecache.CacheTypeExpandedPostings].Inc()
- }
- backFillTimer.ObserveDuration()
- }
- return d, h
- }
- }
-
- return []byte{}, false
-}
-
-func (m *multiLevelCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) {
- for _, c := range m.seriesCaches {
- cache := c
- if err := m.backfillProcessor.EnqueueAsync(func() {
- cache.StoreSeries(blockID, id, v, tenant)
- }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
- m.storeDroppedItems[storecache.CacheTypeSeries].Inc()
- }
- }
-}
-
-func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) {
- timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypeSeries))
- defer timer.ObserveDuration()
-
- misses = ids
- hits = map[storage.SeriesRef][]byte{}
- backfillItems := make([]map[storage.SeriesRef][]byte, len(m.seriesCaches)-1)
-
- for i, c := range m.seriesCaches {
- if i < len(m.seriesCaches)-1 {
- backfillItems[i] = map[storage.SeriesRef][]byte{}
- }
- if ctx.Err() != nil {
- return
- }
- h, miss := c.FetchMultiSeries(ctx, blockID, misses, tenant)
- misses = miss
-
- for label, bytes := range h {
- hits[label] = bytes
- }
-
- if i > 0 && len(h) > 0 {
- backfillItems[i-1] = h
- }
-
- if len(misses) == 0 {
- break
- }
- }
-
- defer func() {
- backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypeSeries))
- defer backFillTimer.ObserveDuration()
- for i, values := range backfillItems {
- i := i
- values := values
- if len(values) == 0 {
- continue
- }
- if err := m.backfillProcessor.EnqueueAsync(func() {
- cnt := 0
- for ref, b := range values {
- m.seriesCaches[i].StoreSeries(blockID, ref, b, tenant)
- cnt++
- if cnt == m.maxBackfillItems {
- m.backfillDroppedItems[storecache.CacheTypeSeries].Add(float64(len(values) - cnt))
- return
- }
- }
- }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
- m.backfillDroppedItems[storecache.CacheTypeSeries].Add(float64(len(values)))
- }
- }
- }()
-
- return hits, misses
-}
-
-func filterCachesByItem(enabledItems [][]string, cachedItem string, c ...storecache.IndexCache) []storecache.IndexCache {
- filteredCaches := make([]storecache.IndexCache, 0, len(c))
- for i := range enabledItems {
- if len(enabledItems[i]) == 0 || slices.Contains(enabledItems[i], cachedItem) {
- filteredCaches = append(filteredCaches, c[i])
- }
- }
- return filteredCaches
-}
-
-func newMultiLevelCache(reg prometheus.Registerer, cfg MultiLevelIndexCacheConfig, enabledItems [][]string, c ...storecache.IndexCache) storecache.IndexCache {
- if len(c) == 1 {
- if len(enabledItems[0]) == 0 {
- return c[0]
- }
- return storecache.NewFilteredIndexCache(c[0], enabledItems[0])
- }
-
- backfillDroppedItems := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
- Name: "cortex_store_multilevel_index_cache_backfill_dropped_items_total",
- Help: "Total number of items dropped due to async buffer full when backfilling multilevel cache ",
- }, []string{"item_type"})
- storeDroppedItems := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
- Name: "cortex_store_multilevel_index_cache_store_dropped_items_total",
- Help: "Total number of items dropped due to async buffer full when storing multilevel cache ",
- }, []string{"item_type"})
- return &multiLevelCache{
- postingsCaches: filterCachesByItem(enabledItems, storecache.CacheTypePostings, c...),
- seriesCaches: filterCachesByItem(enabledItems, storecache.CacheTypeSeries, c...),
- expandedPostingCaches: filterCachesByItem(enabledItems, storecache.CacheTypeExpandedPostings, c...),
- backfillProcessor: cacheutil.NewAsyncOperationProcessor(cfg.MaxAsyncBufferSize, cfg.MaxAsyncConcurrency),
- fetchLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_store_multilevel_index_cache_fetch_duration_seconds",
- Help: "Histogram to track latency to fetch items from multi level index cache",
- Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90},
- }, []string{"item_type"}),
- backFillLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
- Name: "cortex_store_multilevel_index_cache_backfill_duration_seconds",
- Help: "Histogram to track latency to backfill items from multi level index cache",
- Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90},
- }, []string{"item_type"}),
- backfillDroppedItems: map[string]prometheus.Counter{
- storecache.CacheTypePostings: backfillDroppedItems.WithLabelValues(storecache.CacheTypePostings),
- storecache.CacheTypeSeries: backfillDroppedItems.WithLabelValues(storecache.CacheTypeSeries),
- storecache.CacheTypeExpandedPostings: backfillDroppedItems.WithLabelValues(storecache.CacheTypeExpandedPostings),
- },
- storeDroppedItems: map[string]prometheus.Counter{
- storecache.CacheTypePostings: storeDroppedItems.WithLabelValues(storecache.CacheTypePostings),
- storecache.CacheTypeSeries: storeDroppedItems.WithLabelValues(storecache.CacheTypeSeries),
- storecache.CacheTypeExpandedPostings: storeDroppedItems.WithLabelValues(storecache.CacheTypeExpandedPostings),
- },
- maxBackfillItems: cfg.MaxBackfillItems,
- }
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_index_cache.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_index_cache.go
new file mode 100644
index 000000000..bab35f747
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/multilevel_index_cache.go
@@ -0,0 +1,250 @@
+package tsdb
+
+import (
+ "context"
+ "errors"
+ "maps"
+ "slices"
+
+ "github.com/oklog/ulid/v2"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/storage"
+ "github.com/thanos-io/thanos/pkg/cacheutil"
+ storecache "github.com/thanos-io/thanos/pkg/store/cache"
+)
+
+type multiLevelCache struct {
+ postingsCaches, seriesCaches, expandedPostingCaches []storecache.IndexCache
+
+ fetchLatency *prometheus.HistogramVec
+ backFillLatency *prometheus.HistogramVec
+ backfillProcessor *cacheutil.AsyncOperationProcessor
+ backfillDroppedItems map[string]prometheus.Counter
+ storeDroppedItems map[string]prometheus.Counter
+
+ maxBackfillItems int
+}
+
+func (m *multiLevelCache) StorePostings(blockID ulid.ULID, l labels.Label, v []byte, tenant string) {
+ for _, c := range m.postingsCaches {
+ cache := c
+ if err := m.backfillProcessor.EnqueueAsync(func() {
+ cache.StorePostings(blockID, l, v, tenant)
+ }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
+ m.storeDroppedItems[storecache.CacheTypePostings].Inc()
+ }
+ }
+}
+
+func (m *multiLevelCache) FetchMultiPostings(ctx context.Context, blockID ulid.ULID, keys []labels.Label, tenant string) (hits map[labels.Label][]byte, misses []labels.Label) {
+ timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypePostings))
+ defer timer.ObserveDuration()
+
+ misses = keys
+ hits = map[labels.Label][]byte{}
+ backfillItems := make([]map[labels.Label][]byte, len(m.postingsCaches)-1)
+ for i, c := range m.postingsCaches {
+ if i < len(m.postingsCaches)-1 {
+ backfillItems[i] = map[labels.Label][]byte{}
+ }
+ if ctx.Err() != nil {
+ return
+ }
+ h, mi := c.FetchMultiPostings(ctx, blockID, misses, tenant)
+ misses = mi
+
+ maps.Copy(hits, h)
+
+ if i > 0 {
+ backfillItems[i-1] = h
+ }
+
+ if len(misses) == 0 {
+ break
+ }
+ }
+
+ defer func() {
+ backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypePostings))
+ defer backFillTimer.ObserveDuration()
+ for i, values := range backfillItems {
+ if len(values) == 0 {
+ continue
+ }
+ if err := m.backfillProcessor.EnqueueAsync(func() {
+ cnt := 0
+ for lbl, b := range values {
+ m.postingsCaches[i].StorePostings(blockID, lbl, b, tenant)
+ cnt++
+ if cnt == m.maxBackfillItems {
+ m.backfillDroppedItems[storecache.CacheTypePostings].Add(float64(len(values) - cnt))
+ return
+ }
+ }
+ }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
+ m.backfillDroppedItems[storecache.CacheTypePostings].Add(float64(len(values)))
+ }
+ }
+ }()
+
+ return hits, misses
+}
+
+func (m *multiLevelCache) StoreExpandedPostings(blockID ulid.ULID, matchers []*labels.Matcher, v []byte, tenant string) {
+ for _, c := range m.expandedPostingCaches {
+ cache := c
+ if err := m.backfillProcessor.EnqueueAsync(func() {
+ cache.StoreExpandedPostings(blockID, matchers, v, tenant)
+ }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
+ m.storeDroppedItems[storecache.CacheTypeExpandedPostings].Inc()
+ }
+ }
+}
+
+func (m *multiLevelCache) FetchExpandedPostings(ctx context.Context, blockID ulid.ULID, matchers []*labels.Matcher, tenant string) ([]byte, bool) {
+ timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypeExpandedPostings))
+ defer timer.ObserveDuration()
+
+ for i, c := range m.expandedPostingCaches {
+ if ctx.Err() != nil {
+ return nil, false
+ }
+ if d, h := c.FetchExpandedPostings(ctx, blockID, matchers, tenant); h {
+ if i > 0 {
+ backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypeExpandedPostings))
+ if err := m.backfillProcessor.EnqueueAsync(func() {
+ m.expandedPostingCaches[i-1].StoreExpandedPostings(blockID, matchers, d, tenant)
+ }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
+ m.backfillDroppedItems[storecache.CacheTypeExpandedPostings].Inc()
+ }
+ backFillTimer.ObserveDuration()
+ }
+ return d, h
+ }
+ }
+
+ return []byte{}, false
+}
+
+func (m *multiLevelCache) StoreSeries(blockID ulid.ULID, id storage.SeriesRef, v []byte, tenant string) {
+ for _, c := range m.seriesCaches {
+ cache := c
+ if err := m.backfillProcessor.EnqueueAsync(func() {
+ cache.StoreSeries(blockID, id, v, tenant)
+ }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
+ m.storeDroppedItems[storecache.CacheTypeSeries].Inc()
+ }
+ }
+}
+
+func (m *multiLevelCache) FetchMultiSeries(ctx context.Context, blockID ulid.ULID, ids []storage.SeriesRef, tenant string) (hits map[storage.SeriesRef][]byte, misses []storage.SeriesRef) {
+ timer := prometheus.NewTimer(m.fetchLatency.WithLabelValues(storecache.CacheTypeSeries))
+ defer timer.ObserveDuration()
+
+ misses = ids
+ hits = map[storage.SeriesRef][]byte{}
+ backfillItems := make([]map[storage.SeriesRef][]byte, len(m.seriesCaches)-1)
+
+ for i, c := range m.seriesCaches {
+ if i < len(m.seriesCaches)-1 {
+ backfillItems[i] = map[storage.SeriesRef][]byte{}
+ }
+ if ctx.Err() != nil {
+ return
+ }
+ h, miss := c.FetchMultiSeries(ctx, blockID, misses, tenant)
+ misses = miss
+
+ maps.Copy(hits, h)
+
+ if i > 0 && len(h) > 0 {
+ backfillItems[i-1] = h
+ }
+
+ if len(misses) == 0 {
+ break
+ }
+ }
+
+ defer func() {
+ backFillTimer := prometheus.NewTimer(m.backFillLatency.WithLabelValues(storecache.CacheTypeSeries))
+ defer backFillTimer.ObserveDuration()
+ for i, values := range backfillItems {
+ if len(values) == 0 {
+ continue
+ }
+ if err := m.backfillProcessor.EnqueueAsync(func() {
+ cnt := 0
+ for ref, b := range values {
+ m.seriesCaches[i].StoreSeries(blockID, ref, b, tenant)
+ cnt++
+ if cnt == m.maxBackfillItems {
+ m.backfillDroppedItems[storecache.CacheTypeSeries].Add(float64(len(values) - cnt))
+ return
+ }
+ }
+ }); errors.Is(err, cacheutil.ErrAsyncBufferFull) {
+ m.backfillDroppedItems[storecache.CacheTypeSeries].Add(float64(len(values)))
+ }
+ }
+ }()
+
+ return hits, misses
+}
+
+func filterCachesByItem(enabledItems [][]string, cachedItem string, c ...storecache.IndexCache) []storecache.IndexCache {
+ filteredCaches := make([]storecache.IndexCache, 0, len(c))
+ for i := range enabledItems {
+ if len(enabledItems[i]) == 0 || slices.Contains(enabledItems[i], cachedItem) {
+ filteredCaches = append(filteredCaches, c[i])
+ }
+ }
+ return filteredCaches
+}
+
+func newMultiLevelCache(reg prometheus.Registerer, cfg MultiLevelIndexCacheConfig, enabledItems [][]string, c ...storecache.IndexCache) storecache.IndexCache {
+ if len(c) == 1 {
+ if len(enabledItems[0]) == 0 {
+ return c[0]
+ }
+ return storecache.NewFilteredIndexCache(c[0], enabledItems[0])
+ }
+
+ backfillDroppedItems := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_store_multilevel_index_cache_backfill_dropped_items_total",
+ Help: "Total number of items dropped due to async buffer full when backfilling multilevel cache ",
+ }, []string{"item_type"})
+ storeDroppedItems := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_store_multilevel_index_cache_store_dropped_items_total",
+ Help: "Total number of items dropped due to async buffer full when storing multilevel cache ",
+ }, []string{"item_type"})
+ return &multiLevelCache{
+ postingsCaches: filterCachesByItem(enabledItems, storecache.CacheTypePostings, c...),
+ seriesCaches: filterCachesByItem(enabledItems, storecache.CacheTypeSeries, c...),
+ expandedPostingCaches: filterCachesByItem(enabledItems, storecache.CacheTypeExpandedPostings, c...),
+ backfillProcessor: cacheutil.NewAsyncOperationProcessor(cfg.MaxAsyncBufferSize, cfg.MaxAsyncConcurrency),
+ fetchLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+ Name: "cortex_store_multilevel_index_cache_fetch_duration_seconds",
+ Help: "Histogram to track latency to fetch items from multi level index cache",
+ Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90},
+ }, []string{"item_type"}),
+ backFillLatency: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{
+ Name: "cortex_store_multilevel_index_cache_backfill_duration_seconds",
+ Help: "Histogram to track latency to backfill items from multi level index cache",
+ Buckets: []float64{0.01, 0.1, 0.3, 0.6, 1, 3, 6, 10, 15, 20, 25, 30, 40, 50, 60, 90},
+ }, []string{"item_type"}),
+ backfillDroppedItems: map[string]prometheus.Counter{
+ storecache.CacheTypePostings: backfillDroppedItems.WithLabelValues(storecache.CacheTypePostings),
+ storecache.CacheTypeSeries: backfillDroppedItems.WithLabelValues(storecache.CacheTypeSeries),
+ storecache.CacheTypeExpandedPostings: backfillDroppedItems.WithLabelValues(storecache.CacheTypeExpandedPostings),
+ },
+ storeDroppedItems: map[string]prometheus.Counter{
+ storecache.CacheTypePostings: storeDroppedItems.WithLabelValues(storecache.CacheTypePostings),
+ storecache.CacheTypeSeries: storeDroppedItems.WithLabelValues(storecache.CacheTypeSeries),
+ storecache.CacheTypeExpandedPostings: storeDroppedItems.WithLabelValues(storecache.CacheTypeExpandedPostings),
+ },
+ maxBackfillItems: cfg.MaxBackfillItems,
+ }
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/redis_client_config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/redis_client_config.go
index 02afdd8c9..deb871f0b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/redis_client_config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/redis_client_config.go
@@ -2,7 +2,6 @@ package tsdb
import (
"flag"
-
"time"
"github.com/pkg/errors"
@@ -55,12 +54,12 @@ func (cfg *RedisClientConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix st
f.IntVar(&cfg.GetMultiBatchSize, prefix+"get-multi-batch-size", 100, "The maximum size per batch for mget.")
f.IntVar(&cfg.MaxSetMultiConcurrency, prefix+"max-set-multi-concurrency", 100, "The maximum number of concurrent SetMulti() operations. If set to 0, concurrency is unlimited.")
f.IntVar(&cfg.SetMultiBatchSize, prefix+"set-multi-batch-size", 100, "The maximum size per batch for pipeline set.")
- f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 50, "The maximum number of concurrent asynchronous operations can occur.")
+ f.IntVar(&cfg.MaxAsyncConcurrency, prefix+"max-async-concurrency", 3, "The maximum number of concurrent asynchronous operations can occur.")
f.IntVar(&cfg.MaxAsyncBufferSize, prefix+"max-async-buffer-size", 10000, "The maximum number of enqueued asynchronous operations allowed.")
f.StringVar(&cfg.MasterName, prefix+"master-name", "", "Specifies the master's name. Must be not empty for Redis Sentinel.")
f.IntVar(&cfg.CacheSize, prefix+"cache-size", 0, "If not zero then client-side caching is enabled. Client-side caching is when data is stored in memory instead of fetching data each time. See https://redis.io/docs/manual/client-side-caching/ for more info.")
f.BoolVar(&cfg.TLSEnabled, prefix+"tls-enabled", false, "Whether to enable tls for redis connection.")
- cfg.TLS.RegisterFlagsWithPrefix(prefix, f)
+ cfg.TLS.RegisterFlagsWithPrefix(prefix[:len(prefix)-1], f)
cfg.SetAsyncCircuitBreaker.RegisterFlagsWithPrefix(f, prefix+"set-async.")
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go
index 33ba7b22c..5e0eda2d3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go
@@ -11,7 +11,7 @@ import (
"github.com/pkg/errors"
"github.com/thanos-io/objstore"
- "github.com/cortexproject/cortex/pkg/util"
+ "github.com/cortexproject/cortex/pkg/tenant"
util_log "github.com/cortexproject/cortex/pkg/util/log"
)
@@ -77,7 +77,7 @@ func GetLocalDeletionMarkPath(userID string) string {
}
func GetGlobalDeletionMarkPath(userID string) string {
- return path.Join(util.GlobalMarkersDir, userID, TenantDeletionMarkFile)
+ return path.Join(tenant.GlobalMarkersDir, userID, TenantDeletionMarkFile)
}
func exists(ctx context.Context, bkt objstore.BucketReader, markerFile string) (bool, error) {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner.go
deleted file mode 100644
index 0fe24395b..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package tsdb
-
-import (
- "context"
- "strings"
-
- "github.com/go-kit/log"
- "github.com/go-kit/log/level"
- "github.com/thanos-io/objstore"
-
- "github.com/cortexproject/cortex/pkg/util"
-)
-
-// AllUsers returns true to each call and should be used whenever the UsersScanner should not filter out
-// any user due to sharding.
-func AllUsers(user string) (bool, error) {
- if user == util.GlobalMarkersDir {
- return false, nil
- }
- return true, nil
-}
-
-type UsersScanner struct {
- bucketClient objstore.Bucket
- logger log.Logger
- isOwned func(userID string) (bool, error)
-}
-
-func NewUsersScanner(bucketClient objstore.Bucket, isOwned func(userID string) (bool, error), logger log.Logger) *UsersScanner {
- return &UsersScanner{
- bucketClient: bucketClient,
- logger: logger,
- isOwned: isOwned,
- }
-}
-
-// ScanUsers returns a fresh list of users found in the storage, that are not marked for deletion,
-// and list of users marked for deletion.
-//
-// If sharding is enabled, returned lists contains only the users owned by this instance.
-func (s *UsersScanner) ScanUsers(ctx context.Context) (users, markedForDeletion []string, err error) {
- scannedUsers := make(map[string]struct{})
-
- // Scan users in the bucket.
- err = s.bucketClient.Iter(ctx, "", func(entry string) error {
- userID := strings.TrimSuffix(entry, "/")
- scannedUsers[userID] = struct{}{}
- return nil
- })
- if err != nil {
- return nil, nil, err
- }
-
- // Scan users from the __markers__ directory.
- err = s.bucketClient.Iter(ctx, util.GlobalMarkersDir, func(entry string) error {
- // entry will be of the form __markers__//
- parts := strings.Split(entry, objstore.DirDelim)
- userID := parts[1]
- scannedUsers[userID] = struct{}{}
- return nil
- })
- if err != nil {
- return nil, nil, err
- }
-
- for userID := range scannedUsers {
- // Filter out users not owned by this instance.
- if owned, err := s.isOwned(userID); err != nil {
- level.Warn(s.logger).Log("msg", "unable to check if user is owned by this shard", "user", userID, "err", err)
- } else if !owned {
- continue
- }
-
- // Filter users marked for deletion
- if deletionMarkExists, err := TenantDeletionMarkExists(ctx, s.bucketClient, userID); err != nil {
- level.Warn(s.logger).Log("msg", "unable to check if user is marked for deletion", "user", userID, "err", err)
- } else if deletionMarkExists {
- markedForDeletion = append(markedForDeletion, userID)
- continue
- }
-
- // The remaining are the active users owned by this instance.
- users = append(users, userID)
- }
-
- return users, markedForDeletion, nil
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner_config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner_config.go
new file mode 100644
index 000000000..5b556d9e4
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/users_scanner_config.go
@@ -0,0 +1,47 @@
+package tsdb
+
+import (
+ "flag"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+type UsersScannerConfig struct {
+ Strategy string `yaml:"strategy"`
+ MaxStalePeriod time.Duration `yaml:"max_stale_period"`
+ CacheTTL time.Duration `yaml:"cache_ttl"`
+}
+
+const (
+ UserScanStrategyList = "list"
+ UserScanStrategyUserIndex = "user_index"
+)
+
+var (
+ ErrInvalidUserScannerStrategy = errors.New("invalid user scanner strategy")
+ ErrInvalidMaxStalePeriod = errors.New("max stale period must be positive")
+ ErrInvalidCacheTTL = errors.New("cache TTL must be >= 0")
+ supportedStrategies = []string{UserScanStrategyList, UserScanStrategyUserIndex}
+)
+
+func (c *UsersScannerConfig) Validate() error {
+ if c.Strategy != UserScanStrategyList && c.Strategy != UserScanStrategyUserIndex {
+ return ErrInvalidUserScannerStrategy
+ }
+ if c.Strategy == UserScanStrategyUserIndex && c.MaxStalePeriod <= 0 {
+ return ErrInvalidMaxStalePeriod
+ }
+ if c.CacheTTL < 0 {
+ return ErrInvalidCacheTTL
+ }
+ return nil
+}
+
+func (c *UsersScannerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.StringVar(&c.Strategy, prefix+".users-scanner.strategy", UserScanStrategyList, fmt.Sprintf("Strategy to use to scan users. Supported values are: %s.", strings.Join(supportedStrategies, ", ")))
+ f.DurationVar(&c.MaxStalePeriod, prefix+".users-scanner.user-index.max-stale-period", time.Hour, "Maximum period of time to consider the user index as stale. Fall back to the base scanner if stale. Only valid when strategy is user_index.")
+ f.DurationVar(&c.CacheTTL, prefix+".users-scanner.cache-ttl", 0, "TTL of the cached users. 0 disables caching and relies on caching at bucket client level.")
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/util.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/util.go
index 014b510d3..e9a700742 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/util.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/util.go
@@ -1,7 +1,7 @@
package tsdb
import (
- "github.com/oklog/ulid"
+ "github.com/oklog/ulid/v2"
"github.com/thanos-io/objstore"
"github.com/cortexproject/cortex/pkg/ingester/client"
diff --git a/vendor/github.com/cortexproject/cortex/pkg/tenant/resolver.go b/vendor/github.com/cortexproject/cortex/pkg/tenant/resolver.go
index f0fd8abfe..3505030c6 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/tenant/resolver.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/tenant/resolver.go
@@ -2,7 +2,6 @@ package tenant
import (
"context"
- "errors"
"net/http"
"strings"
@@ -67,15 +66,9 @@ type SingleResolver struct {
// `\` can be found.
func containsUnsafePathSegments(id string) bool {
// handle the relative reference to current and parent path.
- if id == "." || id == ".." {
- return true
- }
-
- return strings.ContainsAny(id, "\\/")
+ return id == "." || id == ".."
}
-var errInvalidTenantID = errors.New("invalid tenant ID")
-
func (t *SingleResolver) TenantID(ctx context.Context) (string, error) {
//lint:ignore faillint wrapper around upstream method
id, err := user.ExtractOrgID(ctx)
@@ -83,8 +76,8 @@ func (t *SingleResolver) TenantID(ctx context.Context) (string, error) {
return "", err
}
- if containsUnsafePathSegments(id) {
- return "", errInvalidTenantID
+ if err := ValidTenantID(id); err != nil {
+ return "", err
}
return id, nil
@@ -130,13 +123,15 @@ func (t *MultiResolver) TenantIDs(ctx context.Context) ([]string, error) {
}
orgIDs := strings.Split(orgID, tenantIDsLabelSeparator)
+
+ return ValidateOrgIDs(orgIDs)
+}
+
+func ValidateOrgIDs(orgIDs []string) ([]string, error) {
for _, orgID := range orgIDs {
if err := ValidTenantID(orgID); err != nil {
return nil, err
}
- if containsUnsafePathSegments(orgID) {
- return nil, errInvalidTenantID
- }
}
return NormalizeTenantIDs(orgIDs), nil
diff --git a/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go b/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go
index c7c772648..f8e97c653 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go
@@ -10,8 +10,13 @@ import (
"github.com/weaveworks/common/user"
)
+const GlobalMarkersDir = "__markers__"
+
var (
- errTenantIDTooLong = errors.New("tenant ID is too long: max 150 characters")
+ errTenantIDTooLong = errors.New("tenant ID is too long: max 150 characters")
+ errTenantIDUnsafe = errors.New("tenant ID is '.' or '..'")
+ errTenantIDMarkers = errors.New("tenant ID '__markers__' is not allowed")
+ errTenantIDUserIndex = errors.New("tenant ID 'user-index.json.gz' is not allowed")
)
type errTenantIDUnsupportedCharacter struct {
@@ -29,7 +34,7 @@ func (e *errTenantIDUnsupportedCharacter) Error() string {
const tenantIDsLabelSeparator = "|"
-// NormalizeTenantIDs is creating a normalized form by sortiing and de-duplicating the list of tenantIDs
+// NormalizeTenantIDs is creating a normalized form by sorting and de-duplicating the list of tenantIDs
func NormalizeTenantIDs(tenantIDs []string) []string {
sort.Strings(tenantIDs)
@@ -49,7 +54,7 @@ func NormalizeTenantIDs(tenantIDs []string) []string {
return tenantIDs[0:posOut]
}
-// ValidTenantID
+// ValidTenantID validates the given tenant ID.
func ValidTenantID(s string) error {
// check if it contains invalid runes
for pos, r := range s {
@@ -61,6 +66,18 @@ func ValidTenantID(s string) error {
}
}
+ if err := CheckTenantIDLength(s); err != nil {
+ return err
+ }
+
+ if err := CheckTenantIDIsSupported(s); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func CheckTenantIDLength(s string) error {
if len(s) > 150 {
return errTenantIDTooLong
}
@@ -68,6 +85,24 @@ func ValidTenantID(s string) error {
return nil
}
+func CheckTenantIDIsSupported(s string) error {
+ // check tenantID is "__markers__"
+ if s == GlobalMarkersDir {
+ return errTenantIDMarkers
+ }
+
+ if s == "user-index.json.gz" {
+ return errTenantIDUserIndex
+ }
+
+ // check tenantID is "." or ".."
+ if containsUnsafePathSegments(s) {
+ return errTenantIDUnsafe
+ }
+
+ return nil
+}
+
func JoinTenantIDs(tenantIDs []string) string {
return strings.Join(tenantIDs, tenantIDsLabelSeparator)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/active_user.go b/vendor/github.com/cortexproject/cortex/pkg/util/active_user.go
index f735dc227..2fde25ed1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/active_user.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/active_user.go
@@ -94,6 +94,18 @@ func (m *ActiveUsers) PurgeInactiveUsers(deadline int64) []string {
return inactive
}
+func (m *ActiveUsers) ActiveUsers(deadline int64) []string {
+ m.mu.RLock()
+ active := make([]string, 0, len(m.timestamps))
+ defer m.mu.RUnlock()
+ for userID, ts := range m.timestamps {
+ if ts.Load() > deadline {
+ active = append(active, userID)
+ }
+ }
+ return active
+}
+
// ActiveUsersCleanupService tracks active users, and periodically purges inactive ones while running.
type ActiveUsersCleanupService struct {
services.Service
@@ -129,3 +141,7 @@ func (s *ActiveUsersCleanupService) iteration(_ context.Context) error {
}
return nil
}
+
+func (s *ActiveUsersCleanupService) ActiveUsers() []string {
+ return s.activeUsers.ActiveUsers(time.Now().Add(-s.inactiveTimeout).UnixNano())
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/allowed_tenants.go b/vendor/github.com/cortexproject/cortex/pkg/util/allowed_tenants.go
index d5b7af773..ff406b381 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/allowed_tenants.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/allowed_tenants.go
@@ -1,6 +1,6 @@
package util
-const GlobalMarkersDir = "__markers__"
+import "github.com/cortexproject/cortex/pkg/tenant"
// AllowedTenants that can answer whether tenant is allowed or not based on configuration.
// Default value (nil) allows all tenants.
@@ -36,7 +36,7 @@ func NewAllowedTenants(enabled []string, disabled []string) *AllowedTenants {
}
func (a *AllowedTenants) IsAllowed(tenantID string) bool {
- if tenantID == GlobalMarkersDir {
+ if tenantID == tenant.GlobalMarkersDir {
// __markers__ is reserved for global markers and no tenant should be allowed to have that name.
return false
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go
new file mode 100644
index 000000000..623b9a707
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/buffer.go
@@ -0,0 +1,26 @@
+package concurrency
+
+import (
+ "bytes"
+ "sync"
+)
+
+// SyncBuffer is an io.Writer implementation with atomic writes. It only keeps data in memory.
+type SyncBuffer struct {
+ mu sync.Mutex
+ buf bytes.Buffer
+}
+
+func (sb *SyncBuffer) Write(p []byte) (n int, err error) {
+ sb.mu.Lock()
+ defer sb.mu.Unlock()
+
+ return sb.buf.Write(p)
+}
+
+func (sb *SyncBuffer) String() string {
+ sb.mu.Lock()
+ defer sb.mu.Unlock()
+
+ return sb.buf.String()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go
new file mode 100644
index 000000000..df9b5e37a
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/concurrency/runner.go
@@ -0,0 +1,107 @@
+package concurrency
+
+import (
+ "context"
+ "sync"
+
+ "golang.org/x/sync/errgroup"
+
+ "github.com/cortexproject/cortex/pkg/util/multierror"
+)
+
+// ForEachUser runs the provided userFunc for each of the userIDs, using up to concurrency concurrent workers.
+// In case userFunc returns an error, it will continue to process the remaining users but returns an
+// error with all errors userFunc has returned.
+func ForEachUser(ctx context.Context, userIDs []string, concurrency int, userFunc func(ctx context.Context, userID string) error) error {
+ if len(userIDs) == 0 {
+ return nil
+ }
+
+ // Push all jobs to a channel.
+ ch := make(chan string, len(userIDs))
+ for _, userID := range userIDs {
+ ch <- userID
+ }
+ close(ch)
+
+ // Keep track of all errors occurred.
+ errs := multierror.MultiError{}
+ errsMx := sync.Mutex{}
+
+ wg := sync.WaitGroup{}
+ routines := min(concurrency, len(userIDs))
+ for range routines {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+
+ for userID := range ch {
+ // Ensure the context has not been canceled (ie. shutdown has been triggered).
+ if ctx.Err() != nil {
+ break
+ }
+
+ if err := userFunc(ctx, userID); err != nil {
+ errsMx.Lock()
+ errs.Add(err)
+ errsMx.Unlock()
+ }
+ }
+ }()
+ }
+
+ // wait for ongoing workers to finish.
+ wg.Wait()
+
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ return errs.Err()
+}
+
+// ForEach runs the provided jobFunc for each job up to concurrency concurrent workers.
+// The execution breaks on first error encountered.
+func ForEach(ctx context.Context, jobs []any, concurrency int, jobFunc func(ctx context.Context, job any) error) error {
+ if len(jobs) == 0 {
+ return nil
+ }
+
+ // Push all jobs to a channel.
+ ch := make(chan any, len(jobs))
+ for _, job := range jobs {
+ ch <- job
+ }
+ close(ch)
+
+ // Start workers to process jobs.
+ g, ctx := errgroup.WithContext(ctx)
+ routines := min(concurrency, len(jobs))
+ for range routines {
+ g.Go(func() error {
+ for job := range ch {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ if err := jobFunc(ctx, job); err != nil {
+ return err
+ }
+ }
+
+ return nil
+ })
+ }
+
+ // Wait until done (or context has canceled).
+ return g.Wait()
+}
+
+// CreateJobsFromStrings is a utility to create jobs from a slice of strings.
+func CreateJobsFromStrings(values []string) []any {
+ jobs := make([]any, len(values))
+ for i := range values {
+ jobs[i] = values[i]
+ }
+ return jobs
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/config.go b/vendor/github.com/cortexproject/cortex/pkg/util/config.go
index e1032d0f6..9bf1c7184 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/config.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/config.go
@@ -6,8 +6,8 @@ import (
)
// DiffConfig utility function that returns the diff between two config map objects
-func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[interface{}]interface{}, error) {
- output := make(map[interface{}]interface{})
+func DiffConfig(defaultConfig, actualConfig map[any]any) (map[any]any, error) {
+ output := make(map[any]any)
for key, value := range actualConfig {
@@ -33,8 +33,8 @@ func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[in
if !ok || defaultV != v {
output[key] = v
}
- case []interface{}:
- defaultV, ok := defaultValue.([]interface{})
+ case []any:
+ defaultV, ok := defaultValue.([]any)
if !ok || !reflect.DeepEqual(defaultV, v) {
output[key] = v
}
@@ -47,8 +47,8 @@ func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[in
if defaultValue != nil {
output[key] = v
}
- case map[interface{}]interface{}:
- defaultV, ok := defaultValue.(map[interface{}]interface{})
+ case map[any]any:
+ defaultV, ok := defaultValue.(map[any]any)
if !ok {
output[key] = value
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/discardedseries/perlabelset_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/util/discardedseries/perlabelset_tracker.go
new file mode 100644
index 000000000..1d5e0a9a1
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/discardedseries/perlabelset_tracker.go
@@ -0,0 +1,141 @@
+package discardedseries
+
+import (
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// TODO: if we change per labelset series limit from one reasoning to many, we can remove the hardcoded reasoning and add an extra reasoning map
+const (
+ perLabelsetSeriesLimit = "per_labelset_series_limit"
+)
+
+type labelsetCounterStruct struct {
+ *sync.RWMutex
+ labelsetSeriesMap map[uint64]*seriesCounterStruct
+}
+
+type DiscardedSeriesPerLabelsetTracker struct {
+ *sync.RWMutex
+ userLabelsetMap map[string]*labelsetCounterStruct
+ discardedSeriesPerLabelsetGauge *prometheus.GaugeVec
+}
+
+func NewDiscardedSeriesPerLabelsetTracker(discardedSeriesPerLabelsetGauge *prometheus.GaugeVec) *DiscardedSeriesPerLabelsetTracker {
+ tracker := &DiscardedSeriesPerLabelsetTracker{
+ RWMutex: &sync.RWMutex{},
+ userLabelsetMap: make(map[string]*labelsetCounterStruct),
+ discardedSeriesPerLabelsetGauge: discardedSeriesPerLabelsetGauge,
+ }
+ return tracker
+}
+
+func (t *DiscardedSeriesPerLabelsetTracker) Track(user string, series uint64, matchedLabelsetHash uint64, matchedLabelsetId string) {
+ t.RLock()
+ labelsetCounter, ok := t.userLabelsetMap[user]
+ t.RUnlock()
+ if !ok {
+ t.Lock()
+ labelsetCounter, ok = t.userLabelsetMap[user]
+ if !ok {
+ labelsetCounter = &labelsetCounterStruct{
+ RWMutex: &sync.RWMutex{},
+ labelsetSeriesMap: make(map[uint64]*seriesCounterStruct),
+ }
+ t.userLabelsetMap[user] = labelsetCounter
+ }
+ t.Unlock()
+ }
+
+ labelsetCounter.RLock()
+ seriesCounter, ok := labelsetCounter.labelsetSeriesMap[matchedLabelsetHash]
+ labelsetCounter.RUnlock()
+ if !ok {
+ labelsetCounter.Lock()
+ seriesCounter, ok = labelsetCounter.labelsetSeriesMap[matchedLabelsetHash]
+ if !ok {
+ seriesCounter = &seriesCounterStruct{
+ RWMutex: &sync.RWMutex{},
+ seriesCountMap: make(map[uint64]struct{}),
+ labelsetId: matchedLabelsetId,
+ }
+ labelsetCounter.labelsetSeriesMap[matchedLabelsetHash] = seriesCounter
+ }
+ labelsetCounter.Unlock()
+ }
+
+ seriesCounter.RLock()
+ _, ok = seriesCounter.seriesCountMap[series]
+ seriesCounter.RUnlock()
+ if !ok {
+ seriesCounter.Lock()
+ _, ok = seriesCounter.seriesCountMap[series]
+ if !ok {
+ seriesCounter.seriesCountMap[series] = struct{}{}
+ }
+ seriesCounter.Unlock()
+ }
+}
+
+func (t *DiscardedSeriesPerLabelsetTracker) UpdateMetrics() {
+ usersToDelete := make([]string, 0)
+ t.RLock()
+ for user, labelsetCounter := range t.userLabelsetMap {
+ labelsetsToDelete := make([]uint64, 0)
+ labelsetCounter.RLock()
+ if len(labelsetCounter.labelsetSeriesMap) == 0 {
+ usersToDelete = append(usersToDelete, user)
+ }
+ for labelsetHash, seriesCounter := range labelsetCounter.labelsetSeriesMap {
+ seriesCounter.Lock()
+ count := len(seriesCounter.seriesCountMap)
+ t.discardedSeriesPerLabelsetGauge.WithLabelValues(perLabelsetSeriesLimit, user, seriesCounter.labelsetId).Set(float64(count))
+ clear(seriesCounter.seriesCountMap)
+ if count == 0 {
+ labelsetsToDelete = append(labelsetsToDelete, labelsetHash)
+ }
+ seriesCounter.Unlock()
+ }
+ labelsetCounter.RUnlock()
+ if len(labelsetsToDelete) > 0 {
+ labelsetCounter.Lock()
+ for _, labelsetHash := range labelsetsToDelete {
+ if _, ok := labelsetCounter.labelsetSeriesMap[labelsetHash]; ok {
+ labelsetId := labelsetCounter.labelsetSeriesMap[labelsetHash].labelsetId
+ t.discardedSeriesPerLabelsetGauge.DeleteLabelValues(perLabelsetSeriesLimit, user, labelsetId)
+ delete(labelsetCounter.labelsetSeriesMap, labelsetHash)
+ }
+ }
+ labelsetCounter.Unlock()
+ }
+ }
+ t.RUnlock()
+ if len(usersToDelete) > 0 {
+ t.Lock()
+ for _, user := range usersToDelete {
+ delete(t.userLabelsetMap, user)
+ }
+ t.Unlock()
+ }
+}
+
+func (t *DiscardedSeriesPerLabelsetTracker) StartVendDiscardedSeriesMetricGoroutine() {
+ go func() {
+ ticker := time.NewTicker(vendMetricsInterval)
+ for range ticker.C {
+ t.UpdateMetrics()
+ }
+ }()
+}
+
+// only used in testing
+func (t *DiscardedSeriesPerLabelsetTracker) getSeriesCount(user string, labelsetLimitHash uint64) int {
+ if labelsetCounter, ok := t.userLabelsetMap[user]; ok {
+ if seriesCounter, ok := labelsetCounter.labelsetSeriesMap[labelsetLimitHash]; ok {
+ return len(seriesCounter.seriesCountMap)
+ }
+ }
+ return 0
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/discardedseries/tracker.go b/vendor/github.com/cortexproject/cortex/pkg/util/discardedseries/tracker.go
new file mode 100644
index 000000000..4a7c6e456
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/discardedseries/tracker.go
@@ -0,0 +1,133 @@
+package discardedseries
+
+import (
+ "sync"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+ vendMetricsInterval = 30 * time.Second
+)
+
+type seriesCounterStruct struct {
+ *sync.RWMutex
+ seriesCountMap map[uint64]struct{}
+ labelsetId string
+}
+
+type userCounterStruct struct {
+ *sync.RWMutex
+ userSeriesMap map[string]*seriesCounterStruct
+}
+
+type DiscardedSeriesTracker struct {
+ *sync.RWMutex
+ reasonUserMap map[string]*userCounterStruct
+ discardedSeriesGauge *prometheus.GaugeVec
+}
+
+func NewDiscardedSeriesTracker(discardedSeriesGauge *prometheus.GaugeVec) *DiscardedSeriesTracker {
+ tracker := &DiscardedSeriesTracker{
+ RWMutex: &sync.RWMutex{},
+ reasonUserMap: make(map[string]*userCounterStruct),
+ discardedSeriesGauge: discardedSeriesGauge,
+ }
+ return tracker
+}
+
+func (t *DiscardedSeriesTracker) Track(reason string, user string, series uint64) {
+ t.RLock()
+ userCounter, ok := t.reasonUserMap[reason]
+ t.RUnlock()
+ if !ok {
+ t.Lock()
+ userCounter, ok = t.reasonUserMap[reason]
+ if !ok {
+ userCounter = &userCounterStruct{
+ RWMutex: &sync.RWMutex{},
+ userSeriesMap: make(map[string]*seriesCounterStruct),
+ }
+ t.reasonUserMap[reason] = userCounter
+ }
+ t.Unlock()
+ }
+
+ userCounter.RLock()
+ seriesCounter, ok := userCounter.userSeriesMap[user]
+ userCounter.RUnlock()
+ if !ok {
+ userCounter.Lock()
+ seriesCounter, ok = userCounter.userSeriesMap[user]
+ if !ok {
+ seriesCounter = &seriesCounterStruct{
+ RWMutex: &sync.RWMutex{},
+ seriesCountMap: make(map[uint64]struct{}),
+ }
+ userCounter.userSeriesMap[user] = seriesCounter
+ }
+ userCounter.Unlock()
+ }
+
+ seriesCounter.RLock()
+ _, ok = seriesCounter.seriesCountMap[series]
+ seriesCounter.RUnlock()
+ if !ok {
+ seriesCounter.Lock()
+ _, ok = seriesCounter.seriesCountMap[series]
+ if !ok {
+ seriesCounter.seriesCountMap[series] = struct{}{}
+ }
+ seriesCounter.Unlock()
+ }
+}
+
+func (t *DiscardedSeriesTracker) UpdateMetrics() {
+ t.RLock()
+ for reason, userCounter := range t.reasonUserMap {
+ usersToDelete := make([]string, 0)
+ userCounter.RLock()
+ for user, seriesCounter := range userCounter.userSeriesMap {
+ seriesCounter.Lock()
+ count := len(seriesCounter.seriesCountMap)
+ t.discardedSeriesGauge.WithLabelValues(reason, user).Set(float64(count))
+ clear(seriesCounter.seriesCountMap)
+ if count == 0 {
+ usersToDelete = append(usersToDelete, user)
+ }
+ seriesCounter.Unlock()
+ }
+ userCounter.RUnlock()
+ if len(usersToDelete) > 0 {
+ userCounter.Lock()
+ for _, user := range usersToDelete {
+ if _, ok := userCounter.userSeriesMap[user]; ok {
+ t.discardedSeriesGauge.DeleteLabelValues(reason, user)
+ delete(userCounter.userSeriesMap, user)
+ }
+ }
+ userCounter.Unlock()
+ }
+ }
+ t.RUnlock()
+}
+
+func (t *DiscardedSeriesTracker) StartVendDiscardedSeriesMetricGoroutine() {
+ go func() {
+ ticker := time.NewTicker(vendMetricsInterval)
+ for range ticker.C {
+ t.UpdateMetrics()
+ }
+ }()
+}
+
+// only used in testing
+func (t *DiscardedSeriesTracker) getSeriesCount(reason string, user string) int {
+ if userCounter, ok := t.reasonUserMap[reason]; ok {
+ if seriesCounter, ok := userCounter.userSeriesMap[user]; ok {
+ return len(seriesCounter.seriesCountMap)
+ }
+ }
+ return 0
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/events.go b/vendor/github.com/cortexproject/cortex/pkg/util/events.go
index 312f43714..07453ad19 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/events.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/events.go
@@ -13,8 +13,8 @@ import (
var (
// interface{} vars to avoid allocation on every call
- key interface{} = "level" // masquerade as a level like debug, warn
- event interface{} = "event"
+ key any = "level" // masquerade as a level like debug, warn
+ event any = "event"
eventLogger = log.NewNopLogger()
)
@@ -46,7 +46,7 @@ type samplingFilter struct {
count atomic.Int64
}
-func (e *samplingFilter) Log(keyvals ...interface{}) error {
+func (e *samplingFilter) Log(keyvals ...any) error {
count := e.count.Inc()
if count%int64(e.freq) == 0 {
return e.next.Log(keyvals...)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
new file mode 100644
index 000000000..e76e1a9cb
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/extract/extract.go
@@ -0,0 +1,78 @@
+package extract
+
+import (
+ "fmt"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/cortexproject/cortex/pkg/cortexpb"
+)
+
+var (
+ errNoMetricNameLabel = fmt.Errorf("no metric name label")
+)
+
+// MetricNameFromLabelAdapters extracts the metric name from a list of LabelPairs.
+// The returned metric name string is a copy of the label value.
+func MetricNameFromLabelAdapters(labels []cortexpb.LabelAdapter) (string, error) {
+ unsafeMetricName, err := UnsafeMetricNameFromLabelAdapters(labels)
+ if err != nil {
+ return "", err
+ }
+
+ // Force a string copy since LabelAdapter is often a pointer into
+ // a large gRPC buffer which we don't want to keep alive on the heap.
+ return string([]byte(unsafeMetricName)), nil
+}
+
+// UnsafeMetricNameFromLabelAdapters extracts the metric name from a list of LabelPairs.
+// The returned metric name string is a reference to the label value (no copy).
+func UnsafeMetricNameFromLabelAdapters(labels []cortexpb.LabelAdapter) (string, error) {
+ for _, label := range labels {
+ if label.Name == model.MetricNameLabel {
+ return label.Value, nil
+ }
+ }
+ return "", errNoMetricNameLabel
+}
+
+// MetricNameFromMetric extract the metric name from a model.Metric
+func MetricNameFromMetric(m model.Metric) (model.LabelValue, error) {
+ if value, found := m[model.MetricNameLabel]; found {
+ return value, nil
+ }
+ return "", fmt.Errorf("no MetricNameLabel for chunk")
+}
+
+// MetricNameMatcherFromMatchers extracts the metric name from a set of matchers
+func MetricNameMatcherFromMatchers(matchers []*labels.Matcher) (*labels.Matcher, []*labels.Matcher, bool) {
+ // Handle the case where there is no metric name and all matchers have been
+ // filtered out e.g. {foo=""}.
+ if len(matchers) == 0 {
+ return nil, matchers, false
+ }
+
+ outMatchers := make([]*labels.Matcher, len(matchers)-1)
+ for i, matcher := range matchers {
+ if matcher.Name != model.MetricNameLabel {
+ continue
+ }
+
+ // Copy other matchers, excluding the found metric name matcher
+ copy(outMatchers, matchers[:i])
+ copy(outMatchers[i:], matchers[i+1:])
+ return matcher, outMatchers, true
+ }
+ // Return all matchers if none are metric name matchers
+ return nil, matchers, false
+}
+
+// MetricNameFromLabels extracts the metric name from a list of Prometheus Labels.
+func MetricNameFromLabels(lbls labels.Labels) (metricName string, err error) {
+ metricName = lbls.Get(model.MetricNameLabel)
+ if metricName == "" {
+ err = errNoMetricNameLabel
+ }
+ return
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/extract_forwarded.go b/vendor/github.com/cortexproject/cortex/pkg/util/extract_forwarded.go
index 9cacf3b3a..8c1aae382 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/extract_forwarded.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/extract_forwarded.go
@@ -24,12 +24,8 @@ func GetSourceIPsFromOutgoingCtx(ctx context.Context) string {
// GetSourceIPsFromIncomingCtx extracts the source field from the GRPC context
func GetSourceIPsFromIncomingCtx(ctx context.Context) string {
- md, ok := metadata.FromIncomingContext(ctx)
- if !ok {
- return ""
- }
- ipAddresses, ok := md[ipAddressesKey]
- if !ok {
+ ipAddresses := metadata.ValueFromIncomingContext(ctx, ipAddressesKey)
+ if ipAddresses == nil || len(ipAddresses) != 1 {
return ""
}
return ipAddresses[0]
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/cidr.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/cidr.go
index 72b93b680..bb7a19c53 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/cidr.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/cidr.go
@@ -46,9 +46,9 @@ func (c CIDRSliceCSV) String() string {
// Set implements flag.Value
func (c *CIDRSliceCSV) Set(s string) error {
- parts := strings.Split(s, ",")
+ parts := strings.SplitSeq(s, ",")
- for _, part := range parts {
+ for part := range parts {
cidr := &CIDR{}
if err := cidr.Set(part); err != nil {
return errors.Wrapf(err, "cidr: %s", part)
@@ -61,7 +61,7 @@ func (c *CIDRSliceCSV) Set(s string) error {
}
// UnmarshalYAML implements yaml.Unmarshaler.
-func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@@ -77,6 +77,6 @@ func (c *CIDRSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements yaml.Marshaler.
-func (c CIDRSliceCSV) MarshalYAML() (interface{}, error) {
+func (c CIDRSliceCSV) MarshalYAML() (any, error) {
return c.String(), nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/day.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/day.go
index 9db695c83..30aa897af 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/day.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/day.go
@@ -45,7 +45,7 @@ func (v *DayValue) IsSet() bool {
}
// UnmarshalYAML implements yaml.Unmarshaler.
-func (v *DayValue) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (v *DayValue) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@@ -54,6 +54,6 @@ func (v *DayValue) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements yaml.Marshaler.
-func (v DayValue) MarshalYAML() (interface{}, error) {
+func (v DayValue) MarshalYAML() (any, error) {
return v.Time.Time().UTC().Format("2006-01-02"), nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/secret.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/secret.go
index aa7101b14..e588b4a24 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/secret.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/secret.go
@@ -16,7 +16,7 @@ func (v *Secret) Set(s string) error {
}
// UnmarshalYAML implements yaml.Unmarshaler.
-func (v *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (v *Secret) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@@ -26,7 +26,7 @@ func (v *Secret) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements yaml.Marshaler.
-func (v Secret) MarshalYAML() (interface{}, error) {
+func (v Secret) MarshalYAML() (any, error) {
if len(v.Value) == 0 {
return "", nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/stringslicecsv.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/stringslicecsv.go
index 47ccd54ca..1f1aff6f1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/stringslicecsv.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/stringslicecsv.go
@@ -18,7 +18,7 @@ func (v *StringSliceCSV) Set(s string) error {
}
// UnmarshalYAML implements yaml.Unmarshaler.
-func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@@ -28,6 +28,6 @@ func (v *StringSliceCSV) UnmarshalYAML(unmarshal func(interface{}) error) error
}
// MarshalYAML implements yaml.Marshaler.
-func (v StringSliceCSV) MarshalYAML() (interface{}, error) {
+func (v StringSliceCSV) MarshalYAML() (any, error) {
return v.String(), nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/time.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/time.go
index 452857e9d..c00d0b7d2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/time.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/time.go
@@ -46,7 +46,7 @@ func (t *Time) Set(s string) error {
}
// UnmarshalYAML implements yaml.Unmarshaler.
-func (t *Time) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (t *Time) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@@ -55,6 +55,6 @@ func (t *Time) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements yaml.Marshaler.
-func (t Time) MarshalYAML() (interface{}, error) {
+func (t Time) MarshalYAML() (any, error) {
return t.String(), nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/url.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/url.go
index 3b3b8303b..338a0fb87 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/url.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/url.go
@@ -26,7 +26,7 @@ func (v *URLValue) Set(s string) error {
}
// UnmarshalYAML implements yaml.Unmarshaler.
-func (v *URLValue) UnmarshalYAML(unmarshal func(interface{}) error) error {
+func (v *URLValue) UnmarshalYAML(unmarshal func(any) error) error {
var s string
if err := unmarshal(&s); err != nil {
return err
@@ -42,7 +42,7 @@ func (v *URLValue) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
// MarshalYAML implements yaml.Marshaler.
-func (v URLValue) MarshalYAML() (interface{}, error) {
+func (v URLValue) MarshalYAML() (any, error) {
if v.URL == nil {
return "", nil
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/backoff_retry.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/backoff_retry.go
index 525497e6b..c50fffeee 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/backoff_retry.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/backoff_retry.go
@@ -12,7 +12,7 @@ import (
// NewBackoffRetry gRPC middleware.
func NewBackoffRetry(cfg backoff.Config) grpc.UnaryClientInterceptor {
- return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
backoff := backoff.New(ctx, cfg)
for backoff.Ongoing() {
err := invoker(ctx, method, req, reply, cc, opts...)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go
index edd767ad4..6adb8139a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go
@@ -8,6 +8,7 @@ import (
middleware "github.com/grpc-ecosystem/go-grpc-middleware"
"github.com/pkg/errors"
"google.golang.org/grpc"
+ grpcbackoff "google.golang.org/grpc/backoff"
"google.golang.org/grpc/encoding/gzip"
"google.golang.org/grpc/keepalive"
@@ -32,6 +33,13 @@ type Config struct {
TLSEnabled bool `yaml:"tls_enabled"`
TLS tls.ClientConfig `yaml:",inline"`
SignWriteRequestsEnabled bool `yaml:"-"`
+
+ ConnectTimeout time.Duration `yaml:"connect_timeout"`
+}
+
+type ConfigWithHealthCheck struct {
+ Config `yaml:",inline"`
+ HealthCheckConfig HealthCheckConfig `yaml:"healthcheck_config" doc:"description=EXPERIMENTAL: If enabled, gRPC clients perform health checks for each target and fail the request if the target is marked as unhealthy."`
}
// RegisterFlags registers flags.
@@ -39,6 +47,11 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.RegisterFlagsWithPrefix("", "", f)
}
+func (cfg *ConfigWithHealthCheck) RegisterFlagsWithPrefix(prefix, defaultGrpcCompression string, f *flag.FlagSet) {
+ cfg.Config.RegisterFlagsWithPrefix(prefix, defaultGrpcCompression, f)
+ cfg.HealthCheckConfig.RegisterFlagsWithPrefix(prefix, f)
+}
+
// RegisterFlagsWithPrefix registers flags with prefix.
func (cfg *Config) RegisterFlagsWithPrefix(prefix, defaultGrpcCompression string, f *flag.FlagSet) {
f.IntVar(&cfg.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20, "gRPC client max receive message size (bytes).")
@@ -48,6 +61,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix, defaultGrpcCompression string
f.IntVar(&cfg.RateLimitBurst, prefix+".grpc-client-rate-limit-burst", 0, "Rate limit burst for gRPC client.")
f.BoolVar(&cfg.BackoffOnRatelimits, prefix+".backoff-on-ratelimits", false, "Enable backoff and retry when we hit ratelimits.")
f.BoolVar(&cfg.TLSEnabled, prefix+".tls-enabled", cfg.TLSEnabled, "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.")
+ f.DurationVar(&cfg.ConnectTimeout, prefix+".connect-timeout", 5*time.Second, "The maximum amount of time to establish a connection. A value of 0 means using default gRPC client connect timeout 20s.")
cfg.BackoffConfig.RegisterFlagsWithPrefix(prefix, f)
@@ -75,6 +89,15 @@ func (cfg *Config) CallOptions() []grpc.CallOption {
return opts
}
+func (cfg *ConfigWithHealthCheck) DialOption(unaryClientInterceptors []grpc.UnaryClientInterceptor, streamClientInterceptors []grpc.StreamClientInterceptor) ([]grpc.DialOption, error) {
+ if cfg.HealthCheckConfig.HealthCheckInterceptors != nil {
+ unaryClientInterceptors = append(unaryClientInterceptors, cfg.HealthCheckConfig.UnaryHealthCheckInterceptor(cfg))
+ streamClientInterceptors = append(streamClientInterceptors, cfg.HealthCheckConfig.StreamClientInterceptor(cfg))
+ }
+
+ return cfg.Config.DialOption(unaryClientInterceptors, streamClientInterceptors)
+}
+
// DialOption returns the config as a grpc.DialOptions.
func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientInterceptor, streamClientInterceptors []grpc.StreamClientInterceptor) ([]grpc.DialOption, error) {
var opts []grpc.DialOption
@@ -92,6 +115,16 @@ func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientIntercep
unaryClientInterceptors = append([]grpc.UnaryClientInterceptor{NewRateLimiter(cfg)}, unaryClientInterceptors...)
}
+ if cfg.ConnectTimeout > 0 {
+ opts = append(
+ opts,
+ grpc.WithConnectParams(grpc.ConnectParams{
+ Backoff: grpcbackoff.DefaultConfig,
+ MinConnectTimeout: cfg.ConnectTimeout,
+ }),
+ )
+ }
+
if cfg.SignWriteRequestsEnabled {
unaryClientInterceptors = append(unaryClientInterceptors, UnarySigningClientInterceptor)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/health_check.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/health_check.go
new file mode 100644
index 000000000..921791efd
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/health_check.go
@@ -0,0 +1,251 @@
+package grpcclient
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "io"
+ "sync"
+ "time"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/gogo/status"
+ "github.com/weaveworks/common/user"
+ "go.uber.org/atomic"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/health/grpc_health_v1"
+
+ "github.com/cortexproject/cortex/pkg/util/services"
+)
+
+var (
+ unhealthyErr = status.Error(codes.Unavailable, "instance marked as unhealthy")
+)
+
+type HealthCheckConfig struct {
+ *HealthCheckInterceptors `yaml:"-"`
+
+ UnhealthyThreshold int64 `yaml:"unhealthy_threshold"`
+ Interval time.Duration `yaml:"interval"`
+ Timeout time.Duration `yaml:"timeout"`
+}
+
+// RegisterFlagsWithPrefix for Config.
+func (cfg *HealthCheckConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
+ f.Int64Var(&cfg.UnhealthyThreshold, prefix+".healthcheck.unhealthy-threshold", 0, "The number of consecutive failed health checks required before considering a target unhealthy. 0 means disabled.")
+ f.DurationVar(&cfg.Timeout, prefix+".healthcheck.timeout", 1*time.Second, "The amount of time during which no response from a target means a failed health check.")
+ f.DurationVar(&cfg.Interval, prefix+".healthcheck.interval", 5*time.Second, "The approximate amount of time between health checks of an individual target.")
+}
+
+type healthCheckClient struct {
+ grpc_health_v1.HealthClient
+ io.Closer
+}
+
+type healthCheckEntry struct {
+ address string
+ clientConfig *ConfigWithHealthCheck
+ lastCheckTime atomic.Time
+ lastTickTime atomic.Time
+ unhealthyCount atomic.Int64
+
+ healthCheckClientMutex sync.RWMutex
+ healthCheckClient *healthCheckClient
+}
+
+type HealthCheckInterceptors struct {
+ services.Service
+ logger log.Logger
+
+ sync.RWMutex
+ activeInstances map[string]*healthCheckEntry
+
+ instanceGcTimeout time.Duration
+ healthClientFactory func(cc *grpc.ClientConn) (grpc_health_v1.HealthClient, io.Closer)
+}
+
+func NewHealthCheckInterceptors(logger log.Logger) *HealthCheckInterceptors {
+ h := &HealthCheckInterceptors{
+ logger: logger,
+ instanceGcTimeout: 2 * time.Minute,
+ healthClientFactory: func(cc *grpc.ClientConn) (grpc_health_v1.HealthClient, io.Closer) {
+ return grpc_health_v1.NewHealthClient(cc), cc
+ },
+ activeInstances: make(map[string]*healthCheckEntry),
+ }
+
+ h.Service = services.
+ NewTimerService(time.Second, nil, h.iteration, nil).WithName("Grp Client HealthCheck Interceptors")
+ return h
+}
+
+func (e *healthCheckEntry) isHealthy() bool {
+ return e.unhealthyCount.Load() < e.clientConfig.HealthCheckConfig.UnhealthyThreshold
+}
+
+func (e *healthCheckEntry) recordHealth(err error) error {
+ if err != nil {
+ e.unhealthyCount.Inc()
+ } else {
+ e.unhealthyCount.Store(0)
+ }
+
+ return err
+}
+
+func (e *healthCheckEntry) tick() {
+ e.lastTickTime.Store(time.Now())
+}
+
+func (e *healthCheckEntry) close() error {
+ e.healthCheckClientMutex.Lock()
+ defer e.healthCheckClientMutex.Unlock()
+
+ if e.healthCheckClient != nil {
+ err := e.healthCheckClient.Close()
+ e.healthCheckClient = nil
+ return err
+ }
+
+ return nil
+}
+
+func (e *healthCheckEntry) getClient(factory func(cc *grpc.ClientConn) (grpc_health_v1.HealthClient, io.Closer)) (*healthCheckClient, error) {
+ e.healthCheckClientMutex.RLock()
+ c := e.healthCheckClient
+ e.healthCheckClientMutex.RUnlock()
+
+ if c != nil {
+ return c, nil
+ }
+
+ e.healthCheckClientMutex.Lock()
+ defer e.healthCheckClientMutex.Unlock()
+
+ if e.healthCheckClient == nil {
+ dialOpts, err := e.clientConfig.Config.DialOption(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ conn, err := grpc.NewClient(e.address, dialOpts...)
+ if err != nil {
+ return nil, err
+ }
+
+ client, closer := factory(conn)
+ e.healthCheckClient = &healthCheckClient{
+ HealthClient: client,
+ Closer: closer,
+ }
+ }
+
+ return e.healthCheckClient, nil
+}
+
+func (h *HealthCheckInterceptors) registeredInstances() []*healthCheckEntry {
+ h.RLock()
+ defer h.RUnlock()
+ r := make([]*healthCheckEntry, 0, len(h.activeInstances))
+ for _, i := range h.activeInstances {
+ r = append(r, i)
+ }
+
+ return r
+}
+
+func (h *HealthCheckInterceptors) iteration(ctx context.Context) error {
+ level.Debug(h.logger).Log("msg", "Performing health check", "registeredInstances", len(h.registeredInstances()))
+ for _, instance := range h.registeredInstances() {
+ if time.Since(instance.lastTickTime.Load()) >= h.instanceGcTimeout {
+ h.Lock()
+ if err := instance.close(); err != nil {
+ level.Warn(h.logger).Log("msg", "Error closing health check", "err", err)
+ }
+ delete(h.activeInstances, instance.address)
+ h.Unlock()
+ continue
+ }
+
+ if time.Since(instance.lastCheckTime.Load()) < instance.clientConfig.HealthCheckConfig.Interval {
+ continue
+ }
+
+ instance.lastCheckTime.Store(time.Now())
+
+ go func(i *healthCheckEntry) {
+ client, err := i.getClient(h.healthClientFactory)
+
+ if err != nil {
+ level.Error(h.logger).Log("msg", "error creating healthcheck client to perform healthcheck", "address", i.address, "err", err)
+ return
+ }
+
+ if err := i.recordHealth(healthCheck(client, i.clientConfig.HealthCheckConfig.Timeout)); !i.isHealthy() {
+ level.Warn(h.logger).Log("msg", "instance marked as unhealthy", "address", i.address, "err", err)
+ }
+ }(instance)
+ }
+ return nil
+}
+
+func (h *HealthCheckInterceptors) getOrAddHealthCheckEntry(address string, clientConfig *ConfigWithHealthCheck) *healthCheckEntry {
+ h.RLock()
+ e := h.activeInstances[address]
+ h.RUnlock()
+
+ if e != nil {
+ return e
+ }
+
+ h.Lock()
+ defer h.Unlock()
+
+ if _, ok := h.activeInstances[address]; !ok {
+ h.activeInstances[address] = &healthCheckEntry{
+ address: address,
+ clientConfig: clientConfig,
+ }
+ }
+
+ return h.activeInstances[address]
+}
+
+func (h *HealthCheckInterceptors) StreamClientInterceptor(clientConfig *ConfigWithHealthCheck) grpc.StreamClientInterceptor {
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ e := h.getOrAddHealthCheckEntry(cc.Target(), clientConfig)
+ e.tick()
+ if !e.isHealthy() {
+ return nil, unhealthyErr
+ }
+
+ return streamer(ctx, desc, cc, method, opts...)
+ }
+}
+
+func (h *HealthCheckInterceptors) UnaryHealthCheckInterceptor(clientConfig *ConfigWithHealthCheck) grpc.UnaryClientInterceptor {
+ return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ e := h.getOrAddHealthCheckEntry(cc.Target(), clientConfig)
+ e.tick()
+ if !e.isHealthy() {
+ return unhealthyErr
+ }
+ return invoker(ctx, method, req, reply, cc, opts...)
+ }
+}
+
+func healthCheck(client grpc_health_v1.HealthClient, timeout time.Duration) error {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ ctx = user.InjectOrgID(ctx, "0")
+
+ resp, err := client.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
+ if err != nil {
+ return err
+ }
+ if resp.Status != grpc_health_v1.HealthCheckResponse_SERVING {
+ return fmt.Errorf("failing healthcheck status: %s", resp.Status)
+ }
+ return nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/instrumentation.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/instrumentation.go
index 533b296ec..a4e6cad73 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/instrumentation.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/instrumentation.go
@@ -19,8 +19,24 @@ func Instrument(requestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInt
cortexmiddleware.PrometheusGRPCUnaryInstrumentation(requestDuration),
}, []grpc.StreamClientInterceptor{
grpcutil.HTTPHeaderPropagationStreamClientInterceptor,
+ unwrapErrorStreamClientInterceptor(),
otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()),
middleware.StreamClientUserHeaderInterceptor,
cortexmiddleware.PrometheusGRPCStreamInstrumentation(requestDuration),
}
}
+
+func InstrumentReusableStream(requestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) {
+ return []grpc.UnaryClientInterceptor{
+ grpcutil.HTTPHeaderPropagationClientInterceptor,
+ otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()),
+ middleware.ClientUserHeaderInterceptor,
+ cortexmiddleware.PrometheusGRPCUnaryInstrumentation(requestDuration),
+ }, []grpc.StreamClientInterceptor{
+ grpcutil.HTTPHeaderPropagationStreamClientInterceptor,
+ unwrapErrorStreamClientInterceptor(),
+ otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()),
+ middleware.StreamClientUserHeaderInterceptor,
+ cortexmiddleware.PrometheusGRPCReusableStreamInstrumentation(requestDuration),
+ }
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/ratelimit.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/ratelimit.go
index 59ba3b7f0..09ee645b2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/ratelimit.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/ratelimit.go
@@ -16,7 +16,7 @@ func NewRateLimiter(cfg *Config) grpc.UnaryClientInterceptor {
burst = int(cfg.RateLimit)
}
limiter := rate.NewLimiter(rate.Limit(cfg.RateLimit), burst)
- return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
err := limiter.Wait(ctx)
if err != nil {
return status.Error(codes.ResourceExhausted, err.Error())
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/signing_handler.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/signing_handler.go
index d5b7803f2..a6f5ee2f7 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/signing_handler.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/signing_handler.go
@@ -27,26 +27,18 @@ type SignRequest interface {
VerifySign(context.Context, string) (bool, error)
}
-func UnarySigningServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+func UnarySigningServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
rs, ok := req.(SignRequest)
if !ok {
return handler(ctx, req)
}
- md, ok := metadata.FromIncomingContext(ctx)
-
- if !ok {
- return nil, ErrSignatureNotPresent
- }
-
- sig, ok := md[reqSignHeaderName]
-
- if !ok || len(sig) != 1 {
+ sig := metadata.ValueFromIncomingContext(ctx, reqSignHeaderName)
+ if len(sig) != 1 {
return nil, ErrSignatureNotPresent
}
valid, err := rs.VerifySign(ctx, sig[0])
-
if err != nil {
return nil, err
}
@@ -58,7 +50,7 @@ func UnarySigningServerInterceptor(ctx context.Context, req interface{}, _ *grpc
return handler(ctx, req)
}
-func UnarySigningClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+func UnarySigningClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
rs, ok := req.(SignRequest)
if !ok {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/unwrap.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/unwrap.go
new file mode 100644
index 000000000..ee1e0129d
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/unwrap.go
@@ -0,0 +1,37 @@
+package grpcclient
+
+import (
+ "context"
+ "errors"
+
+ "google.golang.org/grpc"
+)
+
+// unwrapErrorStreamClientInterceptor unwraps errors wrapped by OpenTracingStreamClientInterceptor
+func unwrapErrorStreamClientInterceptor() grpc.StreamClientInterceptor {
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ stream, err := streamer(ctx, desc, cc, method, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ return &unwrapErrorClientStream{
+ ClientStream: stream,
+ }, nil
+ }
+}
+
+type unwrapErrorClientStream struct {
+ grpc.ClientStream
+}
+
+func (s *unwrapErrorClientStream) RecvMsg(m any) error {
+ err := s.ClientStream.RecvMsg(m)
+ if err != nil {
+ // Try to unwrap the error to get the original error
+ if wrappedErr := errors.Unwrap(err); wrappedErr != nil {
+ return wrappedErr
+ }
+ }
+ return err
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappy/snappy.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappy/snappy.go
index fe01b4ca3..022b06830 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappy/snappy.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappy/snappy.go
@@ -23,12 +23,12 @@ type compressor struct {
func newCompressor() *compressor {
c := &compressor{}
c.readersPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return snappy.NewReader(nil)
},
}
c.writersPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return snappy.NewBufferedWriter(nil)
},
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock/snappyblock.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock/snappyblock.go
index a40e8429d..ce4db9291 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock/snappyblock.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcencoding/snappyblock/snappyblock.go
@@ -24,7 +24,7 @@ type compressor struct {
func newCompressor() *compressor {
c := &compressor{}
c.readersPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &reader{
pool: &c.readersPool,
cbuff: bytes.NewBuffer(make([]byte, 0, 512)),
@@ -32,7 +32,7 @@ func newCompressor() *compressor {
},
}
c.writersPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &writeCloser{
pool: &c.writersPool,
buff: bytes.NewBuffer(make([]byte, 0, 512)),
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/naming.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/naming.go
index 802932440..701f702bc 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/naming.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/naming.go
@@ -21,7 +21,7 @@ type Update struct {
Addr string
// Metadata is the updated metadata. It is nil if there is no metadata update.
// Metadata is not required for a custom naming implementation.
- Metadata interface{}
+ Metadata any
}
// Watcher watches for SRV updates on the specified target.
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/util.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/util.go
index 8da1c6916..acad4ba89 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/util.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcutil/util.go
@@ -8,7 +8,7 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
- util_log "github.com/cortexproject/cortex/pkg/util/log"
+ "github.com/cortexproject/cortex/pkg/util/requestmeta"
)
type wrappedServerStream struct {
@@ -33,50 +33,57 @@ func IsGRPCContextCanceled(err error) bool {
// HTTPHeaderPropagationServerInterceptor allows for propagation of HTTP Request headers across gRPC calls - works
// alongside HTTPHeaderPropagationClientInterceptor
-func HTTPHeaderPropagationServerInterceptor(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
- ctx = extractForwardedHeadersFromMetadata(ctx)
+func HTTPHeaderPropagationServerInterceptor(ctx context.Context, req any, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
+ ctx = extractForwardedRequestMetadataFromMetadata(ctx)
h, err := handler(ctx, req)
return h, err
}
// HTTPHeaderPropagationStreamServerInterceptor does the same as HTTPHeaderPropagationServerInterceptor but for streams
-func HTTPHeaderPropagationStreamServerInterceptor(srv interface{}, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+func HTTPHeaderPropagationStreamServerInterceptor(srv any, ss grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ ctx := extractForwardedRequestMetadataFromMetadata(ss.Context())
return handler(srv, wrappedServerStream{
- ctx: extractForwardedHeadersFromMetadata(ss.Context()),
+ ctx: ctx,
ServerStream: ss,
})
}
-// extractForwardedHeadersFromMetadata implements HTTPHeaderPropagationServerInterceptor by placing forwarded
+// extractForwardedRequestMetadataFromMetadata implements HTTPHeaderPropagationServerInterceptor by placing forwarded
// headers into incoming context
-func extractForwardedHeadersFromMetadata(ctx context.Context) context.Context {
- md, ok := metadata.FromIncomingContext(ctx)
- if !ok {
+func extractForwardedRequestMetadataFromMetadata(ctx context.Context) context.Context {
+ headersSlice := metadata.ValueFromIncomingContext(ctx, requestmeta.PropagationStringForRequestMetadata)
+ if headersSlice == nil {
+ // we want to check old key if no data
+ headersSlice = metadata.ValueFromIncomingContext(ctx, requestmeta.HeaderPropagationStringForRequestLogging)
+ }
+
+ if headersSlice == nil {
return ctx
}
- return util_log.ContextWithHeaderMapFromMetadata(ctx, md)
+
+ return requestmeta.ContextWithRequestMetadataMapFromHeaderSlice(ctx, headersSlice)
}
// HTTPHeaderPropagationClientInterceptor allows for propagation of HTTP Request headers across gRPC calls - works
// alongside HTTPHeaderPropagationServerInterceptor
-func HTTPHeaderPropagationClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn,
+func HTTPHeaderPropagationClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn,
invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- ctx = injectForwardedHeadersIntoMetadata(ctx)
+ ctx = injectForwardedRequestMetadata(ctx)
return invoker(ctx, method, req, reply, cc, opts...)
}
// HTTPHeaderPropagationStreamClientInterceptor does the same as HTTPHeaderPropagationClientInterceptor but for streams
func HTTPHeaderPropagationStreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string,
streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- ctx = injectForwardedHeadersIntoMetadata(ctx)
+ ctx = injectForwardedRequestMetadata(ctx)
return streamer(ctx, desc, cc, method, opts...)
}
-// injectForwardedHeadersIntoMetadata implements HTTPHeaderPropagationClientInterceptor and HTTPHeaderPropagationStreamClientInterceptor
+// injectForwardedRequestMetadata implements HTTPHeaderPropagationClientInterceptor and HTTPHeaderPropagationStreamClientInterceptor
// by inserting headers that are supposed to be forwarded into metadata of the request
-func injectForwardedHeadersIntoMetadata(ctx context.Context) context.Context {
- headerMap := util_log.HeaderMapFromContext(ctx)
- if headerMap == nil {
+func injectForwardedRequestMetadata(ctx context.Context) context.Context {
+ requestMetadataMap := requestmeta.MapFromContext(ctx)
+ if requestMetadataMap == nil {
return ctx
}
md, ok := metadata.FromOutgoingContext(ctx)
@@ -85,13 +92,13 @@ func injectForwardedHeadersIntoMetadata(ctx context.Context) context.Context {
}
newCtx := ctx
- if _, ok := md[util_log.HeaderPropagationStringForRequestLogging]; !ok {
+ if _, ok := md[requestmeta.PropagationStringForRequestMetadata]; !ok {
var mdContent []string
- for header, content := range headerMap {
- mdContent = append(mdContent, header, content)
+ for requestMetadata, content := range requestMetadataMap {
+ mdContent = append(mdContent, requestMetadata, content)
}
md = md.Copy()
- md[util_log.HeaderPropagationStringForRequestLogging] = mdContent
+ md[requestmeta.PropagationStringForRequestMetadata] = mdContent
newCtx = metadata.NewOutgoingContext(ctx, md)
}
return newCtx
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/histogram/testutils.go b/vendor/github.com/cortexproject/cortex/pkg/util/histogram/testutils.go
index dbb2e40bb..82fdf2b0f 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/histogram/testutils.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/histogram/testutils.go
@@ -21,8 +21,8 @@ import (
// Adapted from Prometheus model/histogram/test_utils.go GenerateBigTestHistograms.
func GenerateTestHistograms(from, step, numHistograms int) []*histogram.Histogram {
var histograms []*histogram.Histogram
- for i := 0; i < numHistograms; i++ {
- v := from + i*step
+ for i := range numHistograms {
+ v := int64(from + i*step)
histograms = append(histograms, tsdbutil.GenerateTestGaugeHistogram(v))
}
return histograms
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/http.go b/vendor/github.com/cortexproject/cortex/pkg/util/http.go
index 09fb3df38..da7c40cc4 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/http.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/http.go
@@ -2,6 +2,7 @@ package util
import (
"bytes"
+ "compress/gzip"
"context"
"encoding/json"
"flag"
@@ -45,7 +46,7 @@ func (b BasicAuth) IsEnabled() bool {
}
// WriteJSONResponse writes some JSON as a HTTP response.
-func WriteJSONResponse(w http.ResponseWriter, v interface{}) {
+func WriteJSONResponse(w http.ResponseWriter, v any) {
w.Header().Set("Content-Type", "application/json")
data, err := json.Marshal(v)
@@ -61,7 +62,7 @@ func WriteJSONResponse(w http.ResponseWriter, v interface{}) {
}
// WriteYAMLResponse writes some YAML as a HTTP response.
-func WriteYAMLResponse(w http.ResponseWriter, v interface{}) {
+func WriteYAMLResponse(w http.ResponseWriter, v any) {
// There is not standardised content-type for YAML, text/plain ensures the
// YAML is displayed in the browser instead of offered as a download
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
@@ -96,7 +97,7 @@ func WriteHTMLResponse(w http.ResponseWriter, message string) {
// RenderHTTPResponse either responds with json or a rendered html page using the passed in template
// by checking the Accepts header
-func RenderHTTPResponse(w http.ResponseWriter, v interface{}, t *template.Template, r *http.Request) {
+func RenderHTTPResponse(w http.ResponseWriter, v any, t *template.Template, r *http.Request) {
accept := r.Header.Get("Accept")
if strings.Contains(accept, "application/json") {
WriteJSONResponse(w, v)
@@ -110,7 +111,7 @@ func RenderHTTPResponse(w http.ResponseWriter, v interface{}, t *template.Templa
}
// StreamWriteYAMLResponseCommon stream writes data as http response
-func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan interface{}, logger log.Logger, marshalFn func(in interface{}) (out []byte, err error)) {
+func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan any, logger log.Logger, marshalFn func(in any) (out []byte, err error)) {
w.Header().Set("Content-Type", "application/yaml")
for v := range iter {
data, err := marshalFn(v)
@@ -127,12 +128,12 @@ func streamWriteYAMLResponseCommon(w http.ResponseWriter, iter chan interface{},
}
// StreamWriteYAMLResponse stream writes data as http response using yaml v2 library
-func StreamWriteYAMLResponse(w http.ResponseWriter, iter chan interface{}, logger log.Logger) {
+func StreamWriteYAMLResponse(w http.ResponseWriter, iter chan any, logger log.Logger) {
streamWriteYAMLResponseCommon(w, iter, logger, yaml.Marshal)
}
// StreamWriteYAMLV3Response stream writes data as http response using yaml v3 library
-func StreamWriteYAMLV3Response(w http.ResponseWriter, iter chan interface{}, logger log.Logger) {
+func StreamWriteYAMLV3Response(w http.ResponseWriter, iter chan any, logger log.Logger) {
streamWriteYAMLResponseCommon(w, iter, logger, yamlv3.Marshal)
}
@@ -143,6 +144,7 @@ type CompressionType int
const (
NoCompression CompressionType = iota
RawSnappy
+ Gzip
)
// ParseProtoReader parses a compressed proto from an io.Reader.
@@ -215,6 +217,13 @@ func decompressFromReader(reader io.Reader, expectedSize, maxSize int, compressi
return nil, err
}
body, err = decompressFromBuffer(&buf, maxSize, RawSnappy, sp)
+ case Gzip:
+ reader, err = gzip.NewReader(reader)
+ if err != nil {
+ return nil, err
+ }
+ _, err = buf.ReadFrom(reader)
+ body = buf.Bytes()
}
return body, err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/httpgrpcutil/errors.go b/vendor/github.com/cortexproject/cortex/pkg/util/httpgrpcutil/errors.go
index b2b17eed8..c841e0047 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/httpgrpcutil/errors.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/httpgrpcutil/errors.go
@@ -7,7 +7,7 @@ import (
"github.com/weaveworks/common/httpgrpc"
)
-func WrapHTTPGrpcError(err error, format string, args ...interface{}) error {
+func WrapHTTPGrpcError(err error, format string, args ...any) error {
if err == nil {
return nil
}
@@ -19,6 +19,6 @@ func WrapHTTPGrpcError(err error, format string, args ...interface{}) error {
return httpgrpc.ErrorFromHTTPResponse(&httpgrpc.HTTPResponse{
Code: resp.Code,
Headers: resp.Headers,
- Body: []byte(fmt.Sprintf("%s, %s", msg, err)),
+ Body: fmt.Appendf(nil, "%s, %s", msg, err),
})
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/labels.go b/vendor/github.com/cortexproject/cortex/pkg/util/labels.go
index c1bc12653..2e78a0aa9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/labels.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/labels.go
@@ -10,10 +10,10 @@ import (
// LabelsToMetric converts a Labels to Metric
// Don't do this on any performance sensitive paths.
func LabelsToMetric(ls labels.Labels) model.Metric {
- m := make(model.Metric, len(ls))
- for _, l := range ls {
+ m := make(model.Metric, ls.Len())
+ ls.Range(func(l labels.Label) {
m[model.LabelName(l.Name)] = model.LabelValue(l.Value)
- }
+ })
return m
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/labelset/tracker.go b/vendor/github.com/cortexproject/cortex/pkg/util/labelset/tracker.go
new file mode 100644
index 000000000..2f624554b
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/labelset/tracker.go
@@ -0,0 +1,111 @@
+package labelset
+
+import (
+ "sync"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+
+ "github.com/cortexproject/cortex/pkg/util"
+)
+
+const (
+ numMetricShards = 128
+)
+
+type LabelSetTracker struct {
+ shards []*labelSetCounterShard
+}
+
+// NewLabelSetTracker initializes a LabelSetTracker to keep track of active labelset limits.
+func NewLabelSetTracker() *LabelSetTracker {
+ shards := make([]*labelSetCounterShard, 0, numMetricShards)
+ for range numMetricShards {
+ shards = append(shards, &labelSetCounterShard{
+ RWMutex: &sync.RWMutex{},
+ userLabelSets: map[string]map[uint64]labels.Labels{},
+ })
+ }
+ return &LabelSetTracker{shards: shards}
+}
+
+type labelSetCounterShard struct {
+ *sync.RWMutex
+ userLabelSets map[string]map[uint64]labels.Labels
+}
+
+// Track accepts userID, label set and hash of the label set limit.
+func (m *LabelSetTracker) Track(userId string, hash uint64, labelSet labels.Labels) {
+ s := m.shards[util.HashFP(model.Fingerprint(hash))%numMetricShards]
+ s.Lock()
+ if userEntry, ok := s.userLabelSets[userId]; ok {
+ if _, ok2 := userEntry[hash]; !ok2 {
+ userEntry[hash] = labelSet
+ }
+ } else {
+ s.userLabelSets[userId] = map[uint64]labels.Labels{hash: labelSet}
+ }
+ // Unlock before we update metrics.
+ s.Unlock()
+}
+
+// UpdateMetrics cleans up dangling user and label set from the tracker as well as metrics.
+// It takes a function for user to customize the metrics cleanup logic when either a user or
+// a specific label set is removed. If a user is removed then removeUser is set to true.
+func (m *LabelSetTracker) UpdateMetrics(userSet map[string]map[uint64]struct{}, deleteMetricFunc func(user, labelSetStr string, removeUser bool)) {
+ for i := range numMetricShards {
+ shard := m.shards[i]
+ shard.Lock()
+
+ for user, userEntry := range shard.userLabelSets {
+ limits, ok := userSet[user]
+ // Remove user if it doesn't exist or has no limits anymore.
+ if !ok || len(limits) == 0 {
+ deleteMetricFunc(user, "", true)
+ delete(shard.userLabelSets, user)
+ continue
+ }
+ for h, lbls := range userEntry {
+ // This limit no longer exists.
+ if _, ok := limits[h]; !ok {
+ delete(userEntry, h)
+ labelSetStr := lbls.String()
+ deleteMetricFunc(user, labelSetStr, false)
+ continue
+ }
+ }
+ }
+
+ shard.Unlock()
+ }
+}
+
+// labelSetExists is used for testing only to check the existence of a label set.
+func (m *LabelSetTracker) labelSetExists(userId string, hash uint64, labelSet labels.Labels) bool {
+ s := m.shards[util.HashFP(model.Fingerprint(hash))%numMetricShards]
+ s.RLock()
+ defer s.RUnlock()
+ userEntry, ok := s.userLabelSets[userId]
+ if !ok {
+ return false
+ }
+ set, ok := userEntry[hash]
+ if !ok {
+ return false
+ }
+ return labels.Compare(set, labelSet) == 0
+}
+
+// userExists is used for testing only to check the existence of a user.
+func (m *LabelSetTracker) userExists(userId string) bool {
+ for i := range numMetricShards {
+ shard := m.shards[i]
+ shard.RLock()
+ defer shard.RUnlock()
+ _, ok := shard.userLabelSets[userId]
+ if ok {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go
index da16d11b9..51df578b2 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go
@@ -1,32 +1,23 @@
package log
import (
- "context"
"fmt"
- "net/http"
"os"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/promslog"
"github.com/weaveworks/common/logging"
"github.com/weaveworks/common/server"
- "google.golang.org/grpc/metadata"
-)
-
-type contextKey int
-
-const (
- headerMapContextKey contextKey = 0
-
- HeaderPropagationStringForRequestLogging string = "x-http-header-forwarding-logging"
)
var (
// Logger is a shared go-kit logger.
// TODO: Change all components to take a non-global logger via their constructors.
// Prefer accepting a non-global logger as an argument.
- Logger = log.NewNopLogger()
+ Logger = log.NewNopLogger()
+ SLogger = promslog.NewNopLogger()
logMessages = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "log_messages_total",
@@ -62,7 +53,8 @@ func InitLogger(cfg *server.Config) {
// PrometheusLogger exposes Prometheus counters for each of go-kit's log levels.
type PrometheusLogger struct {
- logger log.Logger
+ logger log.Logger
+ logLevel logging.Level
}
// NewPrometheusLogger creates a new instance of PrometheusLogger which exposes
@@ -80,24 +72,25 @@ func newLoggerWithFormat(format logging.Format) log.Logger {
return logger
}
-func newPrometheusLoggerFrom(logger log.Logger, logLevel logging.Level, keyvals ...interface{}) log.Logger {
+func newPrometheusLoggerFrom(logger log.Logger, logLevel logging.Level, keyvals ...any) log.Logger {
// Sort the logger chain to avoid expensive log.Valuer evaluation for disallowed level.
// Ref: https://github.com/go-kit/log/issues/14#issuecomment-945038252
logger = log.With(logger, "ts", log.DefaultTimestampUTC)
logger = log.With(logger, keyvals...)
- logger = level.NewFilter(logger, LevelFilter(logLevel.String()))
+ logger = level.NewFilter(logger, logLevel.Gokit)
// Initialise counters for all supported levels:
for _, level := range supportedLevels {
logMessages.WithLabelValues(level.String())
}
return &PrometheusLogger{
- logger: logger,
+ logger: logger,
+ logLevel: logLevel,
}
}
// Log increments the appropriate Prometheus counter depending on the log level.
-func (pl *PrometheusLogger) Log(kv ...interface{}) error {
+func (pl *PrometheusLogger) Log(kv ...any) error {
pl.logger.Log(kv...)
l := "unknown"
for i := 1; i < len(kv); i += 2 {
@@ -122,54 +115,3 @@ func CheckFatal(location string, err error) {
os.Exit(1)
}
}
-
-// TODO(dannyk): remove once weaveworks/common updates to go-kit/log
-//
-// -> we can then revert to using Level.Gokit
-func LevelFilter(l string) level.Option {
- switch l {
- case "debug":
- return level.AllowDebug()
- case "info":
- return level.AllowInfo()
- case "warn":
- return level.AllowWarn()
- case "error":
- return level.AllowError()
- default:
- return level.AllowAll()
- }
-}
-
-func HeaderMapFromContext(ctx context.Context) map[string]string {
- headerMap, ok := ctx.Value(headerMapContextKey).(map[string]string)
- if !ok {
- return nil
- }
- return headerMap
-}
-
-func ContextWithHeaderMap(ctx context.Context, headerMap map[string]string) context.Context {
- return context.WithValue(ctx, headerMapContextKey, headerMap)
-}
-
-// InjectHeadersIntoHTTPRequest injects the logging header map from the context into the request headers.
-func InjectHeadersIntoHTTPRequest(headerMap map[string]string, request *http.Request) {
- for header, contents := range headerMap {
- request.Header.Add(header, contents)
- }
-}
-
-func ContextWithHeaderMapFromMetadata(ctx context.Context, md metadata.MD) context.Context {
- headersSlice, ok := md[HeaderPropagationStringForRequestLogging]
- if !ok || len(headersSlice)%2 == 1 {
- return ctx
- }
-
- headerMap := make(map[string]string)
- for i := 0; i < len(headersSlice); i += 2 {
- headerMap[headersSlice[i]] = headersSlice[i+1]
- }
-
- return ContextWithHeaderMap(ctx, headerMap)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log/slog_adapter.go b/vendor/github.com/cortexproject/cortex/pkg/util/log/slog_adapter.go
new file mode 100644
index 000000000..eec19bb4a
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/log/slog_adapter.go
@@ -0,0 +1,30 @@
+package log
+
+import (
+ "log/slog"
+
+ "github.com/go-kit/log"
+ sloggk "github.com/tjhop/slog-gokit"
+)
+
+// GoKitLogToSlog convert go-kit/log to slog
+// usage: logutil.GoKitLogToSlog(gokitLogger)
+func GoKitLogToSlog(logger log.Logger) *slog.Logger {
+ levelVar := slog.LevelVar{}
+ promLogger, ok := logger.(*PrometheusLogger)
+ if !ok {
+ levelVar.Set(slog.LevelDebug)
+ } else {
+ switch promLogger.logLevel.String() {
+ case "debug":
+ levelVar.Set(slog.LevelDebug)
+ case "info":
+ levelVar.Set(slog.LevelInfo)
+ case "warn":
+ levelVar.Set(slog.LevelWarn)
+ case "error":
+ levelVar.Set(slog.LevelError)
+ }
+ }
+ return slog.New(sloggk.NewGoKitHandler(logger, &levelVar))
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log/wrappers.go b/vendor/github.com/cortexproject/cortex/pkg/util/log/wrappers.go
index 1394b7b0b..9a706a570 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/log/wrappers.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/log/wrappers.go
@@ -9,6 +9,7 @@ import (
"go.opentelemetry.io/otel/trace"
"github.com/cortexproject/cortex/pkg/tenant"
+ "github.com/cortexproject/cortex/pkg/util/requestmeta"
)
// WithUserID returns a Logger that has information about the current user in
@@ -64,7 +65,7 @@ func WithSourceIPs(sourceIPs string, l log.Logger) log.Logger {
// HeadersFromContext enables the logging of specified HTTP Headers that have been added to a context
func HeadersFromContext(ctx context.Context, l log.Logger) log.Logger {
- headerContentsMap := HeaderMapFromContext(ctx)
+ headerContentsMap := requestmeta.LoggingHeadersAndRequestIdFromContext(ctx)
for header, contents := range headerContentsMap {
l = log.With(l, header, contents)
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/matchers.go b/vendor/github.com/cortexproject/cortex/pkg/util/matchers.go
deleted file mode 100644
index d0ee099b5..000000000
--- a/vendor/github.com/cortexproject/cortex/pkg/util/matchers.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package util
-
-import (
- "github.com/prometheus/prometheus/model/labels"
-)
-
-// SplitFiltersAndMatchers splits empty matchers off, which are treated as filters, see #220
-func SplitFiltersAndMatchers(allMatchers []*labels.Matcher) (filters, matchers []*labels.Matcher) {
- for _, matcher := range allMatchers {
- // If a matcher matches "", we need to fetch possible chunks where
- // there is no value and will therefore not be in our label index.
- // e.g. {foo=""} and {foo!="bar"} both match "", so we need to return
- // chunks which do not have a foo label set. When looking entries in
- // the index, we should ignore this matcher to fetch all possible chunks
- // and then filter on the matcher after the chunks have been fetched.
- if matcher.Matches("") {
- filters = append(filters, matcher)
- } else {
- matchers = append(matchers, matcher)
- }
- }
- return
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
index 0a823920f..6678c4787 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go
@@ -20,7 +20,7 @@ import (
var (
bytesBufferPool = sync.Pool{
- New: func() interface{} {
+ New: func() any {
return bytes.NewBuffer(nil)
},
}
@@ -723,7 +723,7 @@ func (r *UserRegistries) BuildMetricFamiliesPerUser() MetricFamiliesPerUser {
// FromLabelPairsToLabels converts dto.LabelPair into labels.Labels.
func FromLabelPairsToLabels(pairs []*dto.LabelPair) labels.Labels {
- builder := labels.NewBuilder(nil)
+ builder := labels.NewBuilder(labels.EmptyLabels())
for _, pair := range pairs {
builder.Set(pair.GetName(), pair.GetValue())
}
@@ -770,7 +770,7 @@ func GetLabels(c prometheus.Collector, filter map[string]string) ([]labels.Label
errs := tsdb_errors.NewMulti()
var result []labels.Labels
dtoMetric := &dto.Metric{}
- lbls := labels.NewBuilder(nil)
+ lbls := labels.NewBuilder(labels.EmptyLabels())
nextMetric:
for m := range ch {
@@ -781,7 +781,7 @@ nextMetric:
continue
}
- lbls.Reset(nil)
+ lbls.Reset(labels.EmptyLabels())
for _, lp := range dtoMetric.Label {
n := lp.GetName()
v := lp.GetValue()
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/middleware/grpc.go b/vendor/github.com/cortexproject/cortex/pkg/util/middleware/grpc.go
index 66f0d3766..3adea5eb9 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/middleware/grpc.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/middleware/grpc.go
@@ -15,7 +15,7 @@ import (
// PrometheusGRPCUnaryInstrumentation records duration of gRPC requests client side.
func PrometheusGRPCUnaryInstrumentation(metric *prometheus.HistogramVec) grpc.UnaryClientInterceptor {
- return func(ctx context.Context, method string, req, resp interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ return func(ctx context.Context, method string, req, resp any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
start := time.Now()
err := invoker(ctx, method, req, resp, cc, opts...)
metric.WithLabelValues(method, errorCode(err)).Observe(time.Since(start).Seconds())
@@ -46,7 +46,7 @@ type instrumentedClientStream struct {
grpc.ClientStream
}
-func (s *instrumentedClientStream) SendMsg(m interface{}) error {
+func (s *instrumentedClientStream) SendMsg(m any) error {
err := s.ClientStream.SendMsg(m)
if err == nil {
return err
@@ -61,7 +61,7 @@ func (s *instrumentedClientStream) SendMsg(m interface{}) error {
return err
}
-func (s *instrumentedClientStream) RecvMsg(m interface{}) error {
+func (s *instrumentedClientStream) RecvMsg(m any) error {
err := s.ClientStream.RecvMsg(m)
if err == nil {
return err
@@ -84,6 +84,57 @@ func (s *instrumentedClientStream) Header() (metadata.MD, error) {
return md, err
}
+// PrometheusGRPCReusableStreamInstrumentation records duration of reusable streaming gRPC requests client side.
+func PrometheusGRPCReusableStreamInstrumentation(metric *prometheus.HistogramVec) grpc.StreamClientInterceptor {
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string,
+ streamer grpc.Streamer, opts ...grpc.CallOption,
+ ) (grpc.ClientStream, error) {
+ stream, err := streamer(ctx, desc, cc, method, opts...)
+ return &instrumentedReusableClientStream{
+ metric: metric,
+ method: method,
+ ClientStream: stream,
+ }, err
+ }
+}
+
+type instrumentedReusableClientStream struct {
+ metric *prometheus.HistogramVec
+ method string
+ grpc.ClientStream
+}
+
+func (s *instrumentedReusableClientStream) SendMsg(m any) error {
+ start := time.Now()
+ err := s.ClientStream.SendMsg(m)
+ if err != nil && err != io.EOF {
+ s.metric.WithLabelValues(s.method, errorCode(err)).Observe(time.Since(start).Seconds())
+ return err
+ }
+ s.metric.WithLabelValues(s.method, errorCode(nil)).Observe(time.Since(start).Seconds())
+ return err
+}
+
+func (s *instrumentedReusableClientStream) RecvMsg(m any) error {
+ start := time.Now()
+ err := s.ClientStream.RecvMsg(m)
+ if err != nil && err != io.EOF {
+ s.metric.WithLabelValues(s.method, errorCode(err)).Observe(time.Since(start).Seconds())
+ return err
+ }
+ s.metric.WithLabelValues(s.method, errorCode(nil)).Observe(time.Since(start).Seconds())
+ return err
+}
+
+func (s *instrumentedReusableClientStream) Header() (metadata.MD, error) {
+ start := time.Now()
+ md, err := s.ClientStream.Header()
+ if err != nil {
+ s.metric.WithLabelValues(s.method, errorCode(err)).Observe(time.Since(start).Seconds())
+ }
+ return md, err
+}
+
func errorCode(err error) string {
respStatus := "2xx"
if err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/net.go b/vendor/github.com/cortexproject/cortex/pkg/util/net.go
index 852f6bf6f..8db916ca3 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/net.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/net.go
@@ -37,7 +37,7 @@ func GetFirstAddressOf(names []string) (string, error) {
return ipAddr, nil
}
if ipAddr == "" {
- return "", fmt.Errorf("No address found for %s", names)
+ return "", fmt.Errorf("no address found for %s", names)
}
if strings.HasPrefix(ipAddr, `169.254.`) {
level.Warn(util_log.Logger).Log("msg", "using automatic private ip", "address", ipAddr)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/priority_queue.go b/vendor/github.com/cortexproject/cortex/pkg/util/priority_queue.go
index 9937c231c..4bb8b1f06 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/priority_queue.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/priority_queue.go
@@ -30,11 +30,11 @@ func (q queue) Swap(i, j int) { q[i], q[j] = q[j], q[i] }
// Push and Pop use pointer receivers because they modify the slice's length,
// not just its contents.
-func (q *queue) Push(x interface{}) {
+func (q *queue) Push(x any) {
*q = append(*q, x.(PriorityOp))
}
-func (q *queue) Pop() interface{} {
+func (q *queue) Pop() any {
old := *q
n := len(old)
x := old[n-1]
@@ -103,7 +103,7 @@ func (pq *PriorityQueue) Dequeue() PriorityOp {
pq.lock.Lock()
defer pq.lock.Unlock()
- for len(pq.queue) == 0 && !(pq.closing || pq.closed) {
+ for len(pq.queue) == 0 && (!pq.closing && !pq.closed) {
pq.cond.Wait()
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/context.go b/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/context.go
new file mode 100644
index 000000000..190ec3f80
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/context.go
@@ -0,0 +1,84 @@
+package requestmeta
+
+import (
+ "context"
+ "net/http"
+ "net/textproto"
+
+ "google.golang.org/grpc/metadata"
+)
+
+type contextKey int
+
+const (
+ requestMetadataContextKey contextKey = 0
+ PropagationStringForRequestMetadata string = "x-request-metadata-propagation-string"
+ // HeaderPropagationStringForRequestLogging is used for backwards compatibility
+ HeaderPropagationStringForRequestLogging string = "x-http-header-forwarding-logging"
+)
+
+func ContextWithRequestMetadataMap(ctx context.Context, requestContextMap map[string]string) context.Context {
+ return context.WithValue(ctx, requestMetadataContextKey, requestContextMap)
+}
+
+func MapFromContext(ctx context.Context) map[string]string {
+ requestContextMap, ok := ctx.Value(requestMetadataContextKey).(map[string]string)
+ if !ok {
+ return nil
+ }
+ return requestContextMap
+}
+
+// ContextWithRequestMetadataMapFromHeaders adds MetadataContext headers to context and Removes non-existent headers.
+// targetHeaders is passed for backwards compatibility, otherwise header keys should be in header itself.
+func ContextWithRequestMetadataMapFromHeaders(ctx context.Context, headers map[string]string, targetHeaders []string) context.Context {
+ headerMap := make(map[string]string)
+ loggingHeaders := headers[textproto.CanonicalMIMEHeaderKey(LoggingHeadersKey)]
+ headerKeys := targetHeaders
+ if loggingHeaders != "" {
+ headerKeys = LoggingHeaderKeysFromString(loggingHeaders)
+ headerKeys = append(headerKeys, LoggingHeadersKey)
+ }
+ headerKeys = append(headerKeys, RequestIdKey)
+ headerKeys = append(headerKeys, RequestSourceKey)
+ for _, header := range headerKeys {
+ if v, ok := headers[textproto.CanonicalMIMEHeaderKey(header)]; ok {
+ headerMap[header] = v
+ }
+ }
+ return ContextWithRequestMetadataMap(ctx, headerMap)
+}
+
+func InjectMetadataIntoHTTPRequestHeaders(requestMetadataMap map[string]string, request *http.Request) {
+ for key, contents := range requestMetadataMap {
+ request.Header.Add(key, contents)
+ }
+}
+
+func ContextWithRequestMetadataMapFromHeaderSlice(ctx context.Context, headerSlice []string) context.Context {
+ if len(headerSlice)%2 == 1 {
+ return ctx
+ }
+
+ requestMetadataMap := make(map[string]string, len(headerSlice)/2)
+ for i := 0; i < len(headerSlice); i += 2 {
+ requestMetadataMap[headerSlice[i]] = headerSlice[i+1]
+ }
+
+ return ContextWithRequestMetadataMap(ctx, requestMetadataMap)
+}
+
+func ContextWithRequestMetadataMapFromMetadata(ctx context.Context, md metadata.MD) context.Context {
+ headersSlice, ok := md[PropagationStringForRequestMetadata]
+
+ // we want to check old key if no data
+ if !ok {
+ headersSlice, ok = md[HeaderPropagationStringForRequestLogging]
+ }
+
+ if !ok {
+ return ctx
+ }
+
+ return ContextWithRequestMetadataMapFromHeaderSlice(ctx, headersSlice)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/id.go b/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/id.go
new file mode 100644
index 000000000..01b34e430
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/id.go
@@ -0,0 +1,22 @@
+package requestmeta
+
+import "context"
+
+const RequestIdKey = "x-cortex-request-id"
+
+func RequestIdFromContext(ctx context.Context) string {
+ metadataMap := MapFromContext(ctx)
+ if metadataMap == nil {
+ return ""
+ }
+ return metadataMap[RequestIdKey]
+}
+
+func ContextWithRequestId(ctx context.Context, reqId string) context.Context {
+ metadataMap := MapFromContext(ctx)
+ if metadataMap == nil {
+ metadataMap = make(map[string]string)
+ }
+ metadataMap[RequestIdKey] = reqId
+ return ContextWithRequestMetadataMap(ctx, metadataMap)
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/logging_headers.go b/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/logging_headers.go
new file mode 100644
index 000000000..02b2a4270
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/logging_headers.go
@@ -0,0 +1,60 @@
+package requestmeta
+
+import (
+ "context"
+ "strings"
+)
+
+const (
+ LoggingHeadersKey = "x-request-logging-headers-key"
+ loggingHeadersDelimiter = ","
+)
+
+func LoggingHeaderKeysToString(targetHeaders []string) string {
+ return strings.Join(targetHeaders, loggingHeadersDelimiter)
+}
+
+func LoggingHeaderKeysFromString(headerKeysString string) []string {
+ return strings.Split(headerKeysString, loggingHeadersDelimiter)
+}
+
+func LoggingHeadersFromContext(ctx context.Context) map[string]string {
+ metadataMap := MapFromContext(ctx)
+ if metadataMap == nil {
+ return nil
+ }
+ loggingHeadersString := metadataMap[LoggingHeadersKey]
+ if loggingHeadersString == "" {
+ // Backward compatibility: if no specific headers are listed, return all metadata excluding requestId and source
+ result := make(map[string]string, len(metadataMap))
+ for k, v := range metadataMap {
+ if k == RequestIdKey || k == RequestSourceKey {
+ continue
+ }
+ result[k] = v
+ }
+ return result
+ }
+
+ result := make(map[string]string)
+ for _, header := range LoggingHeaderKeysFromString(loggingHeadersString) {
+ if v, ok := metadataMap[header]; ok {
+ result[header] = v
+ }
+ }
+ return result
+}
+
+func LoggingHeadersAndRequestIdFromContext(ctx context.Context) map[string]string {
+ metadataMap := MapFromContext(ctx)
+ if metadataMap == nil {
+ return nil
+ }
+
+ loggingHeaders := LoggingHeadersFromContext(ctx)
+ if reqId := RequestIdFromContext(ctx); reqId != "" {
+ loggingHeaders[RequestIdKey] = reqId
+ }
+
+ return loggingHeaders
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/source.go b/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/source.go
new file mode 100644
index 000000000..6f0f23db0
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/requestmeta/source.go
@@ -0,0 +1,27 @@
+package requestmeta
+
+import "context"
+
+const RequestSourceKey = "x-cortex-request-source"
+
+const (
+ SourceAPI = "api"
+ SourceRuler = "ruler"
+)
+
+func ContextWithRequestSource(ctx context.Context, source string) context.Context {
+ metadataMap := MapFromContext(ctx)
+ if metadataMap == nil {
+ metadataMap = make(map[string]string)
+ }
+ metadataMap[RequestSourceKey] = source
+ return ContextWithRequestMetadataMap(ctx, metadataMap)
+}
+
+func RequestFromRuler(ctx context.Context) bool {
+ metadataMap := MapFromContext(ctx)
+ if metadataMap == nil {
+ return false
+ }
+ return metadataMap[RequestSourceKey] == SourceRuler
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/runutil/runutil.go b/vendor/github.com/cortexproject/cortex/pkg/util/runutil/runutil.go
index 421f77427..b8303e05b 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/runutil/runutil.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/runutil/runutil.go
@@ -20,7 +20,7 @@ func CloseWithErrCapture(err *error, closer io.Closer, msg string) {
// CloseWithLogOnErr closes an io.Closer and logs any relevant error from it wrapped with the provided format string and
// args.
-func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, args ...interface{}) {
+func CloseWithLogOnErr(logger log.Logger, closer io.Closer, format string, args ...any) {
err := closer.Close()
if err == nil || errors.Is(err, os.ErrClosed) {
return
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/shard.go b/vendor/github.com/cortexproject/cortex/pkg/util/shard.go
index 82392b3a1..364f39656 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/shard.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/shard.go
@@ -10,6 +10,10 @@ const (
// Sharding strategies.
ShardingStrategyDefault = "default"
ShardingStrategyShuffle = "shuffle-sharding"
+
+ // Compaction strategies
+ CompactionStrategyDefault = "default"
+ CompactionStrategyPartitioning = "partitioning"
)
var (
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/noop.go b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/noop.go
index 8c7480ec8..72943361a 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/noop.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/noop.go
@@ -25,15 +25,15 @@ func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
func (n noopSpan) Context() opentracing.SpanContext { return defaultNoopSpanContext }
func (n noopSpan) SetBaggageItem(key, val string) opentracing.Span { return defaultNoopSpan }
func (n noopSpan) BaggageItem(key string) string { return emptyString }
-func (n noopSpan) SetTag(key string, value interface{}) opentracing.Span { return n }
+func (n noopSpan) SetTag(key string, value any) opentracing.Span { return n }
func (n noopSpan) LogFields(fields ...log.Field) {}
-func (n noopSpan) LogKV(keyVals ...interface{}) {}
+func (n noopSpan) LogKV(keyVals ...any) {}
func (n noopSpan) Finish() {}
func (n noopSpan) FinishWithOptions(opts opentracing.FinishOptions) {}
func (n noopSpan) SetOperationName(operationName string) opentracing.Span { return n }
func (n noopSpan) Tracer() opentracing.Tracer { return defaultNoopTracer }
func (n noopSpan) LogEvent(event string) {}
-func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
+func (n noopSpan) LogEventWithPayload(event string, payload any) {}
func (n noopSpan) Log(data opentracing.LogData) {}
// StartSpan belongs to the Tracer interface.
@@ -42,11 +42,11 @@ func (n noopTracer) StartSpan(operationName string, opts ...opentracing.StartSpa
}
// Inject belongs to the Tracer interface.
-func (n noopTracer) Inject(sp opentracing.SpanContext, format interface{}, carrier interface{}) error {
+func (n noopTracer) Inject(sp opentracing.SpanContext, format any, carrier any) error {
return nil
}
// Extract belongs to the Tracer interface.
-func (n noopTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+func (n noopTracer) Extract(format any, carrier any) (opentracing.SpanContext, error) {
return nil, opentracing.ErrSpanContextNotFound
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go
index c0b1184f0..cde7ae045 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go
@@ -30,14 +30,14 @@ type SpanLogger struct {
}
// New makes a new SpanLogger, where logs will be sent to the global logger.
-func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, context.Context) {
+func New(ctx context.Context, method string, kvps ...any) (*SpanLogger, context.Context) {
return NewWithLogger(ctx, util_log.Logger, method, kvps...)
}
// NewWithLogger makes a new SpanLogger with a custom log.Logger to send logs
// to. The provided context will have the logger attached to it and can be
// retrieved with FromContext or FromContextWithFallback.
-func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...interface{}) (*SpanLogger, context.Context) {
+func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...any) (*SpanLogger, context.Context) {
span, ctx := opentracing.StartSpanFromContext(ctx, method)
if ids, _ := tenant.TenantIDs(ctx); len(ids) > 0 {
span.SetTag(TenantIDTagName, ids)
@@ -83,13 +83,13 @@ func FromContextWithFallback(ctx context.Context, fallback log.Logger) *SpanLogg
// Log implements gokit's Logger interface; sends logs to underlying logger and
// also puts the on the spans.
-func (s *SpanLogger) Log(kvps ...interface{}) error {
+func (s *SpanLogger) Log(kvps ...any) error {
s.Logger.Log(kvps...)
fields, err := otlog.InterleavedKVToFields(kvps...)
if err != nil {
return err
}
- s.Span.LogFields(fields...)
+ s.LogFields(fields...)
return nil
}
@@ -99,6 +99,6 @@ func (s *SpanLogger) Error(err error) error {
return nil
}
ext.Error.Set(s.Span, true)
- s.Span.LogFields(otlog.Error(err))
+ s.LogFields(otlog.Error(err))
return err
}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/strings.go b/vendor/github.com/cortexproject/cortex/pkg/util/strings.go
index ddc9de9ff..4fdaded30 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/strings.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/strings.go
@@ -3,22 +3,19 @@ package util
import (
"context"
"sync"
+ "time"
"unsafe"
"github.com/bboreham/go-loser"
- "go.uber.org/atomic"
+ "github.com/hashicorp/golang-lru/v2/expirable"
)
-// StringsContain returns true if the search value is within the list of input values.
-func StringsContain(values []string, search string) bool {
- for _, v := range values {
- if search == v {
- return true
- }
- }
-
- return false
-}
+const (
+ // Max size is set to 2M entries.
+ maxInternerLruCacheSize = 2e6
+ // TTL should be similar to the head compaction interval
+ internerLruCacheTTL = time.Hour * 2
+)
// StringsMap returns a map where keys are input values.
func StringsMap(values []string) map[string]bool {
@@ -129,8 +126,10 @@ func MergeSortedSlices(ctx context.Context, a ...[]string) ([]string, error) {
r := make([]string, 0, sumLengh*2/10)
var current string
+ cnt := 0
for lt.Next() {
- if ctx.Err() != nil {
+ cnt++
+ if cnt%CheckContextEveryNIterations == 0 && ctx.Err() != nil {
return nil, ctx.Err()
}
if lt.At() != current {
@@ -143,30 +142,27 @@ func MergeSortedSlices(ctx context.Context, a ...[]string) ([]string, error) {
type Interner interface {
Intern(s string) string
- Release(s string)
}
-// NewInterner returns a new Interner to be used to intern strings.
-// Based on https://github.com/prometheus/prometheus/blob/726ed124e4468d0274ba89b0934a6cc8c975532d/storage/remote/intern.go#L51
-func NewInterner() Interner {
+// NewLruInterner returns a new Interner to be used to intern strings.
+// The interner will use a LRU cache to return the deduplicated strings
+func NewLruInterner(enabled bool) Interner {
+ if !enabled {
+ return &noOpInterner{}
+ }
return &pool{
- pool: map[string]*entry{},
+ lru: expirable.NewLRU[string, string](maxInternerLruCacheSize, nil, internerLruCacheTTL),
}
}
-type pool struct {
- mtx sync.RWMutex
- pool map[string]*entry
-}
+type noOpInterner struct{}
-type entry struct {
- refs atomic.Int64
-
- s string
+func (n noOpInterner) Intern(s string) string {
+ return s
}
-func newEntry(s string) *entry {
- return &entry{s: s}
+type pool struct {
+ lru *expirable.LRU[string, string]
}
// Intern returns the interned string. It returns the canonical representation of string.
@@ -175,45 +171,10 @@ func (p *pool) Intern(s string) string {
return ""
}
- p.mtx.RLock()
- interned, ok := p.pool[s]
- p.mtx.RUnlock()
+ interned, ok := p.lru.Get(s)
if ok {
- interned.refs.Inc()
- return interned.s
- }
- p.mtx.Lock()
- defer p.mtx.Unlock()
- if interned, ok := p.pool[s]; ok {
- interned.refs.Inc()
- return interned.s
+ return interned
}
-
- p.pool[s] = newEntry(s)
- p.pool[s].refs.Store(1)
+ p.lru.Add(s, s)
return s
}
-
-// Release releases a reference of the string `s`.
-// If the reference count become 0, the string `s` is removed from the memory
-func (p *pool) Release(s string) {
- p.mtx.RLock()
- interned, ok := p.pool[s]
- p.mtx.RUnlock()
-
- if !ok {
- return
- }
-
- refs := interned.refs.Dec()
- if refs > 0 {
- return
- }
-
- p.mtx.Lock()
- defer p.mtx.Unlock()
- if interned.refs.Load() != 0 {
- return
- }
- delete(p.pool, s)
-}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/test_util.go b/vendor/github.com/cortexproject/cortex/pkg/util/test_util.go
index 521a921e1..193e7dd9d 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/test_util.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/test_util.go
@@ -19,10 +19,10 @@ func GenerateRandomStrings() []string {
randomChar := "0123456789abcdef"
randomStrings := make([]string, 0, 1000000)
sb := strings.Builder{}
- for i := 0; i < 1000000; i++ {
+ for range 1000000 {
sb.Reset()
sb.WriteString("pod://")
- for j := 0; j < 14; j++ {
+ for range 14 {
sb.WriteByte(randomChar[rand.Int()%len(randomChar)])
}
randomStrings = append(randomStrings, sb.String())
@@ -50,20 +50,20 @@ func GenerateChunk(t require.TestingT, step time.Duration, from model.Time, poin
switch pe {
case chunkenc.EncXOR:
- for i := 0; i < points; i++ {
+ for range points {
appender.Append(int64(ts), float64(ts))
ts = ts.Add(step)
}
case chunkenc.EncHistogram:
histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points)
- for i := 0; i < points; i++ {
+ for i := range points {
_, _, appender, err = appender.AppendHistogram(nil, int64(ts), histograms[i], true)
require.NoError(t, err)
ts = ts.Add(step)
}
case chunkenc.EncFloatHistogram:
histograms := histogram_util.GenerateTestHistograms(int(from), int(step/time.Millisecond), points)
- for i := 0; i < points; i++ {
+ for i := range points {
_, _, appender, err = appender.AppendFloatHistogram(nil, int64(ts), histograms[i].ToFloat(nil), true)
require.NoError(t, err)
ts = ts.Add(step)
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/time.go b/vendor/github.com/cortexproject/cortex/pkg/util/time.go
index 3f19a71da..5e5249329 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/time.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/time.go
@@ -240,3 +240,72 @@ func ParseDurationMs(s string) (int64, error) {
}
return 0, httpgrpc.Errorf(http.StatusBadRequest, "cannot parse %q to a valid duration", s)
}
+
+func DurationMilliseconds(d time.Duration) int64 {
+ return int64(d / (time.Millisecond / time.Nanosecond))
+}
+
+// Copied from https://github.com/prometheus/prometheus/blob/dfae954dc1137568f33564e8cffda321f2867925/promql/engine.go#L811
+func GetTimeRangesForSelector(start, end int64, lookbackDelta time.Duration, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) {
+ subqOffset, subqRange, subqTs := subqueryTimes(path)
+
+ if subqTs != nil {
+ // The timestamp on the subquery overrides the eval statement time ranges.
+ start = *subqTs
+ end = *subqTs
+ }
+
+ if n.Timestamp != nil {
+ // The timestamp on the selector overrides everything.
+ start = *n.Timestamp
+ end = *n.Timestamp
+ } else {
+ offsetMilliseconds := DurationMilliseconds(subqOffset)
+ start = start - offsetMilliseconds - DurationMilliseconds(subqRange)
+ end -= offsetMilliseconds
+ }
+
+ if evalRange == 0 {
+ start -= DurationMilliseconds(lookbackDelta)
+ } else {
+ // For all matrix queries we want to ensure that we have (end-start) + range selected
+ // this way we have `range` data before the start time
+ start -= DurationMilliseconds(evalRange)
+ }
+
+ offsetMilliseconds := DurationMilliseconds(n.OriginalOffset)
+ start -= offsetMilliseconds
+ end -= offsetMilliseconds
+
+ return start, end
+}
+
+// Copied from https://github.com/prometheus/prometheus/blob/dfae954dc1137568f33564e8cffda321f2867925/promql/engine.go#L754
+// subqueryTimes returns the sum of offsets and ranges of all subqueries in the path.
+// If the @ modifier is used, then the offset and range is w.r.t. that timestamp
+// (i.e. the sum is reset when we have @ modifier).
+// The returned *int64 is the closest timestamp that was seen. nil for no @ modifier.
+func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) {
+ var (
+ subqOffset, subqRange time.Duration
+ ts int64 = math.MaxInt64
+ )
+ for _, node := range path {
+ if n, ok := node.(*parser.SubqueryExpr); ok {
+ subqOffset += n.OriginalOffset
+ subqRange += n.Range
+ if n.Timestamp != nil {
+ // The @ modifier on subquery invalidates all the offset and
+ // range till now. Hence resetting it here.
+ subqOffset = n.OriginalOffset
+ subqRange = n.Range
+ ts = *n.Timestamp
+ }
+ }
+ }
+ var tsp *int64
+ if ts != math.MaxInt64 {
+ tsp = &ts
+ }
+ return subqOffset, subqRange, tsp
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go
new file mode 100644
index 000000000..11ade9c93
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/errors.go
@@ -0,0 +1,309 @@
+package validation
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/histogram"
+
+ "github.com/cortexproject/cortex/pkg/cortexpb"
+)
+
+// ValidationError is an error returned by series validation.
+//
+// Ignore stutter warning.
+// nolint:revive
+type ValidationError error
+
+// genericValidationError is a basic implementation of ValidationError which can be used when the
+// error format only contains the cause and the series.
+type genericValidationError struct {
+ message string
+ cause string
+ series []cortexpb.LabelAdapter
+}
+
+func (e *genericValidationError) Error() string {
+ return fmt.Sprintf(e.message, e.cause, formatLabelSet(e.series))
+}
+
+// labelNameTooLongError is a customized ValidationError, in that the cause and the series are
+// formatted in different order in Error.
+type labelNameTooLongError struct {
+ labelName string
+ series []cortexpb.LabelAdapter
+ limit int
+}
+
+func (e *labelNameTooLongError) Error() string {
+ return fmt.Sprintf("label name too long for metric (actual: %d, limit: %d) metric: %.200q label name: %.200q", len(e.labelName), e.limit, formatLabelSet(e.series), e.labelName)
+}
+
+func newLabelNameTooLongError(series []cortexpb.LabelAdapter, labelName string, limit int) ValidationError {
+ return &labelNameTooLongError{
+ labelName: labelName,
+ series: series,
+ limit: limit,
+ }
+}
+
+// labelValueTooLongError is a customized ValidationError, in that the cause and the series are
+// formatted in different order in Error.
+type labelValueTooLongError struct {
+ labelName string
+ labelValue string
+ series []cortexpb.LabelAdapter
+ limit int
+}
+
+func (e *labelValueTooLongError) Error() string {
+ return fmt.Sprintf("label value too long for metric (actual: %d, limit: %d) metric: %.200q label name: %.200q label value: %.200q",
+ len(e.labelValue), e.limit, formatLabelSet(e.series), e.labelName, e.labelValue)
+}
+
+func newLabelValueTooLongError(series []cortexpb.LabelAdapter, labelName, labelValue string, limit int) ValidationError {
+ return &labelValueTooLongError{
+ labelName: labelName,
+ labelValue: labelValue,
+ series: series,
+ limit: limit,
+ }
+}
+
+// labelsSizeBytesExceededError is a customized ValidationError, in that the cause and the series are
+// formatted in different order in Error.
+type labelsSizeBytesExceededError struct {
+ labelsSizeBytes int
+ series []cortexpb.LabelAdapter
+ limit int
+}
+
+func (e *labelsSizeBytesExceededError) Error() string {
+ return fmt.Sprintf("labels size bytes exceeded for metric (actual: %d, limit: %d) metric: %.200q", e.labelsSizeBytes, e.limit, formatLabelSet(e.series))
+}
+
+func labelSizeBytesExceededError(series []cortexpb.LabelAdapter, labelsSizeBytes int, limit int) ValidationError {
+ return &labelsSizeBytesExceededError{
+ labelsSizeBytes: labelsSizeBytes,
+ series: series,
+ limit: limit,
+ }
+}
+
+func newInvalidLabelError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
+ return &genericValidationError{
+ message: "sample invalid label: %.200q metric %.200q",
+ cause: labelName,
+ series: series,
+ }
+}
+
+func newDuplicatedLabelError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
+ return &genericValidationError{
+ message: "duplicate label name: %.200q metric %.200q",
+ cause: labelName,
+ series: series,
+ }
+}
+
+func newLabelsNotSortedError(series []cortexpb.LabelAdapter, labelName string) ValidationError {
+ return &genericValidationError{
+ message: "labels not sorted: %.200q metric %.200q",
+ cause: labelName,
+ series: series,
+ }
+}
+
+type tooManyLabelsError struct {
+ series []cortexpb.LabelAdapter
+ limit int
+}
+
+func newTooManyLabelsError(series []cortexpb.LabelAdapter, limit int) ValidationError {
+ return &tooManyLabelsError{
+ series: series,
+ limit: limit,
+ }
+}
+
+func (e *tooManyLabelsError) Error() string {
+ return fmt.Sprintf(
+ "series has too many labels (actual: %d, limit: %d) series: '%s'",
+ len(e.series), e.limit, cortexpb.FromLabelAdaptersToMetric(e.series).String())
+}
+
+type noMetricNameError struct{}
+
+func newNoMetricNameError() ValidationError {
+ return &noMetricNameError{}
+}
+
+func (e *noMetricNameError) Error() string {
+ return "sample missing metric name"
+}
+
+type invalidMetricNameError struct {
+ metricName string
+}
+
+func newInvalidMetricNameError(metricName string) ValidationError {
+ return &invalidMetricNameError{
+ metricName: metricName,
+ }
+}
+
+func (e *invalidMetricNameError) Error() string {
+ return fmt.Sprintf("sample invalid metric name: %.200q", e.metricName)
+}
+
+// sampleValidationError is a ValidationError implementation suitable for sample validation errors.
+type sampleValidationError struct {
+ message string
+ metricName string
+ timestamp int64
+}
+
+func (e *sampleValidationError) Error() string {
+ return fmt.Sprintf(e.message, e.timestamp, e.metricName)
+}
+
+func newSampleTimestampTooOldError(metricName string, timestamp int64) ValidationError {
+ return &sampleValidationError{
+ message: "timestamp too old: %d metric: %.200q",
+ metricName: metricName,
+ timestamp: timestamp,
+ }
+}
+
+func newSampleTimestampTooNewError(metricName string, timestamp int64) ValidationError {
+ return &sampleValidationError{
+ message: "timestamp too new: %d metric: %.200q",
+ metricName: metricName,
+ timestamp: timestamp,
+ }
+}
+
+// exemplarValidationError is a ValidationError implementation suitable for exemplar validation errors.
+type exemplarValidationError struct {
+ message string
+ seriesLabels []cortexpb.LabelAdapter
+ exemplarLabels []cortexpb.LabelAdapter
+ timestamp int64
+}
+
+func (e *exemplarValidationError) Error() string {
+ return fmt.Sprintf(e.message, e.timestamp, cortexpb.FromLabelAdaptersToLabels(e.seriesLabels).String(), cortexpb.FromLabelAdaptersToLabels(e.exemplarLabels).String())
+}
+
+func newExemplarEmtpyLabelsError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError {
+ return &exemplarValidationError{
+ message: "exemplar missing labels, timestamp: %d series: %s labels: %s",
+ seriesLabels: seriesLabels,
+ exemplarLabels: exemplarLabels,
+ timestamp: timestamp,
+ }
+}
+
+func newExemplarMissingTimestampError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError {
+ return &exemplarValidationError{
+ message: "exemplar missing timestamp, timestamp: %d series: %s labels: %s",
+ seriesLabels: seriesLabels,
+ exemplarLabels: exemplarLabels,
+ timestamp: timestamp,
+ }
+}
+
+var labelLenMsg = "exemplar combined labelset exceeds " + strconv.Itoa(ExemplarMaxLabelSetLength) + " characters, timestamp: %d series: %s labels: %s"
+
+func newExemplarLabelLengthError(seriesLabels []cortexpb.LabelAdapter, exemplarLabels []cortexpb.LabelAdapter, timestamp int64) ValidationError {
+ return &exemplarValidationError{
+ message: labelLenMsg,
+ seriesLabels: seriesLabels,
+ exemplarLabels: exemplarLabels,
+ timestamp: timestamp,
+ }
+}
+
+// histogramBucketLimitExceededError is a ValidationError implementation for samples with native histogram
+// exceeding max bucket limit and cannot reduce resolution further to be within the max bucket limit.
+type histogramBucketLimitExceededError struct {
+ series []cortexpb.LabelAdapter
+ limit int
+}
+
+func newHistogramBucketLimitExceededError(series []cortexpb.LabelAdapter, limit int) ValidationError {
+ return &histogramBucketLimitExceededError{
+ series: series,
+ limit: limit,
+ }
+}
+
+func (e *histogramBucketLimitExceededError) Error() string {
+ return fmt.Sprintf("native histogram bucket count exceeded for metric (limit: %d) metric: %.200q", e.limit, formatLabelSet(e.series))
+}
+
+// nativeHistogramSchemaInvalidError is a ValidationError implementation for samples with native histogram
+// exceeding the valid schema range.
+type nativeHistogramSchemaInvalidError struct {
+ series []cortexpb.LabelAdapter
+ receivedSchema int
+}
+
+func newNativeHistogramSchemaInvalidError(series []cortexpb.LabelAdapter, receivedSchema int) ValidationError {
+ return &nativeHistogramSchemaInvalidError{
+ series: series,
+ receivedSchema: receivedSchema,
+ }
+}
+
+func (e *nativeHistogramSchemaInvalidError) Error() string {
+ return fmt.Sprintf("invalid native histogram schema %d for metric: %.200q. supported schema from %d to %d", e.receivedSchema, formatLabelSet(e.series), histogram.ExponentialSchemaMin, histogram.ExponentialSchemaMax)
+}
+
+// nativeHistogramSampleSizeBytesExceededError is a ValidationError implementation for samples with native histogram
+// exceeding the sample size bytes limit
+type nativeHistogramSampleSizeBytesExceededError struct {
+ nhSampleSizeBytes int
+ series []cortexpb.LabelAdapter
+ limit int
+}
+
+func newNativeHistogramSampleSizeBytesExceededError(series []cortexpb.LabelAdapter, nhSampleSizeBytes int, limit int) ValidationError {
+ return &nativeHistogramSampleSizeBytesExceededError{
+ nhSampleSizeBytes: nhSampleSizeBytes,
+ series: series,
+ limit: limit,
+ }
+}
+
+func (e *nativeHistogramSampleSizeBytesExceededError) Error() string {
+ return fmt.Sprintf("native histogram sample size bytes exceeded for metric (actual: %d, limit: %d) metric: %.200q", e.nhSampleSizeBytes, e.limit, formatLabelSet(e.series))
+}
+
+// formatLabelSet formats label adapters as a metric name with labels, while preserving
+// label order, and keeping duplicates. If there are multiple "__name__" labels, only
+// the first one is used as the metric name; the others are included as regular labels.
+func formatLabelSet(ls []cortexpb.LabelAdapter) string {
+ metricName, hasMetricName := "", false
+
+ labelStrings := make([]string, 0, len(ls))
+ for _, l := range ls {
+ if l.Name == model.MetricNameLabel && !hasMetricName && l.Value != "" {
+ metricName = l.Value
+ hasMetricName = true
+ } else {
+ labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", l.Name, l.Value))
+ }
+ }
+
+ if len(labelStrings) == 0 {
+ if hasMetricName {
+ return metricName
+ }
+ return "{}"
+ }
+
+ return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/exporter.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/exporter.go
new file mode 100644
index 000000000..b0c0cc925
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/exporter.go
@@ -0,0 +1,93 @@
+package validation
+
+import (
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// OverridesExporter exposes per-tenant resource limit overrides as Prometheus metrics
+type OverridesExporter struct {
+ tenantLimits TenantLimits
+ description *prometheus.Desc
+}
+
+// NewOverridesExporter creates an OverridesExporter that reads updates to per-tenant
+// limits using the provided function.
+func NewOverridesExporter(tenantLimits TenantLimits) *OverridesExporter {
+ return &OverridesExporter{
+ tenantLimits: tenantLimits,
+ description: prometheus.NewDesc(
+ "cortex_overrides",
+ "Resource limit overrides applied to tenants",
+ []string{"limit_name", "user"},
+ nil,
+ ),
+ }
+}
+
+func (oe *OverridesExporter) Describe(ch chan<- *prometheus.Desc) {
+ ch <- oe.description
+}
+
+func (oe *OverridesExporter) Collect(ch chan<- prometheus.Metric) {
+ allLimits := oe.tenantLimits.AllByUserID()
+ for tenant, limits := range allLimits {
+ for metricName, value := range ExtractNumericalValues(limits) {
+ ch <- prometheus.MustNewConstMetric(oe.description, prometheus.GaugeValue, value, metricName, tenant)
+ }
+ }
+}
+
+func ExtractNumericalValues(l *Limits) map[string]float64 {
+ metrics := make(map[string]float64)
+
+ v := reflect.ValueOf(l).Elem()
+ t := v.Type()
+
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ fieldType := t.Field(i)
+
+ tag := fieldType.Tag.Get("yaml")
+ if tag == "" || tag == "-" {
+ // skip fields that have no yaml tag or are explicitly excluded with "-"
+ continue
+ }
+
+ // remove options like omitempty
+ if idx := strings.Index(tag, ","); idx != -1 {
+ tag = tag[:idx]
+ }
+
+ switch field.Kind() {
+ case reflect.Int, reflect.Int64:
+ switch fieldType.Type.String() {
+ case "model.Duration":
+ // we export the model.Duration in seconds
+ metrics[tag] = time.Duration(field.Int()).Seconds()
+ case "model.ValidationScheme":
+ // skip
+ default:
+ metrics[tag] = float64(field.Int())
+ }
+ case reflect.Uint, reflect.Uint64:
+ metrics[tag] = float64(field.Uint())
+ case reflect.Float64:
+ metrics[tag] = field.Float()
+ case reflect.Bool:
+ if field.Bool() {
+ // true as 1.0
+ metrics[tag] = 1.0
+ } else {
+ // false as 0.0
+ metrics[tag] = 0.0
+ }
+ case reflect.String, reflect.Slice, reflect.Map, reflect.Struct:
+ continue
+ }
+ }
+ return metrics
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
new file mode 100644
index 000000000..16b956bef
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go
@@ -0,0 +1,1228 @@
+package validation
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "flag"
+ "fmt"
+ "maps"
+ "math"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/cespare/xxhash/v2"
+ "github.com/go-kit/log/level"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/relabel"
+ "github.com/segmentio/fasthash/fnv1a"
+ "golang.org/x/time/rate"
+
+ "github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
+)
+
+var errMaxGlobalSeriesPerUserValidation = errors.New("the ingester.max-global-series-per-user limit is unsupported if distributor.shard-by-all-labels is disabled")
+var errMaxGlobalNativeHistogramSeriesPerUserValidation = errors.New("the ingester.max-global-native-histogram-series-per-user limit is unsupported if distributor.shard-by-all-labels or ingester.active-series-metrics-enabled is disabled")
+var errMaxLocalNativeHistogramSeriesPerUserValidation = errors.New("the ingester.max-local-native-histogram-series-per-user limit is unsupported if ingester.active-series-metrics-enabled is disabled")
+var errDuplicateQueryPriorities = errors.New("duplicate entry of priorities found. Make sure they are all unique, including the default priority")
+var errCompilingQueryPriorityRegex = errors.New("error compiling query priority regex")
+var errDuplicatePerLabelSetLimit = errors.New("duplicate per labelSet limits found. Make sure they are all unique")
+var errInvalidLabelName = errors.New("invalid label name")
+var errInvalidLabelValue = errors.New("invalid label value")
+
+// Supported values for enum limits
+const (
+ LocalIngestionRateStrategy = "local"
+ GlobalIngestionRateStrategy = "global"
+)
+
+// AccessDeniedError are errors indicating that access to the requested resource was denied.
+type AccessDeniedError string
+
+// Error implements the error interface, returning the message the
+// AccessDeniedError was created with.
+func (e AccessDeniedError) Error() string {
+ return string(e)
+}
+
+// LimitError are errors that do not comply with the limits specified.
+type LimitError string
+
+// Error implements the error interface, returning the message the
+// LimitError was created with.
+func (e LimitError) Error() string {
+ return string(e)
+}
+
+// IsLimitError reports whether e is, or wraps, a LimitError.
+func IsLimitError(e error) bool {
+ var limitError LimitError
+ return errors.As(e, &limitError)
+}
+
+type DisabledRuleGroup struct {
+ Namespace string `yaml:"namespace" doc:"nocli|description=namespace in which the rule group belongs"`
+ Name string `yaml:"name" doc:"nocli|description=name of the rule group"`
+ User string `yaml:"-" doc:"nocli"`
+}
+
+type DisabledRuleGroups []DisabledRuleGroup
+
+type QueryPriority struct {
+ Enabled bool `yaml:"enabled" json:"enabled"`
+ DefaultPriority int64 `yaml:"default_priority" json:"default_priority"`
+ Priorities []PriorityDef `yaml:"priorities" json:"priorities" doc:"nocli|description=List of priority definitions."`
+}
+
+type PriorityDef struct {
+ Priority int64 `yaml:"priority" json:"priority" doc:"nocli|description=Priority level. Must be a unique value.|default=0"`
+ ReservedQueriers float64 `yaml:"reserved_queriers" json:"reserved_queriers" doc:"nocli|description=Number of reserved queriers to handle priorities higher or equal to the priority level. Value between 0 and 1 will be used as a percentage.|default=0"`
+ QueryAttributes []QueryAttribute `yaml:"query_attributes" json:"query_attributes" doc:"nocli|description=List of query_attributes to match and assign priority to queries. A query is assigned to this priority if it matches any query_attribute in this list. Each query_attribute has several properties (e.g., regex, time_window, user_agent), and all specified properties must match for a query_attribute to be considered a match. Only the specified properties are checked, and an AND operator is applied to them."`
+}
+
+type QueryRejection struct {
+ Enabled bool `yaml:"enabled" json:"enabled"`
+ QueryAttributes []QueryAttribute `yaml:"query_attributes" json:"query_attributes" doc:"nocli|description=List of query_attributes to match and reject queries. A query is rejected if it matches any query_attribute in this list. Each query_attribute has several properties (e.g., regex, time_window, user_agent), and all specified properties must match for a query_attribute to be considered a match. Only the specified properties are checked, and an AND operator is applied to them."`
+}
+
+type QueryAttribute struct {
+ ApiType string `yaml:"api_type" json:"api_type" doc:"nocli|description=API type for the query. Should be one of the query, query_range, series, labels, label_values. If not set, it won't be checked."`
+ Regex string `yaml:"regex" json:"regex" doc:"nocli|description=Regex that the query string (or at least one of the matchers in metadata query) should match. If not set, it won't be checked."`
+ TimeWindow TimeWindow `yaml:"time_window" json:"time_window" doc:"nocli|description=Overall data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If not set, it won't be checked."`
+ TimeRangeLimit TimeRangeLimit `yaml:"time_range_limit" json:"time_range_limit" doc:"nocli|description=Query time range should be within this limit to match. Depending on where it was used, in most of the use-cases, either min or max value will be used. If not set, it won't be checked."`
+ QueryStepLimit QueryStepLimit `yaml:"query_step_limit" json:"query_step_limit" doc:"nocli|description=If query step provided should be within this limit to match. If not set, it won't be checked. This property only applied to range queries and ignored for other types of queries."`
+ UserAgentRegex string `yaml:"user_agent_regex" json:"user_agent_regex" doc:"nocli|description=Regex that User-Agent header of the request should match. If not set, it won't be checked."`
+ DashboardUID string `yaml:"dashboard_uid" json:"dashboard_uid" doc:"nocli|description=Grafana includes X-Dashboard-Uid header in query requests. If this field is provided then X-Dashboard-Uid header of request should match this value. If not set, it won't be checked. This property won't be applied to metadata queries."`
+ PanelID string `yaml:"panel_id" json:"panel_id" doc:"nocli|description=Grafana includes X-Panel-Id header in query requests. If this field is provided then X-Panel-Id header of request should match this value. If not set, it won't be checked. This property won't be applied to metadata queries."`
+ CompiledRegex *regexp.Regexp
+ CompiledUserAgentRegex *regexp.Regexp
+}
+
+type TimeWindow struct {
+ Start model.Duration `yaml:"start" json:"start" doc:"nocli|description=Start of the data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If set to 0, it won't be checked.|default=0"`
+ End model.Duration `yaml:"end" json:"end" doc:"nocli|description=End of the data select time window (including range selectors, modifiers and lookback delta) that the query should be within. If set to 0, it won't be checked.|default=0"`
+}
+
+type TimeRangeLimit struct {
+ Min model.Duration `yaml:"min" json:"min" doc:"nocli|description=This will be duration (12h, 1d, 15d etc.). Query time range should be above or equal to this value to match. Ex: if this value is 20d, then queries whose range is bigger than or equal to 20d will match. If set to 0, it won't be checked.|default=0"`
+ Max model.Duration `yaml:"max" json:"max" doc:"nocli|description=This will be duration (12h, 1d, 15d etc.). Query time range should be below or equal to this value to match. Ex: if this value is 24h, then queries whose range is smaller than or equal to 24h will match.If set to 0, it won't be checked.|default=0"`
+}
+
+type QueryStepLimit struct {
+ Min model.Duration `yaml:"min" json:"min" doc:"nocli|description=Query step should be above or equal to this value to match. If set to 0, it won't be checked.|default=0"`
+ Max model.Duration `yaml:"max" json:"max" doc:"nocli|description=Query step should be below or equal to this value to match. If set to 0, it won't be checked.|default=0"`
+}
+
+type LimitsPerLabelSetEntry struct {
+ MaxSeries int `yaml:"max_series" json:"max_series" doc:"nocli|description=The maximum number of active series per LabelSet, across the cluster before replication. Setting the value 0 will enable the monitoring (metrics) but would not enforce any limits."`
+}
+
+type LimitsPerLabelSet struct {
+ Limits LimitsPerLabelSetEntry `yaml:"limits" json:"limits" doc:"nocli"`
+ LabelSet labels.Labels `yaml:"label_set" json:"label_set" doc:"nocli|description=LabelSet which the limit should be applied. If no labels are provided, it becomes the default partition which matches any series that doesn't match any other explicitly defined label sets.'"`
+ Id string `yaml:"-" json:"-" doc:"nocli"`
+ Hash uint64 `yaml:"-" json:"-" doc:"nocli"`
+}
+
+// Limits describe all the limits for users; can be used to describe global default
+// limits via flags, or per-user limits via yaml config.
+type Limits struct {
+ // Distributor enforced limits.
+ IngestionRate float64 `yaml:"ingestion_rate" json:"ingestion_rate"`
+ NativeHistogramIngestionRate float64 `yaml:"native_histogram_ingestion_rate" json:"native_histogram_ingestion_rate"`
+ IngestionRateStrategy string `yaml:"ingestion_rate_strategy" json:"ingestion_rate_strategy"`
+ IngestionBurstSize int `yaml:"ingestion_burst_size" json:"ingestion_burst_size"`
+ NativeHistogramIngestionBurstSize int `yaml:"native_histogram_ingestion_burst_size" json:"native_histogram_ingestion_burst_size"`
+ AcceptHASamples bool `yaml:"accept_ha_samples" json:"accept_ha_samples"`
+ AcceptMixedHASamples bool `yaml:"accept_mixed_ha_samples" json:"accept_mixed_ha_samples"`
+ HAClusterLabel string `yaml:"ha_cluster_label" json:"ha_cluster_label"`
+ HAReplicaLabel string `yaml:"ha_replica_label" json:"ha_replica_label"`
+ HAMaxClusters int `yaml:"ha_max_clusters" json:"ha_max_clusters"`
+ DropLabels flagext.StringSlice `yaml:"drop_labels" json:"drop_labels"`
+ MaxLabelNameLength int `yaml:"max_label_name_length" json:"max_label_name_length"`
+ MaxLabelValueLength int `yaml:"max_label_value_length" json:"max_label_value_length"`
+ MaxLabelNamesPerSeries int `yaml:"max_label_names_per_series" json:"max_label_names_per_series"`
+ MaxLabelsSizeBytes int `yaml:"max_labels_size_bytes" json:"max_labels_size_bytes"`
+ MaxNativeHistogramSampleSizeBytes int `yaml:"max_native_histogram_sample_size_bytes" json:"max_native_histogram_sample_size_bytes"`
+ MaxMetadataLength int `yaml:"max_metadata_length" json:"max_metadata_length"`
+ RejectOldSamples bool `yaml:"reject_old_samples" json:"reject_old_samples"`
+ RejectOldSamplesMaxAge model.Duration `yaml:"reject_old_samples_max_age" json:"reject_old_samples_max_age"`
+ CreationGracePeriod model.Duration `yaml:"creation_grace_period" json:"creation_grace_period"`
+ EnforceMetadataMetricName bool `yaml:"enforce_metadata_metric_name" json:"enforce_metadata_metric_name"`
+ EnforceMetricName bool `yaml:"enforce_metric_name" json:"enforce_metric_name"`
+ IngestionTenantShardSize int `yaml:"ingestion_tenant_shard_size" json:"ingestion_tenant_shard_size"`
+ MetricRelabelConfigs []*relabel.Config `yaml:"metric_relabel_configs,omitempty" json:"metric_relabel_configs,omitempty" doc:"nocli|description=List of metric relabel configurations. Note that in most situations, it is more effective to use metrics relabeling directly in the Prometheus server, e.g. remote_write.write_relabel_configs."`
+ MaxNativeHistogramBuckets int `yaml:"max_native_histogram_buckets" json:"max_native_histogram_buckets"`
+ PromoteResourceAttributes []string `yaml:"promote_resource_attributes" json:"promote_resource_attributes"`
+
+ // Ingester enforced limits.
+ // Series
+ MaxLocalSeriesPerUser int `yaml:"max_series_per_user" json:"max_series_per_user"`
+ MaxLocalSeriesPerMetric int `yaml:"max_series_per_metric" json:"max_series_per_metric"`
+ MaxLocalNativeHistogramSeriesPerUser int `yaml:"max_native_histogram_series_per_user" json:"max_native_histogram_series_per_user"`
+ MaxGlobalSeriesPerUser int `yaml:"max_global_series_per_user" json:"max_global_series_per_user"`
+ MaxGlobalSeriesPerMetric int `yaml:"max_global_series_per_metric" json:"max_global_series_per_metric"`
+ MaxGlobalNativeHistogramSeriesPerUser int `yaml:"max_global_native_histogram_series_per_user" json:"max_global_native_histogram_series_per_user"`
+ LimitsPerLabelSet []LimitsPerLabelSet `yaml:"limits_per_label_set" json:"limits_per_label_set" doc:"nocli|description=[Experimental] Enable limits per LabelSet. Supported limits per labelSet: [max_series]"`
+ EnableNativeHistograms bool `yaml:"enable_native_histograms" json:"enable_native_histograms"`
+
+ // Metadata
+ MaxLocalMetricsWithMetadataPerUser int `yaml:"max_metadata_per_user" json:"max_metadata_per_user"`
+ MaxLocalMetadataPerMetric int `yaml:"max_metadata_per_metric" json:"max_metadata_per_metric"`
+ MaxGlobalMetricsWithMetadataPerUser int `yaml:"max_global_metadata_per_user" json:"max_global_metadata_per_user"`
+ MaxGlobalMetadataPerMetric int `yaml:"max_global_metadata_per_metric" json:"max_global_metadata_per_metric"`
+ // Out-of-order
+ OutOfOrderTimeWindow model.Duration `yaml:"out_of_order_time_window" json:"out_of_order_time_window"`
+ // Exemplars
+ MaxExemplars int `yaml:"max_exemplars" json:"max_exemplars"`
+
+ // Querier enforced limits.
+ MaxChunksPerQuery int `yaml:"max_fetched_chunks_per_query" json:"max_fetched_chunks_per_query"`
+ MaxFetchedSeriesPerQuery int `yaml:"max_fetched_series_per_query" json:"max_fetched_series_per_query"`
+ MaxFetchedChunkBytesPerQuery int `yaml:"max_fetched_chunk_bytes_per_query" json:"max_fetched_chunk_bytes_per_query"`
+ MaxFetchedDataBytesPerQuery int `yaml:"max_fetched_data_bytes_per_query" json:"max_fetched_data_bytes_per_query"`
+ MaxQueryLookback model.Duration `yaml:"max_query_lookback" json:"max_query_lookback"`
+ MaxQueryLength model.Duration `yaml:"max_query_length" json:"max_query_length"`
+ MaxQueryParallelism int `yaml:"max_query_parallelism" json:"max_query_parallelism"`
+ MaxQueryResponseSize int64 `yaml:"max_query_response_size" json:"max_query_response_size"`
+ MaxCacheFreshness model.Duration `yaml:"max_cache_freshness" json:"max_cache_freshness"`
+ MaxQueriersPerTenant float64 `yaml:"max_queriers_per_tenant" json:"max_queriers_per_tenant"`
+ QueryVerticalShardSize int `yaml:"query_vertical_shard_size" json:"query_vertical_shard_size"`
+ QueryPartialData bool `yaml:"query_partial_data" json:"query_partial_data" doc:"nocli|description=Enable to allow queries to be evaluated with data from a single zone, if other zones are not available.|default=false"`
+
+ // Parquet Queryable enforced limits.
+ ParquetMaxFetchedRowCount int `yaml:"parquet_max_fetched_row_count" json:"parquet_max_fetched_row_count"`
+ ParquetMaxFetchedChunkBytes int `yaml:"parquet_max_fetched_chunk_bytes" json:"parquet_max_fetched_chunk_bytes"`
+ ParquetMaxFetchedDataBytes int `yaml:"parquet_max_fetched_data_bytes" json:"parquet_max_fetched_data_bytes"`
+
+ // Query Frontend / Scheduler enforced limits.
+ MaxOutstandingPerTenant int `yaml:"max_outstanding_requests_per_tenant" json:"max_outstanding_requests_per_tenant"`
+ QueryPriority QueryPriority `yaml:"query_priority" json:"query_priority" doc:"nocli|description=Configuration for query priority."`
+ queryAttributeRegexHash uint64
+ queryAttributeCompiledRegex map[string]*regexp.Regexp
+ QueryRejection QueryRejection `yaml:"query_rejection" json:"query_rejection" doc:"nocli|description=Configuration for query rejection."`
+
+ // Ruler defaults and limits.
+ RulerEvaluationDelay model.Duration `yaml:"ruler_evaluation_delay_duration" json:"ruler_evaluation_delay_duration"`
+ RulerTenantShardSize float64 `yaml:"ruler_tenant_shard_size" json:"ruler_tenant_shard_size"`
+ RulerMaxRulesPerRuleGroup int `yaml:"ruler_max_rules_per_rule_group" json:"ruler_max_rules_per_rule_group"`
+ RulerMaxRuleGroupsPerTenant int `yaml:"ruler_max_rule_groups_per_tenant" json:"ruler_max_rule_groups_per_tenant"`
+ RulerQueryOffset model.Duration `yaml:"ruler_query_offset" json:"ruler_query_offset"`
+ RulerExternalLabels labels.Labels `yaml:"ruler_external_labels" json:"ruler_external_labels" doc:"nocli|description=external labels for alerting rules"`
+ RulesPartialData bool `yaml:"rules_partial_data" json:"rules_partial_data" doc:"nocli|description=Enable to allow rules to be evaluated with data from a single zone, if other zones are not available.|default=false"`
+
+ // Store-gateway.
+ StoreGatewayTenantShardSize float64 `yaml:"store_gateway_tenant_shard_size" json:"store_gateway_tenant_shard_size"`
+ MaxDownloadedBytesPerRequest int `yaml:"max_downloaded_bytes_per_request" json:"max_downloaded_bytes_per_request"`
+
+ // Compactor.
+ CompactorBlocksRetentionPeriod model.Duration `yaml:"compactor_blocks_retention_period" json:"compactor_blocks_retention_period"`
+ CompactorTenantShardSize float64 `yaml:"compactor_tenant_shard_size" json:"compactor_tenant_shard_size"`
+ CompactorPartitionIndexSizeBytes int64 `yaml:"compactor_partition_index_size_bytes" json:"compactor_partition_index_size_bytes"`
+ CompactorPartitionSeriesCount int64 `yaml:"compactor_partition_series_count" json:"compactor_partition_series_count"`
+
+ // Parquet converter
+ ParquetConverterEnabled bool `yaml:"parquet_converter_enabled" json:"parquet_converter_enabled"`
+ ParquetConverterTenantShardSize float64 `yaml:"parquet_converter_tenant_shard_size" json:"parquet_converter_tenant_shard_size"`
+ ParquetConverterSortColumns []string `yaml:"parquet_converter_sort_columns" json:"parquet_converter_sort_columns"`
+ // This config doesn't have a CLI flag registered here because they're registered in
+ // their own original config struct.
+ S3SSEType string `yaml:"s3_sse_type" json:"s3_sse_type" doc:"nocli|description=S3 server-side encryption type. Required to enable server-side encryption overrides for a specific tenant. If not set, the default S3 client settings are used."`
+ S3SSEKMSKeyID string `yaml:"s3_sse_kms_key_id" json:"s3_sse_kms_key_id" doc:"nocli|description=S3 server-side encryption KMS Key ID. Ignored if the SSE type override is not set."`
+ S3SSEKMSEncryptionContext string `yaml:"s3_sse_kms_encryption_context" json:"s3_sse_kms_encryption_context" doc:"nocli|description=S3 server-side encryption KMS encryption context. If unset and the key ID override is set, the encryption context will not be provided to S3. Ignored if the SSE type override is not set."`
+
+ // Alertmanager.
+ AlertmanagerReceiversBlockCIDRNetworks flagext.CIDRSliceCSV `yaml:"alertmanager_receivers_firewall_block_cidr_networks" json:"alertmanager_receivers_firewall_block_cidr_networks"`
+ AlertmanagerReceiversBlockPrivateAddresses bool `yaml:"alertmanager_receivers_firewall_block_private_addresses" json:"alertmanager_receivers_firewall_block_private_addresses"`
+
+ NotificationRateLimit float64 `yaml:"alertmanager_notification_rate_limit" json:"alertmanager_notification_rate_limit"`
+ NotificationRateLimitPerIntegration NotificationRateLimitMap `yaml:"alertmanager_notification_rate_limit_per_integration" json:"alertmanager_notification_rate_limit_per_integration"`
+
+ AlertmanagerMaxConfigSizeBytes int `yaml:"alertmanager_max_config_size_bytes" json:"alertmanager_max_config_size_bytes"`
+ AlertmanagerMaxTemplatesCount int `yaml:"alertmanager_max_templates_count" json:"alertmanager_max_templates_count"`
+ AlertmanagerMaxTemplateSizeBytes int `yaml:"alertmanager_max_template_size_bytes" json:"alertmanager_max_template_size_bytes"`
+ AlertmanagerMaxDispatcherAggregationGroups int `yaml:"alertmanager_max_dispatcher_aggregation_groups" json:"alertmanager_max_dispatcher_aggregation_groups"`
+ AlertmanagerMaxAlertsCount int `yaml:"alertmanager_max_alerts_count" json:"alertmanager_max_alerts_count"`
+ AlertmanagerMaxAlertsSizeBytes int `yaml:"alertmanager_max_alerts_size_bytes" json:"alertmanager_max_alerts_size_bytes"`
+ AlertmanagerMaxSilencesCount int `yaml:"alertmanager_max_silences_count" json:"alertmanager_max_silences_count"`
+ AlertmanagerMaxSilencesSizeBytes int `yaml:"alertmanager_max_silences_size_bytes" json:"alertmanager_max_silences_size_bytes"`
+ DisabledRuleGroups DisabledRuleGroups `yaml:"disabled_rule_groups" json:"disabled_rule_groups" doc:"nocli|description=list of rule groups to disable"`
+}
+
+// RegisterFlags adds the flags required to config this to the given FlagSet
+func (l *Limits) RegisterFlags(f *flag.FlagSet) {
+ flagext.DeprecatedFlag(f, "ingester.max-series-per-query", "Deprecated: The maximum number of series for which a query can fetch samples from each ingester. This limit is enforced only in the ingesters (when querying samples not flushed to the storage yet) and it's a per-instance limit. This limit is ignored when running the Cortex blocks storage. When running Cortex with blocks storage use -querier.max-fetched-series-per-query limit instead.", util_log.Logger)
+
+ f.IntVar(&l.IngestionTenantShardSize, "distributor.ingestion-tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set both on ingesters and distributors. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant.")
+ f.Float64Var(&l.IngestionRate, "distributor.ingestion-rate-limit", 25000, "Per-user ingestion rate limit in samples per second.")
+ f.Float64Var(&l.NativeHistogramIngestionRate, "distributor.native-histogram-ingestion-rate-limit", float64(rate.Inf), "Per-user native histogram ingestion rate limit in samples per second. Disabled by default")
+ f.StringVar(&l.IngestionRateStrategy, "distributor.ingestion-rate-limit-strategy", "local", "Whether the ingestion rate limit should be applied individually to each distributor instance (local), or evenly shared across the cluster (global).")
+ f.IntVar(&l.IngestionBurstSize, "distributor.ingestion-burst-size", 50000, "Per-user allowed ingestion burst size (in number of samples).")
+ f.IntVar(&l.NativeHistogramIngestionBurstSize, "distributor.native-histogram-ingestion-burst-size", 0, "Per-user allowed native histogram ingestion burst size (in number of samples)")
+ f.BoolVar(&l.AcceptHASamples, "distributor.ha-tracker.enable-for-all-users", false, "Flag to enable, for all users, handling of samples with external labels identifying replicas in an HA Prometheus setup.")
+ f.BoolVar(&l.AcceptMixedHASamples, "experimental.distributor.ha-tracker.mixed-ha-samples", false, "[Experimental] Flag to enable handling of samples with mixed external labels identifying replicas in an HA Prometheus setup. Supported only if -distributor.ha-tracker.enable-for-all-users is true.")
+ f.StringVar(&l.HAClusterLabel, "distributor.ha-tracker.cluster", "cluster", "Prometheus label to look for in samples to identify a Prometheus HA cluster.")
+ f.StringVar(&l.HAReplicaLabel, "distributor.ha-tracker.replica", "__replica__", "Prometheus label to look for in samples to identify a Prometheus HA replica.")
+ f.IntVar(&l.HAMaxClusters, "distributor.ha-tracker.max-clusters", 0, "Maximum number of clusters that HA tracker will keep track of for single user. 0 to disable the limit.")
+ f.Var((*flagext.StringSliceCSV)(&l.PromoteResourceAttributes), "distributor.promote-resource-attributes", "Comma separated list of resource attributes that should be converted to labels.")
+ f.Var(&l.DropLabels, "distributor.drop-label", "This flag can be used to specify label names that to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels.")
+ f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names")
+ f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name")
+ f.IntVar(&l.MaxLabelNamesPerSeries, "validation.max-label-names-per-series", 30, "Maximum number of label names per series.")
+ f.IntVar(&l.MaxLabelsSizeBytes, "validation.max-labels-size-bytes", 0, "Maximum combined size in bytes of all labels and label values accepted for a series. 0 to disable the limit.")
+ f.IntVar(&l.MaxNativeHistogramSampleSizeBytes, "validation.max-native-histogram-sample-size-bytes", 0, "Maximum size in bytes of a native histogram sample. 0 to disable the limit.")
+ f.IntVar(&l.MaxMetadataLength, "validation.max-metadata-length", 1024, "Maximum length accepted for metric metadata. Metadata refers to Metric Name, HELP and UNIT.")
+ f.BoolVar(&l.RejectOldSamples, "validation.reject-old-samples", false, "Reject old samples.")
+ _ = l.RejectOldSamplesMaxAge.Set("14d")
+ f.Var(&l.RejectOldSamplesMaxAge, "validation.reject-old-samples.max-age", "Maximum accepted sample age before rejecting.")
+ _ = l.CreationGracePeriod.Set("10m")
+ f.Var(&l.CreationGracePeriod, "validation.create-grace-period", "Duration which table will be created/deleted before/after it's needed; we won't accept sample from before this time.")
+ f.BoolVar(&l.EnforceMetricName, "validation.enforce-metric-name", true, "Enforce every sample has a metric name.")
+ f.BoolVar(&l.EnforceMetadataMetricName, "validation.enforce-metadata-metric-name", true, "Enforce every metadata has a metric name.")
+ f.IntVar(&l.MaxNativeHistogramBuckets, "validation.max-native-histogram-buckets", 0, "Limit on total number of positive and negative buckets allowed in a single native histogram. The resolution of a histogram with more buckets will be reduced until the number of buckets is within the limit. If the limit cannot be reached, the sample will be discarded. 0 means no limit. Enforced at Distributor.")
+
+ f.IntVar(&l.MaxLocalSeriesPerUser, "ingester.max-series-per-user", 5000000, "The maximum number of active series per user, per ingester. 0 to disable.")
+ f.IntVar(&l.MaxLocalSeriesPerMetric, "ingester.max-series-per-metric", 50000, "The maximum number of active series per metric name, per ingester. 0 to disable.")
+ f.IntVar(&l.MaxGlobalSeriesPerUser, "ingester.max-global-series-per-user", 0, "The maximum number of active series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.")
+ f.IntVar(&l.MaxGlobalSeriesPerMetric, "ingester.max-global-series-per-metric", 0, "The maximum number of active series per metric name, across the cluster before replication. 0 to disable.")
+ f.IntVar(&l.MaxLocalNativeHistogramSeriesPerUser, "ingester.max-native-histogram-series-per-user", 0, "The maximum number of active native histogram series per user, per ingester. 0 to disable. Supported only if ingester.active-series-metrics-enabled is true.")
+ f.IntVar(&l.MaxGlobalNativeHistogramSeriesPerUser, "ingester.max-global-native-histogram-series-per-user", 0, "The maximum number of active native histogram series per user, across the cluster before replication. 0 to disable. Supported only if -distributor.shard-by-all-labels and ingester.active-series-metrics-enabled is true.")
+ f.BoolVar(&l.EnableNativeHistograms, "blocks-storage.tsdb.enable-native-histograms", false, "[EXPERIMENTAL] True to enable native histogram.")
+ f.IntVar(&l.MaxExemplars, "ingester.max-exemplars", 0, "Enables support for exemplars in TSDB and sets the maximum number that will be stored. less than zero means disabled. If the value is set to zero, cortex will fallback to blocks-storage.tsdb.max-exemplars value.")
+ f.Var(&l.OutOfOrderTimeWindow, "ingester.out-of-order-time-window", "[Experimental] Configures the allowed time window for ingestion of out-of-order samples. Disabled (0s) by default.")
+
+ f.IntVar(&l.MaxLocalMetricsWithMetadataPerUser, "ingester.max-metadata-per-user", 8000, "The maximum number of active metrics with metadata per user, per ingester. 0 to disable.")
+ f.IntVar(&l.MaxLocalMetadataPerMetric, "ingester.max-metadata-per-metric", 10, "The maximum number of metadata per metric, per ingester. 0 to disable.")
+ f.IntVar(&l.MaxGlobalMetricsWithMetadataPerUser, "ingester.max-global-metadata-per-user", 0, "The maximum number of active metrics with metadata per user, across the cluster. 0 to disable. Supported only if -distributor.shard-by-all-labels is true.")
+ f.IntVar(&l.MaxGlobalMetadataPerMetric, "ingester.max-global-metadata-per-metric", 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.")
+ f.IntVar(&l.MaxChunksPerQuery, "querier.max-fetched-chunks-per-query", 2000000, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.")
+ f.IntVar(&l.MaxFetchedSeriesPerQuery, "querier.max-fetched-series-per-query", 0, "The maximum number of unique series for which a query can fetch samples from each ingesters and blocks storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable")
+ f.IntVar(&l.MaxFetchedChunkBytesPerQuery, "querier.max-fetched-chunk-bytes-per-query", 0, "Deprecated (use max-fetched-data-bytes-per-query instead): The maximum size of all chunks in bytes that a query can fetch from each ingester and storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.")
+ f.IntVar(&l.MaxFetchedDataBytesPerQuery, "querier.max-fetched-data-bytes-per-query", 0, "The maximum combined size of all data that a query can fetch from each ingester and storage. This limit is enforced in the querier and ruler for `query`, `query_range` and `series` APIs. 0 to disable.")
+ f.Var(&l.MaxQueryLength, "store.max-query-length", "Limit the query time range (end - start time of range query parameter and max - min of data fetched time range). This limit is enforced in the query-frontend and ruler (on the received query). 0 to disable.")
+ f.Var(&l.MaxQueryLookback, "querier.max-query-lookback", "Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.")
+ f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of split queries will be scheduled in parallel by the frontend.")
+ _ = l.MaxCacheFreshness.Set("1m")
+ f.Int64Var(&l.MaxQueryResponseSize, "frontend.max-query-response-size", 0, "The maximum total uncompressed query response size. If the query was sharded the limit is applied to the total response size of all shards. This limit is enforced in query-frontend for `query` and `query_range` APIs. 0 to disable.")
+ f.Var(&l.MaxCacheFreshness, "frontend.max-cache-freshness", "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.")
+ f.Float64Var(&l.MaxQueriersPerTenant, "frontend.max-queriers-per-tenant", 0, "Maximum number of queriers that can handle requests for a single tenant. If set to 0 or value higher than number of available queriers, *all* queriers will handle requests for the tenant. If the value is < 1, it will be treated as a percentage and the gets a percentage of the total queriers. Each frontend (or query-scheduler, if used) will select the same set of queriers for the same tenant (given that all queriers are connected to all frontends / query-schedulers). This option only works with queriers connecting to the query-frontend / query-scheduler, not when using downstream URL.")
+ f.IntVar(&l.QueryVerticalShardSize, "frontend.query-vertical-shard-size", 0, "[Experimental] Number of shards to use when distributing shardable PromQL queries.")
+ f.BoolVar(&l.QueryPriority.Enabled, "frontend.query-priority.enabled", false, "Whether queries are assigned with priorities.")
+ f.Int64Var(&l.QueryPriority.DefaultPriority, "frontend.query-priority.default-priority", 0, "Priority assigned to all queries by default. Must be a unique value. Use this as a baseline to make certain queries higher/lower priority.")
+ f.BoolVar(&l.QueryRejection.Enabled, "frontend.query-rejection.enabled", false, "Whether query rejection is enabled.")
+
+ f.IntVar(&l.MaxOutstandingPerTenant, "frontend.max-outstanding-requests-per-tenant", 100, "Maximum number of outstanding requests per tenant per request queue (either query frontend or query scheduler); requests beyond this error with HTTP 429.")
+
+ f.Var(&l.RulerEvaluationDelay, "ruler.evaluation-delay-duration", "Deprecated(use ruler.query-offset instead) and will be removed in v1.19.0: Duration to delay the evaluation of rules to ensure the underlying metrics have been pushed to Cortex.")
+ f.Float64Var(&l.RulerTenantShardSize, "ruler.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by ruler. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is < 1 the shard size will be a percentage of the total rulers.")
+ f.IntVar(&l.RulerMaxRulesPerRuleGroup, "ruler.max-rules-per-rule-group", 0, "Maximum number of rules per rule group per-tenant. 0 to disable.")
+ f.IntVar(&l.RulerMaxRuleGroupsPerTenant, "ruler.max-rule-groups-per-tenant", 0, "Maximum number of rule groups per-tenant. 0 to disable.")
+ f.Var(&l.RulerQueryOffset, "ruler.query-offset", "Duration to offset all rule evaluation queries per-tenant.")
+
+ f.Var(&l.CompactorBlocksRetentionPeriod, "compactor.blocks-retention-period", "Delete blocks containing samples older than the specified retention period. 0 to disable.")
+ f.Float64Var(&l.CompactorTenantShardSize, "compactor.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by the compactor. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is < 1 and > 0 the shard size will be a percentage of the total compactors")
+ // Default to 64GB because this is the hard limit of index size in Cortex
+ f.Int64Var(&l.CompactorPartitionIndexSizeBytes, "compactor.partition-index-size-bytes", 68719476736, "Index size limit in bytes for each compaction partition. 0 means no limit")
+ f.Int64Var(&l.CompactorPartitionSeriesCount, "compactor.partition-series-count", 0, "Time series count limit for each compaction partition. 0 means no limit")
+
+ f.Float64Var(&l.ParquetConverterTenantShardSize, "parquet-converter.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used by the parquet converter. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is < 1 and > 0 the shard size will be a percentage of the total parquet converters.")
+ f.BoolVar(&l.ParquetConverterEnabled, "parquet-converter.enabled", false, "If set, enables the Parquet converter to create the parquet files.")
+ f.Var((*flagext.StringSlice)(&l.ParquetConverterSortColumns), "parquet-converter.sort-columns", "Additional label names for specific tenants to sort by after metric name, in order of precedence. These are applied during Parquet file generation.")
+
+ // Parquet Queryable enforced limits.
+ f.IntVar(&l.ParquetMaxFetchedRowCount, "querier.parquet-queryable.max-fetched-row-count", 0, "The maximum number of rows that can be fetched when querying parquet storage. Each row maps to a series in a parquet file. This limit applies before materializing chunks. 0 to disable.")
+ f.IntVar(&l.ParquetMaxFetchedChunkBytes, "querier.parquet-queryable.max-fetched-chunk-bytes", 0, "The maximum number of bytes that can be used to fetch chunk column pages when querying parquet storage. 0 to disable.")
+ f.IntVar(&l.ParquetMaxFetchedDataBytes, "querier.parquet-queryable.max-fetched-data-bytes", 0, "The maximum number of bytes that can be used to fetch all column pages when querying parquet storage. 0 to disable.")
+
+ // Store-gateway.
+ f.Float64Var(&l.StoreGatewayTenantShardSize, "store-gateway.tenant-shard-size", 0, "The default tenant's shard size when the shuffle-sharding strategy is used. Must be set when the store-gateway sharding is enabled with the shuffle-sharding strategy. When this setting is specified in the per-tenant overrides, a value of 0 disables shuffle sharding for the tenant. If the value is < 1 the shard size will be a percentage of the total store-gateways.")
+ f.IntVar(&l.MaxDownloadedBytesPerRequest, "store-gateway.max-downloaded-bytes-per-request", 0, "The maximum number of data bytes to download per gRPC request in Store Gateway, including Series/LabelNames/LabelValues requests. 0 to disable.")
+
+ // Alertmanager.
+ f.Var(&l.AlertmanagerReceiversBlockCIDRNetworks, "alertmanager.receivers-firewall-block-cidr-networks", "Comma-separated list of network CIDRs to block in Alertmanager receiver integrations.")
+ f.BoolVar(&l.AlertmanagerReceiversBlockPrivateAddresses, "alertmanager.receivers-firewall-block-private-addresses", false, "True to block private and local addresses in Alertmanager receiver integrations. It blocks private addresses defined by RFC 1918 (IPv4 addresses) and RFC 4193 (IPv6 addresses), as well as loopback, local unicast and local multicast addresses.")
+
+ f.Float64Var(&l.NotificationRateLimit, "alertmanager.notification-rate-limit", 0, "Per-user rate limit for sending notifications from Alertmanager in notifications/sec. 0 = rate limit disabled. Negative value = no notifications are allowed.")
+
+ if l.NotificationRateLimitPerIntegration == nil {
+ l.NotificationRateLimitPerIntegration = NotificationRateLimitMap{}
+ }
+ f.Var(&l.NotificationRateLimitPerIntegration, "alertmanager.notification-rate-limit-per-integration", "Per-integration notification rate limits. Value is a map, where each key is integration name and value is a rate-limit (float). On command line, this map is given in JSON format. Rate limit has the same meaning as -alertmanager.notification-rate-limit, but only applies for specific integration. Allowed integration names: "+strings.Join(allowedIntegrationNames, ", ")+".")
+ f.IntVar(&l.AlertmanagerMaxConfigSizeBytes, "alertmanager.max-config-size-bytes", 0, "Maximum size of configuration file for Alertmanager that tenant can upload via Alertmanager API. 0 = no limit.")
+ f.IntVar(&l.AlertmanagerMaxTemplatesCount, "alertmanager.max-templates-count", 0, "Maximum number of templates in tenant's Alertmanager configuration uploaded via Alertmanager API. 0 = no limit.")
+ f.IntVar(&l.AlertmanagerMaxTemplateSizeBytes, "alertmanager.max-template-size-bytes", 0, "Maximum size of single template in tenant's Alertmanager configuration uploaded via Alertmanager API. 0 = no limit.")
+ f.IntVar(&l.AlertmanagerMaxDispatcherAggregationGroups, "alertmanager.max-dispatcher-aggregation-groups", 0, "Maximum number of aggregation groups in Alertmanager's dispatcher that a tenant can have. Each active aggregation group uses single goroutine. When the limit is reached, dispatcher will not dispatch alerts that belong to additional aggregation groups, but existing groups will keep working properly. 0 = no limit.")
+ f.IntVar(&l.AlertmanagerMaxAlertsCount, "alertmanager.max-alerts-count", 0, "Maximum number of alerts that a single user can have. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.")
+ f.IntVar(&l.AlertmanagerMaxAlertsSizeBytes, "alertmanager.max-alerts-size-bytes", 0, "Maximum total size of alerts that a single user can have, alert size is the sum of the bytes of its labels, annotations and generatorURL. Inserting more alerts will fail with a log message and metric increment. 0 = no limit.")
+ f.IntVar(&l.AlertmanagerMaxSilencesCount, "alertmanager.max-silences-count", 0, "Maximum number of silences that a single user can have, including expired silences. 0 = no limit.")
+ f.IntVar(&l.AlertmanagerMaxSilencesSizeBytes, "alertmanager.max-silences-size-bytes", 0, "Maximum size of individual silences that a single user can have. 0 = no limit.")
+}
+
+// Validate the limits config and returns an error if the validation
+// doesn't pass
+func (l *Limits) Validate(nameValidationScheme model.ValidationScheme, shardByAllLabels bool, activeSeriesMetricsEnabled bool) error {
+ // The ingester.max-global-series-per-user metric is not supported
+ // if shard-by-all-labels is disabled
+ if l.MaxGlobalSeriesPerUser > 0 && !shardByAllLabels {
+ return errMaxGlobalSeriesPerUserValidation
+ }
+
+ // The ingester.max-global-native-histograms-series-per-user metric is not supported
+ // if shard-by-all-labels is disabled
+ // or if active-series-metrics-enabled is disabled
+ if l.MaxGlobalNativeHistogramSeriesPerUser > 0 && (!shardByAllLabels || !activeSeriesMetricsEnabled) {
+ return errMaxGlobalNativeHistogramSeriesPerUserValidation
+ }
+
+ if l.MaxLocalNativeHistogramSeriesPerUser > 0 && !activeSeriesMetricsEnabled {
+ return errMaxLocalNativeHistogramSeriesPerUserValidation
+ }
+
+ if err := l.RulerExternalLabels.Validate(func(l labels.Label) error {
+ if !nameValidationScheme.IsValidLabelName(l.Name) {
+ return fmt.Errorf("%w: %q", errInvalidLabelName, l.Name)
+ }
+ if !model.LabelValue(l.Value).IsValid() {
+ return fmt.Errorf("%w: %q", errInvalidLabelValue, l.Value)
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
// UnmarshalYAML implements the yaml.Unmarshaler interface.
//
// Limits are first initialized from the registered defaults (see
// SetDefaultLimitsForYAMLUnmarshalling) and then overwritten by whatever
// fields the YAML document actually sets, so per-tenant override files only
// need to specify the values they change.
func (l *Limits) UnmarshalYAML(unmarshal func(any) error) error {
	// We want to set l to the defaults and then overwrite it with the input.
	// To make unmarshal fill the plain data struct rather than calling UnmarshalYAML
	// again, we have to hide it using a type indirection. See prometheus/config.

	// During startup we won't have a default value so we don't want to overwrite them
	if defaultLimits != nil {
		*l = *defaultLimits
		// Make copy of default limits. Otherwise unmarshalling would modify map in default limits.
		l.copyNotificationIntegrationLimits(defaultLimits.NotificationRateLimitPerIntegration)
	}
	// "plain" has the same fields as Limits but none of its methods, so the
	// decoder fills the struct directly instead of recursing into this method.
	type plain Limits
	if err := unmarshal((*plain)(l)); err != nil {
		return err
	}

	// (Re)compile any query-priority / query-rejection regexes set above.
	if err := l.compileQueryAttributeRegex(); err != nil {
		return err
	}

	// Derive IDs/hashes for per-labelset limits and reject duplicates.
	if err := l.calculateMaxSeriesPerLabelSetId(); err != nil {
		return err
	}

	return nil
}
+
// UnmarshalJSON implements the json.Unmarshaler interface.
//
// Mirrors UnmarshalYAML: start from the registered defaults, then overwrite
// with the fields present in the JSON input. Unknown fields are rejected.
func (l *Limits) UnmarshalJSON(data []byte) error {
	// Like the YAML method above, we want to set l to the defaults and then overwrite
	// it with the input. We prevent an infinite loop of calling UnmarshalJSON by hiding
	// behind type indirection.
	if defaultLimits != nil {
		*l = *defaultLimits
		// Make copy of default limits. Otherwise unmarshalling would modify map in default limits.
		l.copyNotificationIntegrationLimits(defaultLimits.NotificationRateLimitPerIntegration)
	}

	type plain Limits
	dec := json.NewDecoder(bytes.NewReader(data))
	// Surface typos / unsupported settings instead of silently dropping them.
	dec.DisallowUnknownFields()

	if err := dec.Decode((*plain)(l)); err != nil {
		return err
	}

	// (Re)compile any query-priority / query-rejection regexes set above.
	if err := l.compileQueryAttributeRegex(); err != nil {
		return err
	}

	// Derive IDs/hashes for per-labelset limits and reject duplicates.
	if err := l.calculateMaxSeriesPerLabelSetId(); err != nil {
		return err
	}

	return nil
}
+
+func (l *Limits) calculateMaxSeriesPerLabelSetId() error {
+ hMap := map[uint64]struct{}{}
+
+ for k, limit := range l.LimitsPerLabelSet {
+ limit.Id = limit.LabelSet.String()
+ limit.Hash = fnv1a.HashBytes64([]byte(limit.Id))
+ l.LimitsPerLabelSet[k] = limit
+ if _, ok := hMap[limit.Hash]; ok {
+ return errDuplicatePerLabelSetLimit
+ }
+ hMap[limit.Hash] = struct{}{}
+ }
+
+ return nil
+}
+
+func (l *Limits) copyNotificationIntegrationLimits(defaults NotificationRateLimitMap) {
+ l.NotificationRateLimitPerIntegration = make(map[string]float64, len(defaults))
+ maps.Copy(l.NotificationRateLimitPerIntegration, defaults)
+}
+
+func (l *Limits) hasQueryAttributeRegexChanged() bool {
+ var newHash uint64
+ h := xxhash.New()
+
+ if l.QueryPriority.Enabled {
+ for _, priority := range l.QueryPriority.Priorities {
+ for _, attribute := range priority.QueryAttributes {
+ addToHash(h, attribute.Regex)
+ addToHash(h, attribute.UserAgentRegex)
+ }
+ }
+ }
+ if l.QueryRejection.Enabled {
+ for _, attribute := range l.QueryRejection.QueryAttributes {
+ addToHash(h, attribute.Regex)
+ addToHash(h, attribute.UserAgentRegex)
+ }
+ }
+
+ newHash = h.Sum64()
+
+ if newHash != l.queryAttributeRegexHash {
+ l.queryAttributeRegexHash = newHash
+ return true
+ }
+ return false
+}
+
+func addToHash(h *xxhash.Digest, regex string) {
+ if regex == "" {
+ return
+ }
+ _, _ = h.WriteString(regex)
+ _, _ = h.Write([]byte{'\xff'})
+}
+
+func (l *Limits) compileQueryAttributeRegex() error {
+ if !l.QueryPriority.Enabled && !l.QueryRejection.Enabled {
+ return nil
+ }
+ regexChanged := l.hasQueryAttributeRegexChanged()
+ newCompiledRegex := map[string]*regexp.Regexp{}
+
+ if l.QueryPriority.Enabled {
+ prioritySet := map[int64]struct{}{}
+
+ for i, priority := range l.QueryPriority.Priorities {
+ // Check for duplicate priority entry
+ if _, exists := prioritySet[priority.Priority]; exists {
+ return errDuplicateQueryPriorities
+ }
+ prioritySet[priority.Priority] = struct{}{}
+
+ err := l.compileQueryAttributeRegexes(l.QueryPriority.Priorities[i].QueryAttributes, regexChanged, newCompiledRegex)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if l.QueryRejection.Enabled {
+ err := l.compileQueryAttributeRegexes(l.QueryRejection.QueryAttributes, regexChanged, newCompiledRegex)
+ if err != nil {
+ return err
+ }
+ }
+
+ if regexChanged {
+ l.queryAttributeCompiledRegex = newCompiledRegex
+ }
+
+ return nil
+}
+
+func (l *Limits) compileQueryAttributeRegexes(queryAttributes []QueryAttribute, regexChanged bool, newCompiledRegex map[string]*regexp.Regexp) error {
+ for j, attribute := range queryAttributes {
+ if regexChanged {
+ compiledRegex, err := regexp.Compile(attribute.Regex)
+ if err != nil {
+ return errors.Join(errCompilingQueryPriorityRegex, err)
+ }
+ newCompiledRegex[attribute.Regex] = compiledRegex
+ queryAttributes[j].CompiledRegex = compiledRegex
+
+ compiledUserAgentRegex, err := regexp.Compile(attribute.UserAgentRegex)
+ if err != nil {
+ return errors.Join(errCompilingQueryPriorityRegex, err)
+ }
+ newCompiledRegex[attribute.UserAgentRegex] = compiledUserAgentRegex
+ queryAttributes[j].CompiledUserAgentRegex = compiledUserAgentRegex
+ } else {
+ queryAttributes[j].CompiledRegex = l.queryAttributeCompiledRegex[attribute.Regex]
+ queryAttributes[j].CompiledUserAgentRegex = l.queryAttributeCompiledRegex[attribute.UserAgentRegex]
+ }
+ }
+ return nil
+}
+
// When we load YAML from disk, we want the various per-customer limits
// to default to any values specified on the command line, not default
// command line values. This global contains those values. I (Tom) cannot
// find a nicer way I'm afraid.
//
// Written via SetDefaultLimitsForYAMLUnmarshalling and read by the
// UnmarshalYAML/UnmarshalJSON methods above.
var defaultLimits *Limits
+
+// SetDefaultLimitsForYAMLUnmarshalling sets global default limits, used when loading
+// Limits from YAML files. This is used to ensure per-tenant limits are defaulted to
+// those values.
+func SetDefaultLimitsForYAMLUnmarshalling(defaults Limits) {
+ defaultLimits = &defaults
+}
+
// TenantLimits exposes per-tenant limit overrides to various resource usage limits
type TenantLimits interface {
	// ByUserID gets limits specific to a particular tenant or nil if there are none
	ByUserID(userID string) *Limits

	// AllByUserID gets a mapping of all tenant IDs to their limit overrides.
	AllByUserID() map[string]*Limits
}
+
// Overrides periodically fetch a set of per-user overrides, and provides convenience
// functions for fetching the correct value.
type Overrides struct {
	defaultLimits *Limits      // fallback limits when a tenant has no override
	tenantLimits  TenantLimits // per-tenant override source — presumably nil-able; confirm in GetOverridesForUser
}
+
+// NewOverrides makes a new Overrides.
+func NewOverrides(defaults Limits, tenantLimits TenantLimits) *Overrides {
+ return &Overrides{
+ tenantLimits: tenantLimits,
+ defaultLimits: &defaults,
+ }
+}
+
// IngestionRate returns the limit on ingester rate (samples per second).
func (o *Overrides) IngestionRate(userID string) float64 {
	return o.GetOverridesForUser(userID).IngestionRate
}

// NativeHistogramIngestionRate returns the limit on native histogram ingestion rate (samples per second).
func (o *Overrides) NativeHistogramIngestionRate(userID string) float64 {
	return o.GetOverridesForUser(userID).NativeHistogramIngestionRate
}

// IngestionRateStrategy returns whether the ingestion rate limit should be individually applied
// to each distributor instance (local) or evenly shared across the cluster (global).
func (o *Overrides) IngestionRateStrategy() string {
	// The ingestion rate strategy can't be overridden on a per-tenant basis
	return o.defaultLimits.IngestionRateStrategy
}

// IngestionBurstSize returns the burst size for ingestion rate.
func (o *Overrides) IngestionBurstSize(userID string) int {
	return o.GetOverridesForUser(userID).IngestionBurstSize
}

// NativeHistogramIngestionBurstSize returns the burst size for native histogram ingestion rate.
func (o *Overrides) NativeHistogramIngestionBurstSize(userID string) int {
	return o.GetOverridesForUser(userID).NativeHistogramIngestionBurstSize
}

// AcceptHASamples returns whether the distributor should track and accept samples from HA replicas for this user.
func (o *Overrides) AcceptHASamples(userID string) bool {
	return o.GetOverridesForUser(userID).AcceptHASamples
}

// AcceptMixedHASamples returns whether the distributor should track and accept samples from mixed HA replicas for this user.
func (o *Overrides) AcceptMixedHASamples(userID string) bool {
	return o.GetOverridesForUser(userID).AcceptMixedHASamples
}

// HAClusterLabel returns the cluster label to look for when deciding whether to accept a sample from a Prometheus HA replica.
func (o *Overrides) HAClusterLabel(userID string) string {
	return o.GetOverridesForUser(userID).HAClusterLabel
}

// HAReplicaLabel returns the replica label to look for when deciding whether to accept a sample from a Prometheus HA replica.
func (o *Overrides) HAReplicaLabel(userID string) string {
	return o.GetOverridesForUser(userID).HAReplicaLabel
}

// DropLabels returns the list of labels to be dropped when ingesting HA samples for the user.
func (o *Overrides) DropLabels(userID string) flagext.StringSlice {
	return o.GetOverridesForUser(userID).DropLabels
}

// MaxLabelNameLength returns maximum length a label name can be.
func (o *Overrides) MaxLabelNameLength(userID string) int {
	return o.GetOverridesForUser(userID).MaxLabelNameLength
}

// MaxLabelValueLength returns maximum length a label value can be. This also is
// the maximum length of a metric name.
func (o *Overrides) MaxLabelValueLength(userID string) int {
	return o.GetOverridesForUser(userID).MaxLabelValueLength
}

// MaxLabelNamesPerSeries returns maximum number of label/value pairs per timeseries.
func (o *Overrides) MaxLabelNamesPerSeries(userID string) int {
	return o.GetOverridesForUser(userID).MaxLabelNamesPerSeries
}

// MaxLabelsSizeBytes returns the maximum total size (in bytes) of the labels of a timeseries.
func (o *Overrides) MaxLabelsSizeBytes(userID string) int {
	return o.GetOverridesForUser(userID).MaxLabelsSizeBytes
}

// MaxMetadataLength returns maximum length metadata can be. Metadata refers
// to the Metric Name, HELP and UNIT.
func (o *Overrides) MaxMetadataLength(userID string) int {
	return o.GetOverridesForUser(userID).MaxMetadataLength
}

// RejectOldSamples returns true when we should reject samples older than certain
// age.
func (o *Overrides) RejectOldSamples(userID string) bool {
	return o.GetOverridesForUser(userID).RejectOldSamples
}

// RejectOldSamplesMaxAge returns the age at which samples should be rejected.
func (o *Overrides) RejectOldSamplesMaxAge(userID string) time.Duration {
	return time.Duration(o.GetOverridesForUser(userID).RejectOldSamplesMaxAge)
}

// CreationGracePeriod is misnamed, and actually returns how far into the future
// we should accept samples.
func (o *Overrides) CreationGracePeriod(userID string) time.Duration {
	return time.Duration(o.GetOverridesForUser(userID).CreationGracePeriod)
}

// MaxLocalSeriesPerUser returns the maximum number of series a user is allowed to store in a single ingester.
func (o *Overrides) MaxLocalSeriesPerUser(userID string) int {
	return o.GetOverridesForUser(userID).MaxLocalSeriesPerUser
}

// MaxLocalNativeHistogramSeriesPerUser returns the maximum number of native histogram series a user is allowed to store in a single ingester.
func (o *Overrides) MaxLocalNativeHistogramSeriesPerUser(userID string) int {
	return o.GetOverridesForUser(userID).MaxLocalNativeHistogramSeriesPerUser
}

// MaxLocalSeriesPerMetric returns the maximum number of series allowed per metric in a single ingester.
func (o *Overrides) MaxLocalSeriesPerMetric(userID string) int {
	return o.GetOverridesForUser(userID).MaxLocalSeriesPerMetric
}

// MaxGlobalSeriesPerUser returns the maximum number of series a user is allowed to store across the cluster.
func (o *Overrides) MaxGlobalSeriesPerUser(userID string) int {
	return o.GetOverridesForUser(userID).MaxGlobalSeriesPerUser
}

// MaxGlobalNativeHistogramSeriesPerUser returns the maximum number of native histogram series a user is allowed to store across the cluster.
func (o *Overrides) MaxGlobalNativeHistogramSeriesPerUser(userID string) int {
	return o.GetOverridesForUser(userID).MaxGlobalNativeHistogramSeriesPerUser
}

// EnableNativeHistograms returns whether the Ingester should accept native histogram samples from this user.
func (o *Overrides) EnableNativeHistograms(userID string) bool {
	return o.GetOverridesForUser(userID).EnableNativeHistograms
}

// OutOfOrderTimeWindow returns the allowed time window for ingestion of out-of-order samples.
func (o *Overrides) OutOfOrderTimeWindow(userID string) model.Duration {
	return o.GetOverridesForUser(userID).OutOfOrderTimeWindow
}

// MaxGlobalSeriesPerMetric returns the maximum number of series allowed per metric across the cluster.
func (o *Overrides) MaxGlobalSeriesPerMetric(userID string) int {
	return o.GetOverridesForUser(userID).MaxGlobalSeriesPerMetric
}

// LimitsPerLabelSet returns the user limits per labelset across the cluster.
func (o *Overrides) LimitsPerLabelSet(userID string) []LimitsPerLabelSet {
	return o.GetOverridesForUser(userID).LimitsPerLabelSet
}
+
// MaxChunksPerQueryFromStore returns the maximum number of chunks allowed per query when fetching
// chunks from the long-term storage.
func (o *Overrides) MaxChunksPerQueryFromStore(userID string) int {
	return o.GetOverridesForUser(userID).MaxChunksPerQuery
}

// MaxChunksPerQuery returns the maximum number of chunks allowed per query.
// Note: reads the same MaxChunksPerQuery limit as MaxChunksPerQueryFromStore.
func (o *Overrides) MaxChunksPerQuery(userID string) int {
	return o.GetOverridesForUser(userID).MaxChunksPerQuery
}

// MaxFetchedSeriesPerQuery returns the maximum number of series allowed per query when fetching
// chunks from ingesters and blocks storage.
func (o *Overrides) MaxFetchedSeriesPerQuery(userID string) int {
	return o.GetOverridesForUser(userID).MaxFetchedSeriesPerQuery
}

// MaxFetchedChunkBytesPerQuery returns the maximum number of bytes for chunks allowed per query when fetching
// chunks from ingesters and blocks storage.
func (o *Overrides) MaxFetchedChunkBytesPerQuery(userID string) int {
	return o.GetOverridesForUser(userID).MaxFetchedChunkBytesPerQuery
}

// MaxFetchedDataBytesPerQuery returns the maximum number of bytes for all data allowed per query when fetching
// from ingesters and blocks storage.
func (o *Overrides) MaxFetchedDataBytesPerQuery(userID string) int {
	return o.GetOverridesForUser(userID).MaxFetchedDataBytesPerQuery
}

// MaxDownloadedBytesPerRequest returns the maximum number of bytes to download for each gRPC request in Store Gateway,
// including any data fetched from cache or object storage.
func (o *Overrides) MaxDownloadedBytesPerRequest(userID string) int {
	return o.GetOverridesForUser(userID).MaxDownloadedBytesPerRequest
}

// MaxQueryLookback returns the max lookback period of queries.
func (o *Overrides) MaxQueryLookback(userID string) time.Duration {
	return time.Duration(o.GetOverridesForUser(userID).MaxQueryLookback)
}

// MaxQueryLength returns the limit of the length (in time) of a query.
func (o *Overrides) MaxQueryLength(userID string) time.Duration {
	return time.Duration(o.GetOverridesForUser(userID).MaxQueryLength)
}

// MaxQueryResponseSize returns the max total response size of a query in bytes.
func (o *Overrides) MaxQueryResponseSize(userID string) int64 {
	return o.GetOverridesForUser(userID).MaxQueryResponseSize
}

// MaxCacheFreshness returns the period after which results are cacheable,
// to prevent caching of very recent results.
func (o *Overrides) MaxCacheFreshness(userID string) time.Duration {
	return time.Duration(o.GetOverridesForUser(userID).MaxCacheFreshness)
}

// MaxQueriersPerUser returns the maximum number of queriers that can handle requests for this user.
// Values below 1 are interpreted as a percentage of available queriers (see the flag registration).
func (o *Overrides) MaxQueriersPerUser(userID string) float64 {
	return o.GetOverridesForUser(userID).MaxQueriersPerTenant
}

// QueryVerticalShardSize returns the number of shards to use when distributing shardable PromQL queries.
func (o *Overrides) QueryVerticalShardSize(userID string) int {
	return o.GetOverridesForUser(userID).QueryVerticalShardSize
}

// QueryPartialData returns whether query may be evaluated with data from a single zone, if other zones are not available.
func (o *Overrides) QueryPartialData(userID string) bool {
	return o.GetOverridesForUser(userID).QueryPartialData
}

// MaxQueryParallelism returns the limit to the number of split queries the
// frontend will process in parallel.
func (o *Overrides) MaxQueryParallelism(userID string) int {
	return o.GetOverridesForUser(userID).MaxQueryParallelism
}

// MaxOutstandingPerTenant returns the limit to the maximum number
// of outstanding requests per tenant per request queue.
func (o *Overrides) MaxOutstandingPerTenant(userID string) int {
	return o.GetOverridesForUser(userID).MaxOutstandingPerTenant
}

// QueryPriority returns the query priority config for the tenant, including different priorities and their attributes
func (o *Overrides) QueryPriority(userID string) QueryPriority {
	return o.GetOverridesForUser(userID).QueryPriority
}

// QueryRejection returns the query reject config for the tenant
func (o *Overrides) QueryRejection(userID string) QueryRejection {
	return o.GetOverridesForUser(userID).QueryRejection
}

// EnforceMetricName returns whether to enforce the presence of a metric name.
func (o *Overrides) EnforceMetricName(userID string) bool {
	return o.GetOverridesForUser(userID).EnforceMetricName
}

// EnforceMetadataMetricName returns whether to enforce the presence of a metric name on metadata.
func (o *Overrides) EnforceMetadataMetricName(userID string) bool {
	return o.GetOverridesForUser(userID).EnforceMetadataMetricName
}

// MaxNativeHistogramBuckets returns the maximum total number of positive and negative buckets of a single native histogram
// a user is allowed to store.
func (o *Overrides) MaxNativeHistogramBuckets(userID string) int {
	return o.GetOverridesForUser(userID).MaxNativeHistogramBuckets
}

// MaxLocalMetricsWithMetadataPerUser returns the maximum number of metrics with metadata a user is allowed to store in a single ingester.
func (o *Overrides) MaxLocalMetricsWithMetadataPerUser(userID string) int {
	return o.GetOverridesForUser(userID).MaxLocalMetricsWithMetadataPerUser
}

// MaxLocalMetadataPerMetric returns the maximum number of metadata allowed per metric in a single ingester.
func (o *Overrides) MaxLocalMetadataPerMetric(userID string) int {
	return o.GetOverridesForUser(userID).MaxLocalMetadataPerMetric
}

// MaxGlobalMetricsWithMetadataPerUser returns the maximum number of metrics with metadata a user is allowed to store across the cluster.
func (o *Overrides) MaxGlobalMetricsWithMetadataPerUser(userID string) int {
	return o.GetOverridesForUser(userID).MaxGlobalMetricsWithMetadataPerUser
}

// MaxGlobalMetadataPerMetric returns the maximum number of metadata allowed per metric across the cluster.
func (o *Overrides) MaxGlobalMetadataPerMetric(userID string) int {
	return o.GetOverridesForUser(userID).MaxGlobalMetadataPerMetric
}

// PromoteResourceAttributes returns the promote resource attributes for a given user.
func (o *Overrides) PromoteResourceAttributes(userID string) []string {
	return o.GetOverridesForUser(userID).PromoteResourceAttributes
}

// IngestionTenantShardSize returns the ingesters shard size for a given user.
func (o *Overrides) IngestionTenantShardSize(userID string) int {
	return o.GetOverridesForUser(userID).IngestionTenantShardSize
}

// CompactorBlocksRetentionPeriod returns the retention period for a given user.
func (o *Overrides) CompactorBlocksRetentionPeriod(userID string) time.Duration {
	return time.Duration(o.GetOverridesForUser(userID).CompactorBlocksRetentionPeriod)
}

// CompactorTenantShardSize returns shard size (number of compactors) used by this tenant when using shuffle-sharding strategy.
func (o *Overrides) CompactorTenantShardSize(userID string) float64 {
	return o.GetOverridesForUser(userID).CompactorTenantShardSize
}

// ParquetConverterTenantShardSize returns shard size (number of converters) used by this tenant when using shuffle-sharding strategy.
func (o *Overrides) ParquetConverterTenantShardSize(userID string) float64 {
	return o.GetOverridesForUser(userID).ParquetConverterTenantShardSize
}
+
// ParquetConverterEnabled returns true if the parquet converter is enabled for the tenant.
func (o *Overrides) ParquetConverterEnabled(userID string) bool {
	return o.GetOverridesForUser(userID).ParquetConverterEnabled
}

// ParquetConverterSortColumns returns the additional sort columns for parquet files.
func (o *Overrides) ParquetConverterSortColumns(userID string) []string {
	return o.GetOverridesForUser(userID).ParquetConverterSortColumns
}

// ParquetMaxFetchedRowCount returns the maximum number of rows that can be fetched when querying parquet storage.
func (o *Overrides) ParquetMaxFetchedRowCount(userID string) int {
	return o.GetOverridesForUser(userID).ParquetMaxFetchedRowCount
}

// ParquetMaxFetchedChunkBytes returns the maximum number of bytes that can be used to fetch chunk column pages when querying parquet storage.
func (o *Overrides) ParquetMaxFetchedChunkBytes(userID string) int {
	return o.GetOverridesForUser(userID).ParquetMaxFetchedChunkBytes
}

// ParquetMaxFetchedDataBytes returns the maximum number of bytes that can be used to fetch all column pages when querying parquet storage.
func (o *Overrides) ParquetMaxFetchedDataBytes(userID string) int {
	return o.GetOverridesForUser(userID).ParquetMaxFetchedDataBytes
}

// CompactorPartitionIndexSizeBytes returns the index size limit in bytes for each
// compaction partition for a given user. 0 means no limit.
func (o *Overrides) CompactorPartitionIndexSizeBytes(userID string) int64 {
	return o.GetOverridesForUser(userID).CompactorPartitionIndexSizeBytes
}

// CompactorPartitionSeriesCount returns the time series count limit for each
// compaction partition for a given user. 0 means no limit.
func (o *Overrides) CompactorPartitionSeriesCount(userID string) int64 {
	return o.GetOverridesForUser(userID).CompactorPartitionSeriesCount
}

// MetricRelabelConfigs returns the metric relabel configs for a given user.
func (o *Overrides) MetricRelabelConfigs(userID string) []*relabel.Config {
	return o.GetOverridesForUser(userID).MetricRelabelConfigs
}

// RulerTenantShardSize returns shard size (number of rulers) used by this tenant when using shuffle-sharding strategy.
func (o *Overrides) RulerTenantShardSize(userID string) float64 {
	return o.GetOverridesForUser(userID).RulerTenantShardSize
}

// RulerMaxRulesPerRuleGroup returns the maximum number of rules per rule group for a given user.
func (o *Overrides) RulerMaxRulesPerRuleGroup(userID string) int {
	return o.GetOverridesForUser(userID).RulerMaxRulesPerRuleGroup
}

// RulerMaxRuleGroupsPerTenant returns the maximum number of rule groups for a given user.
func (o *Overrides) RulerMaxRuleGroupsPerTenant(userID string) int {
	return o.GetOverridesForUser(userID).RulerMaxRuleGroupsPerTenant
}
+
+// RulerQueryOffset returns the rule query offset for a given user.
+func (o *Overrides) RulerQueryOffset(userID string) time.Duration {
+ ruleOffset := time.Duration(o.GetOverridesForUser(userID).RulerQueryOffset)
+ evaluationDelay := time.Duration(o.GetOverridesForUser(userID).RulerEvaluationDelay)
+ if evaluationDelay > ruleOffset {
+ level.Warn(util_log.Logger).Log("msg", "ruler.query-offset was overridden by highest value in [Deprecated]ruler.evaluation-delay-duration", "ruler.query-offset", ruleOffset, "ruler.evaluation-delay-duration", evaluationDelay)
+ return evaluationDelay
+ }
+ return ruleOffset
+}
+
+// RulesPartialData returns whether a rule may be evaluated with data from a single zone when other zones are not available.
+func (o *Overrides) RulesPartialData(userID string) bool {
+	return o.GetOverridesForUser(userID).RulesPartialData
+}
+
+// StoreGatewayTenantShardSize returns the store-gateway shard size for a given user.
+func (o *Overrides) StoreGatewayTenantShardSize(userID string) float64 {
+ return o.GetOverridesForUser(userID).StoreGatewayTenantShardSize
+}
+
+// MaxHAReplicaGroups returns maximum number of clusters that HA tracker will track for a user.
+func (o *Overrides) MaxHAReplicaGroups(user string) int {
+ return o.GetOverridesForUser(user).HAMaxClusters
+}
+
+// S3SSEType returns the per-tenant S3 SSE type.
+func (o *Overrides) S3SSEType(user string) string {
+ return o.GetOverridesForUser(user).S3SSEType
+}
+
+// S3SSEKMSKeyID returns the per-tenant S3 KMS-SSE key id.
+func (o *Overrides) S3SSEKMSKeyID(user string) string {
+ return o.GetOverridesForUser(user).S3SSEKMSKeyID
+}
+
+// S3SSEKMSEncryptionContext returns the per-tenant S3 KMS-SSE encryption context.
+func (o *Overrides) S3SSEKMSEncryptionContext(user string) string {
+ return o.GetOverridesForUser(user).S3SSEKMSEncryptionContext
+}
+
+// AlertmanagerReceiversBlockCIDRNetworks returns the list of network CIDRs that should be blocked
+// in the Alertmanager receivers for the given user.
+func (o *Overrides) AlertmanagerReceiversBlockCIDRNetworks(user string) []flagext.CIDR {
+ return o.GetOverridesForUser(user).AlertmanagerReceiversBlockCIDRNetworks
+}
+
+// AlertmanagerReceiversBlockPrivateAddresses returns true if private addresses should be blocked
+// in the Alertmanager receivers for the given user.
+func (o *Overrides) AlertmanagerReceiversBlockPrivateAddresses(user string) bool {
+ return o.GetOverridesForUser(user).AlertmanagerReceiversBlockPrivateAddresses
+}
+
+// MaxExemplars gets the maximum number of exemplars that will be stored per user. 0 or less means disabled.
+func (o *Overrides) MaxExemplars(userID string) int {
+ return o.GetOverridesForUser(userID).MaxExemplars
+}
+
+// Notification limits are special. Limits are returned in following order:
+// 1. per-tenant limits for given integration
+// 2. default limits for given integration
+// 3. per-tenant limits
+// 4. default limits
+func (o *Overrides) getNotificationLimitForUser(user, integration string) float64 {
+ u := o.GetOverridesForUser(user)
+ if n, ok := u.NotificationRateLimitPerIntegration[integration]; ok {
+ return n
+ }
+
+ return u.NotificationRateLimit
+}
+
+func (o *Overrides) NotificationRateLimit(user string, integration string) rate.Limit {
+ l := o.getNotificationLimitForUser(user, integration)
+ if l == 0 || math.IsInf(l, 1) {
+ return rate.Inf // No rate limit.
+ }
+
+ if l < 0 {
+ l = 0 // No notifications will be sent.
+ }
+ return rate.Limit(l)
+}
+
+const maxInt = int(^uint(0) >> 1)
+
+func (o *Overrides) NotificationBurstSize(user string, integration string) int {
+ // Burst size is computed from rate limit. Rate limit is already normalized to [0, +inf), where 0 means disabled.
+ l := o.NotificationRateLimit(user, integration)
+ if l == 0 {
+ return 0
+ }
+
+ // floats can be larger than max int. This also handles case where l == rate.Inf.
+ if float64(l) >= float64(maxInt) {
+ return maxInt
+ }
+
+ // For values between (0, 1), allow single notification per second (every 1/limit seconds).
+ if l < 1 {
+ return 1
+ }
+
+ return int(l)
+}
+
+func (o *Overrides) AlertmanagerMaxConfigSize(userID string) int {
+ return o.GetOverridesForUser(userID).AlertmanagerMaxConfigSizeBytes
+}
+
+func (o *Overrides) AlertmanagerMaxTemplatesCount(userID string) int {
+ return o.GetOverridesForUser(userID).AlertmanagerMaxTemplatesCount
+}
+
+func (o *Overrides) AlertmanagerMaxTemplateSize(userID string) int {
+ return o.GetOverridesForUser(userID).AlertmanagerMaxTemplateSizeBytes
+}
+
+func (o *Overrides) AlertmanagerMaxDispatcherAggregationGroups(userID string) int {
+ return o.GetOverridesForUser(userID).AlertmanagerMaxDispatcherAggregationGroups
+}
+
+func (o *Overrides) AlertmanagerMaxAlertsCount(userID string) int {
+ return o.GetOverridesForUser(userID).AlertmanagerMaxAlertsCount
+}
+
+func (o *Overrides) AlertmanagerMaxAlertsSizeBytes(userID string) int {
+ return o.GetOverridesForUser(userID).AlertmanagerMaxAlertsSizeBytes
+}
+
+func (o *Overrides) AlertmanagerMaxSilencesCount(userID string) int {
+ return o.GetOverridesForUser(userID).AlertmanagerMaxSilencesCount
+}
+
+func (o *Overrides) AlertmanagerMaxSilenceSizeBytes(userID string) int {
+ return o.GetOverridesForUser(userID).AlertmanagerMaxSilencesSizeBytes
+}
+
+func (o *Overrides) DisabledRuleGroups(userID string) DisabledRuleGroups {
+ if o.tenantLimits != nil {
+ l := o.tenantLimits.ByUserID(userID)
+ if l != nil {
+ disabledRuleGroupsForUser := make(DisabledRuleGroups, len(l.DisabledRuleGroups))
+
+ for i, disabledRuleGroup := range l.DisabledRuleGroups {
+ disabledRuleGroupForUser := DisabledRuleGroup{
+ Namespace: disabledRuleGroup.Namespace,
+ Name: disabledRuleGroup.Name,
+ User: userID,
+ }
+ disabledRuleGroupsForUser[i] = disabledRuleGroupForUser
+ }
+ return disabledRuleGroupsForUser
+ }
+ }
+ return DisabledRuleGroups{}
+}
+
+func (o *Overrides) RulerExternalLabels(userID string) labels.Labels {
+ return o.GetOverridesForUser(userID).RulerExternalLabels
+}
+
+// GetOverridesForUser returns the per-tenant limits with overrides.
+func (o *Overrides) GetOverridesForUser(userID string) *Limits {
+ if o.tenantLimits != nil {
+ l := o.tenantLimits.ByUserID(userID)
+ if l != nil {
+ return l
+ }
+ }
+ return o.defaultLimits
+}
+
+// SmallestPositiveIntPerTenant is returning the minimal positive value of the
+// supplied limit function for all given tenants.
+func SmallestPositiveIntPerTenant(tenantIDs []string, f func(string) int) int {
+ var result *int
+ for _, tenantID := range tenantIDs {
+ v := f(tenantID)
+ if result == nil || v < *result {
+ result = &v
+ }
+ }
+ if result == nil {
+ return 0
+ }
+ return *result
+}
+
+// SmallestPositiveNonZeroFloat64PerTenant is returning the minimal positive and
+// non-zero value of the supplied limit function for all given tenants. In many
+// limits a value of 0 means unlimited so the method will return 0 only if all
+// inputs have a limit of 0 or an empty tenant list is given.
+func SmallestPositiveNonZeroFloat64PerTenant(tenantIDs []string, f func(string) float64) float64 {
+ var result *float64
+ for _, tenantID := range tenantIDs {
+ v := f(tenantID)
+ if v > 0 && (result == nil || v < *result) {
+ result = &v
+ }
+ }
+ if result == nil {
+ return 0
+ }
+ return *result
+}
+
+// SmallestPositiveNonZeroDurationPerTenant is returning the minimal positive
+// and non-zero value of the supplied limit function for all given tenants. In
+// many limits a value of 0 means unlimited so the method will return 0 only if
+// all inputs have a limit of 0 or an empty tenant list is given.
+func SmallestPositiveNonZeroDurationPerTenant(tenantIDs []string, f func(string) time.Duration) time.Duration {
+ var result *time.Duration
+ for _, tenantID := range tenantIDs {
+ v := f(tenantID)
+ if v > 0 && (result == nil || v < *result) {
+ result = &v
+ }
+ }
+ if result == nil {
+ return 0
+ }
+ return *result
+}
+
+// MaxDurationPerTenant is returning the maximum duration per tenant. Without
+// tenants given it will return a time.Duration(0).
+func MaxDurationPerTenant(tenantIDs []string, f func(string) time.Duration) time.Duration {
+ result := time.Duration(0)
+ for _, tenantID := range tenantIDs {
+ v := f(tenantID)
+ if v > result {
+ result = v
+ }
+ }
+ return result
+}
+
+// LimitsPerLabelSetsForSeries checks matching labelset limits for the given series.
+func LimitsPerLabelSetsForSeries(limitsPerLabelSets []LimitsPerLabelSet, metric labels.Labels) []LimitsPerLabelSet {
+ // returning early to not have any overhead
+ if len(limitsPerLabelSets) == 0 {
+ return nil
+ }
+ r := make([]LimitsPerLabelSet, 0, len(limitsPerLabelSets))
+ defaultPartitionIndex := -1
+outer:
+ for i, lbls := range limitsPerLabelSets {
+ // Default partition exists.
+ if lbls.LabelSet.Len() == 0 {
+ defaultPartitionIndex = i
+ continue
+ }
+ found := true
+ lbls.LabelSet.Range(func(l labels.Label) {
+ // We did not find some of the labels on the set
+ if v := metric.Get(l.Name); v != l.Value {
+ found = false
+ }
+ })
+
+ if !found {
+ continue outer
+ }
+ r = append(r, lbls)
+ }
+ // Use default partition limiter if it is configured and no other matching partitions.
+ if defaultPartitionIndex != -1 && len(r) == 0 {
+ r = append(r, limitsPerLabelSets[defaultPartitionIndex])
+ }
+ return r
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/notifications_limit_flag.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/notifications_limit_flag.go
new file mode 100644
index 000000000..d06c7e6ff
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/notifications_limit_flag.go
@@ -0,0 +1,56 @@
+package validation
+
+import (
+ "encoding/json"
+ "fmt"
+ "slices"
+
+ "github.com/pkg/errors"
+)
+
+var allowedIntegrationNames = []string{
+ "webhook", "email", "pagerduty", "opsgenie", "wechat", "slack", "victorops", "pushover", "sns", "telegram", "discord", "webex",
+ "msteams", "msteamsv2", "jira", "rocketchat",
+}
+
+type NotificationRateLimitMap map[string]float64
+
+// String implements flag.Value
+func (m NotificationRateLimitMap) String() string {
+ out, err := json.Marshal(map[string]float64(m))
+ if err != nil {
+ return fmt.Sprintf("failed to marshal: %v", err)
+ }
+ return string(out)
+}
+
+// Set implements flag.Value
+func (m NotificationRateLimitMap) Set(s string) error {
+ newMap := map[string]float64{}
+ return m.updateMap(json.Unmarshal([]byte(s), &newMap), newMap)
+}
+
+// UnmarshalYAML implements yaml.Unmarshaler.
+func (m NotificationRateLimitMap) UnmarshalYAML(unmarshal func(any) error) error {
+ newMap := map[string]float64{}
+ return m.updateMap(unmarshal(newMap), newMap)
+}
+
+func (m NotificationRateLimitMap) updateMap(unmarshalErr error, newMap map[string]float64) error {
+ if unmarshalErr != nil {
+ return unmarshalErr
+ }
+
+ for k, v := range newMap {
+ if !slices.Contains(allowedIntegrationNames, k) {
+ return errors.Errorf("unknown integration name: %s", k)
+ }
+ m[k] = v
+ }
+ return nil
+}
+
+// MarshalYAML implements yaml.Marshaler.
+func (m NotificationRateLimitMap) MarshalYAML() (any, error) {
+ return map[string]float64(m), nil
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
new file mode 100644
index 000000000..42f7e75c3
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go
@@ -0,0 +1,465 @@
+package validation
+
+import (
+ "errors"
+ "net/http"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/go-kit/log"
+ "github.com/go-kit/log/level"
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/histogram"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/weaveworks/common/httpgrpc"
+
+ "github.com/cortexproject/cortex/pkg/cortexpb"
+ "github.com/cortexproject/cortex/pkg/util"
+ "github.com/cortexproject/cortex/pkg/util/discardedseries"
+ "github.com/cortexproject/cortex/pkg/util/extract"
+ "github.com/cortexproject/cortex/pkg/util/labelset"
+)
+
+const (
+ discardReasonLabel = "reason"
+
+ errMetadataMissingMetricName = "metadata missing metric name"
+ errMetadataTooLong = "metadata '%s' value too long: %.200q metric %.200q"
+
+ typeMetricName = "METRIC_NAME"
+ typeHelp = "HELP"
+ typeUnit = "UNIT"
+
+ metricNameTooLong = "metric_name_too_long"
+ helpTooLong = "help_too_long"
+ unitTooLong = "unit_too_long"
+
+ // ErrQueryTooLong is used in chunk store, querier and query frontend.
+ ErrQueryTooLong = "the query time range exceeds the limit (query length: %s, limit: %s)"
+
+ missingMetricName = "missing_metric_name"
+ invalidMetricName = "metric_name_invalid"
+ greaterThanMaxSampleAge = "greater_than_max_sample_age"
+ maxLabelNamesPerSeries = "max_label_names_per_series"
+ tooFarInFuture = "too_far_in_future"
+ invalidLabel = "label_invalid"
+ labelNameTooLong = "label_name_too_long"
+ duplicateLabelNames = "duplicate_label_names"
+ labelsNotSorted = "labels_not_sorted"
+ labelValueTooLong = "label_value_too_long"
+ labelsSizeBytesExceeded = "labels_size_bytes_exceeded"
+
+ // Exemplar-specific validation reasons
+ exemplarLabelsMissing = "exemplar_labels_missing"
+ exemplarLabelsTooLong = "exemplar_labels_too_long"
+ exemplarTimestampInvalid = "exemplar_timestamp_invalid"
+
+ // Native Histogram specific validation reasons
+ nativeHistogramBucketCountLimitExceeded = "native_histogram_buckets_exceeded"
+ nativeHistogramInvalidSchema = "native_histogram_invalid_schema"
+ nativeHistogramSampleSizeBytesExceeded = "native_histogram_sample_size_bytes_exceeded"
+
+ // RateLimited is one of the values for the reason to discard samples.
+ // Declared here to avoid duplication in ingester and distributor.
+ RateLimited = "rate_limited"
+ NativeHistogramRateLimited = "native_histogram_rate_limited"
+
+ // Too many HA clusters is one of the reasons for discarding samples.
+ TooManyHAClusters = "too_many_ha_clusters"
+
+ // DroppedByRelabelConfiguration Samples can also be discarded because of relabeling configuration
+ DroppedByRelabelConfiguration = "relabel_configuration"
+ // DroppedByUserConfigurationOverride Samples discarded due to user configuration removing label __name__
+ DroppedByUserConfigurationOverride = "user_label_removal_configuration"
+
+ // The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters
+ // https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
+ ExemplarMaxLabelSetLength = 128
+)
+
+type ValidateMetrics struct {
+ DiscardedSamples *prometheus.CounterVec
+ DiscardedExemplars *prometheus.CounterVec
+ DiscardedMetadata *prometheus.CounterVec
+ HistogramSamplesReducedResolution *prometheus.CounterVec
+ LabelSizeBytes *prometheus.HistogramVec
+
+ DiscardedSamplesPerLabelSet *prometheus.CounterVec
+ LabelSetTracker *labelset.LabelSetTracker
+
+ DiscardedSeries *prometheus.GaugeVec
+ DiscardedSeriesPerLabelset *prometheus.GaugeVec
+ DiscardedSeriesTracker *discardedseries.DiscardedSeriesTracker
+ DiscardedSeriesPerLabelsetTracker *discardedseries.DiscardedSeriesPerLabelsetTracker
+}
+
+func registerCollector(r prometheus.Registerer, c prometheus.Collector) {
+ err := r.Register(c)
+ if err != nil && !errors.As(err, &prometheus.AlreadyRegisteredError{}) {
+ panic(err)
+ }
+}
+
+func NewValidateMetrics(r prometheus.Registerer) *ValidateMetrics {
+ discardedSamples := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "cortex_discarded_samples_total",
+ Help: "The total number of samples that were discarded.",
+ },
+ []string{discardReasonLabel, "user"},
+ )
+ registerCollector(r, discardedSamples)
+ discardedSamplesPerLabelSet := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "cortex_discarded_samples_per_labelset_total",
+ Help: "The total number of samples that were discarded for each labelset.",
+ },
+ []string{discardReasonLabel, "user", "labelset"},
+ )
+ registerCollector(r, discardedSamplesPerLabelSet)
+ discardedExemplars := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "cortex_discarded_exemplars_total",
+ Help: "The total number of exemplars that were discarded.",
+ },
+ []string{discardReasonLabel, "user"},
+ )
+ registerCollector(r, discardedExemplars)
+ discardedMetadata := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "cortex_discarded_metadata_total",
+ Help: "The total number of metadata that were discarded.",
+ },
+ []string{discardReasonLabel, "user"},
+ )
+ registerCollector(r, discardedMetadata)
+ histogramSamplesReducedResolution := prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Name: "cortex_reduced_resolution_histogram_samples_total",
+ Help: "The total number of histogram samples that had the resolution reduced.",
+ },
+ []string{"user"},
+ )
+ registerCollector(r, histogramSamplesReducedResolution)
+ labelSizeBytes := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+ Name: "cortex_label_size_bytes",
+ Help: "The combined size in bytes of all labels and label values for a time series.",
+ NativeHistogramBucketFactor: 1.1,
+ NativeHistogramMaxBucketNumber: 100,
+ NativeHistogramMinResetDuration: 1 * time.Hour,
+ }, []string{"user"})
+ registerCollector(r, labelSizeBytes)
+ discardedSeries := prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Name: "cortex_discarded_series",
+ Help: "The number of series that include discarded samples.",
+ },
+ []string{discardReasonLabel, "user"},
+ )
+ registerCollector(r, discardedSeries)
+ discardedSeriesPerLabelset := prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Name: "cortex_discarded_series_per_labelset",
+ Help: "The number of series that include discarded samples for each labelset.",
+ },
+ []string{discardReasonLabel, "user", "labelset"},
+ )
+ registerCollector(r, discardedSeriesPerLabelset)
+
+ m := &ValidateMetrics{
+ DiscardedSamples: discardedSamples,
+ DiscardedSamplesPerLabelSet: discardedSamplesPerLabelSet,
+ DiscardedExemplars: discardedExemplars,
+ DiscardedMetadata: discardedMetadata,
+ HistogramSamplesReducedResolution: histogramSamplesReducedResolution,
+ LabelSizeBytes: labelSizeBytes,
+ LabelSetTracker: labelset.NewLabelSetTracker(),
+ DiscardedSeries: discardedSeries,
+ DiscardedSeriesPerLabelset: discardedSeriesPerLabelset,
+ DiscardedSeriesTracker: discardedseries.NewDiscardedSeriesTracker(discardedSeries),
+ DiscardedSeriesPerLabelsetTracker: discardedseries.NewDiscardedSeriesPerLabelsetTracker(discardedSeriesPerLabelset),
+ }
+ m.DiscardedSeriesTracker.StartVendDiscardedSeriesMetricGoroutine()
+ m.DiscardedSeriesPerLabelsetTracker.StartVendDiscardedSeriesMetricGoroutine()
+
+ return m
+}
+
+// UpdateSamplesDiscardedForSeries updates discarded samples and discarded samples per labelset for the provided reason and series.
+// Used in test only for now.
+func (m *ValidateMetrics) updateSamplesDiscardedForSeries(userID, reason string, labelSetLimits []LimitsPerLabelSet, lbls labels.Labels, count int) {
+ matchedLimits := LimitsPerLabelSetsForSeries(labelSetLimits, lbls)
+ m.updateSamplesDiscarded(userID, reason, matchedLimits, count)
+}
+
+// updateSamplesDiscarded updates discarded samples and discarded samples per labelset for the provided reason.
+// The provided label set needs to be pre-filtered to match the series if applicable.
+// Used in test only for now.
+func (m *ValidateMetrics) updateSamplesDiscarded(userID, reason string, labelSetLimits []LimitsPerLabelSet, count int) {
+ m.DiscardedSamples.WithLabelValues(reason, userID).Add(float64(count))
+ for _, limit := range labelSetLimits {
+ m.LabelSetTracker.Track(userID, limit.Hash, limit.LabelSet)
+ m.DiscardedSamplesPerLabelSet.WithLabelValues(reason, userID, limit.LabelSet.String()).Add(float64(count))
+ }
+}
+
+func (m *ValidateMetrics) UpdateLabelSet(userSet map[string]map[uint64]struct{}, logger log.Logger) {
+ m.LabelSetTracker.UpdateMetrics(userSet, func(user, labelSetStr string, removeUser bool) {
+ if removeUser {
+ // No need to clean up discarded samples per user here as it will be cleaned up elsewhere.
+ if err := util.DeleteMatchingLabels(m.DiscardedSamplesPerLabelSet, map[string]string{"user": user}); err != nil {
+ level.Warn(logger).Log("msg", "failed to remove cortex_discarded_samples_per_labelset_total metric for user", "user", user, "err", err)
+ }
+ return
+ }
+ if err := util.DeleteMatchingLabels(m.DiscardedSamplesPerLabelSet, map[string]string{"user": user, "labelset": labelSetStr}); err != nil {
+ level.Warn(logger).Log("msg", "failed to remove cortex_discarded_samples_per_labelset_total metric", "user", user, "labelset", labelSetStr, "err", err)
+ }
+ })
+}
+
+// ValidateSampleTimestamp returns an err if the sample timestamp is invalid.
+// The returned error may retain the provided series labels.
+func ValidateSampleTimestamp(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, timestampMs int64) ValidationError {
+ unsafeMetricName, _ := extract.UnsafeMetricNameFromLabelAdapters(ls)
+
+ if limits.RejectOldSamples && model.Time(timestampMs) < model.Now().Add(-time.Duration(limits.RejectOldSamplesMaxAge)) {
+ validateMetrics.DiscardedSamples.WithLabelValues(greaterThanMaxSampleAge, userID).Inc()
+ return newSampleTimestampTooOldError(unsafeMetricName, timestampMs)
+ }
+
+ if model.Time(timestampMs) > model.Now().Add(time.Duration(limits.CreationGracePeriod)) {
+ validateMetrics.DiscardedSamples.WithLabelValues(tooFarInFuture, userID).Inc()
+ return newSampleTimestampTooNewError(unsafeMetricName, timestampMs)
+ }
+
+ return nil
+}
+
+// ValidateExemplar returns an error if the exemplar is invalid.
+// The returned error may retain the provided series labels.
+func ValidateExemplar(validateMetrics *ValidateMetrics, userID string, ls []cortexpb.LabelAdapter, e cortexpb.Exemplar) ValidationError {
+ if len(e.Labels) <= 0 {
+ validateMetrics.DiscardedExemplars.WithLabelValues(exemplarLabelsMissing, userID).Inc()
+ return newExemplarEmtpyLabelsError(ls, []cortexpb.LabelAdapter{}, e.TimestampMs)
+ }
+
+ if e.TimestampMs == 0 {
+ validateMetrics.DiscardedExemplars.WithLabelValues(exemplarTimestampInvalid, userID).Inc()
+ return newExemplarMissingTimestampError(
+ ls,
+ e.Labels,
+ e.TimestampMs,
+ )
+ }
+
+ // Exemplar label length does not include chars involved in text
+ // rendering such as quotes, commas, etc. See spec and const definition.
+ labelSetLen := 0
+ for _, l := range e.Labels {
+ labelSetLen += utf8.RuneCountInString(l.Name)
+ labelSetLen += utf8.RuneCountInString(l.Value)
+ }
+
+ if labelSetLen > ExemplarMaxLabelSetLength {
+ validateMetrics.DiscardedExemplars.WithLabelValues(exemplarLabelsTooLong, userID).Inc()
+ return newExemplarLabelLengthError(
+ ls,
+ e.Labels,
+ e.TimestampMs,
+ )
+ }
+
+ return nil
+}
+
+// ValidateLabels returns an err if the labels are invalid.
+// The returned error may retain the provided series labels.
+func ValidateLabels(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, skipLabelNameValidation bool, nameValidationScheme model.ValidationScheme) ValidationError {
+ if limits.EnforceMetricName {
+ unsafeMetricName, err := extract.UnsafeMetricNameFromLabelAdapters(ls)
+ if err != nil {
+ validateMetrics.DiscardedSamples.WithLabelValues(missingMetricName, userID).Inc()
+ return newNoMetricNameError()
+ }
+
+ if !nameValidationScheme.IsValidMetricName(unsafeMetricName) {
+ validateMetrics.DiscardedSamples.WithLabelValues(invalidMetricName, userID).Inc()
+ return newInvalidMetricNameError(unsafeMetricName)
+ }
+ }
+
+ numLabelNames := len(ls)
+ if numLabelNames > limits.MaxLabelNamesPerSeries {
+ validateMetrics.DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()
+ return newTooManyLabelsError(ls, limits.MaxLabelNamesPerSeries)
+ }
+
+ maxLabelNameLength := limits.MaxLabelNameLength
+ maxLabelValueLength := limits.MaxLabelValueLength
+ lastLabelName := ""
+ maxLabelsSizeBytes := limits.MaxLabelsSizeBytes
+ labelsSizeBytes := 0
+
+ for _, l := range ls {
+ if !skipLabelNameValidation && !nameValidationScheme.IsValidLabelName(l.Name) {
+ validateMetrics.DiscardedSamples.WithLabelValues(invalidLabel, userID).Inc()
+ return newInvalidLabelError(ls, l.Name)
+ } else if len(l.Name) > maxLabelNameLength {
+ validateMetrics.DiscardedSamples.WithLabelValues(labelNameTooLong, userID).Inc()
+ return newLabelNameTooLongError(ls, l.Name, maxLabelNameLength)
+ } else if len(l.Value) > maxLabelValueLength {
+ validateMetrics.DiscardedSamples.WithLabelValues(labelValueTooLong, userID).Inc()
+ return newLabelValueTooLongError(ls, l.Name, l.Value, maxLabelValueLength)
+ } else if cmp := strings.Compare(lastLabelName, l.Name); cmp >= 0 {
+ if cmp == 0 {
+ validateMetrics.DiscardedSamples.WithLabelValues(duplicateLabelNames, userID).Inc()
+ return newDuplicatedLabelError(ls, l.Name)
+ }
+
+ validateMetrics.DiscardedSamples.WithLabelValues(labelsNotSorted, userID).Inc()
+ return newLabelsNotSortedError(ls, l.Name)
+ }
+
+ lastLabelName = l.Name
+ labelsSizeBytes += l.Size()
+ }
+ validateMetrics.LabelSizeBytes.WithLabelValues(userID).Observe(float64(labelsSizeBytes))
+ if maxLabelsSizeBytes > 0 && labelsSizeBytes > maxLabelsSizeBytes {
+ validateMetrics.DiscardedSamples.WithLabelValues(labelsSizeBytesExceeded, userID).Inc()
+ return labelSizeBytesExceededError(ls, labelsSizeBytes, maxLabelsSizeBytes)
+ }
+ return nil
+}
+
+// ValidateMetadata returns an err if a metric metadata is invalid.
+func ValidateMetadata(validateMetrics *ValidateMetrics, cfg *Limits, userID string, metadata *cortexpb.MetricMetadata) error {
+ if cfg.EnforceMetadataMetricName && metadata.GetMetricFamilyName() == "" {
+ validateMetrics.DiscardedMetadata.WithLabelValues(missingMetricName, userID).Inc()
+ return httpgrpc.Errorf(http.StatusBadRequest, errMetadataMissingMetricName)
+ }
+
+ maxMetadataValueLength := cfg.MaxMetadataLength
+ var reason string
+ var cause string
+ var metadataType string
+ if len(metadata.GetMetricFamilyName()) > maxMetadataValueLength {
+ metadataType = typeMetricName
+ reason = metricNameTooLong
+ cause = metadata.GetMetricFamilyName()
+ } else if len(metadata.Help) > maxMetadataValueLength {
+ metadataType = typeHelp
+ reason = helpTooLong
+ cause = metadata.Help
+ } else if len(metadata.Unit) > maxMetadataValueLength {
+ metadataType = typeUnit
+ reason = unitTooLong
+ cause = metadata.Unit
+ }
+
+ if reason != "" {
+ validateMetrics.DiscardedMetadata.WithLabelValues(reason, userID).Inc()
+ return httpgrpc.Errorf(http.StatusBadRequest, errMetadataTooLong, metadataType, cause, metadata.GetMetricFamilyName())
+ }
+
+ return nil
+}
+
+func ValidateNativeHistogram(validateMetrics *ValidateMetrics, limits *Limits, userID string, ls []cortexpb.LabelAdapter, histogramSample cortexpb.Histogram) (cortexpb.Histogram, error) {
+
+ // sample size validation for native histogram
+ if limits.MaxNativeHistogramSampleSizeBytes > 0 && histogramSample.Size() > limits.MaxNativeHistogramSampleSizeBytes {
+ validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramSampleSizeBytesExceeded, userID).Inc()
+ return cortexpb.Histogram{}, newNativeHistogramSampleSizeBytesExceededError(ls, histogramSample.Size(), limits.MaxNativeHistogramSampleSizeBytes)
+ }
+
+ // schema validation for native histogram
+ if histogramSample.Schema < histogram.ExponentialSchemaMin || histogramSample.Schema > histogram.ExponentialSchemaMax {
+ validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramInvalidSchema, userID).Inc()
+ return cortexpb.Histogram{}, newNativeHistogramSchemaInvalidError(ls, int(histogramSample.Schema))
+ }
+
+ if limits.MaxNativeHistogramBuckets == 0 {
+ return histogramSample, nil
+ }
+
+ var (
+ exceedLimit bool
+ )
+ if histogramSample.IsFloatHistogram() {
+ // Initial check to see if the bucket limit is exceeded or not. If not, we can avoid type casting.
+ exceedLimit = len(histogramSample.PositiveCounts)+len(histogramSample.NegativeCounts) > limits.MaxNativeHistogramBuckets
+ if !exceedLimit {
+ return histogramSample, nil
+ }
+ // Exceed limit.
+ if histogramSample.Schema <= histogram.ExponentialSchemaMin {
+ validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
+ return cortexpb.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
+ }
+ fh := cortexpb.FloatHistogramProtoToFloatHistogram(histogramSample)
+ oBuckets := len(fh.PositiveBuckets) + len(fh.NegativeBuckets)
+ for len(fh.PositiveBuckets)+len(fh.NegativeBuckets) > limits.MaxNativeHistogramBuckets {
+ if fh.Schema <= histogram.ExponentialSchemaMin {
+ validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
+ return cortexpb.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
+ }
+ fh = fh.ReduceResolution(fh.Schema - 1)
+ }
+ if oBuckets != len(fh.PositiveBuckets)+len(fh.NegativeBuckets) {
+ validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc()
+ }
+ // If resolution reduced, convert new float histogram to protobuf type again.
+ return cortexpb.FloatHistogramToHistogramProto(histogramSample.TimestampMs, fh), nil
+ }
+
+ // Initial check to see if bucket limit is exceeded or not. If not, we can avoid type casting.
+ exceedLimit = len(histogramSample.PositiveDeltas)+len(histogramSample.NegativeDeltas) > limits.MaxNativeHistogramBuckets
+ if !exceedLimit {
+ return histogramSample, nil
+ }
+ // Exceed limit.
+ if histogramSample.Schema <= histogram.ExponentialSchemaMin {
+ validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
+ return cortexpb.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
+ }
+ h := cortexpb.HistogramProtoToHistogram(histogramSample)
+ oBuckets := len(h.PositiveBuckets) + len(h.NegativeBuckets)
+ for len(h.PositiveBuckets)+len(h.NegativeBuckets) > limits.MaxNativeHistogramBuckets {
+ if h.Schema <= histogram.ExponentialSchemaMin {
+ validateMetrics.DiscardedSamples.WithLabelValues(nativeHistogramBucketCountLimitExceeded, userID).Inc()
+ return cortexpb.Histogram{}, newHistogramBucketLimitExceededError(ls, limits.MaxNativeHistogramBuckets)
+ }
+ h = h.ReduceResolution(h.Schema - 1)
+ }
+ if oBuckets != len(h.PositiveBuckets)+len(h.NegativeBuckets) {
+ validateMetrics.HistogramSamplesReducedResolution.WithLabelValues(userID).Inc()
+ }
+ // If resolution reduced, convert new histogram to protobuf type again.
+ return cortexpb.HistogramToHistogramProto(histogramSample.TimestampMs, h), nil
+}
+
+func DeletePerUserValidationMetrics(validateMetrics *ValidateMetrics, userID string, log log.Logger) {
+ filter := map[string]string{"user": userID}
+
+ if err := util.DeleteMatchingLabels(validateMetrics.DiscardedSamples, filter); err != nil {
+ level.Warn(log).Log("msg", "failed to remove cortex_discarded_samples_total metric for user", "user", userID, "err", err)
+ }
+ if err := util.DeleteMatchingLabels(validateMetrics.DiscardedSamplesPerLabelSet, filter); err != nil {
+ level.Warn(log).Log("msg", "failed to remove cortex_discarded_samples_per_labelset_total metric for user", "user", userID, "err", err)
+ }
+ if err := util.DeleteMatchingLabels(validateMetrics.DiscardedExemplars, filter); err != nil {
+ level.Warn(log).Log("msg", "failed to remove cortex_discarded_exemplars_total metric for user", "user", userID, "err", err)
+ }
+ if err := util.DeleteMatchingLabels(validateMetrics.DiscardedMetadata, filter); err != nil {
+ level.Warn(log).Log("msg", "failed to remove cortex_discarded_metadata_total metric for user", "user", userID, "err", err)
+ }
+ if err := util.DeleteMatchingLabels(validateMetrics.HistogramSamplesReducedResolution, filter); err != nil {
+ level.Warn(log).Log("msg", "failed to remove cortex_reduced_resolution_histogram_samples_total metric for user", "user", userID, "err", err)
+ }
+ if err := util.DeleteMatchingLabels(validateMetrics.LabelSizeBytes, filter); err != nil {
+ level.Warn(log).Log("msg", "failed to remove cortex_label_size_bytes metric for user", "user", userID, "err", err)
+ }
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/worker_pool.go b/vendor/github.com/cortexproject/cortex/pkg/util/worker_pool.go
new file mode 100644
index 000000000..d46d05608
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/worker_pool.go
@@ -0,0 +1,85 @@
+package util
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+// This code was based on: https://github.com/grpc/grpc-go/blob/66ba4b264d26808cb7af3c86eee66e843472915e/server.go
+
+// serverWorkerResetThreshold defines how often the stack must be reset. Every
+// N requests, by spawning a new goroutine in its place, a worker can reset its
+// stack so that large stacks don't live in memory forever. 2^16 should allow
+// each goroutine stack to live for at least a few seconds in a typical
+// workload (assuming a QPS of a few thousand requests/sec).
+const serverWorkerResetThreshold = 1 << 16
+
+type AsyncExecutor interface {
+ Submit(f func())
+ Stop()
+}
+
+type noOpExecutor struct{}
+
+func (n noOpExecutor) Stop() {}
+
+func NewNoOpExecutor() AsyncExecutor {
+ return &noOpExecutor{}
+}
+
+func (n noOpExecutor) Submit(f func()) {
+ go f()
+}
+
+type workerPoolExecutor struct {
+ serverWorkerChannel chan func()
+ closeOnce sync.Once
+
+ fallbackTotal prometheus.Counter
+}
+
+func NewWorkerPool(name string, numWorkers int, reg prometheus.Registerer) AsyncExecutor {
+ wp := &workerPoolExecutor{
+ serverWorkerChannel: make(chan func()),
+ fallbackTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
+ Namespace: "cortex",
+ Name: "worker_pool_fallback_total",
+ Help: "The total number additional go routines that needed to be created to run jobs.",
+ ConstLabels: prometheus.Labels{"name": name},
+ }),
+ }
+
+ for range numWorkers {
+ go wp.run()
+ }
+
+ return wp
+}
+
+func (s *workerPoolExecutor) Stop() {
+ s.closeOnce.Do(func() {
+ close(s.serverWorkerChannel)
+ })
+}
+
+func (s *workerPoolExecutor) Submit(f func()) {
+ select {
+ case s.serverWorkerChannel <- f:
+ default:
+ s.fallbackTotal.Inc()
+ go f()
+ }
+}
+
+func (s *workerPoolExecutor) run() {
+ for range serverWorkerResetThreshold {
+ f, ok := <-s.serverWorkerChannel
+ if !ok {
+ return
+ }
+ f()
+ }
+ go s.run()
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/yaml.go b/vendor/github.com/cortexproject/cortex/pkg/util/yaml.go
index bb8b4d802..9286cfb40 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/util/yaml.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/yaml.go
@@ -4,13 +4,13 @@ import "gopkg.in/yaml.v2"
// YAMLMarshalUnmarshal utility function that converts a YAML interface in a map
// doing marshal and unmarshal of the parameter
-func YAMLMarshalUnmarshal(in interface{}) (map[interface{}]interface{}, error) {
+func YAMLMarshalUnmarshal(in any) (map[any]any, error) {
yamlBytes, err := yaml.Marshal(in)
if err != nil {
return nil, err
}
- object := make(map[interface{}]interface{})
+ object := make(map[any]any)
if err := yaml.Unmarshal(yamlBytes, object); err != nil {
return nil, err
}
diff --git a/vendor/github.com/cristalhq/hedgedhttp/LICENSE b/vendor/github.com/cristalhq/hedgedhttp/LICENSE
new file mode 100644
index 000000000..7e7c9798a
--- /dev/null
+++ b/vendor/github.com/cristalhq/hedgedhttp/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 cristaltech
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cristalhq/hedgedhttp/README.md b/vendor/github.com/cristalhq/hedgedhttp/README.md
new file mode 100644
index 000000000..104213b35
--- /dev/null
+++ b/vendor/github.com/cristalhq/hedgedhttp/README.md
@@ -0,0 +1,80 @@
+# hedgedhttp
+
+[![build-img]][build-url]
+[![pkg-img]][pkg-url]
+[![reportcard-img]][reportcard-url]
+[![coverage-img]][coverage-url]
+[![version-img]][version-url]
+
+Hedged HTTP client which helps to reduce tail latency at scale.
+
+## Rationale
+
+See paper [Tail at Scale](https://www.barroso.org/publications/TheTailAtScale.pdf) by Jeffrey Dean, Luiz André Barroso. In short: the client first sends one request, but then sends an additional request after a timeout if the previous hasn't returned an answer in the expected time. The client cancels remaining requests once the first result is received.
+
+## Acknowledge
+
+Thanks to [Bohdan Storozhuk](https://github.com/storozhukbm) for the review and powerful hints.
+
+## Features
+
+* Simple API.
+* Easy to integrate.
+* Optimized for speed.
+* Clean and tested code.
+* Supports `http.Client` and `http.RoundTripper`.
+* Dependency-free.
+
+## Install
+
+Go version 1.16+
+
+```
+go get github.com/cristalhq/hedgedhttp
+```
+
+## Example
+
+```go
+ctx := context.Background()
+req, err := http.NewRequestWithContext(ctx, http.MethodGet, "https://google.com", http.NoBody)
+if err != nil {
+ panic(err)
+}
+
+timeout := 10 * time.Millisecond
+upto := 7
+client := &http.Client{Timeout: time.Second}
+hedged, err := hedgedhttp.NewClient(timeout, upto, client)
+if err != nil {
+ panic(err)
+}
+
+// will take `upto` requests, with a `timeout` delay between them
+resp, err := hedged.Do(req)
+if err != nil {
+ panic(err)
+}
+defer resp.Body.Close()
+```
+
+Also see examples: [examples_test.go](https://github.com/cristalhq/hedgedhttp/blob/main/examples_test.go).
+
+## Documentation
+
+See [these docs][pkg-url].
+
+## License
+
+[MIT License](LICENSE).
+
+[build-img]: https://github.com/cristalhq/hedgedhttp/workflows/build/badge.svg
+[build-url]: https://github.com/cristalhq/hedgedhttp/actions
+[pkg-img]: https://pkg.go.dev/badge/cristalhq/hedgedhttp
+[pkg-url]: https://pkg.go.dev/github.com/cristalhq/hedgedhttp
+[reportcard-img]: https://goreportcard.com/badge/cristalhq/hedgedhttp
+[reportcard-url]: https://goreportcard.com/report/cristalhq/hedgedhttp
+[coverage-img]: https://codecov.io/gh/cristalhq/hedgedhttp/branch/main/graph/badge.svg
+[coverage-url]: https://codecov.io/gh/cristalhq/hedgedhttp
+[version-img]: https://img.shields.io/github/v/release/cristalhq/hedgedhttp
+[version-url]: https://github.com/cristalhq/hedgedhttp/releases
diff --git a/vendor/github.com/cristalhq/hedgedhttp/hedged.go b/vendor/github.com/cristalhq/hedgedhttp/hedged.go
new file mode 100644
index 000000000..b7b33f50b
--- /dev/null
+++ b/vendor/github.com/cristalhq/hedgedhttp/hedged.go
@@ -0,0 +1,387 @@
+package hedgedhttp
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+)
+
+const infiniteTimeout = 30 * 24 * time.Hour // domain specific infinite
+
+// Client represents a hedged HTTP client.
+type Client struct {
+ rt http.RoundTripper
+ stats *Stats
+}
+
+// Config for the [Client].
+type Config struct {
+ // Transport of the [Client].
+ // Default is nil which results in [net/http.DefaultTransport].
+ Transport http.RoundTripper
+
+ // Upto says how much requests to make.
+ // Default is zero which means no hedged requests will be made.
+ Upto int
+
+ // Delay before 2 consequitive hedged requests.
+ Delay time.Duration
+
+ // Next returns the upto and delay for each HTTP that will be hedged.
+ // Default is nil which results in (Upto, Delay) result.
+ Next NextFn
+}
+
+// NextFn represents a function that is called for each HTTP request for retrieving hedging options.
+type NextFn func() (upto int, delay time.Duration)
+
+// New returns a new Client for the given config.
+func New(cfg Config) (*Client, error) {
+ switch {
+ case cfg.Delay < 0:
+ return nil, errors.New("hedgedhttp: timeout cannot be negative")
+ case cfg.Upto < 0:
+ return nil, errors.New("hedgedhttp: upto cannot be negative")
+ }
+ if cfg.Transport == nil {
+ cfg.Transport = http.DefaultTransport
+ }
+
+ rt, stats, err := NewRoundTripperAndStats(cfg.Delay, cfg.Upto, cfg.Transport)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO(cristaloleg): this should be removed after internals cleanup.
+ rt2, ok := rt.(*hedgedTransport)
+ if !ok {
+ panic(fmt.Sprintf("want *hedgedTransport got %T", rt))
+ }
+ rt2.next = cfg.Next
+
+ c := &Client{
+ rt: rt2,
+ stats: stats,
+ }
+ return c, nil
+}
+
+// Stats returns statistics for the given client, see [Stats] methods.
+func (c *Client) Stats() *Stats {
+ return c.stats
+}
+
+// Do does the same as [RoundTrip], this method is presented to align with [net/http.Client].
+func (c *Client) Do(req *http.Request) (*http.Response, error) {
+ return c.rt.RoundTrip(req)
+}
+
+// RoundTrip implements [net/http.RoundTripper] interface.
+func (c *Client) RoundTrip(req *http.Request) (*http.Response, error) {
+ return c.rt.RoundTrip(req)
+}
+
+// NewClient returns a new http.Client which implements hedged requests pattern.
+// Given Client starts a new request after a timeout from previous request.
+// Starts no more than upto requests.
+func NewClient(timeout time.Duration, upto int, client *http.Client) (*http.Client, error) {
+ newClient, _, err := NewClientAndStats(timeout, upto, client)
+ if err != nil {
+ return nil, err
+ }
+ return newClient, nil
+}
+
+// NewClientAndStats returns a new http.Client which implements hedged requests pattern
+// And Stats object that can be queried to obtain client's metrics.
+// Given Client starts a new request after a timeout from previous request.
+// Starts no more than upto requests.
+func NewClientAndStats(timeout time.Duration, upto int, client *http.Client) (*http.Client, *Stats, error) {
+ if client == nil {
+ client = &http.Client{
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ newTransport, metrics, err := NewRoundTripperAndStats(timeout, upto, client.Transport)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ client.Transport = newTransport
+
+ return client, metrics, nil
+}
+
+// NewRoundTripper returns a new http.RoundTripper which implements hedged requests pattern.
+// Given RoundTripper starts a new request after a timeout from previous request.
+// Starts no more than upto requests.
+func NewRoundTripper(timeout time.Duration, upto int, rt http.RoundTripper) (http.RoundTripper, error) {
+ newRT, _, err := NewRoundTripperAndStats(timeout, upto, rt)
+ if err != nil {
+ return nil, err
+ }
+ return newRT, nil
+}
+
+// NewRoundTripperAndStats returns a new http.RoundTripper which implements hedged requests pattern
+// And Stats object that can be queried to obtain client's metrics.
+// Given RoundTripper starts a new request after a timeout from previous request.
+// Starts no more than upto requests.
+func NewRoundTripperAndStats(timeout time.Duration, upto int, rt http.RoundTripper) (http.RoundTripper, *Stats, error) {
+ switch {
+ case timeout < 0:
+ return nil, nil, errors.New("hedgedhttp: timeout cannot be negative")
+ case upto < 0:
+ return nil, nil, errors.New("hedgedhttp: upto cannot be negative")
+ }
+
+ if rt == nil {
+ rt = http.DefaultTransport
+ }
+
+ if timeout == 0 {
+ timeout = time.Nanosecond // smallest possible timeout if not set
+ }
+
+ hedged := &hedgedTransport{
+ rt: rt,
+ timeout: timeout,
+ upto: upto,
+ metrics: &Stats{},
+ }
+ return hedged, hedged.metrics, nil
+}
+
+type hedgedTransport struct {
+ rt http.RoundTripper
+ timeout time.Duration
+ upto int
+ next NextFn
+ metrics *Stats
+}
+
+func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ mainCtx := req.Context()
+
+ upto, timeout := ht.upto, ht.timeout
+ if ht.next != nil {
+ upto, timeout = ht.next()
+ }
+
+ // no hedged requests, just a regular one.
+ if upto <= 0 {
+ return ht.rt.RoundTrip(req)
+ }
+ // rollback to default timeout.
+ if timeout < 0 {
+ timeout = ht.timeout
+ }
+
+ errOverall := &MultiError{}
+ resultCh := make(chan indexedResp, upto)
+ errorCh := make(chan error, upto)
+
+ ht.metrics.requestedRoundTripsInc()
+
+ resultIdx := -1
+ cancels := make([]func(), upto)
+
+ defer runInPool(func() {
+ for i, cancel := range cancels {
+ if i != resultIdx && cancel != nil {
+ ht.metrics.canceledSubRequestsInc()
+ cancel()
+ }
+ }
+ })
+
+ for sent := 0; len(errOverall.Errors) < upto; sent++ {
+ if sent < upto {
+ idx := sent
+ subReq, cancel := reqWithCtx(req, mainCtx, idx != 0)
+ cancels[idx] = cancel
+
+ runInPool(func() {
+ ht.metrics.actualRoundTripsInc()
+ resp, err := ht.rt.RoundTrip(subReq)
+ if err != nil {
+ ht.metrics.failedRoundTripsInc()
+ errorCh <- err
+ } else {
+ resultCh <- indexedResp{idx, resp}
+ }
+ })
+ }
+
+ // all request sent - effectively disabling timeout between requests
+ if sent == upto {
+ timeout = infiniteTimeout
+ }
+ resp, err := waitResult(mainCtx, resultCh, errorCh, timeout)
+
+ switch {
+ case resp.Resp != nil:
+ resultIdx = resp.Index
+ if resultIdx == 0 {
+ ht.metrics.originalRequestWinsInc()
+ } else {
+ ht.metrics.hedgedRequestWinsInc()
+ }
+ return resp.Resp, nil
+ case mainCtx.Err() != nil:
+ ht.metrics.canceledByUserRoundTripsInc()
+ return nil, mainCtx.Err()
+ case err != nil:
+ errOverall.Errors = append(errOverall.Errors, err)
+ }
+ }
+
+ // all request have returned errors
+ return nil, errOverall
+}
+
+func waitResult(ctx context.Context, resultCh <-chan indexedResp, errorCh <-chan error, timeout time.Duration) (indexedResp, error) {
+ // try to read result first before blocking on all other channels
+ select {
+ case res := <-resultCh:
+ return res, nil
+ default:
+ timer := getTimer(timeout)
+ defer returnTimer(timer)
+
+ select {
+ case res := <-resultCh:
+ return res, nil
+
+ case reqErr := <-errorCh:
+ return indexedResp{}, reqErr
+
+ case <-ctx.Done():
+ return indexedResp{}, ctx.Err()
+
+ case <-timer.C:
+ return indexedResp{}, nil // it's not a request timeout, it's timeout BETWEEN consecutive requests
+ }
+ }
+}
+
+type indexedResp struct {
+ Index int
+ Resp *http.Response
+}
+
+func reqWithCtx(r *http.Request, ctx context.Context, isHedged bool) (*http.Request, context.CancelFunc) {
+ ctx, cancel := context.WithCancel(ctx)
+ if isHedged {
+ ctx = context.WithValue(ctx, hedgedRequest{}, struct{}{})
+ }
+ req := r.WithContext(ctx)
+ return req, cancel
+}
+
+type hedgedRequest struct{}
+
+// IsHedgedRequest reports when a request is hedged.
+func IsHedgedRequest(r *http.Request) bool {
+ val := r.Context().Value(hedgedRequest{})
+ return val != nil
+}
+
+var taskQueue = make(chan func())
+
+func runInPool(task func()) {
+ select {
+ case taskQueue <- task:
+ // submitted, everything is ok
+
+ default:
+ go func() {
+ // do the given task
+ task()
+
+ const cleanupDuration = 10 * time.Second
+ cleanupTicker := time.NewTicker(cleanupDuration)
+ defer cleanupTicker.Stop()
+
+ for {
+ select {
+ case t := <-taskQueue:
+ t()
+ cleanupTicker.Reset(cleanupDuration)
+ case <-cleanupTicker.C:
+ return
+ }
+ }
+ }()
+ }
+}
+
+// MultiError is an error type to track multiple errors. This is used to
+// accumulate errors in cases and return them as a single "error".
+// Inspired by https://github.com/hashicorp/go-multierror
+type MultiError struct {
+ Errors []error
+ ErrorFormatFn ErrorFormatFunc
+}
+
+func (e *MultiError) Error() string {
+ fn := e.ErrorFormatFn
+ if fn == nil {
+ fn = listFormatFunc
+ }
+ return fn(e.Errors)
+}
+
+func (e *MultiError) String() string {
+ return fmt.Sprintf("*%#v", e.Errors)
+}
+
+// ErrorOrNil returns an error if there are some.
+func (e *MultiError) ErrorOrNil() error {
+ switch {
+ case e == nil || len(e.Errors) == 0:
+ return nil
+ default:
+ return e
+ }
+}
+
+// ErrorFormatFunc is called by MultiError to return the list of errors as a string.
+type ErrorFormatFunc func([]error) string
+
+func listFormatFunc(es []error) string {
+ if len(es) == 1 {
+ return fmt.Sprintf("1 error occurred:\n\t* %s\n\n", es[0])
+ }
+
+ points := make([]string, len(es))
+ for i, err := range es {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ return fmt.Sprintf("%d errors occurred:\n\t%s\n\n", len(es), strings.Join(points, "\n\t"))
+}
+
+var timerPool = sync.Pool{New: func() interface{} {
+ return time.NewTimer(time.Second)
+}}
+
+func getTimer(duration time.Duration) *time.Timer {
+ timer := timerPool.Get().(*time.Timer)
+ timer.Reset(duration)
+ return timer
+}
+
+func returnTimer(timer *time.Timer) {
+ timer.Stop()
+ select {
+ case <-timer.C:
+ default:
+ }
+ timerPool.Put(timer)
+}
diff --git a/vendor/github.com/cristalhq/hedgedhttp/stats.go b/vendor/github.com/cristalhq/hedgedhttp/stats.go
new file mode 100644
index 000000000..f29331890
--- /dev/null
+++ b/vendor/github.com/cristalhq/hedgedhttp/stats.go
@@ -0,0 +1,87 @@
+package hedgedhttp
+
+import "sync/atomic"
+
+// atomicCounter is a false sharing safe counter.
+type atomicCounter struct {
+ count uint64
+ _ [7]uint64
+}
+
+type cacheLine [64]byte
+
+// Stats object that can be queried to obtain certain metrics and get better observability.
+type Stats struct {
+ _ cacheLine
+ requestedRoundTrips atomicCounter
+ actualRoundTrips atomicCounter
+ failedRoundTrips atomicCounter
+ originalRequestWins atomicCounter
+ hedgedRequestWins atomicCounter
+ canceledByUserRoundTrips atomicCounter
+ canceledSubRequests atomicCounter
+ _ cacheLine
+}
+
+func (s *Stats) requestedRoundTripsInc() { atomic.AddUint64(&s.requestedRoundTrips.count, 1) }
+func (s *Stats) actualRoundTripsInc() { atomic.AddUint64(&s.actualRoundTrips.count, 1) }
+func (s *Stats) failedRoundTripsInc() { atomic.AddUint64(&s.failedRoundTrips.count, 1) }
+func (s *Stats) originalRequestWinsInc() { atomic.AddUint64(&s.originalRequestWins.count, 1) }
+func (s *Stats) hedgedRequestWinsInc() { atomic.AddUint64(&s.hedgedRequestWins.count, 1) }
+func (s *Stats) canceledByUserRoundTripsInc() { atomic.AddUint64(&s.canceledByUserRoundTrips.count, 1) }
+func (s *Stats) canceledSubRequestsInc() { atomic.AddUint64(&s.canceledSubRequests.count, 1) }
+
+// RequestedRoundTrips returns count of requests that were requested by client.
+func (s *Stats) RequestedRoundTrips() uint64 {
+ return atomic.LoadUint64(&s.requestedRoundTrips.count)
+}
+
+// ActualRoundTrips returns count of requests that were actually sent.
+func (s *Stats) ActualRoundTrips() uint64 {
+ return atomic.LoadUint64(&s.actualRoundTrips.count)
+}
+
+// FailedRoundTrips returns count of requests that failed.
+func (s *Stats) FailedRoundTrips() uint64 {
+ return atomic.LoadUint64(&s.failedRoundTrips.count)
+}
+
+// OriginalRequestWins returns count of original requests that were faster than the original.
+func (s *Stats) OriginalRequestWins() uint64 {
+ return atomic.LoadUint64(&s.originalRequestWins.count)
+}
+
+// HedgedRequestWins returns count of hedged requests that were faster than the original.
+func (s *Stats) HedgedRequestWins() uint64 {
+ return atomic.LoadUint64(&s.hedgedRequestWins.count)
+}
+
+// CanceledByUserRoundTrips returns count of requests that were canceled by user, using request context.
+func (s *Stats) CanceledByUserRoundTrips() uint64 {
+ return atomic.LoadUint64(&s.canceledByUserRoundTrips.count)
+}
+
+// CanceledSubRequests returns count of hedged sub-requests that were canceled by transport.
+func (s *Stats) CanceledSubRequests() uint64 {
+ return atomic.LoadUint64(&s.canceledSubRequests.count)
+}
+
+// StatsSnapshot is a snapshot of Stats.
+type StatsSnapshot struct {
+ RequestedRoundTrips uint64 // count of requests that were requested by client
+ ActualRoundTrips uint64 // count of requests that were actually sent
+ FailedRoundTrips uint64 // count of requests that failed
+ CanceledByUserRoundTrips uint64 // count of requests that were canceled by user, using request context
+ CanceledSubRequests uint64 // count of hedged sub-requests that were canceled by transport
+}
+
+// Snapshot of the stats.
+func (s *Stats) Snapshot() StatsSnapshot {
+ return StatsSnapshot{
+ RequestedRoundTrips: s.RequestedRoundTrips(),
+ ActualRoundTrips: s.ActualRoundTrips(),
+ FailedRoundTrips: s.FailedRoundTrips(),
+ CanceledByUserRoundTrips: s.CanceledByUserRoundTrips(),
+ CanceledSubRequests: s.CanceledSubRequests(),
+ }
+}
diff --git a/vendor/github.com/dgryski/go-metro/LICENSE b/vendor/github.com/dgryski/go-metro/LICENSE
new file mode 100644
index 000000000..6243b617c
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/LICENSE
@@ -0,0 +1,24 @@
+This package is a mechanical translation of the reference C++ code for
+MetroHash, available at https://github.com/jandrewrogers/MetroHash
+
+The MIT License (MIT)
+
+Copyright (c) 2016 Damian Gryski
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/dgryski/go-metro/README b/vendor/github.com/dgryski/go-metro/README
new file mode 100644
index 000000000..5ecebb385
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/README
@@ -0,0 +1,6 @@
+MetroHash
+
+This package is a mechanical translation of the reference C++ code for
+MetroHash, available at https://github.com/jandrewrogers/MetroHash
+
+I claim no additional copyright over the original implementation.
diff --git a/vendor/github.com/dgryski/go-metro/metro128.go b/vendor/github.com/dgryski/go-metro/metro128.go
new file mode 100644
index 000000000..e8dd8ddbf
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro128.go
@@ -0,0 +1,94 @@
+package metro
+
+import "encoding/binary"
+
+func rotate_right(v uint64, k uint) uint64 {
+ return (v >> k) | (v << (64 - k))
+}
+
+func Hash128(buffer []byte, seed uint64) (uint64, uint64) {
+
+ const (
+ k0 = 0xC83A91E1
+ k1 = 0x8648DBDB
+ k2 = 0x7BDEC03B
+ k3 = 0x2F5870A5
+ )
+
+ ptr := buffer
+
+ var v [4]uint64
+
+ v[0] = (seed - k0) * k3
+ v[1] = (seed + k1) * k2
+
+ if len(ptr) >= 32 {
+ v[2] = (seed + k0) * k2
+ v[3] = (seed - k1) * k3
+
+ for len(ptr) >= 32 {
+ v[0] += binary.LittleEndian.Uint64(ptr) * k0
+ ptr = ptr[8:]
+ v[0] = rotate_right(v[0], 29) + v[2]
+ v[1] += binary.LittleEndian.Uint64(ptr) * k1
+ ptr = ptr[8:]
+ v[1] = rotate_right(v[1], 29) + v[3]
+ v[2] += binary.LittleEndian.Uint64(ptr) * k2
+ ptr = ptr[8:]
+ v[2] = rotate_right(v[2], 29) + v[0]
+ v[3] += binary.LittleEndian.Uint64(ptr) * k3
+ ptr = ptr[8:]
+ v[3] = rotate_right(v[3], 29) + v[1]
+ }
+
+ v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 21) * k1
+ v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 21) * k0
+ v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 21) * k1
+ v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 21) * k0
+ }
+
+ if len(ptr) >= 16 {
+ v[0] += binary.LittleEndian.Uint64(ptr) * k2
+ ptr = ptr[8:]
+ v[0] = rotate_right(v[0], 33) * k3
+ v[1] += binary.LittleEndian.Uint64(ptr) * k2
+ ptr = ptr[8:]
+ v[1] = rotate_right(v[1], 33) * k3
+ v[0] ^= rotate_right((v[0]*k2)+v[1], 45) * k1
+ v[1] ^= rotate_right((v[1]*k3)+v[0], 45) * k0
+ }
+
+ if len(ptr) >= 8 {
+ v[0] += binary.LittleEndian.Uint64(ptr) * k2
+ ptr = ptr[8:]
+ v[0] = rotate_right(v[0], 33) * k3
+ v[0] ^= rotate_right((v[0]*k2)+v[1], 27) * k1
+ }
+
+ if len(ptr) >= 4 {
+ v[1] += uint64(binary.LittleEndian.Uint32(ptr)) * k2
+ ptr = ptr[4:]
+ v[1] = rotate_right(v[1], 33) * k3
+ v[1] ^= rotate_right((v[1]*k3)+v[0], 46) * k0
+ }
+
+ if len(ptr) >= 2 {
+ v[0] += uint64(binary.LittleEndian.Uint16(ptr)) * k2
+ ptr = ptr[2:]
+ v[0] = rotate_right(v[0], 33) * k3
+ v[0] ^= rotate_right((v[0]*k2)+v[1], 22) * k1
+ }
+
+ if len(ptr) >= 1 {
+ v[1] += uint64(ptr[0]) * k2
+ v[1] = rotate_right(v[1], 33) * k3
+ v[1] ^= rotate_right((v[1]*k3)+v[0], 58) * k0
+ }
+
+ v[0] += rotate_right((v[0]*k0)+v[1], 13)
+ v[1] += rotate_right((v[1]*k1)+v[0], 37)
+ v[0] += rotate_right((v[0]*k2)+v[1], 13)
+ v[1] += rotate_right((v[1]*k3)+v[0], 37)
+
+ return v[0], v[1]
+}
diff --git a/vendor/github.com/dgryski/go-metro/metro64.go b/vendor/github.com/dgryski/go-metro/metro64.go
new file mode 100644
index 000000000..458a91219
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro64.go
@@ -0,0 +1,89 @@
+//go:build noasm || !amd64 || !gc || purego
+// +build noasm !amd64 !gc purego
+
+package metro
+
+import (
+ "encoding/binary"
+ "math/bits"
+)
+
+func Hash64(buffer []byte, seed uint64) uint64 {
+
+ const (
+ k0 = 0xD6D018F5
+ k1 = 0xA2AA033B
+ k2 = 0x62992FC1
+ k3 = 0x30BC5B29
+ )
+
+ ptr := buffer
+
+ hash := (seed + k2) * k0
+
+ if len(ptr) >= 32 {
+ v0, v1, v2, v3 := hash, hash, hash, hash
+
+ for len(ptr) >= 32 {
+ v0 += binary.LittleEndian.Uint64(ptr[:8]) * k0
+ v0 = bits.RotateLeft64(v0, -29) + v2
+ v1 += binary.LittleEndian.Uint64(ptr[8:16]) * k1
+ v1 = bits.RotateLeft64(v1, -29) + v3
+ v2 += binary.LittleEndian.Uint64(ptr[16:24]) * k2
+ v2 = bits.RotateLeft64(v2, -29) + v0
+ v3 += binary.LittleEndian.Uint64(ptr[24:32]) * k3
+ v3 = bits.RotateLeft64(v3, -29) + v1
+ ptr = ptr[32:]
+ }
+
+ v2 ^= bits.RotateLeft64(((v0+v3)*k0)+v1, -37) * k1
+ v3 ^= bits.RotateLeft64(((v1+v2)*k1)+v0, -37) * k0
+ v0 ^= bits.RotateLeft64(((v0+v2)*k0)+v3, -37) * k1
+ v1 ^= bits.RotateLeft64(((v1+v3)*k1)+v2, -37) * k0
+ hash += v0 ^ v1
+ }
+
+ if len(ptr) >= 16 {
+ v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2)
+ v0 = bits.RotateLeft64(v0, -29) * k3
+ v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2)
+ v1 = bits.RotateLeft64(v1, -29) * k3
+ v0 ^= bits.RotateLeft64(v0*k0, -21) + v1
+ v1 ^= bits.RotateLeft64(v1*k3, -21) + v0
+ hash += v1
+ ptr = ptr[16:]
+ }
+
+ if len(ptr) >= 8 {
+ hash += binary.LittleEndian.Uint64(ptr[:8]) * k3
+ ptr = ptr[8:]
+ hash ^= bits.RotateLeft64(hash, -55) * k1
+ }
+
+ if len(ptr) >= 4 {
+ hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3
+ hash ^= bits.RotateLeft64(hash, -26) * k1
+ ptr = ptr[4:]
+ }
+
+ if len(ptr) >= 2 {
+ hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3
+ ptr = ptr[2:]
+ hash ^= bits.RotateLeft64(hash, -48) * k1
+ }
+
+ if len(ptr) >= 1 {
+ hash += uint64(ptr[0]) * k3
+ hash ^= bits.RotateLeft64(hash, -37) * k1
+ }
+
+ hash ^= bits.RotateLeft64(hash, -28)
+ hash *= k0
+ hash ^= bits.RotateLeft64(hash, -29)
+
+ return hash
+}
+
+func Hash64Str(buffer string, seed uint64) uint64 {
+ return Hash64([]byte(buffer), seed)
+}
diff --git a/vendor/github.com/dgryski/go-metro/metro_amd64.s b/vendor/github.com/dgryski/go-metro/metro_amd64.s
new file mode 100644
index 000000000..f37697582
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro_amd64.s
@@ -0,0 +1,387 @@
+// Code generated by command: go run asm.go -out metro_amd64.s -stubs metro_stub.go -pkg metro. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego && !noasm
+
+#include "textflag.h"
+
+// func Hash64(buffer []byte, seed uint64) uint64
+TEXT ·Hash64(SB), NOSPLIT, $0-40
+ MOVQ seed+24(FP), AX
+ MOVQ buffer_base+0(FP), CX
+ MOVQ buffer_len+8(FP), DX
+ MOVQ $0xd6d018f5, BX
+ IMULQ BX, AX
+ MOVQ $0x52bc33fedbe4cbb5, BX
+ ADDQ BX, AX
+ CMPQ DX, $0x20
+ JLT after32
+ MOVQ AX, BX
+ MOVQ AX, SI
+ MOVQ AX, DI
+ MOVQ AX, R8
+
+loop:
+ MOVQ (CX), R9
+ MOVQ $0xd6d018f5, R10
+ IMULQ R10, R9
+ ADDQ R9, BX
+ RORQ $0x1d, BX
+ ADDQ DI, BX
+ MOVQ 8(CX), R9
+ MOVQ $0xa2aa033b, R10
+ IMULQ R10, R9
+ ADDQ R9, SI
+ RORQ $0x1d, SI
+ ADDQ R8, SI
+ MOVQ 16(CX), R9
+ MOVQ $0x62992fc1, R10
+ IMULQ R10, R9
+ ADDQ R9, DI
+ RORQ $0x1d, DI
+ ADDQ BX, DI
+ MOVQ 24(CX), R9
+ MOVQ $0x30bc5b29, R10
+ IMULQ R10, R9
+ ADDQ R9, R8
+ RORQ $0x1d, R8
+ ADDQ SI, R8
+ ADDQ $0x20, CX
+ SUBQ $0x20, DX
+ CMPQ DX, $0x20
+ JGE loop
+ MOVQ BX, R9
+ ADDQ R8, R9
+ MOVQ $0xd6d018f5, R10
+ IMULQ R10, R9
+ ADDQ SI, R9
+ RORQ $0x25, R9
+ MOVQ $0xa2aa033b, R10
+ IMULQ R10, R9
+ XORQ R9, DI
+ MOVQ SI, R9
+ ADDQ DI, R9
+ MOVQ $0xa2aa033b, R10
+ IMULQ R10, R9
+ ADDQ BX, R9
+ RORQ $0x25, R9
+ MOVQ $0xd6d018f5, R10
+ IMULQ R10, R9
+ XORQ R9, R8
+ MOVQ BX, R9
+ ADDQ DI, R9
+ MOVQ $0xd6d018f5, R10
+ IMULQ R10, R9
+ ADDQ R8, R9
+ RORQ $0x25, R9
+ MOVQ $0xa2aa033b, R10
+ IMULQ R10, R9
+ XORQ R9, BX
+ MOVQ SI, R9
+ ADDQ R8, R9
+ MOVQ $0xa2aa033b, R8
+ IMULQ R8, R9
+ ADDQ DI, R9
+ RORQ $0x25, R9
+ MOVQ $0xd6d018f5, DI
+ IMULQ DI, R9
+ XORQ R9, SI
+ XORQ SI, BX
+ ADDQ BX, AX
+
+after32:
+ CMPQ DX, $0x10
+ JLT after16
+ MOVQ (CX), BX
+ MOVQ $0x62992fc1, SI
+ IMULQ SI, BX
+ ADDQ AX, BX
+ ADDQ $0x08, CX
+ SUBQ $0x08, DX
+ RORQ $0x1d, BX
+ MOVQ $0x30bc5b29, SI
+ IMULQ SI, BX
+ MOVQ (CX), SI
+ MOVQ $0x62992fc1, DI
+ IMULQ DI, SI
+ ADDQ AX, SI
+ ADDQ $0x08, CX
+ SUBQ $0x08, DX
+ RORQ $0x1d, SI
+ MOVQ $0x30bc5b29, DI
+ IMULQ DI, SI
+ MOVQ BX, DI
+ MOVQ $0xd6d018f5, R8
+ IMULQ R8, DI
+ RORQ $0x15, DI
+ ADDQ SI, DI
+ XORQ DI, BX
+ MOVQ SI, DI
+ MOVQ $0x30bc5b29, R8
+ IMULQ R8, DI
+ RORQ $0x15, DI
+ ADDQ BX, DI
+ XORQ DI, SI
+ ADDQ SI, AX
+
+after16:
+ CMPQ DX, $0x08
+ JLT after8
+ MOVQ (CX), BX
+ MOVQ $0x30bc5b29, SI
+ IMULQ SI, BX
+ ADDQ BX, AX
+ ADDQ $0x08, CX
+ SUBQ $0x08, DX
+ MOVQ AX, BX
+ RORQ $0x37, BX
+ MOVQ $0xa2aa033b, SI
+ IMULQ SI, BX
+ XORQ BX, AX
+
+after8:
+ CMPQ DX, $0x04
+ JLT after4
+ XORQ BX, BX
+ MOVL (CX), BX
+ MOVQ $0x30bc5b29, SI
+ IMULQ SI, BX
+ ADDQ BX, AX
+ ADDQ $0x04, CX
+ SUBQ $0x04, DX
+ MOVQ AX, BX
+ RORQ $0x1a, BX
+ MOVQ $0xa2aa033b, SI
+ IMULQ SI, BX
+ XORQ BX, AX
+
+after4:
+ CMPQ DX, $0x02
+ JLT after2
+ XORQ BX, BX
+ MOVW (CX), BX
+ MOVQ $0x30bc5b29, SI
+ IMULQ SI, BX
+ ADDQ BX, AX
+ ADDQ $0x02, CX
+ SUBQ $0x02, DX
+ MOVQ AX, BX
+ RORQ $0x30, BX
+ MOVQ $0xa2aa033b, SI
+ IMULQ SI, BX
+ XORQ BX, AX
+
+after2:
+ CMPQ DX, $0x01
+ JLT after1
+ MOVBQZX (CX), CX
+ MOVQ $0x30bc5b29, DX
+ IMULQ DX, CX
+ ADDQ CX, AX
+ MOVQ AX, CX
+ RORQ $0x25, CX
+ MOVQ $0xa2aa033b, DX
+ IMULQ DX, CX
+ XORQ CX, AX
+
+after1:
+ MOVQ AX, CX
+ RORQ $0x1c, CX
+ XORQ CX, AX
+ MOVQ $0xd6d018f5, CX
+ IMULQ CX, AX
+ MOVQ AX, CX
+ RORQ $0x1d, CX
+ XORQ CX, AX
+ MOVQ AX, ret+32(FP)
+ RET
+
+// func Hash64Str(buffer string, seed uint64) uint64
+TEXT ·Hash64Str(SB), NOSPLIT, $0-32
+ MOVQ seed+16(FP), AX
+ MOVQ buffer_base+0(FP), CX
+ MOVQ buffer_len+8(FP), DX
+ MOVQ $0xd6d018f5, BX
+ IMULQ BX, AX
+ MOVQ $0x52bc33fedbe4cbb5, BX
+ ADDQ BX, AX
+ CMPQ DX, $0x20
+ JLT after32
+ MOVQ AX, BX
+ MOVQ AX, SI
+ MOVQ AX, DI
+ MOVQ AX, R8
+
+loop:
+ MOVQ (CX), R9
+ MOVQ $0xd6d018f5, R10
+ IMULQ R10, R9
+ ADDQ R9, BX
+ RORQ $0x1d, BX
+ ADDQ DI, BX
+ MOVQ 8(CX), R9
+ MOVQ $0xa2aa033b, R10
+ IMULQ R10, R9
+ ADDQ R9, SI
+ RORQ $0x1d, SI
+ ADDQ R8, SI
+ MOVQ 16(CX), R9
+ MOVQ $0x62992fc1, R10
+ IMULQ R10, R9
+ ADDQ R9, DI
+ RORQ $0x1d, DI
+ ADDQ BX, DI
+ MOVQ 24(CX), R9
+ MOVQ $0x30bc5b29, R10
+ IMULQ R10, R9
+ ADDQ R9, R8
+ RORQ $0x1d, R8
+ ADDQ SI, R8
+ ADDQ $0x20, CX
+ SUBQ $0x20, DX
+ CMPQ DX, $0x20
+ JGE loop
+ MOVQ BX, R9
+ ADDQ R8, R9
+ MOVQ $0xd6d018f5, R10
+ IMULQ R10, R9
+ ADDQ SI, R9
+ RORQ $0x25, R9
+ MOVQ $0xa2aa033b, R10
+ IMULQ R10, R9
+ XORQ R9, DI
+ MOVQ SI, R9
+ ADDQ DI, R9
+ MOVQ $0xa2aa033b, R10
+ IMULQ R10, R9
+ ADDQ BX, R9
+ RORQ $0x25, R9
+ MOVQ $0xd6d018f5, R10
+ IMULQ R10, R9
+ XORQ R9, R8
+ MOVQ BX, R9
+ ADDQ DI, R9
+ MOVQ $0xd6d018f5, R10
+ IMULQ R10, R9
+ ADDQ R8, R9
+ RORQ $0x25, R9
+ MOVQ $0xa2aa033b, R10
+ IMULQ R10, R9
+ XORQ R9, BX
+ MOVQ SI, R9
+ ADDQ R8, R9
+ MOVQ $0xa2aa033b, R8
+ IMULQ R8, R9
+ ADDQ DI, R9
+ RORQ $0x25, R9
+ MOVQ $0xd6d018f5, DI
+ IMULQ DI, R9
+ XORQ R9, SI
+ XORQ SI, BX
+ ADDQ BX, AX
+
+after32:
+ CMPQ DX, $0x10
+ JLT after16
+ MOVQ (CX), BX
+ MOVQ $0x62992fc1, SI
+ IMULQ SI, BX
+ ADDQ AX, BX
+ ADDQ $0x08, CX
+ SUBQ $0x08, DX
+ RORQ $0x1d, BX
+ MOVQ $0x30bc5b29, SI
+ IMULQ SI, BX
+ MOVQ (CX), SI
+ MOVQ $0x62992fc1, DI
+ IMULQ DI, SI
+ ADDQ AX, SI
+ ADDQ $0x08, CX
+ SUBQ $0x08, DX
+ RORQ $0x1d, SI
+ MOVQ $0x30bc5b29, DI
+ IMULQ DI, SI
+ MOVQ BX, DI
+ MOVQ $0xd6d018f5, R8
+ IMULQ R8, DI
+ RORQ $0x15, DI
+ ADDQ SI, DI
+ XORQ DI, BX
+ MOVQ SI, DI
+ MOVQ $0x30bc5b29, R8
+ IMULQ R8, DI
+ RORQ $0x15, DI
+ ADDQ BX, DI
+ XORQ DI, SI
+ ADDQ SI, AX
+
+after16:
+ CMPQ DX, $0x08
+ JLT after8
+ MOVQ (CX), BX
+ MOVQ $0x30bc5b29, SI
+ IMULQ SI, BX
+ ADDQ BX, AX
+ ADDQ $0x08, CX
+ SUBQ $0x08, DX
+ MOVQ AX, BX
+ RORQ $0x37, BX
+ MOVQ $0xa2aa033b, SI
+ IMULQ SI, BX
+ XORQ BX, AX
+
+after8:
+ CMPQ DX, $0x04
+ JLT after4
+ XORQ BX, BX
+ MOVL (CX), BX
+ MOVQ $0x30bc5b29, SI
+ IMULQ SI, BX
+ ADDQ BX, AX
+ ADDQ $0x04, CX
+ SUBQ $0x04, DX
+ MOVQ AX, BX
+ RORQ $0x1a, BX
+ MOVQ $0xa2aa033b, SI
+ IMULQ SI, BX
+ XORQ BX, AX
+
+after4:
+ CMPQ DX, $0x02
+ JLT after2
+ XORQ BX, BX
+ MOVW (CX), BX
+ MOVQ $0x30bc5b29, SI
+ IMULQ SI, BX
+ ADDQ BX, AX
+ ADDQ $0x02, CX
+ SUBQ $0x02, DX
+ MOVQ AX, BX
+ RORQ $0x30, BX
+ MOVQ $0xa2aa033b, SI
+ IMULQ SI, BX
+ XORQ BX, AX
+
+after2:
+ CMPQ DX, $0x01
+ JLT after1
+ MOVBQZX (CX), CX
+ MOVQ $0x30bc5b29, DX
+ IMULQ DX, CX
+ ADDQ CX, AX
+ MOVQ AX, CX
+ RORQ $0x25, CX
+ MOVQ $0xa2aa033b, DX
+ IMULQ DX, CX
+ XORQ CX, AX
+
+after1:
+ MOVQ AX, CX
+ RORQ $0x1c, CX
+ XORQ CX, AX
+ MOVQ $0xd6d018f5, CX
+ IMULQ CX, AX
+ MOVQ AX, CX
+ RORQ $0x1d, CX
+ XORQ CX, AX
+ MOVQ AX, ret+24(FP)
+ RET
diff --git a/vendor/github.com/dgryski/go-metro/metro_stub.go b/vendor/github.com/dgryski/go-metro/metro_stub.go
new file mode 100644
index 000000000..c691333b7
--- /dev/null
+++ b/vendor/github.com/dgryski/go-metro/metro_stub.go
@@ -0,0 +1,10 @@
+// Code generated by command: go run asm.go -out metro_amd64.s -stubs metro_stub.go -pkg metro. DO NOT EDIT.
+
+//go:build amd64 && gc && !purego && !noasm
+
+package metro
+
+//go:noescape
+func Hash64(buffer []byte, seed uint64) uint64
+
+func Hash64Str(buffer string, seed uint64) uint64
diff --git a/vendor/github.com/edsrzf/mmap-go/README.md b/vendor/github.com/edsrzf/mmap-go/README.md
index 1ac39f7ee..5df62b8ed 100644
--- a/vendor/github.com/edsrzf/mmap-go/README.md
+++ b/vendor/github.com/edsrzf/mmap-go/README.md
@@ -9,6 +9,6 @@ Operating System Support
========================
This package is tested using GitHub Actions on Linux, macOS, and Windows. It should also work on other Unix-like platforms, but hasn't been tested with them. I'm interested to hear about the results.
-I haven't been able to add more features without adding significant complexity, so mmap-go doesn't support `mprotect`, `mincore`, and maybe a few other things. If you're running on a Unix-like platform and need some of these features, I suggest Gustavo Niemeyer's [gommap](http://labix.org/gommap).
+This package compiles for Plan 9 and WebAssembly, but its functions always return errors.
-This package compiles on Plan 9, but its functions always return errors.
+Related functions such as `mprotect` and `mincore` aren't included. I haven't found a way to implement them on Windows without introducing significant complexity. If you're running on a Unix-like platform and really need these features, it should still be possible to implement them on top of this package via `syscall`.
diff --git a/vendor/github.com/edsrzf/mmap-go/mmap_wasm.go b/vendor/github.com/edsrzf/mmap-go/mmap_wasm.go
new file mode 100644
index 000000000..cfe1c50b0
--- /dev/null
+++ b/vendor/github.com/edsrzf/mmap-go/mmap_wasm.go
@@ -0,0 +1,27 @@
+// Copyright 2024 Evan Shaw. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mmap
+
+import "syscall"
+
+func mmap(len int, inprot, inflags, fd uintptr, off int64) ([]byte, error) {
+ return nil, syscall.ENOTSUP
+}
+
+func (m MMap) flush() error {
+ return syscall.ENOTSUP
+}
+
+func (m MMap) lock() error {
+ return syscall.ENOTSUP
+}
+
+func (m MMap) unlock() error {
+ return syscall.ENOTSUP
+}
+
+func (m MMap) unmap() error {
+ return syscall.ENOTSUP
+}
diff --git a/vendor/github.com/efficientgo/core/testutil/testorbench.go b/vendor/github.com/efficientgo/core/testutil/testorbench.go
index c36b8877a..69f3fe60f 100644
--- a/vendor/github.com/efficientgo/core/testutil/testorbench.go
+++ b/vendor/github.com/efficientgo/core/testutil/testorbench.go
@@ -30,7 +30,13 @@ type TB interface {
SetBytes(n int64)
N() int
+
ResetTimer()
+ StartTimer()
+ StopTimer()
+
+ ReportAllocs()
+ ReportMetric(n float64, unit string)
}
// tb implements TB as well as testing.TB interfaces.
@@ -78,8 +84,36 @@ func (t *tb) ResetTimer() {
}
}
+// StartTimer starts a timer, if it's a benchmark, noop otherwise.
+func (t *tb) StartTimer() {
+ if b, ok := t.TB.(*testing.B); ok {
+ b.StartTimer()
+ }
+}
+
+// StopTimer stops a timer, if it's a benchmark, noop otherwise.
+func (t *tb) StopTimer() {
+ if b, ok := t.TB.(*testing.B); ok {
+ b.StopTimer()
+ }
+}
+
// IsBenchmark returns true if it's a benchmark.
func (t *tb) IsBenchmark() bool {
_, ok := t.TB.(*testing.B)
return ok
}
+
+// ReportAllocs reports allocs if it's a benchmark, noop otherwise.
+func (t *tb) ReportAllocs() {
+ if b, ok := t.TB.(*testing.B); ok {
+ b.ReportAllocs()
+ }
+}
+
+// ReportMetric reports metrics if it's a benchmark, noop otherwise.
+func (t *tb) ReportMetric(n float64, unit string) {
+ if b, ok := t.TB.(*testing.B); ok {
+ b.ReportMetric(n, unit)
+ }
+}
diff --git a/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go b/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go
index 0a4b5ff17..7b58904fa 100644
--- a/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go
+++ b/vendor/github.com/efficientgo/tools/extkingpin/pathorcontent.go
@@ -13,8 +13,8 @@ import (
"os"
"regexp"
+ "github.com/alecthomas/kingpin/v2"
"github.com/pkg/errors"
- "gopkg.in/alecthomas/kingpin.v2"
)
// PathOrContent is a flag type that defines two flags to fetch bytes. Either from file (*-file flag) or content (* flag).
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/LICENSE b/vendor/github.com/envoyproxy/go-control-plane/envoy/LICENSE
new file mode 100644
index 000000000..8dada3eda
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go
new file mode 100644
index 000000000..b2872bfb3
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.go
@@ -0,0 +1,607 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/certs.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Proto representation of certificate details. Admin endpoint uses this wrapper for “/certs“ to
+// display certificate information. See :ref:`/certs ` for more
+// information.
+type Certificates struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // List of certificates known to an Envoy.
+ Certificates []*Certificate `protobuf:"bytes,1,rep,name=certificates,proto3" json:"certificates,omitempty"`
+}
+
+func (x *Certificates) Reset() {
+ *x = Certificates{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Certificates) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Certificates) ProtoMessage() {}
+
+func (x *Certificates) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Certificates.ProtoReflect.Descriptor instead.
+func (*Certificates) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Certificates) GetCertificates() []*Certificate {
+ if x != nil {
+ return x.Certificates
+ }
+ return nil
+}
+
+type Certificate struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Details of CA certificate.
+ CaCert []*CertificateDetails `protobuf:"bytes,1,rep,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+ // Details of Certificate Chain
+ CertChain []*CertificateDetails `protobuf:"bytes,2,rep,name=cert_chain,json=certChain,proto3" json:"cert_chain,omitempty"`
+}
+
+func (x *Certificate) Reset() {
+ *x = Certificate{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Certificate) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Certificate) ProtoMessage() {}
+
+func (x *Certificate) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Certificate.ProtoReflect.Descriptor instead.
+func (*Certificate) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Certificate) GetCaCert() []*CertificateDetails {
+ if x != nil {
+ return x.CaCert
+ }
+ return nil
+}
+
+func (x *Certificate) GetCertChain() []*CertificateDetails {
+ if x != nil {
+ return x.CertChain
+ }
+ return nil
+}
+
+// [#next-free-field: 8]
+type CertificateDetails struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Path of the certificate.
+ Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+ // Certificate Serial Number.
+ SerialNumber string `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+ // List of Subject Alternate names.
+ SubjectAltNames []*SubjectAlternateName `protobuf:"bytes,3,rep,name=subject_alt_names,json=subjectAltNames,proto3" json:"subject_alt_names,omitempty"`
+ // Minimum of days until expiration of certificate and it's chain.
+ DaysUntilExpiration uint64 `protobuf:"varint,4,opt,name=days_until_expiration,json=daysUntilExpiration,proto3" json:"days_until_expiration,omitempty"`
+ // Indicates the time from which the certificate is valid.
+ ValidFrom *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"`
+ // Indicates the time at which the certificate expires.
+ ExpirationTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=expiration_time,json=expirationTime,proto3" json:"expiration_time,omitempty"`
+ // Details related to the OCSP response associated with this certificate, if any.
+ OcspDetails *CertificateDetails_OcspDetails `protobuf:"bytes,7,opt,name=ocsp_details,json=ocspDetails,proto3" json:"ocsp_details,omitempty"`
+}
+
+func (x *CertificateDetails) Reset() {
+ *x = CertificateDetails{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CertificateDetails) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CertificateDetails) ProtoMessage() {}
+
+func (x *CertificateDetails) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CertificateDetails.ProtoReflect.Descriptor instead.
+func (*CertificateDetails) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *CertificateDetails) GetPath() string {
+ if x != nil {
+ return x.Path
+ }
+ return ""
+}
+
+func (x *CertificateDetails) GetSerialNumber() string {
+ if x != nil {
+ return x.SerialNumber
+ }
+ return ""
+}
+
+func (x *CertificateDetails) GetSubjectAltNames() []*SubjectAlternateName {
+ if x != nil {
+ return x.SubjectAltNames
+ }
+ return nil
+}
+
+func (x *CertificateDetails) GetDaysUntilExpiration() uint64 {
+ if x != nil {
+ return x.DaysUntilExpiration
+ }
+ return 0
+}
+
+func (x *CertificateDetails) GetValidFrom() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ValidFrom
+ }
+ return nil
+}
+
+func (x *CertificateDetails) GetExpirationTime() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ExpirationTime
+ }
+ return nil
+}
+
+func (x *CertificateDetails) GetOcspDetails() *CertificateDetails_OcspDetails {
+ if x != nil {
+ return x.OcspDetails
+ }
+ return nil
+}
+
+type SubjectAlternateName struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Subject Alternate Name.
+ //
+ // Types that are assignable to Name:
+ //
+ // *SubjectAlternateName_Dns
+ // *SubjectAlternateName_Uri
+ // *SubjectAlternateName_IpAddress
+ Name isSubjectAlternateName_Name `protobuf_oneof:"name"`
+}
+
+func (x *SubjectAlternateName) Reset() {
+ *x = SubjectAlternateName{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SubjectAlternateName) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SubjectAlternateName) ProtoMessage() {}
+
+func (x *SubjectAlternateName) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SubjectAlternateName.ProtoReflect.Descriptor instead.
+func (*SubjectAlternateName) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{3}
+}
+
+func (m *SubjectAlternateName) GetName() isSubjectAlternateName_Name {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (x *SubjectAlternateName) GetDns() string {
+ if x, ok := x.GetName().(*SubjectAlternateName_Dns); ok {
+ return x.Dns
+ }
+ return ""
+}
+
+func (x *SubjectAlternateName) GetUri() string {
+ if x, ok := x.GetName().(*SubjectAlternateName_Uri); ok {
+ return x.Uri
+ }
+ return ""
+}
+
+func (x *SubjectAlternateName) GetIpAddress() string {
+ if x, ok := x.GetName().(*SubjectAlternateName_IpAddress); ok {
+ return x.IpAddress
+ }
+ return ""
+}
+
+type isSubjectAlternateName_Name interface {
+ isSubjectAlternateName_Name()
+}
+
+type SubjectAlternateName_Dns struct {
+ Dns string `protobuf:"bytes,1,opt,name=dns,proto3,oneof"`
+}
+
+type SubjectAlternateName_Uri struct {
+ Uri string `protobuf:"bytes,2,opt,name=uri,proto3,oneof"`
+}
+
+type SubjectAlternateName_IpAddress struct {
+ IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress,proto3,oneof"`
+}
+
+func (*SubjectAlternateName_Dns) isSubjectAlternateName_Name() {}
+
+func (*SubjectAlternateName_Uri) isSubjectAlternateName_Name() {}
+
+func (*SubjectAlternateName_IpAddress) isSubjectAlternateName_Name() {}
+
+type CertificateDetails_OcspDetails struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Indicates the time from which the OCSP response is valid.
+ ValidFrom *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=valid_from,json=validFrom,proto3" json:"valid_from,omitempty"`
+ // Indicates the time at which the OCSP response expires.
+ Expiration *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=expiration,proto3" json:"expiration,omitempty"`
+}
+
+func (x *CertificateDetails_OcspDetails) Reset() {
+ *x = CertificateDetails_OcspDetails{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CertificateDetails_OcspDetails) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CertificateDetails_OcspDetails) ProtoMessage() {}
+
+func (x *CertificateDetails_OcspDetails) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_certs_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CertificateDetails_OcspDetails.ProtoReflect.Descriptor instead.
+func (*CertificateDetails_OcspDetails) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_certs_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *CertificateDetails_OcspDetails) GetValidFrom() *timestamppb.Timestamp {
+ if x != nil {
+ return x.ValidFrom
+ }
+ return nil
+}
+
+func (x *CertificateDetails_OcspDetails) GetExpiration() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Expiration
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_certs_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_certs_proto_rawDesc = []byte{
+ 0x0a, 0x1a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x63, 0x65, 0x72, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x78, 0x0a, 0x0c, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12,
+ 0x3f, 0x0a, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x52, 0x0c, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73,
+ 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, 0x72,
+ 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x22, 0xb5, 0x01, 0x0a, 0x0b, 0x43, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x3b, 0x0a, 0x07, 0x63, 0x61, 0x5f,
+ 0x63, 0x65, 0x72, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x06,
+ 0x63, 0x61, 0x43, 0x65, 0x72, 0x74, 0x12, 0x41, 0x0a, 0x0a, 0x63, 0x65, 0x72, 0x74, 0x5f, 0x63,
+ 0x68, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x09,
+ 0x63, 0x65, 0x72, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21,
+ 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x22, 0xdc, 0x04, 0x0a, 0x12, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x23, 0x0a, 0x0d,
+ 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x4e, 0x75, 0x6d, 0x62, 0x65,
+ 0x72, 0x12, 0x50, 0x0a, 0x11, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x74,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x75,
+ 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, 0x61,
+ 0x6d, 0x65, 0x52, 0x0f, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x4e, 0x61,
+ 0x6d, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x64, 0x61, 0x79, 0x73, 0x5f, 0x75, 0x6e, 0x74, 0x69,
+ 0x6c, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x13, 0x64, 0x61, 0x79, 0x73, 0x55, 0x6e, 0x74, 0x69, 0x6c, 0x45, 0x78, 0x70,
+ 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61, 0x6c, 0x69, 0x64,
+ 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x46, 0x72,
+ 0x6f, 0x6d, 0x12, 0x43, 0x0a, 0x0f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x6f, 0x63, 0x73, 0x70, 0x5f,
+ 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43,
+ 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c,
+ 0x73, 0x2e, 0x4f, 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x0b, 0x6f,
+ 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x0b, 0x4f,
+ 0x63, 0x73, 0x70, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x61,
+ 0x6c, 0x69, 0x64, 0x5f, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x76, 0x61, 0x6c, 0x69,
+ 0x64, 0x46, 0x72, 0x6f, 0x6d, 0x12, 0x3a, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73,
+ 0x22, 0x98, 0x01, 0x0a, 0x14, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x03, 0x64, 0x6e, 0x73,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x12, 0x0a,
+ 0x03, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x75, 0x72,
+ 0x69, 0x12, 0x1f, 0x0a, 0x0a, 0x69, 0x70, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x69, 0x70, 0x41, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53,
+ 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x65, 0x4e,
+ 0x61, 0x6d, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x42, 0x73, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70,
+ 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x33, 0x42, 0x0a, 0x43, 0x65, 0x72, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_certs_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_certs_proto_rawDescData = file_envoy_admin_v3_certs_proto_rawDesc
+)
+
+func file_envoy_admin_v3_certs_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_certs_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_certs_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_certs_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_certs_proto_rawDescData
+}
+
+var file_envoy_admin_v3_certs_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_envoy_admin_v3_certs_proto_goTypes = []interface{}{
+ (*Certificates)(nil), // 0: envoy.admin.v3.Certificates
+ (*Certificate)(nil), // 1: envoy.admin.v3.Certificate
+ (*CertificateDetails)(nil), // 2: envoy.admin.v3.CertificateDetails
+ (*SubjectAlternateName)(nil), // 3: envoy.admin.v3.SubjectAlternateName
+ (*CertificateDetails_OcspDetails)(nil), // 4: envoy.admin.v3.CertificateDetails.OcspDetails
+ (*timestamppb.Timestamp)(nil), // 5: google.protobuf.Timestamp
+}
+var file_envoy_admin_v3_certs_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.Certificates.certificates:type_name -> envoy.admin.v3.Certificate
+ 2, // 1: envoy.admin.v3.Certificate.ca_cert:type_name -> envoy.admin.v3.CertificateDetails
+ 2, // 2: envoy.admin.v3.Certificate.cert_chain:type_name -> envoy.admin.v3.CertificateDetails
+ 3, // 3: envoy.admin.v3.CertificateDetails.subject_alt_names:type_name -> envoy.admin.v3.SubjectAlternateName
+ 5, // 4: envoy.admin.v3.CertificateDetails.valid_from:type_name -> google.protobuf.Timestamp
+ 5, // 5: envoy.admin.v3.CertificateDetails.expiration_time:type_name -> google.protobuf.Timestamp
+ 4, // 6: envoy.admin.v3.CertificateDetails.ocsp_details:type_name -> envoy.admin.v3.CertificateDetails.OcspDetails
+ 5, // 7: envoy.admin.v3.CertificateDetails.OcspDetails.valid_from:type_name -> google.protobuf.Timestamp
+ 5, // 8: envoy.admin.v3.CertificateDetails.OcspDetails.expiration:type_name -> google.protobuf.Timestamp
+ 9, // [9:9] is the sub-list for method output_type
+ 9, // [9:9] is the sub-list for method input_type
+ 9, // [9:9] is the sub-list for extension type_name
+ 9, // [9:9] is the sub-list for extension extendee
+ 0, // [0:9] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_certs_proto_init() }
+func file_envoy_admin_v3_certs_proto_init() {
+ if File_envoy_admin_v3_certs_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_certs_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Certificates); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Certificate); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CertificateDetails); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SubjectAlternateName); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CertificateDetails_OcspDetails); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_admin_v3_certs_proto_msgTypes[3].OneofWrappers = []interface{}{
+ (*SubjectAlternateName_Dns)(nil),
+ (*SubjectAlternateName_Uri)(nil),
+ (*SubjectAlternateName_IpAddress)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_certs_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_certs_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_certs_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_certs_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_certs_proto = out.File
+ file_envoy_admin_v3_certs_proto_rawDesc = nil
+ file_envoy_admin_v3_certs_proto_goTypes = nil
+ file_envoy_admin_v3_certs_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go
new file mode 100644
index 000000000..413895689
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs.pb.validate.go
@@ -0,0 +1,870 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/certs.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Certificates with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Certificates) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Certificates with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CertificatesMultiError, or
+// nil if none found.
+func (m *Certificates) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Certificates) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetCertificates() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificatesValidationError{
+ field: fmt.Sprintf("Certificates[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificatesValidationError{
+ field: fmt.Sprintf("Certificates[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificatesValidationError{
+ field: fmt.Sprintf("Certificates[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CertificatesMultiError(errors)
+ }
+
+ return nil
+}
+
+// CertificatesMultiError is an error wrapping multiple validation errors
+// returned by Certificates.ValidateAll() if the designated constraints aren't met.
+type CertificatesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CertificatesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CertificatesMultiError) AllErrors() []error { return m }
+
+// CertificatesValidationError is the validation error returned by
+// Certificates.Validate if the designated constraints aren't met.
+type CertificatesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CertificatesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CertificatesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CertificatesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CertificatesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CertificatesValidationError) ErrorName() string { return "CertificatesValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CertificatesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCertificates.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CertificatesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CertificatesValidationError{}
+
+// Validate checks the field values on Certificate with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Certificate) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Certificate with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in CertificateMultiError, or
+// nil if none found.
+func (m *Certificate) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Certificate) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetCaCert() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateValidationError{
+ field: fmt.Sprintf("CaCert[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateValidationError{
+ field: fmt.Sprintf("CaCert[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateValidationError{
+ field: fmt.Sprintf("CaCert[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetCertChain() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateValidationError{
+ field: fmt.Sprintf("CertChain[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateValidationError{
+ field: fmt.Sprintf("CertChain[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateValidationError{
+ field: fmt.Sprintf("CertChain[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CertificateMultiError(errors)
+ }
+
+ return nil
+}
+
+// CertificateMultiError is an error wrapping multiple validation errors
+// returned by Certificate.ValidateAll() if the designated constraints aren't met.
+type CertificateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CertificateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CertificateMultiError) AllErrors() []error { return m }
+
+// CertificateValidationError is the validation error returned by
+// Certificate.Validate if the designated constraints aren't met.
+type CertificateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CertificateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CertificateValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CertificateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CertificateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CertificateValidationError) ErrorName() string { return "CertificateValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CertificateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCertificate.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CertificateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CertificateValidationError{}
+
+// Validate checks the field values on CertificateDetails with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CertificateDetails) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CertificateDetails with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CertificateDetailsMultiError, or nil if none found.
+func (m *CertificateDetails) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CertificateDetails) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Path
+
+ // no validation rules for SerialNumber
+
+ for idx, item := range m.GetSubjectAltNames() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: fmt.Sprintf("SubjectAltNames[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: fmt.Sprintf("SubjectAltNames[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetailsValidationError{
+ field: fmt.Sprintf("SubjectAltNames[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for DaysUntilExpiration
+
+ if all {
+ switch v := interface{}(m.GetValidFrom()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValidFrom()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetExpirationTime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "ExpirationTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "ExpirationTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExpirationTime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetailsValidationError{
+ field: "ExpirationTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetOcspDetails()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "OcspDetails",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetailsValidationError{
+ field: "OcspDetails",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOcspDetails()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetailsValidationError{
+ field: "OcspDetails",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CertificateDetailsMultiError(errors)
+ }
+
+ return nil
+}
+
+// CertificateDetailsMultiError is an error wrapping multiple validation errors
+// returned by CertificateDetails.ValidateAll() if the designated constraints
+// aren't met.
+type CertificateDetailsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CertificateDetailsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CertificateDetailsMultiError) AllErrors() []error { return m }
+
+// CertificateDetailsValidationError is the validation error returned by
+// CertificateDetails.Validate if the designated constraints aren't met.
+type CertificateDetailsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CertificateDetailsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CertificateDetailsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CertificateDetailsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CertificateDetailsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CertificateDetailsValidationError) ErrorName() string {
+ return "CertificateDetailsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CertificateDetailsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCertificateDetails.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CertificateDetailsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CertificateDetailsValidationError{}
+
+// Validate checks the field values on SubjectAlternateName with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SubjectAlternateName) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SubjectAlternateName with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// SubjectAlternateNameMultiError, or nil if none found.
+func (m *SubjectAlternateName) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SubjectAlternateName) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ switch v := m.Name.(type) {
+ case *SubjectAlternateName_Dns:
+ if v == nil {
+ err := SubjectAlternateNameValidationError{
+ field: "Name",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ // no validation rules for Dns
+ case *SubjectAlternateName_Uri:
+ if v == nil {
+ err := SubjectAlternateNameValidationError{
+ field: "Name",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ // no validation rules for Uri
+ case *SubjectAlternateName_IpAddress:
+ if v == nil {
+ err := SubjectAlternateNameValidationError{
+ field: "Name",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ // no validation rules for IpAddress
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return SubjectAlternateNameMultiError(errors)
+ }
+
+ return nil
+}
+
+// SubjectAlternateNameMultiError is an error wrapping multiple validation
+// errors returned by SubjectAlternateName.ValidateAll() if the designated
+// constraints aren't met.
+type SubjectAlternateNameMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SubjectAlternateNameMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SubjectAlternateNameMultiError) AllErrors() []error { return m }
+
+// SubjectAlternateNameValidationError is the validation error returned by
+// SubjectAlternateName.Validate if the designated constraints aren't met.
+type SubjectAlternateNameValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SubjectAlternateNameValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SubjectAlternateNameValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SubjectAlternateNameValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SubjectAlternateNameValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SubjectAlternateNameValidationError) ErrorName() string {
+ return "SubjectAlternateNameValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SubjectAlternateNameValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSubjectAlternateName.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SubjectAlternateNameValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SubjectAlternateNameValidationError{}
+
+// Validate checks the field values on CertificateDetails_OcspDetails with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CertificateDetails_OcspDetails) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CertificateDetails_OcspDetails with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// CertificateDetails_OcspDetailsMultiError, or nil if none found.
+func (m *CertificateDetails_OcspDetails) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CertificateDetails_OcspDetails) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetValidFrom()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetails_OcspDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetails_OcspDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValidFrom()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetails_OcspDetailsValidationError{
+ field: "ValidFrom",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetExpiration()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CertificateDetails_OcspDetailsValidationError{
+ field: "Expiration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CertificateDetails_OcspDetailsValidationError{
+ field: "Expiration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExpiration()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CertificateDetails_OcspDetailsValidationError{
+ field: "Expiration",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CertificateDetails_OcspDetailsMultiError(errors)
+ }
+
+ return nil
+}
+
+// CertificateDetails_OcspDetailsMultiError is an error wrapping multiple
+// validation errors returned by CertificateDetails_OcspDetails.ValidateAll()
+// if the designated constraints aren't met.
+type CertificateDetails_OcspDetailsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CertificateDetails_OcspDetailsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CertificateDetails_OcspDetailsMultiError) AllErrors() []error { return m }
+
+// CertificateDetails_OcspDetailsValidationError is the validation error
+// returned by CertificateDetails_OcspDetails.Validate if the designated
+// constraints aren't met.
+type CertificateDetails_OcspDetailsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CertificateDetails_OcspDetailsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CertificateDetails_OcspDetailsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CertificateDetails_OcspDetailsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CertificateDetails_OcspDetailsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CertificateDetails_OcspDetailsValidationError) ErrorName() string {
+ return "CertificateDetails_OcspDetailsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CertificateDetails_OcspDetailsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCertificateDetails_OcspDetails.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CertificateDetails_OcspDetailsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CertificateDetails_OcspDetailsValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go
new file mode 100644
index 000000000..3c325787d
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/certs_vtproto.pb.go
@@ -0,0 +1,504 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/certs.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Certificates) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Certificates) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Certificates) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Certificates) > 0 {
+ for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Certificates[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Certificate) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Certificate) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Certificate) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.CertChain) > 0 {
+ for iNdEx := len(m.CertChain) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.CertChain[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.CaCert) > 0 {
+ for iNdEx := len(m.CaCert) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.CaCert[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CertificateDetails_OcspDetails) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CertificateDetails_OcspDetails) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CertificateDetails_OcspDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Expiration != nil {
+ size, err := (*timestamppb.Timestamp)(m.Expiration).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ValidFrom != nil {
+ size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CertificateDetails) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CertificateDetails) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CertificateDetails) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.OcspDetails != nil {
+ size, err := m.OcspDetails.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.ExpirationTime != nil {
+ size, err := (*timestamppb.Timestamp)(m.ExpirationTime).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.ValidFrom != nil {
+ size, err := (*timestamppb.Timestamp)(m.ValidFrom).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.DaysUntilExpiration != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DaysUntilExpiration))
+ i--
+ dAtA[i] = 0x20
+ }
+ if len(m.SubjectAltNames) > 0 {
+ for iNdEx := len(m.SubjectAltNames) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.SubjectAltNames[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.SerialNumber) > 0 {
+ i -= len(m.SerialNumber)
+ copy(dAtA[i:], m.SerialNumber)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SerialNumber)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Path) > 0 {
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectAlternateName) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SubjectAlternateName) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SubjectAlternateName) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.Name.(*SubjectAlternateName_IpAddress); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.Name.(*SubjectAlternateName_Uri); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.Name.(*SubjectAlternateName_Dns); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SubjectAlternateName_Dns) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SubjectAlternateName_Dns) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.Dns)
+ copy(dAtA[i:], m.Dns)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Dns)))
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
+func (m *SubjectAlternateName_Uri) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SubjectAlternateName_Uri) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.Uri)
+ copy(dAtA[i:], m.Uri)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Uri)))
+ i--
+ dAtA[i] = 0x12
+ return len(dAtA) - i, nil
+}
+func (m *SubjectAlternateName_IpAddress) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SubjectAlternateName_IpAddress) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.IpAddress)
+ copy(dAtA[i:], m.IpAddress)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.IpAddress)))
+ i--
+ dAtA[i] = 0x1a
+ return len(dAtA) - i, nil
+}
+func (m *Certificates) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Certificates) > 0 {
+ for _, e := range m.Certificates {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Certificate) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.CaCert) > 0 {
+ for _, e := range m.CaCert {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.CertChain) > 0 {
+ for _, e := range m.CertChain {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CertificateDetails_OcspDetails) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ValidFrom != nil {
+ l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Expiration != nil {
+ l = (*timestamppb.Timestamp)(m.Expiration).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CertificateDetails) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Path)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.SerialNumber)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.SubjectAltNames) > 0 {
+ for _, e := range m.SubjectAltNames {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.DaysUntilExpiration != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.DaysUntilExpiration))
+ }
+ if m.ValidFrom != nil {
+ l = (*timestamppb.Timestamp)(m.ValidFrom).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ExpirationTime != nil {
+ l = (*timestamppb.Timestamp)(m.ExpirationTime).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.OcspDetails != nil {
+ l = m.OcspDetails.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SubjectAlternateName) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if vtmsg, ok := m.Name.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SubjectAlternateName_Dns) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Dns)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ return n
+}
+func (m *SubjectAlternateName_Uri) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Uri)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ return n
+}
+func (m *SubjectAlternateName_IpAddress) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.IpAddress)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go
new file mode 100644
index 000000000..ee2239572
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.go
@@ -0,0 +1,744 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/clusters.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
+ v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Admin endpoint uses this wrapper for “/clusters“ to display cluster status information.
+// See :ref:`/clusters ` for more information.
+type Clusters struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Mapping from cluster name to each cluster's status.
+ ClusterStatuses []*ClusterStatus `protobuf:"bytes,1,rep,name=cluster_statuses,json=clusterStatuses,proto3" json:"cluster_statuses,omitempty"`
+}
+
+func (x *Clusters) Reset() {
+ *x = Clusters{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Clusters) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Clusters) ProtoMessage() {}
+
+func (x *Clusters) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Clusters.ProtoReflect.Descriptor instead.
+func (*Clusters) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Clusters) GetClusterStatuses() []*ClusterStatus {
+ if x != nil {
+ return x.ClusterStatuses
+ }
+ return nil
+}
+
+// Details an individual cluster's current status.
+// [#next-free-field: 9]
+type ClusterStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the cluster.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Denotes whether this cluster was added via API or configured statically.
+ AddedViaApi bool `protobuf:"varint,2,opt,name=added_via_api,json=addedViaApi,proto3" json:"added_via_api,omitempty"`
+ // The success rate threshold used in the last interval.
+ // If
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “false“, all errors: externally and locally generated were used to calculate the threshold.
+ // If
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “true“, only externally generated errors were used to calculate the threshold.
+ // The threshold is used to eject hosts based on their success rate. See
+ // :ref:`Cluster outlier detection ` documentation for details.
+ //
+ // Note: this field may be omitted in any of the three following cases:
+ //
+ // 1. There were not enough hosts with enough request volume to proceed with success rate based
+ // outlier ejection.
+ // 2. The threshold is computed to be < 0 because a negative value implies that there was no
+ // threshold for that interval.
+ // 3. Outlier detection is not enabled for this cluster.
+ SuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,3,opt,name=success_rate_ejection_threshold,json=successRateEjectionThreshold,proto3" json:"success_rate_ejection_threshold,omitempty"`
+ // Mapping from host address to the host's current status.
+ HostStatuses []*HostStatus `protobuf:"bytes,4,rep,name=host_statuses,json=hostStatuses,proto3" json:"host_statuses,omitempty"`
+ // The success rate threshold used in the last interval when only locally originated failures were
+ // taken into account and externally originated errors were treated as success.
+ // This field should be interpreted only when
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “true“. The threshold is used to eject hosts based on their success rate.
+ // See :ref:`Cluster outlier detection ` documentation for
+ // details.
+ //
+ // Note: this field may be omitted in any of the three following cases:
+ //
+ // 1. There were not enough hosts with enough request volume to proceed with success rate based
+ // outlier ejection.
+ // 2. The threshold is computed to be < 0 because a negative value implies that there was no
+ // threshold for that interval.
+ // 3. Outlier detection is not enabled for this cluster.
+ LocalOriginSuccessRateEjectionThreshold *v3.Percent `protobuf:"bytes,5,opt,name=local_origin_success_rate_ejection_threshold,json=localOriginSuccessRateEjectionThreshold,proto3" json:"local_origin_success_rate_ejection_threshold,omitempty"`
+ // :ref:`Circuit breaking ` settings of the cluster.
+ CircuitBreakers *v31.CircuitBreakers `protobuf:"bytes,6,opt,name=circuit_breakers,json=circuitBreakers,proto3" json:"circuit_breakers,omitempty"`
+ // Observability name of the cluster.
+ ObservabilityName string `protobuf:"bytes,7,opt,name=observability_name,json=observabilityName,proto3" json:"observability_name,omitempty"`
+ // The :ref:`EDS service name ` if the cluster is an EDS cluster.
+ EdsServiceName string `protobuf:"bytes,8,opt,name=eds_service_name,json=edsServiceName,proto3" json:"eds_service_name,omitempty"`
+}
+
+func (x *ClusterStatus) Reset() {
+ *x = ClusterStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClusterStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClusterStatus) ProtoMessage() {}
+
+func (x *ClusterStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClusterStatus.ProtoReflect.Descriptor instead.
+func (*ClusterStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ClusterStatus) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ClusterStatus) GetAddedViaApi() bool {
+ if x != nil {
+ return x.AddedViaApi
+ }
+ return false
+}
+
+func (x *ClusterStatus) GetSuccessRateEjectionThreshold() *v3.Percent {
+ if x != nil {
+ return x.SuccessRateEjectionThreshold
+ }
+ return nil
+}
+
+func (x *ClusterStatus) GetHostStatuses() []*HostStatus {
+ if x != nil {
+ return x.HostStatuses
+ }
+ return nil
+}
+
+func (x *ClusterStatus) GetLocalOriginSuccessRateEjectionThreshold() *v3.Percent {
+ if x != nil {
+ return x.LocalOriginSuccessRateEjectionThreshold
+ }
+ return nil
+}
+
+func (x *ClusterStatus) GetCircuitBreakers() *v31.CircuitBreakers {
+ if x != nil {
+ return x.CircuitBreakers
+ }
+ return nil
+}
+
+func (x *ClusterStatus) GetObservabilityName() string {
+ if x != nil {
+ return x.ObservabilityName
+ }
+ return ""
+}
+
+func (x *ClusterStatus) GetEdsServiceName() string {
+ if x != nil {
+ return x.EdsServiceName
+ }
+ return ""
+}
+
+// Current state of a particular host.
+// [#next-free-field: 10]
+type HostStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Address of this host.
+ Address *v32.Address `protobuf:"bytes,1,opt,name=address,proto3" json:"address,omitempty"`
+ // List of stats specific to this host.
+ Stats []*SimpleMetric `protobuf:"bytes,2,rep,name=stats,proto3" json:"stats,omitempty"`
+ // The host's current health status.
+ HealthStatus *HostHealthStatus `protobuf:"bytes,3,opt,name=health_status,json=healthStatus,proto3" json:"health_status,omitempty"`
+ // Request success rate for this host over the last calculated interval.
+ // If
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “false“, all errors: externally and locally generated were used in success rate
+ // calculation. If
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “true“, only externally generated errors were used in success rate calculation.
+ // See :ref:`Cluster outlier detection ` documentation for
+ // details.
+ //
+ // Note: the message will not be present if host did not have enough request volume to calculate
+ // success rate or the cluster did not have enough hosts to run through success rate outlier
+ // ejection.
+ SuccessRate *v3.Percent `protobuf:"bytes,4,opt,name=success_rate,json=successRate,proto3" json:"success_rate,omitempty"`
+ // The host's weight. If not configured, the value defaults to 1.
+ Weight uint32 `protobuf:"varint,5,opt,name=weight,proto3" json:"weight,omitempty"`
+ // The hostname of the host, if applicable.
+ Hostname string `protobuf:"bytes,6,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ // The host's priority. If not configured, the value defaults to 0 (highest priority).
+ Priority uint32 `protobuf:"varint,7,opt,name=priority,proto3" json:"priority,omitempty"`
+ // Request success rate for this host over the last calculated
+ // interval when only locally originated errors are taken into account and externally originated
+ // errors were treated as success.
+ // This field should be interpreted only when
+ // :ref:`outlier_detection.split_external_local_origin_errors`
+ // is “true“.
+ // See :ref:`Cluster outlier detection ` documentation for
+ // details.
+ //
+ // Note: the message will not be present if host did not have enough request volume to calculate
+ // success rate or the cluster did not have enough hosts to run through success rate outlier
+ // ejection.
+ LocalOriginSuccessRate *v3.Percent `protobuf:"bytes,8,opt,name=local_origin_success_rate,json=localOriginSuccessRate,proto3" json:"local_origin_success_rate,omitempty"`
+ // locality of the host.
+ Locality *v32.Locality `protobuf:"bytes,9,opt,name=locality,proto3" json:"locality,omitempty"`
+}
+
+func (x *HostStatus) Reset() {
+ *x = HostStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HostStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HostStatus) ProtoMessage() {}
+
+func (x *HostStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HostStatus.ProtoReflect.Descriptor instead.
+func (*HostStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *HostStatus) GetAddress() *v32.Address {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+func (x *HostStatus) GetStats() []*SimpleMetric {
+ if x != nil {
+ return x.Stats
+ }
+ return nil
+}
+
+func (x *HostStatus) GetHealthStatus() *HostHealthStatus {
+ if x != nil {
+ return x.HealthStatus
+ }
+ return nil
+}
+
+func (x *HostStatus) GetSuccessRate() *v3.Percent {
+ if x != nil {
+ return x.SuccessRate
+ }
+ return nil
+}
+
+func (x *HostStatus) GetWeight() uint32 {
+ if x != nil {
+ return x.Weight
+ }
+ return 0
+}
+
+func (x *HostStatus) GetHostname() string {
+ if x != nil {
+ return x.Hostname
+ }
+ return ""
+}
+
+func (x *HostStatus) GetPriority() uint32 {
+ if x != nil {
+ return x.Priority
+ }
+ return 0
+}
+
+func (x *HostStatus) GetLocalOriginSuccessRate() *v3.Percent {
+ if x != nil {
+ return x.LocalOriginSuccessRate
+ }
+ return nil
+}
+
+func (x *HostStatus) GetLocality() *v32.Locality {
+ if x != nil {
+ return x.Locality
+ }
+ return nil
+}
+
+// Health status for a host.
+// [#next-free-field: 9]
+type HostHealthStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The host is currently failing active health checks.
+ FailedActiveHealthCheck bool `protobuf:"varint,1,opt,name=failed_active_health_check,json=failedActiveHealthCheck,proto3" json:"failed_active_health_check,omitempty"`
+ // The host is currently considered an outlier and has been ejected.
+ FailedOutlierCheck bool `protobuf:"varint,2,opt,name=failed_outlier_check,json=failedOutlierCheck,proto3" json:"failed_outlier_check,omitempty"`
+ // The host is currently being marked as degraded through active health checking.
+ FailedActiveDegradedCheck bool `protobuf:"varint,4,opt,name=failed_active_degraded_check,json=failedActiveDegradedCheck,proto3" json:"failed_active_degraded_check,omitempty"`
+ // The host has been removed from service discovery, but is being stabilized due to active
+ // health checking.
+ PendingDynamicRemoval bool `protobuf:"varint,5,opt,name=pending_dynamic_removal,json=pendingDynamicRemoval,proto3" json:"pending_dynamic_removal,omitempty"`
+ // The host has not yet been health checked.
+ PendingActiveHc bool `protobuf:"varint,6,opt,name=pending_active_hc,json=pendingActiveHc,proto3" json:"pending_active_hc,omitempty"`
+ // The host should be excluded from panic, spillover, etc. calculations because it was explicitly
+ // taken out of rotation via protocol signal and is not meant to be routed to.
+ ExcludedViaImmediateHcFail bool `protobuf:"varint,7,opt,name=excluded_via_immediate_hc_fail,json=excludedViaImmediateHcFail,proto3" json:"excluded_via_immediate_hc_fail,omitempty"`
+ // The host failed active HC due to timeout.
+ ActiveHcTimeout bool `protobuf:"varint,8,opt,name=active_hc_timeout,json=activeHcTimeout,proto3" json:"active_hc_timeout,omitempty"`
+ // Health status as reported by EDS. Note: only HEALTHY and UNHEALTHY are currently supported
+ // here.
+ // [#comment:TODO(mrice32): pipe through remaining EDS health status possibilities.]
+ EdsHealthStatus v32.HealthStatus `protobuf:"varint,3,opt,name=eds_health_status,json=edsHealthStatus,proto3,enum=envoy.config.core.v3.HealthStatus" json:"eds_health_status,omitempty"`
+}
+
+func (x *HostHealthStatus) Reset() {
+ *x = HostHealthStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HostHealthStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HostHealthStatus) ProtoMessage() {}
+
+func (x *HostHealthStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_clusters_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HostHealthStatus.ProtoReflect.Descriptor instead.
+func (*HostHealthStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_clusters_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *HostHealthStatus) GetFailedActiveHealthCheck() bool {
+ if x != nil {
+ return x.FailedActiveHealthCheck
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetFailedOutlierCheck() bool {
+ if x != nil {
+ return x.FailedOutlierCheck
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetFailedActiveDegradedCheck() bool {
+ if x != nil {
+ return x.FailedActiveDegradedCheck
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetPendingDynamicRemoval() bool {
+ if x != nil {
+ return x.PendingDynamicRemoval
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetPendingActiveHc() bool {
+ if x != nil {
+ return x.PendingActiveHc
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetExcludedViaImmediateHcFail() bool {
+ if x != nil {
+ return x.ExcludedViaImmediateHcFail
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetActiveHcTimeout() bool {
+ if x != nil {
+ return x.ActiveHcTimeout
+ }
+ return false
+}
+
+func (x *HostHealthStatus) GetEdsHealthStatus() v32.HealthStatus {
+ if x != nil {
+ return x.EdsHealthStatus
+ }
+ return v32.HealthStatus(0)
+}
+
+var File_envoy_admin_v3_clusters_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_clusters_proto_rawDesc = []byte{
+ 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a,
+ 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x2f,
+ 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2d, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x5f, 0x62,
+ 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f,
+ 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x79, 0x0a, 0x08, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x3a,
+ 0x23, 0x9a, 0xc5, 0x88, 0x1e, 0x1e, 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0d, 0x61, 0x64,
+ 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x61, 0x70, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0b, 0x61, 0x64, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x41, 0x70, 0x69, 0x12, 0x5d,
+ 0x0a, 0x1f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
+ 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+ 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52,
+ 0x1c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x3f, 0x0a,
+ 0x0d, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x04,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x52, 0x0c, 0x68, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x75,
+ 0x0a, 0x2c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73,
+ 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, 0x6a, 0x65, 0x63,
+ 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x27, 0x6c, 0x6f,
+ 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x52, 0x61, 0x74, 0x65, 0x45, 0x6a, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65,
+ 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x53, 0x0a, 0x10, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74,
+ 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69,
+ 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x0f, 0x63, 0x69, 0x72, 0x63, 0x75,
+ 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x6f, 0x62,
+ 0x73, 0x65, 0x72, 0x76, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6f, 0x62, 0x73, 0x65, 0x72, 0x76, 0x61, 0x62,
+ 0x69, 0x6c, 0x69, 0x74, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x65, 0x64, 0x73,
+ 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x65, 0x64, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4e,
+ 0x61, 0x6d, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a, 0x21, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x81, 0x04,
+ 0x0a, 0x0a, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x37, 0x0a, 0x07,
+ 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64,
+ 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72,
+ 0x69, 0x63, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x68, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x33, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x12, 0x39, 0x0a, 0x0c, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74,
+ 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x0b,
+ 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x77,
+ 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x77, 0x65, 0x69,
+ 0x67, 0x68, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12,
+ 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x0d, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x51, 0x0a, 0x19, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x5f, 0x73, 0x75, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50,
+ 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x16, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x4f, 0x72, 0x69,
+ 0x67, 0x69, 0x6e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x61, 0x74, 0x65, 0x12, 0x3a,
+ 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79,
+ 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x22, 0x93, 0x04, 0x0a, 0x10, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64,
+ 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63,
+ 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x66, 0x61, 0x69, 0x6c,
+ 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x12, 0x30, 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x6f, 0x75,
+ 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x12, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72,
+ 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x3f, 0x0a, 0x1c, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f,
+ 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f,
+ 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x66, 0x61, 0x69,
+ 0x6c, 0x65, 0x64, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x67, 0x72, 0x61, 0x64, 0x65,
+ 0x64, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x36, 0x0a, 0x17, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e,
+ 0x67, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61,
+ 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67,
+ 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x12, 0x2a,
+ 0x0a, 0x11, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x68, 0x63, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x70, 0x65, 0x6e, 0x64, 0x69,
+ 0x6e, 0x67, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x48, 0x63, 0x12, 0x42, 0x0a, 0x1e, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64,
+ 0x69, 0x61, 0x74, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x1a, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x56, 0x69, 0x61, 0x49,
+ 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x48, 0x63, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x2a,
+ 0x0a, 0x11, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x68, 0x63, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x63, 0x74, 0x69, 0x76,
+ 0x65, 0x48, 0x63, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x4e, 0x0a, 0x11, 0x65, 0x64,
+ 0x73, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61,
+ 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0f, 0x65, 0x64, 0x73, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e,
+ 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74,
+ 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x76, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10,
+ 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42,
+ 0x0d, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_clusters_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_clusters_proto_rawDescData = file_envoy_admin_v3_clusters_proto_rawDesc
+)
+
+func file_envoy_admin_v3_clusters_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_clusters_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_clusters_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_clusters_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_clusters_proto_rawDescData
+}
+
+var file_envoy_admin_v3_clusters_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
+var file_envoy_admin_v3_clusters_proto_goTypes = []interface{}{
+ (*Clusters)(nil), // 0: envoy.admin.v3.Clusters
+ (*ClusterStatus)(nil), // 1: envoy.admin.v3.ClusterStatus
+ (*HostStatus)(nil), // 2: envoy.admin.v3.HostStatus
+ (*HostHealthStatus)(nil), // 3: envoy.admin.v3.HostHealthStatus
+ (*v3.Percent)(nil), // 4: envoy.type.v3.Percent
+ (*v31.CircuitBreakers)(nil), // 5: envoy.config.cluster.v3.CircuitBreakers
+ (*v32.Address)(nil), // 6: envoy.config.core.v3.Address
+ (*SimpleMetric)(nil), // 7: envoy.admin.v3.SimpleMetric
+ (*v32.Locality)(nil), // 8: envoy.config.core.v3.Locality
+ (v32.HealthStatus)(0), // 9: envoy.config.core.v3.HealthStatus
+}
+var file_envoy_admin_v3_clusters_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.Clusters.cluster_statuses:type_name -> envoy.admin.v3.ClusterStatus
+ 4, // 1: envoy.admin.v3.ClusterStatus.success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent
+ 2, // 2: envoy.admin.v3.ClusterStatus.host_statuses:type_name -> envoy.admin.v3.HostStatus
+ 4, // 3: envoy.admin.v3.ClusterStatus.local_origin_success_rate_ejection_threshold:type_name -> envoy.type.v3.Percent
+ 5, // 4: envoy.admin.v3.ClusterStatus.circuit_breakers:type_name -> envoy.config.cluster.v3.CircuitBreakers
+ 6, // 5: envoy.admin.v3.HostStatus.address:type_name -> envoy.config.core.v3.Address
+ 7, // 6: envoy.admin.v3.HostStatus.stats:type_name -> envoy.admin.v3.SimpleMetric
+ 3, // 7: envoy.admin.v3.HostStatus.health_status:type_name -> envoy.admin.v3.HostHealthStatus
+ 4, // 8: envoy.admin.v3.HostStatus.success_rate:type_name -> envoy.type.v3.Percent
+ 4, // 9: envoy.admin.v3.HostStatus.local_origin_success_rate:type_name -> envoy.type.v3.Percent
+ 8, // 10: envoy.admin.v3.HostStatus.locality:type_name -> envoy.config.core.v3.Locality
+ 9, // 11: envoy.admin.v3.HostHealthStatus.eds_health_status:type_name -> envoy.config.core.v3.HealthStatus
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_clusters_proto_init() }
+func file_envoy_admin_v3_clusters_proto_init() {
+ if File_envoy_admin_v3_clusters_proto != nil {
+ return
+ }
+ file_envoy_admin_v3_metrics_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_clusters_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Clusters); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_clusters_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClusterStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_clusters_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HostStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_clusters_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HostHealthStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_clusters_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 4,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_clusters_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_clusters_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_clusters_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_clusters_proto = out.File
+ file_envoy_admin_v3_clusters_proto_rawDesc = nil
+ file_envoy_admin_v3_clusters_proto_goTypes = nil
+ file_envoy_admin_v3_clusters_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go
new file mode 100644
index 000000000..d7658a09f
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters.pb.validate.go
@@ -0,0 +1,803 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/clusters.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+
+ _ = v3.HealthStatus(0)
+)
+
+// Validate checks the field values on Clusters with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Clusters) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Clusters with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ClustersMultiError, or nil
+// if none found.
+func (m *Clusters) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Clusters) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetClusterStatuses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersValidationError{
+ field: fmt.Sprintf("ClusterStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersValidationError{
+ field: fmt.Sprintf("ClusterStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersValidationError{
+ field: fmt.Sprintf("ClusterStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ClustersMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClustersMultiError is an error wrapping multiple validation errors returned
+// by Clusters.ValidateAll() if the designated constraints aren't met.
+type ClustersMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClustersMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClustersMultiError) AllErrors() []error { return m }
+
+// ClustersValidationError is the validation error returned by
+// Clusters.Validate if the designated constraints aren't met.
+type ClustersValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClustersValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClustersValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClustersValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClustersValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClustersValidationError) ErrorName() string { return "ClustersValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ClustersValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClusters.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClustersValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClustersValidationError{}
+
+// Validate checks the field values on ClusterStatus with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ClusterStatus) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClusterStatus with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ClusterStatusMultiError, or
+// nil if none found.
+func (m *ClusterStatus) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClusterStatus) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ // no validation rules for AddedViaApi
+
+ if all {
+ switch v := interface{}(m.GetSuccessRateEjectionThreshold()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "SuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "SuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSuccessRateEjectionThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterStatusValidationError{
+ field: "SuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetHostStatuses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: fmt.Sprintf("HostStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: fmt.Sprintf("HostStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterStatusValidationError{
+ field: fmt.Sprintf("HostStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetLocalOriginSuccessRateEjectionThreshold()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "LocalOriginSuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "LocalOriginSuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocalOriginSuccessRateEjectionThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterStatusValidationError{
+ field: "LocalOriginSuccessRateEjectionThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetCircuitBreakers()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "CircuitBreakers",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterStatusValidationError{
+ field: "CircuitBreakers",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCircuitBreakers()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterStatusValidationError{
+ field: "CircuitBreakers",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ObservabilityName
+
+ // no validation rules for EdsServiceName
+
+ if len(errors) > 0 {
+ return ClusterStatusMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClusterStatusMultiError is an error wrapping multiple validation errors
+// returned by ClusterStatus.ValidateAll() if the designated constraints
+// aren't met.
+type ClusterStatusMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClusterStatusMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClusterStatusMultiError) AllErrors() []error { return m }
+
+// ClusterStatusValidationError is the validation error returned by
+// ClusterStatus.Validate if the designated constraints aren't met.
+type ClusterStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClusterStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClusterStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClusterStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClusterStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClusterStatusValidationError) ErrorName() string { return "ClusterStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ClusterStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClusterStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClusterStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClusterStatusValidationError{}
+
+// Validate checks the field values on HostStatus with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HostStatus) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HostStatus with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HostStatusMultiError, or
+// nil if none found.
+func (m *HostStatus) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HostStatus) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetStats() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: fmt.Sprintf("Stats[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: fmt.Sprintf("Stats[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: fmt.Sprintf("Stats[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetHealthStatus()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "HealthStatus",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "HealthStatus",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHealthStatus()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "HealthStatus",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetSuccessRate()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "SuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "SuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSuccessRate()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "SuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Weight
+
+ // no validation rules for Hostname
+
+ // no validation rules for Priority
+
+ if all {
+ switch v := interface{}(m.GetLocalOriginSuccessRate()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "LocalOriginSuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "LocalOriginSuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocalOriginSuccessRate()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "LocalOriginSuccessRate",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLocality()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "Locality",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HostStatusValidationError{
+ field: "Locality",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocality()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HostStatusValidationError{
+ field: "Locality",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return HostStatusMultiError(errors)
+ }
+
+ return nil
+}
+
+// HostStatusMultiError is an error wrapping multiple validation errors
+// returned by HostStatus.ValidateAll() if the designated constraints aren't met.
+type HostStatusMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HostStatusMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HostStatusMultiError) AllErrors() []error { return m }
+
+// HostStatusValidationError is the validation error returned by
+// HostStatus.Validate if the designated constraints aren't met.
+type HostStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HostStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HostStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HostStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HostStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HostStatusValidationError) ErrorName() string { return "HostStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HostStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHostStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HostStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HostStatusValidationError{}
+
+// Validate checks the field values on HostHealthStatus with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *HostHealthStatus) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HostHealthStatus with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// HostHealthStatusMultiError, or nil if none found.
+func (m *HostHealthStatus) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HostHealthStatus) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for FailedActiveHealthCheck
+
+ // no validation rules for FailedOutlierCheck
+
+ // no validation rules for FailedActiveDegradedCheck
+
+ // no validation rules for PendingDynamicRemoval
+
+ // no validation rules for PendingActiveHc
+
+ // no validation rules for ExcludedViaImmediateHcFail
+
+ // no validation rules for ActiveHcTimeout
+
+ // no validation rules for EdsHealthStatus
+
+ if len(errors) > 0 {
+ return HostHealthStatusMultiError(errors)
+ }
+
+ return nil
+}
+
+// HostHealthStatusMultiError is an error wrapping multiple validation errors
+// returned by HostHealthStatus.ValidateAll() if the designated constraints
+// aren't met.
+type HostHealthStatusMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HostHealthStatusMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HostHealthStatusMultiError) AllErrors() []error { return m }
+
+// HostHealthStatusValidationError is the validation error returned by
+// HostHealthStatus.Validate if the designated constraints aren't met.
+type HostHealthStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HostHealthStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HostHealthStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HostHealthStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HostHealthStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HostHealthStatusValidationError) ErrorName() string { return "HostHealthStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HostHealthStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHostHealthStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HostHealthStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HostHealthStatusValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go
new file mode 100644
index 000000000..418581107
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/clusters_vtproto.pb.go
@@ -0,0 +1,656 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/clusters.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Clusters) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Clusters) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Clusters) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.ClusterStatuses) > 0 {
+ for iNdEx := len(m.ClusterStatuses) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.ClusterStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterStatus) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterStatus) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClusterStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.EdsServiceName) > 0 {
+ i -= len(m.EdsServiceName)
+ copy(dAtA[i:], m.EdsServiceName)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EdsServiceName)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if len(m.ObservabilityName) > 0 {
+ i -= len(m.ObservabilityName)
+ copy(dAtA[i:], m.ObservabilityName)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ObservabilityName)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.CircuitBreakers != nil {
+ if vtmsg, ok := interface{}(m.CircuitBreakers).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.CircuitBreakers)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.LocalOriginSuccessRateEjectionThreshold != nil {
+ if vtmsg, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LocalOriginSuccessRateEjectionThreshold)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if len(m.HostStatuses) > 0 {
+ for iNdEx := len(m.HostStatuses) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.HostStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.SuccessRateEjectionThreshold != nil {
+ if vtmsg, ok := interface{}(m.SuccessRateEjectionThreshold).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.SuccessRateEjectionThreshold)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.AddedViaApi {
+ i--
+ if m.AddedViaApi {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HostStatus) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HostStatus) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *HostStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Locality != nil {
+ if vtmsg, ok := interface{}(m.Locality).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Locality)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.LocalOriginSuccessRate != nil {
+ if vtmsg, ok := interface{}(m.LocalOriginSuccessRate).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LocalOriginSuccessRate)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.Priority != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority))
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.Hostname) > 0 {
+ i -= len(m.Hostname)
+ copy(dAtA[i:], m.Hostname)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Hostname)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.Weight != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Weight))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.SuccessRate != nil {
+ if vtmsg, ok := interface{}(m.SuccessRate).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.SuccessRate)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.HealthStatus != nil {
+ size, err := m.HealthStatus.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Stats) > 0 {
+ for iNdEx := len(m.Stats) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Stats[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Address != nil {
+ if vtmsg, ok := interface{}(m.Address).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Address)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HostHealthStatus) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HostHealthStatus) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *HostHealthStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ActiveHcTimeout {
+ i--
+ if m.ActiveHcTimeout {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x40
+ }
+ if m.ExcludedViaImmediateHcFail {
+ i--
+ if m.ExcludedViaImmediateHcFail {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.PendingActiveHc {
+ i--
+ if m.PendingActiveHc {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.PendingDynamicRemoval {
+ i--
+ if m.PendingDynamicRemoval {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.FailedActiveDegradedCheck {
+ i--
+ if m.FailedActiveDegradedCheck {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.EdsHealthStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.EdsHealthStatus))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.FailedOutlierCheck {
+ i--
+ if m.FailedOutlierCheck {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.FailedActiveHealthCheck {
+ i--
+ if m.FailedActiveHealthCheck {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Clusters) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ClusterStatuses) > 0 {
+ for _, e := range m.ClusterStatuses {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClusterStatus) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.AddedViaApi {
+ n += 2
+ }
+ if m.SuccessRateEjectionThreshold != nil {
+ if size, ok := interface{}(m.SuccessRateEjectionThreshold).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.SuccessRateEjectionThreshold)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.HostStatuses) > 0 {
+ for _, e := range m.HostStatuses {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.LocalOriginSuccessRateEjectionThreshold != nil {
+ if size, ok := interface{}(m.LocalOriginSuccessRateEjectionThreshold).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LocalOriginSuccessRateEjectionThreshold)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.CircuitBreakers != nil {
+ if size, ok := interface{}(m.CircuitBreakers).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.CircuitBreakers)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ObservabilityName)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.EdsServiceName)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *HostStatus) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Address != nil {
+ if size, ok := interface{}(m.Address).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Address)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.Stats) > 0 {
+ for _, e := range m.Stats {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.HealthStatus != nil {
+ l = m.HealthStatus.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.SuccessRate != nil {
+ if size, ok := interface{}(m.SuccessRate).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.SuccessRate)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Weight != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Weight))
+ }
+ l = len(m.Hostname)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Priority != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority))
+ }
+ if m.LocalOriginSuccessRate != nil {
+ if size, ok := interface{}(m.LocalOriginSuccessRate).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LocalOriginSuccessRate)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Locality != nil {
+ if size, ok := interface{}(m.Locality).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Locality)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *HostHealthStatus) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.FailedActiveHealthCheck {
+ n += 2
+ }
+ if m.FailedOutlierCheck {
+ n += 2
+ }
+ if m.EdsHealthStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.EdsHealthStatus))
+ }
+ if m.FailedActiveDegradedCheck {
+ n += 2
+ }
+ if m.PendingDynamicRemoval {
+ n += 2
+ }
+ if m.PendingActiveHc {
+ n += 2
+ }
+ if m.ExcludedViaImmediateHcFail {
+ n += 2
+ }
+ if m.ActiveHcTimeout {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go
new file mode 100644
index 000000000..c742c74db
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.go
@@ -0,0 +1,642 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/config_dump.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The :ref:`/config_dump ` admin endpoint uses this wrapper
+// message to maintain and serve arbitrary configuration information from any component in Envoy.
+type ConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This list is serialized and dumped in its entirety at the
+ // :ref:`/config_dump ` endpoint.
+ //
+ // The following configurations are currently supported and will be dumped in the order given
+ // below:
+ //
+ // * “bootstrap“: :ref:`BootstrapConfigDump `
+ // * “clusters“: :ref:`ClustersConfigDump `
+ // * “ecds_filter_http“: :ref:`EcdsConfigDump `
+ // * “ecds_filter_quic_listener“: :ref:`EcdsConfigDump `
+ // * “ecds_filter_tcp_listener“: :ref:`EcdsConfigDump `
+ // * “endpoints“: :ref:`EndpointsConfigDump `
+ // * “listeners“: :ref:`ListenersConfigDump `
+ // * “scoped_routes“: :ref:`ScopedRoutesConfigDump `
+ // * “routes“: :ref:`RoutesConfigDump `
+ // * “secrets“: :ref:`SecretsConfigDump `
+ //
+ // EDS Configuration will only be dumped by using parameter “?include_eds“
+ //
+ // Currently ECDS is supported in HTTP and listener filters. Note, ECDS configuration for
+ // either HTTP or listener filter will only be dumped if it is actually configured.
+ //
+ // You can filter output with the resource and mask query parameters.
+ // See :ref:`/config_dump?resource={} `,
+ // :ref:`/config_dump?mask={} `,
+ // or :ref:`/config_dump?resource={},mask={}
+ // ` for more information.
+ Configs []*anypb.Any `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"`
+}
+
+func (x *ConfigDump) Reset() {
+ *x = ConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ConfigDump) ProtoMessage() {}
+
+func (x *ConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ConfigDump.ProtoReflect.Descriptor instead.
+func (*ConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ConfigDump) GetConfigs() []*anypb.Any {
+ if x != nil {
+ return x.Configs
+ }
+ return nil
+}
+
+// This message describes the bootstrap configuration that Envoy was started with. This includes
+// any CLI overrides that were merged. Bootstrap configuration information can be used to recreate
+// the static portions of an Envoy configuration by reusing the output as the bootstrap
+// configuration for another Envoy.
+type BootstrapConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Bootstrap *v3.Bootstrap `protobuf:"bytes,1,opt,name=bootstrap,proto3" json:"bootstrap,omitempty"`
+ // The timestamp when the BootstrapConfig was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *BootstrapConfigDump) Reset() {
+ *x = BootstrapConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BootstrapConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BootstrapConfigDump) ProtoMessage() {}
+
+func (x *BootstrapConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BootstrapConfigDump.ProtoReflect.Descriptor instead.
+func (*BootstrapConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *BootstrapConfigDump) GetBootstrap() *v3.Bootstrap {
+ if x != nil {
+ return x.Bootstrap
+ }
+ return nil
+}
+
+func (x *BootstrapConfigDump) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// Envoys SDS implementation fills this message with all secrets fetched dynamically via SDS.
+type SecretsConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The statically loaded secrets.
+ StaticSecrets []*SecretsConfigDump_StaticSecret `protobuf:"bytes,1,rep,name=static_secrets,json=staticSecrets,proto3" json:"static_secrets,omitempty"`
+ // The dynamically loaded active secrets. These are secrets that are available to service
+ // clusters or listeners.
+ DynamicActiveSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,2,rep,name=dynamic_active_secrets,json=dynamicActiveSecrets,proto3" json:"dynamic_active_secrets,omitempty"`
+ // The dynamically loaded warming secrets. These are secrets that are currently undergoing
+ // warming in preparation to service clusters or listeners.
+ DynamicWarmingSecrets []*SecretsConfigDump_DynamicSecret `protobuf:"bytes,3,rep,name=dynamic_warming_secrets,json=dynamicWarmingSecrets,proto3" json:"dynamic_warming_secrets,omitempty"`
+}
+
+func (x *SecretsConfigDump) Reset() {
+ *x = SecretsConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SecretsConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecretsConfigDump) ProtoMessage() {}
+
+func (x *SecretsConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecretsConfigDump.ProtoReflect.Descriptor instead.
+func (*SecretsConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *SecretsConfigDump) GetStaticSecrets() []*SecretsConfigDump_StaticSecret {
+ if x != nil {
+ return x.StaticSecrets
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump) GetDynamicActiveSecrets() []*SecretsConfigDump_DynamicSecret {
+ if x != nil {
+ return x.DynamicActiveSecrets
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump) GetDynamicWarmingSecrets() []*SecretsConfigDump_DynamicSecret {
+ if x != nil {
+ return x.DynamicWarmingSecrets
+ }
+ return nil
+}
+
+// DynamicSecret contains secret information fetched via SDS.
+// [#next-free-field: 7]
+type SecretsConfigDump_DynamicSecret struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name assigned to the secret.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // This is the per-resource version information.
+ VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The timestamp when the secret was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // The actual secret information.
+ // Security sensitive information is redacted (replaced with "[redacted]") for
+ // private keys and passwords in TLS certificates.
+ Secret *anypb.Any `protobuf:"bytes,4,opt,name=secret,proto3" json:"secret,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The *error_state* field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *SecretsConfigDump_DynamicSecret) Reset() {
+ *x = SecretsConfigDump_DynamicSecret{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SecretsConfigDump_DynamicSecret) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecretsConfigDump_DynamicSecret) ProtoMessage() {}
+
+func (x *SecretsConfigDump_DynamicSecret) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecretsConfigDump_DynamicSecret.ProtoReflect.Descriptor instead.
+func (*SecretsConfigDump_DynamicSecret) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetSecret() *anypb.Any {
+ if x != nil {
+ return x.Secret
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump_DynamicSecret) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+// StaticSecret specifies statically loaded secret in bootstrap.
+type SecretsConfigDump_StaticSecret struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name assigned to the secret.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The timestamp when the secret was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // The actual secret information.
+ // Security sensitive information is redacted (replaced with "[redacted]") for
+ // private keys and passwords in TLS certificates.
+ Secret *anypb.Any `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"`
+}
+
+func (x *SecretsConfigDump_StaticSecret) Reset() {
+ *x = SecretsConfigDump_StaticSecret{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SecretsConfigDump_StaticSecret) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecretsConfigDump_StaticSecret) ProtoMessage() {}
+
+func (x *SecretsConfigDump_StaticSecret) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SecretsConfigDump_StaticSecret.ProtoReflect.Descriptor instead.
+func (*SecretsConfigDump_StaticSecret) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *SecretsConfigDump_StaticSecret) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SecretsConfigDump_StaticSecret) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *SecretsConfigDump_StaticSecret) GetSecret() *anypb.Any {
+ if x != nil {
+ return x.Secret
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_config_dump_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_config_dump_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f,
+ 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73,
+ 0x68, 0x61, 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, 0x0a, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xc8, 0x01, 0x0a, 0x13, 0x42, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x12, 0x42, 0x0a, 0x09, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33,
+ 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x74,
+ 0x73, 0x74, 0x72, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x64, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x44, 0x75, 0x6d, 0x70, 0x22, 0xb7, 0x07, 0x0a, 0x11, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x55, 0x0a, 0x0e, 0x73, 0x74,
+ 0x61, 0x74, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72,
+ 0x65, 0x74, 0x52, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
+ 0x73, 0x12, 0x65, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74,
+ 0x69, 0x76, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72,
+ 0x65, 0x74, 0x52, 0x14, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76,
+ 0x65, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x67, 0x0a, 0x17, 0x64, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x63, 0x72,
+ 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65,
+ 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x15, 0x64, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
+ 0x73, 0x1a, 0xff, 0x02, 0x0a, 0x0d, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61,
+ 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61,
+ 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
+ 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x1a, 0xca, 0x01, 0x0a, 0x0c, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65,
+ 0x63, 0x72, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74,
+ 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65,
+ 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x06, 0x73,
+ 0x65, 0x63, 0x72, 0x65, 0x74, 0x3a, 0x39, 0x9a, 0xc5, 0x88, 0x1e, 0x34, 0x0a, 0x32, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44,
+ 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74,
+ 0x3a, 0x2c, 0x9a, 0xc5, 0x88, 0x1e, 0x27, 0x0a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x42, 0x78,
+ 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e,
+ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_config_dump_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_config_dump_proto_rawDescData = file_envoy_admin_v3_config_dump_proto_rawDesc
+)
+
+func file_envoy_admin_v3_config_dump_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_config_dump_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_config_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_config_dump_proto_rawDescData
+}
+
+var file_envoy_admin_v3_config_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_envoy_admin_v3_config_dump_proto_goTypes = []interface{}{
+ (*ConfigDump)(nil), // 0: envoy.admin.v3.ConfigDump
+ (*BootstrapConfigDump)(nil), // 1: envoy.admin.v3.BootstrapConfigDump
+ (*SecretsConfigDump)(nil), // 2: envoy.admin.v3.SecretsConfigDump
+ (*SecretsConfigDump_DynamicSecret)(nil), // 3: envoy.admin.v3.SecretsConfigDump.DynamicSecret
+ (*SecretsConfigDump_StaticSecret)(nil), // 4: envoy.admin.v3.SecretsConfigDump.StaticSecret
+ (*anypb.Any)(nil), // 5: google.protobuf.Any
+ (*v3.Bootstrap)(nil), // 6: envoy.config.bootstrap.v3.Bootstrap
+ (*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
+ (*UpdateFailureState)(nil), // 8: envoy.admin.v3.UpdateFailureState
+ (ClientResourceStatus)(0), // 9: envoy.admin.v3.ClientResourceStatus
+}
+var file_envoy_admin_v3_config_dump_proto_depIdxs = []int32{
+ 5, // 0: envoy.admin.v3.ConfigDump.configs:type_name -> google.protobuf.Any
+ 6, // 1: envoy.admin.v3.BootstrapConfigDump.bootstrap:type_name -> envoy.config.bootstrap.v3.Bootstrap
+ 7, // 2: envoy.admin.v3.BootstrapConfigDump.last_updated:type_name -> google.protobuf.Timestamp
+ 4, // 3: envoy.admin.v3.SecretsConfigDump.static_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.StaticSecret
+ 3, // 4: envoy.admin.v3.SecretsConfigDump.dynamic_active_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret
+ 3, // 5: envoy.admin.v3.SecretsConfigDump.dynamic_warming_secrets:type_name -> envoy.admin.v3.SecretsConfigDump.DynamicSecret
+ 7, // 6: envoy.admin.v3.SecretsConfigDump.DynamicSecret.last_updated:type_name -> google.protobuf.Timestamp
+ 5, // 7: envoy.admin.v3.SecretsConfigDump.DynamicSecret.secret:type_name -> google.protobuf.Any
+ 8, // 8: envoy.admin.v3.SecretsConfigDump.DynamicSecret.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 9, // 9: envoy.admin.v3.SecretsConfigDump.DynamicSecret.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 7, // 10: envoy.admin.v3.SecretsConfigDump.StaticSecret.last_updated:type_name -> google.protobuf.Timestamp
+ 5, // 11: envoy.admin.v3.SecretsConfigDump.StaticSecret.secret:type_name -> google.protobuf.Any
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 12, // [12:12] is the sub-list for extension type_name
+ 12, // [12:12] is the sub-list for extension extendee
+ 0, // [0:12] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_config_dump_proto_init() }
+func file_envoy_admin_v3_config_dump_proto_init() {
+ if File_envoy_admin_v3_config_dump_proto != nil {
+ return
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_init()
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_config_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BootstrapConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecretsConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecretsConfigDump_DynamicSecret); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SecretsConfigDump_StaticSecret); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_config_dump_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_config_dump_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_config_dump_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_config_dump_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_config_dump_proto = out.File
+ file_envoy_admin_v3_config_dump_proto_rawDesc = nil
+ file_envoy_admin_v3_config_dump_proto_goTypes = nil
+ file_envoy_admin_v3_config_dump_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go
new file mode 100644
index 000000000..6f494af0b
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump.pb.validate.go
@@ -0,0 +1,893 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/config_dump.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ConfigDump with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ConfigDump with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ConfigDumpMultiError, or
+// nil if none found.
+func (m *ConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ConfigDumpValidationError{
+ field: fmt.Sprintf("Configs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ConfigDumpValidationError{
+ field: fmt.Sprintf("Configs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ConfigDumpValidationError{
+ field: fmt.Sprintf("Configs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// ConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by ConfigDump.ValidateAll() if the designated constraints aren't met.
+type ConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ConfigDumpMultiError) AllErrors() []error { return m }
+
+// ConfigDumpValidationError is the validation error returned by
+// ConfigDump.Validate if the designated constraints aren't met.
+type ConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ConfigDumpValidationError) ErrorName() string { return "ConfigDumpValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ConfigDumpValidationError{}
+
+// Validate checks the field values on BootstrapConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *BootstrapConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on BootstrapConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// BootstrapConfigDumpMultiError, or nil if none found.
+func (m *BootstrapConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *BootstrapConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetBootstrap()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapConfigDumpValidationError{
+ field: "Bootstrap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapConfigDumpValidationError{
+ field: "Bootstrap",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetBootstrap()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapConfigDumpValidationError{
+ field: "Bootstrap",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapConfigDumpValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapConfigDumpValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapConfigDumpValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return BootstrapConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// BootstrapConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by BootstrapConfigDump.ValidateAll() if the designated
+// constraints aren't met.
+type BootstrapConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m BootstrapConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m BootstrapConfigDumpMultiError) AllErrors() []error { return m }
+
+// BootstrapConfigDumpValidationError is the validation error returned by
+// BootstrapConfigDump.Validate if the designated constraints aren't met.
+type BootstrapConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BootstrapConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BootstrapConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BootstrapConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BootstrapConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BootstrapConfigDumpValidationError) ErrorName() string {
+ return "BootstrapConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e BootstrapConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrapConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BootstrapConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BootstrapConfigDumpValidationError{}
+
+// Validate checks the field values on SecretsConfigDump with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *SecretsConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SecretsConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// SecretsConfigDumpMultiError, or nil if none found.
+func (m *SecretsConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SecretsConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStaticSecrets() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicActiveSecrets() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicWarmingSecrets() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingSecrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return SecretsConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// SecretsConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by SecretsConfigDump.ValidateAll() if the designated constraints
+// aren't met.
+type SecretsConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SecretsConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SecretsConfigDumpMultiError) AllErrors() []error { return m }
+
+// SecretsConfigDumpValidationError is the validation error returned by
+// SecretsConfigDump.Validate if the designated constraints aren't met.
+type SecretsConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SecretsConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SecretsConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SecretsConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SecretsConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SecretsConfigDumpValidationError) ErrorName() string {
+ return "SecretsConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SecretsConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSecretsConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SecretsConfigDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SecretsConfigDumpValidationError{}
+
+// Validate checks the field values on SecretsConfigDump_DynamicSecret with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SecretsConfigDump_DynamicSecret) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SecretsConfigDump_DynamicSecret with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// SecretsConfigDump_DynamicSecretMultiError, or nil if none found.
+func (m *SecretsConfigDump_DynamicSecret) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SecretsConfigDump_DynamicSecret) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_DynamicSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetSecret()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_DynamicSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_DynamicSecretValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_DynamicSecretValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return SecretsConfigDump_DynamicSecretMultiError(errors)
+ }
+
+ return nil
+}
+
+// SecretsConfigDump_DynamicSecretMultiError is an error wrapping multiple
+// validation errors returned by SecretsConfigDump_DynamicSecret.ValidateAll()
+// if the designated constraints aren't met.
+type SecretsConfigDump_DynamicSecretMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SecretsConfigDump_DynamicSecretMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SecretsConfigDump_DynamicSecretMultiError) AllErrors() []error { return m }
+
+// SecretsConfigDump_DynamicSecretValidationError is the validation error
+// returned by SecretsConfigDump_DynamicSecret.Validate if the designated
+// constraints aren't met.
+type SecretsConfigDump_DynamicSecretValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SecretsConfigDump_DynamicSecretValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SecretsConfigDump_DynamicSecretValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SecretsConfigDump_DynamicSecretValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SecretsConfigDump_DynamicSecretValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SecretsConfigDump_DynamicSecretValidationError) ErrorName() string {
+ return "SecretsConfigDump_DynamicSecretValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SecretsConfigDump_DynamicSecretValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSecretsConfigDump_DynamicSecret.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SecretsConfigDump_DynamicSecretValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SecretsConfigDump_DynamicSecretValidationError{}
+
+// Validate checks the field values on SecretsConfigDump_StaticSecret with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *SecretsConfigDump_StaticSecret) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SecretsConfigDump_StaticSecret with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// SecretsConfigDump_StaticSecretMultiError, or nil if none found.
+func (m *SecretsConfigDump_StaticSecret) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SecretsConfigDump_StaticSecret) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_StaticSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_StaticSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_StaticSecretValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetSecret()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, SecretsConfigDump_StaticSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, SecretsConfigDump_StaticSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetSecret()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return SecretsConfigDump_StaticSecretValidationError{
+ field: "Secret",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return SecretsConfigDump_StaticSecretMultiError(errors)
+ }
+
+ return nil
+}
+
+// SecretsConfigDump_StaticSecretMultiError is an error wrapping multiple
+// validation errors returned by SecretsConfigDump_StaticSecret.ValidateAll()
+// if the designated constraints aren't met.
+type SecretsConfigDump_StaticSecretMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SecretsConfigDump_StaticSecretMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SecretsConfigDump_StaticSecretMultiError) AllErrors() []error { return m }
+
+// SecretsConfigDump_StaticSecretValidationError is the validation error
+// returned by SecretsConfigDump_StaticSecret.Validate if the designated
+// constraints aren't met.
+type SecretsConfigDump_StaticSecretValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SecretsConfigDump_StaticSecretValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SecretsConfigDump_StaticSecretValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SecretsConfigDump_StaticSecretValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SecretsConfigDump_StaticSecretValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SecretsConfigDump_StaticSecretValidationError) ErrorName() string {
+ return "SecretsConfigDump_StaticSecretValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e SecretsConfigDump_StaticSecretValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSecretsConfigDump_StaticSecret.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SecretsConfigDump_StaticSecretValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SecretsConfigDump_StaticSecretValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go
new file mode 100644
index 000000000..f48e702a1
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.go
@@ -0,0 +1,2254 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/config_dump_shared.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Resource status from the view of a xDS client, which tells the synchronization
+// status between the xDS client and the xDS server.
+type ClientResourceStatus int32
+
+const (
+ // Resource status is not available/unknown.
+ ClientResourceStatus_UNKNOWN ClientResourceStatus = 0
+ // Client requested this resource but hasn't received any update from management
+ // server. The client will not fail requests, but will queue them until update
+ // arrives or the client times out waiting for the resource.
+ ClientResourceStatus_REQUESTED ClientResourceStatus = 1
+ // This resource has been requested by the client but has either not been
+ // delivered by the server or was previously delivered by the server and then
+ // subsequently removed from resources provided by the server. For more
+ // information, please refer to the :ref:`"Knowing When a Requested Resource
+ // Does Not Exist" ` section.
+ ClientResourceStatus_DOES_NOT_EXIST ClientResourceStatus = 2
+ // Client received this resource and replied with ACK.
+ ClientResourceStatus_ACKED ClientResourceStatus = 3
+ // Client received this resource and replied with NACK.
+ ClientResourceStatus_NACKED ClientResourceStatus = 4
+ // Client received an error from the control plane. The attached config
+ // dump is the most recent accepted one. If no config is accepted yet,
+ // the attached config dump will be empty.
+ ClientResourceStatus_RECEIVED_ERROR ClientResourceStatus = 5
+ // Client timed out waiting for the resource from the control plane.
+ ClientResourceStatus_TIMEOUT ClientResourceStatus = 6
+)
+
+// Enum value maps for ClientResourceStatus.
+var (
+ ClientResourceStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "REQUESTED",
+ 2: "DOES_NOT_EXIST",
+ 3: "ACKED",
+ 4: "NACKED",
+ 5: "RECEIVED_ERROR",
+ 6: "TIMEOUT",
+ }
+ ClientResourceStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "REQUESTED": 1,
+ "DOES_NOT_EXIST": 2,
+ "ACKED": 3,
+ "NACKED": 4,
+ "RECEIVED_ERROR": 5,
+ "TIMEOUT": 6,
+ }
+)
+
+func (x ClientResourceStatus) Enum() *ClientResourceStatus {
+ p := new(ClientResourceStatus)
+ *p = x
+ return p
+}
+
+func (x ClientResourceStatus) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ClientResourceStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0].Descriptor()
+}
+
+func (ClientResourceStatus) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_config_dump_shared_proto_enumTypes[0]
+}
+
+func (x ClientResourceStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ClientResourceStatus.Descriptor instead.
+func (ClientResourceStatus) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0}
+}
+
+type UpdateFailureState struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // What the component configuration would have been if the update had succeeded.
+ // This field may not be populated by xDS clients due to storage overhead.
+ FailedConfiguration *anypb.Any `protobuf:"bytes,1,opt,name=failed_configuration,json=failedConfiguration,proto3" json:"failed_configuration,omitempty"`
+ // Time of the latest failed update attempt.
+ LastUpdateAttempt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_update_attempt,json=lastUpdateAttempt,proto3" json:"last_update_attempt,omitempty"`
+ // Details about the last failed update attempt.
+ Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"`
+ // This is the version of the rejected resource.
+ // [#not-implemented-hide:]
+ VersionInfo string `protobuf:"bytes,4,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+}
+
+func (x *UpdateFailureState) Reset() {
+ *x = UpdateFailureState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UpdateFailureState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UpdateFailureState) ProtoMessage() {}
+
+func (x *UpdateFailureState) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UpdateFailureState.ProtoReflect.Descriptor instead.
+func (*UpdateFailureState) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UpdateFailureState) GetFailedConfiguration() *anypb.Any {
+ if x != nil {
+ return x.FailedConfiguration
+ }
+ return nil
+}
+
+func (x *UpdateFailureState) GetLastUpdateAttempt() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdateAttempt
+ }
+ return nil
+}
+
+func (x *UpdateFailureState) GetDetails() string {
+ if x != nil {
+ return x.Details
+ }
+ return ""
+}
+
+func (x *UpdateFailureState) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+// Envoy's listener manager fills this message with all currently known listeners. Listener
+// configuration information can be used to recreate an Envoy configuration by populating all
+// listeners as static listeners or by returning them in a LDS response.
+type ListenersConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the :ref:`version_info ` in the
+ // last processed LDS discovery response. If there are only static bootstrap listeners, this field
+ // will be "".
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The statically loaded listener configs.
+ StaticListeners []*ListenersConfigDump_StaticListener `protobuf:"bytes,2,rep,name=static_listeners,json=staticListeners,proto3" json:"static_listeners,omitempty"`
+ // State for any warming, active, or draining listeners.
+ DynamicListeners []*ListenersConfigDump_DynamicListener `protobuf:"bytes,3,rep,name=dynamic_listeners,json=dynamicListeners,proto3" json:"dynamic_listeners,omitempty"`
+}
+
+func (x *ListenersConfigDump) Reset() {
+ *x = ListenersConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenersConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenersConfigDump) ProtoMessage() {}
+
+func (x *ListenersConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenersConfigDump.ProtoReflect.Descriptor instead.
+func (*ListenersConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListenersConfigDump) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ListenersConfigDump) GetStaticListeners() []*ListenersConfigDump_StaticListener {
+ if x != nil {
+ return x.StaticListeners
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump) GetDynamicListeners() []*ListenersConfigDump_DynamicListener {
+ if x != nil {
+ return x.DynamicListeners
+ }
+ return nil
+}
+
+// Envoy's cluster manager fills this message with all currently known clusters. Cluster
+// configuration information can be used to recreate an Envoy configuration by populating all
+// clusters as static clusters or by returning them in a CDS response.
+type ClustersConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the :ref:`version_info ` in the
+ // last processed CDS discovery response. If there are only static bootstrap clusters, this field
+ // will be "".
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The statically loaded cluster configs.
+ StaticClusters []*ClustersConfigDump_StaticCluster `protobuf:"bytes,2,rep,name=static_clusters,json=staticClusters,proto3" json:"static_clusters,omitempty"`
+ // The dynamically loaded active clusters. These are clusters that are available to service
+ // data plane traffic.
+ DynamicActiveClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,3,rep,name=dynamic_active_clusters,json=dynamicActiveClusters,proto3" json:"dynamic_active_clusters,omitempty"`
+ // The dynamically loaded warming clusters. These are clusters that are currently undergoing
+ // warming in preparation to service data plane traffic. Note that if attempting to recreate an
+ // Envoy configuration from a configuration dump, the warming clusters should generally be
+ // discarded.
+ DynamicWarmingClusters []*ClustersConfigDump_DynamicCluster `protobuf:"bytes,4,rep,name=dynamic_warming_clusters,json=dynamicWarmingClusters,proto3" json:"dynamic_warming_clusters,omitempty"`
+}
+
+func (x *ClustersConfigDump) Reset() {
+ *x = ClustersConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClustersConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClustersConfigDump) ProtoMessage() {}
+
+func (x *ClustersConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClustersConfigDump.ProtoReflect.Descriptor instead.
+func (*ClustersConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ClustersConfigDump) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ClustersConfigDump) GetStaticClusters() []*ClustersConfigDump_StaticCluster {
+ if x != nil {
+ return x.StaticClusters
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump) GetDynamicActiveClusters() []*ClustersConfigDump_DynamicCluster {
+ if x != nil {
+ return x.DynamicActiveClusters
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump) GetDynamicWarmingClusters() []*ClustersConfigDump_DynamicCluster {
+ if x != nil {
+ return x.DynamicWarmingClusters
+ }
+ return nil
+}
+
+// Envoy's RDS implementation fills this message with all currently loaded routes, as described by
+// their RouteConfiguration objects. Static routes that are either defined in the bootstrap configuration
+// or defined inline while configuring listeners are separated from those configured dynamically via RDS.
+// Route configuration information can be used to recreate an Envoy configuration by populating all routes
+// as static routes or by returning them in RDS responses.
+type RoutesConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The statically loaded route configs.
+ StaticRouteConfigs []*RoutesConfigDump_StaticRouteConfig `protobuf:"bytes,2,rep,name=static_route_configs,json=staticRouteConfigs,proto3" json:"static_route_configs,omitempty"`
+ // The dynamically loaded route configs.
+ DynamicRouteConfigs []*RoutesConfigDump_DynamicRouteConfig `protobuf:"bytes,3,rep,name=dynamic_route_configs,json=dynamicRouteConfigs,proto3" json:"dynamic_route_configs,omitempty"`
+}
+
+func (x *RoutesConfigDump) Reset() {
+ *x = RoutesConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoutesConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutesConfigDump) ProtoMessage() {}
+
+func (x *RoutesConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutesConfigDump.ProtoReflect.Descriptor instead.
+func (*RoutesConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *RoutesConfigDump) GetStaticRouteConfigs() []*RoutesConfigDump_StaticRouteConfig {
+ if x != nil {
+ return x.StaticRouteConfigs
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump) GetDynamicRouteConfigs() []*RoutesConfigDump_DynamicRouteConfig {
+ if x != nil {
+ return x.DynamicRouteConfigs
+ }
+ return nil
+}
+
+// Envoy's scoped RDS implementation fills this message with all currently loaded route
+// configuration scopes (defined via ScopedRouteConfigurationsSet protos). This message lists both
+// the scopes defined inline with the higher order object (i.e., the HttpConnectionManager) and the
+// dynamically obtained scopes via the SRDS API.
+type ScopedRoutesConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The statically loaded scoped route configs.
+ InlineScopedRouteConfigs []*ScopedRoutesConfigDump_InlineScopedRouteConfigs `protobuf:"bytes,1,rep,name=inline_scoped_route_configs,json=inlineScopedRouteConfigs,proto3" json:"inline_scoped_route_configs,omitempty"`
+ // The dynamically loaded scoped route configs.
+ DynamicScopedRouteConfigs []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs `protobuf:"bytes,2,rep,name=dynamic_scoped_route_configs,json=dynamicScopedRouteConfigs,proto3" json:"dynamic_scoped_route_configs,omitempty"`
+}
+
+func (x *ScopedRoutesConfigDump) Reset() {
+ *x = ScopedRoutesConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopedRoutesConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopedRoutesConfigDump) ProtoMessage() {}
+
+func (x *ScopedRoutesConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopedRoutesConfigDump.ProtoReflect.Descriptor instead.
+func (*ScopedRoutesConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ScopedRoutesConfigDump) GetInlineScopedRouteConfigs() []*ScopedRoutesConfigDump_InlineScopedRouteConfigs {
+ if x != nil {
+ return x.InlineScopedRouteConfigs
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump) GetDynamicScopedRouteConfigs() []*ScopedRoutesConfigDump_DynamicScopedRouteConfigs {
+ if x != nil {
+ return x.DynamicScopedRouteConfigs
+ }
+ return nil
+}
+
+// Envoy's admin fill this message with all currently known endpoints. Endpoint
+// configuration information can be used to recreate an Envoy configuration by populating all
+// endpoints as static endpoints or by returning them in an EDS response.
+type EndpointsConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The statically loaded endpoint configs.
+ StaticEndpointConfigs []*EndpointsConfigDump_StaticEndpointConfig `protobuf:"bytes,2,rep,name=static_endpoint_configs,json=staticEndpointConfigs,proto3" json:"static_endpoint_configs,omitempty"`
+ // The dynamically loaded endpoint configs.
+ DynamicEndpointConfigs []*EndpointsConfigDump_DynamicEndpointConfig `protobuf:"bytes,3,rep,name=dynamic_endpoint_configs,json=dynamicEndpointConfigs,proto3" json:"dynamic_endpoint_configs,omitempty"`
+}
+
+func (x *EndpointsConfigDump) Reset() {
+ *x = EndpointsConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EndpointsConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EndpointsConfigDump) ProtoMessage() {}
+
+func (x *EndpointsConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EndpointsConfigDump.ProtoReflect.Descriptor instead.
+func (*EndpointsConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *EndpointsConfigDump) GetStaticEndpointConfigs() []*EndpointsConfigDump_StaticEndpointConfig {
+ if x != nil {
+ return x.StaticEndpointConfigs
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump) GetDynamicEndpointConfigs() []*EndpointsConfigDump_DynamicEndpointConfig {
+ if x != nil {
+ return x.DynamicEndpointConfigs
+ }
+ return nil
+}
+
+// Envoy's ECDS service fills this message with all currently extension
+// configuration. Extension configuration information can be used to recreate
+// an Envoy ECDS listener and HTTP filters as static filters or by returning
+// them in ECDS response.
+type EcdsConfigDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The ECDS filter configs.
+ EcdsFilters []*EcdsConfigDump_EcdsFilterConfig `protobuf:"bytes,1,rep,name=ecds_filters,json=ecdsFilters,proto3" json:"ecds_filters,omitempty"`
+}
+
+func (x *EcdsConfigDump) Reset() {
+ *x = EcdsConfigDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EcdsConfigDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EcdsConfigDump) ProtoMessage() {}
+
+func (x *EcdsConfigDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EcdsConfigDump.ProtoReflect.Descriptor instead.
+func (*EcdsConfigDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *EcdsConfigDump) GetEcdsFilters() []*EcdsConfigDump_EcdsFilterConfig {
+ if x != nil {
+ return x.EcdsFilters
+ }
+ return nil
+}
+
+// Describes a statically loaded listener.
+type ListenersConfigDump_StaticListener struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The listener config.
+ Listener *anypb.Any `protobuf:"bytes,1,opt,name=listener,proto3" json:"listener,omitempty"`
+ // The timestamp when the Listener was last successfully updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *ListenersConfigDump_StaticListener) Reset() {
+ *x = ListenersConfigDump_StaticListener{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenersConfigDump_StaticListener) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenersConfigDump_StaticListener) ProtoMessage() {}
+
+func (x *ListenersConfigDump_StaticListener) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenersConfigDump_StaticListener.ProtoReflect.Descriptor instead.
+func (*ListenersConfigDump_StaticListener) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 0}
+}
+
+func (x *ListenersConfigDump_StaticListener) GetListener() *anypb.Any {
+ if x != nil {
+ return x.Listener
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_StaticListener) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+type ListenersConfigDump_DynamicListenerState struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the per-resource version information. This version is currently taken from the
+	// :ref:`version_info <envoy_v3_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time
+ // that the listener was loaded. In the future, discrete per-listener versions may be supported
+ // by the API.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The listener config.
+ Listener *anypb.Any `protobuf:"bytes,2,opt,name=listener,proto3" json:"listener,omitempty"`
+ // The timestamp when the Listener was last successfully updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) Reset() {
+ *x = ListenersConfigDump_DynamicListenerState{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenersConfigDump_DynamicListenerState) ProtoMessage() {}
+
+func (x *ListenersConfigDump_DynamicListenerState) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenersConfigDump_DynamicListenerState.ProtoReflect.Descriptor instead.
+func (*ListenersConfigDump_DynamicListenerState) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 1}
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) GetListener() *anypb.Any {
+ if x != nil {
+ return x.Listener
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListenerState) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// Describes a dynamically loaded listener via the LDS API.
+// [#next-free-field: 7]
+type ListenersConfigDump_DynamicListener struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name or unique id of this listener, pulled from the DynamicListenerState config.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The listener state for any active listener by this name.
+ // These are listeners that are available to service data plane traffic.
+ ActiveState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,2,opt,name=active_state,json=activeState,proto3" json:"active_state,omitempty"`
+ // The listener state for any warming listener by this name.
+ // These are listeners that are currently undergoing warming in preparation to service data
+ // plane traffic. Note that if attempting to recreate an Envoy configuration from a
+ // configuration dump, the warming listeners should generally be discarded.
+ WarmingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,3,opt,name=warming_state,json=warmingState,proto3" json:"warming_state,omitempty"`
+ // The listener state for any draining listener by this name.
+ // These are listeners that are currently undergoing draining in preparation to stop servicing
+ // data plane traffic. Note that if attempting to recreate an Envoy configuration from a
+ // configuration dump, the draining listeners should generally be discarded.
+ DrainingState *ListenersConfigDump_DynamicListenerState `protobuf:"bytes,4,opt,name=draining_state,json=drainingState,proto3" json:"draining_state,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+	// The ``error_state`` field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *ListenersConfigDump_DynamicListener) Reset() {
+ *x = ListenersConfigDump_DynamicListener{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenersConfigDump_DynamicListener) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenersConfigDump_DynamicListener) ProtoMessage() {}
+
+func (x *ListenersConfigDump_DynamicListener) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenersConfigDump_DynamicListener.ProtoReflect.Descriptor instead.
+func (*ListenersConfigDump_DynamicListener) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{1, 2}
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetActiveState() *ListenersConfigDump_DynamicListenerState {
+ if x != nil {
+ return x.ActiveState
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetWarmingState() *ListenersConfigDump_DynamicListenerState {
+ if x != nil {
+ return x.WarmingState
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetDrainingState() *ListenersConfigDump_DynamicListenerState {
+ if x != nil {
+ return x.DrainingState
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *ListenersConfigDump_DynamicListener) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+// Describes a statically loaded cluster.
+type ClustersConfigDump_StaticCluster struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The cluster config.
+ Cluster *anypb.Any `protobuf:"bytes,1,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ // The timestamp when the Cluster was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *ClustersConfigDump_StaticCluster) Reset() {
+ *x = ClustersConfigDump_StaticCluster{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClustersConfigDump_StaticCluster) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClustersConfigDump_StaticCluster) ProtoMessage() {}
+
+func (x *ClustersConfigDump_StaticCluster) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClustersConfigDump_StaticCluster.ProtoReflect.Descriptor instead.
+func (*ClustersConfigDump_StaticCluster) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *ClustersConfigDump_StaticCluster) GetCluster() *anypb.Any {
+ if x != nil {
+ return x.Cluster
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump_StaticCluster) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// Describes a dynamically loaded cluster via the CDS API.
+// [#next-free-field: 6]
+type ClustersConfigDump_DynamicCluster struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the per-resource version information. This version is currently taken from the
+	// :ref:`version_info <envoy_v3_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time
+ // that the cluster was loaded. In the future, discrete per-cluster versions may be supported by
+ // the API.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The cluster config.
+ Cluster *anypb.Any `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
+ // The timestamp when the Cluster was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+	// The ``error_state`` field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *ClustersConfigDump_DynamicCluster) Reset() {
+ *x = ClustersConfigDump_DynamicCluster{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClustersConfigDump_DynamicCluster) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClustersConfigDump_DynamicCluster) ProtoMessage() {}
+
+func (x *ClustersConfigDump_DynamicCluster) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClustersConfigDump_DynamicCluster.ProtoReflect.Descriptor instead.
+func (*ClustersConfigDump_DynamicCluster) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{2, 1}
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetCluster() *anypb.Any {
+ if x != nil {
+ return x.Cluster
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *ClustersConfigDump_DynamicCluster) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+type RoutesConfigDump_StaticRouteConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The route config.
+ RouteConfig *anypb.Any `protobuf:"bytes,1,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"`
+ // The timestamp when the Route was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *RoutesConfigDump_StaticRouteConfig) Reset() {
+ *x = RoutesConfigDump_StaticRouteConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoutesConfigDump_StaticRouteConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutesConfigDump_StaticRouteConfig) ProtoMessage() {}
+
+func (x *RoutesConfigDump_StaticRouteConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutesConfigDump_StaticRouteConfig.ProtoReflect.Descriptor instead.
+func (*RoutesConfigDump_StaticRouteConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *RoutesConfigDump_StaticRouteConfig) GetRouteConfig() *anypb.Any {
+ if x != nil {
+ return x.RouteConfig
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump_StaticRouteConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// [#next-free-field: 6]
+type RoutesConfigDump_DynamicRouteConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the per-resource version information. This version is currently taken from the
+	// :ref:`version_info <envoy_v3_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that
+ // the route configuration was loaded.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The route config.
+ RouteConfig *anypb.Any `protobuf:"bytes,2,opt,name=route_config,json=routeConfig,proto3" json:"route_config,omitempty"`
+ // The timestamp when the Route was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+	// The ``error_state`` field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) Reset() {
+ *x = RoutesConfigDump_DynamicRouteConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RoutesConfigDump_DynamicRouteConfig) ProtoMessage() {}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RoutesConfigDump_DynamicRouteConfig.ProtoReflect.Descriptor instead.
+func (*RoutesConfigDump_DynamicRouteConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetRouteConfig() *anypb.Any {
+ if x != nil {
+ return x.RouteConfig
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *RoutesConfigDump_DynamicRouteConfig) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+type ScopedRoutesConfigDump_InlineScopedRouteConfigs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name assigned to the scoped route configurations.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The scoped route configurations.
+ ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,2,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"`
+ // The timestamp when the scoped route config set was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Reset() {
+ *x = ScopedRoutesConfigDump_InlineScopedRouteConfigs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoMessage() {}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopedRoutesConfigDump_InlineScopedRouteConfigs.ProtoReflect.Descriptor instead.
+func (*ScopedRoutesConfigDump_InlineScopedRouteConfigs) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any {
+ if x != nil {
+ return x.ScopedRouteConfigs
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump_InlineScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// [#next-free-field: 7]
+type ScopedRoutesConfigDump_DynamicScopedRouteConfigs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name assigned to the scoped route configurations.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // This is the per-resource version information. This version is currently taken from the
+	// :ref:`version_info <envoy_v3_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that
+ // the scoped routes configuration was loaded.
+ VersionInfo string `protobuf:"bytes,2,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The scoped route configurations.
+ ScopedRouteConfigs []*anypb.Any `protobuf:"bytes,3,rep,name=scoped_route_configs,json=scopedRouteConfigs,proto3" json:"scoped_route_configs,omitempty"`
+ // The timestamp when the scoped route config set was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+	// The ``error_state`` field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,5,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,6,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Reset() {
+ *x = ScopedRoutesConfigDump_DynamicScopedRouteConfigs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoMessage() {}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ProtoReflect.Descriptor instead.
+func (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{4, 1}
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetScopedRouteConfigs() []*anypb.Any {
+ if x != nil {
+ return x.ScopedRouteConfigs
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+type EndpointsConfigDump_StaticEndpointConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The endpoint config.
+ EndpointConfig *anypb.Any `protobuf:"bytes,1,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"`
+ // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) Reset() {
+ *x = EndpointsConfigDump_StaticEndpointConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EndpointsConfigDump_StaticEndpointConfig) ProtoMessage() {}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EndpointsConfigDump_StaticEndpointConfig.ProtoReflect.Descriptor instead.
+func (*EndpointsConfigDump_StaticEndpointConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) GetEndpointConfig() *anypb.Any {
+ if x != nil {
+ return x.EndpointConfig
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump_StaticEndpointConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+// [#next-free-field: 6]
+type EndpointsConfigDump_DynamicEndpointConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // [#not-implemented-hide:] This is the per-resource version information. This version is currently taken from the
+	// :ref:`version_info <envoy_v3_api_field_service.discovery.v3.DiscoveryResponse.version_info>` field at the time that
+ // the endpoint configuration was loaded.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The endpoint config.
+ EndpointConfig *anypb.Any `protobuf:"bytes,2,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"`
+ // [#not-implemented-hide:] The timestamp when the Endpoint was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+	// The ``error_state`` field contains the rejected version of this particular
+ // resource along with the reason and timestamp. For successfully updated or
+ // acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) Reset() {
+ *x = EndpointsConfigDump_DynamicEndpointConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EndpointsConfigDump_DynamicEndpointConfig) ProtoMessage() {}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EndpointsConfigDump_DynamicEndpointConfig.ProtoReflect.Descriptor instead.
+func (*EndpointsConfigDump_DynamicEndpointConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{5, 1}
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetEndpointConfig() *anypb.Any {
+ if x != nil {
+ return x.EndpointConfig
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *EndpointsConfigDump_DynamicEndpointConfig) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+// [#next-free-field: 6]
+type EcdsConfigDump_EcdsFilterConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // This is the per-resource version information. This version is currently
+ // taken from the :ref:`version_info
+	// <envoy_v3_api_field_service.discovery.v3.DiscoveryResponse.version_info>`
+ // field at the time that the ECDS filter was loaded.
+ VersionInfo string `protobuf:"bytes,1,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+ // The ECDS filter config.
+ EcdsFilter *anypb.Any `protobuf:"bytes,2,opt,name=ecds_filter,json=ecdsFilter,proto3" json:"ecds_filter,omitempty"`
+ // The timestamp when the ECDS filter was last updated.
+ LastUpdated *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=last_updated,json=lastUpdated,proto3" json:"last_updated,omitempty"`
+ // Set if the last update failed, cleared after the next successful update.
+ // The “error_state“ field contains the rejected version of this
+ // particular resource along with the reason and timestamp. For successfully
+ // updated or acknowledged resource, this field should be empty.
+ // [#not-implemented-hide:]
+ ErrorState *UpdateFailureState `protobuf:"bytes,4,opt,name=error_state,json=errorState,proto3" json:"error_state,omitempty"`
+ // The client status of this resource.
+ // [#not-implemented-hide:]
+ ClientStatus ClientResourceStatus `protobuf:"varint,5,opt,name=client_status,json=clientStatus,proto3,enum=envoy.admin.v3.ClientResourceStatus" json:"client_status,omitempty"`
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) Reset() {
+ *x = EcdsConfigDump_EcdsFilterConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EcdsConfigDump_EcdsFilterConfig) ProtoMessage() {}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use EcdsConfigDump_EcdsFilterConfig.ProtoReflect.Descriptor instead.
+func (*EcdsConfigDump_EcdsFilterConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetVersionInfo() string {
+ if x != nil {
+ return x.VersionInfo
+ }
+ return ""
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetEcdsFilter() *anypb.Any {
+ if x != nil {
+ return x.EcdsFilter
+ }
+ return nil
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetLastUpdated() *timestamppb.Timestamp {
+ if x != nil {
+ return x.LastUpdated
+ }
+ return nil
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetErrorState() *UpdateFailureState {
+ if x != nil {
+ return x.ErrorState
+ }
+ return nil
+}
+
+func (x *EcdsConfigDump_EcdsFilterConfig) GetClientStatus() ClientResourceStatus {
+ if x != nil {
+ return x.ClientStatus
+ }
+ return ClientResourceStatus_UNKNOWN
+}
+
+var File_envoy_admin_v3_config_dump_shared_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_config_dump_shared_proto_rawDesc = []byte{
+ 0x0a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x73, 0x68, 0x61,
+ 0x72, 0x65, 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e,
+ 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x95, 0x02, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47,
+ 0x0a, 0x14, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x13, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4a, 0x0a, 0x13, 0x6c, 0x61, 0x73, 0x74, 0x5f,
+ 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
+ 0x52, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x74, 0x74, 0x65,
+ 0x6d, 0x70, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x21, 0x0a,
+ 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f,
+ 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22,
+ 0xf3, 0x09, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x5d, 0x0a, 0x10, 0x73, 0x74,
+ 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63,
+ 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63,
+ 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x60, 0x0a, 0x11, 0x64, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d,
+ 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x1a, 0xc0, 0x01, 0x0a, 0x0e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x30,
+ 0x0a, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72,
+ 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a,
+ 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74,
+ 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x1a, 0xef,
+ 0x01, 0x0a, 0x14, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x30, 0x0a, 0x08, 0x6c, 0x69,
+ 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41,
+ 0x6e, 0x79, 0x52, 0x08, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c,
+ 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b,
+ 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x43, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x3e, 0x0a, 0x3c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72,
+ 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x1a, 0x92, 0x04, 0x0a, 0x0f, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74,
+ 0x65, 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x0c, 0x61, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44,
+ 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65,
+ 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5d, 0x0a, 0x0d, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x53,
+ 0x74, 0x61, 0x74, 0x65, 0x12, 0x5f, 0x0a, 0x0e, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69,
+ 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67,
+ 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4c, 0x69, 0x73,
+ 0x74, 0x65, 0x6e, 0x65, 0x72, 0x3a, 0x2e, 0x9a, 0xc5, 0x88, 0x1e, 0x29, 0x0a, 0x27, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x22, 0xca, 0x07, 0x0a, 0x12, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12,
+ 0x59, 0x0a, 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0e, 0x73, 0x74, 0x61, 0x74,
+ 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x69, 0x0a, 0x17, 0x64, 0x79,
+ 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e,
+ 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x15,
+ 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x6b, 0x0a, 0x18, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63,
+ 0x5f, 0x77, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x57, 0x61, 0x72, 0x6d, 0x69, 0x6e, 0x67, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x73, 0x1a, 0xbb, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x64, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36, 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x1a, 0xf0, 0x02, 0x0a, 0x0e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
+ 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x6c, 0x75, 0x73,
+ 0x74, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x22, 0xdd, 0x06, 0x0a, 0x10, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x64, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x74, 0x69,
+ 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f,
+ 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x69,
+ 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x67, 0x0a,
+ 0x15, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f,
+ 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44,
+ 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x11, 0x53, 0x74, 0x61, 0x74, 0x69,
+ 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0c,
+ 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64,
+ 0x61, 0x74, 0x65, 0x64, 0x3a, 0x3d, 0x9a, 0xc5, 0x88, 0x1e, 0x38, 0x0a, 0x36, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61,
+ 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x1a, 0xff, 0x02, 0x0a, 0x12, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52,
+ 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x37, 0x0a,
+ 0x0c, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0b, 0x72, 0x6f, 0x75, 0x74, 0x65,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75,
+ 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c,
+ 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3e, 0x9a, 0xc5, 0x88, 0x1e, 0x39, 0x0a, 0x37, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x2b, 0x9a, 0xc5, 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x22, 0x8c, 0x08, 0x0a, 0x16, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75,
+ 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x7e, 0x0a,
+ 0x1b, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72,
+ 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e,
+ 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x52, 0x18, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65,
+ 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x81, 0x01,
+ 0x0a, 0x1c, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64,
+ 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74,
+ 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x19, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53,
+ 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x73, 0x1a, 0x81, 0x02, 0x0a, 0x18, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70,
+ 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75,
+ 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f,
+ 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61,
+ 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61,
+ 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x4a, 0x9a, 0xc5, 0x88, 0x1e, 0x45,
+ 0x0a, 0x43, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74,
+ 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x49, 0x6e, 0x6c,
+ 0x69, 0x6e, 0x65, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x73, 0x1a, 0xb6, 0x03, 0x0a, 0x19, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x46, 0x0a, 0x14, 0x73, 0x63,
+ 0x6f, 0x70, 0x65, 0x64, 0x5f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x12,
+ 0x73, 0x63, 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46, 0x61,
+ 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x3a, 0x4b, 0x9a, 0xc5, 0x88, 0x1e, 0x46, 0x0a, 0x44, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63,
+ 0x6f, 0x70, 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x53, 0x63, 0x6f, 0x70,
+ 0x65, 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x3a, 0x31,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65,
+ 0x64, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d,
+ 0x70, 0x22, 0xde, 0x05, 0x0a, 0x13, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x70, 0x0a, 0x17, 0x73, 0x74, 0x61,
+ 0x74, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x6e, 0x64, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x15, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70,
+ 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x12, 0x73, 0x0a, 0x18, 0x64,
+ 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45,
+ 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73,
+ 0x1a, 0x94, 0x01, 0x0a, 0x14, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f,
+ 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64,
+ 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69,
+ 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74,
+ 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xc8, 0x02, 0x0a, 0x15, 0x44, 0x79, 0x6e, 0x61,
+ 0x6d, 0x69, 0x63, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66,
+ 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3d, 0x0a, 0x0f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74,
+ 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x41, 0x6e, 0x79, 0x52, 0x0e, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x46,
+ 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x22, 0x89, 0x04, 0x0a, 0x0e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x52, 0x0a, 0x0c, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x63, 0x64,
+ 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x65, 0x63,
+ 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, 0xf7, 0x02, 0x0a, 0x10, 0x45, 0x63,
+ 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21,
+ 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66,
+ 0x6f, 0x12, 0x35, 0x0a, 0x0b, 0x65, 0x63, 0x64, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, 0x65, 0x63,
+ 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x0c, 0x6c, 0x61, 0x73, 0x74,
+ 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x6c, 0x61, 0x73, 0x74,
+ 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x12, 0x43, 0x0a, 0x0b, 0x65, 0x72, 0x72, 0x6f, 0x72,
+ 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65,
+ 0x52, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x49, 0x0a, 0x0d,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x2e, 0x45, 0x63, 0x64, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e,
+ 0x45, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75, 0x6d, 0x70, 0x2a, 0x7e,
+ 0x0a, 0x14, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
+ 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x45, 0x44,
+ 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x44, 0x4f, 0x45, 0x53, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x45,
+ 0x58, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10,
+ 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x04, 0x12, 0x12, 0x0a,
+ 0x0e, 0x52, 0x45, 0x43, 0x45, 0x49, 0x56, 0x45, 0x44, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10,
+ 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x54, 0x49, 0x4d, 0x45, 0x4f, 0x55, 0x54, 0x10, 0x06, 0x42, 0x7e,
+ 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x15, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x44, 0x75,
+ 0x6d, 0x70, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f,
+ 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_config_dump_shared_proto_rawDescData = file_envoy_admin_v3_config_dump_shared_proto_rawDesc
+)
+
+func file_envoy_admin_v3_config_dump_shared_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_config_dump_shared_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_config_dump_shared_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_config_dump_shared_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_config_dump_shared_proto_rawDescData
+}
+
+var file_envoy_admin_v3_config_dump_shared_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_admin_v3_config_dump_shared_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
+var file_envoy_admin_v3_config_dump_shared_proto_goTypes = []interface{}{
+ (ClientResourceStatus)(0), // 0: envoy.admin.v3.ClientResourceStatus
+ (*UpdateFailureState)(nil), // 1: envoy.admin.v3.UpdateFailureState
+ (*ListenersConfigDump)(nil), // 2: envoy.admin.v3.ListenersConfigDump
+ (*ClustersConfigDump)(nil), // 3: envoy.admin.v3.ClustersConfigDump
+ (*RoutesConfigDump)(nil), // 4: envoy.admin.v3.RoutesConfigDump
+ (*ScopedRoutesConfigDump)(nil), // 5: envoy.admin.v3.ScopedRoutesConfigDump
+ (*EndpointsConfigDump)(nil), // 6: envoy.admin.v3.EndpointsConfigDump
+ (*EcdsConfigDump)(nil), // 7: envoy.admin.v3.EcdsConfigDump
+ (*ListenersConfigDump_StaticListener)(nil), // 8: envoy.admin.v3.ListenersConfigDump.StaticListener
+ (*ListenersConfigDump_DynamicListenerState)(nil), // 9: envoy.admin.v3.ListenersConfigDump.DynamicListenerState
+ (*ListenersConfigDump_DynamicListener)(nil), // 10: envoy.admin.v3.ListenersConfigDump.DynamicListener
+ (*ClustersConfigDump_StaticCluster)(nil), // 11: envoy.admin.v3.ClustersConfigDump.StaticCluster
+ (*ClustersConfigDump_DynamicCluster)(nil), // 12: envoy.admin.v3.ClustersConfigDump.DynamicCluster
+ (*RoutesConfigDump_StaticRouteConfig)(nil), // 13: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig
+ (*RoutesConfigDump_DynamicRouteConfig)(nil), // 14: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig
+ (*ScopedRoutesConfigDump_InlineScopedRouteConfigs)(nil), // 15: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs
+ (*ScopedRoutesConfigDump_DynamicScopedRouteConfigs)(nil), // 16: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs
+ (*EndpointsConfigDump_StaticEndpointConfig)(nil), // 17: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig
+ (*EndpointsConfigDump_DynamicEndpointConfig)(nil), // 18: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig
+ (*EcdsConfigDump_EcdsFilterConfig)(nil), // 19: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig
+ (*anypb.Any)(nil), // 20: google.protobuf.Any
+ (*timestamppb.Timestamp)(nil), // 21: google.protobuf.Timestamp
+}
+var file_envoy_admin_v3_config_dump_shared_proto_depIdxs = []int32{
+ 20, // 0: envoy.admin.v3.UpdateFailureState.failed_configuration:type_name -> google.protobuf.Any
+ 21, // 1: envoy.admin.v3.UpdateFailureState.last_update_attempt:type_name -> google.protobuf.Timestamp
+ 8, // 2: envoy.admin.v3.ListenersConfigDump.static_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.StaticListener
+ 10, // 3: envoy.admin.v3.ListenersConfigDump.dynamic_listeners:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListener
+ 11, // 4: envoy.admin.v3.ClustersConfigDump.static_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.StaticCluster
+ 12, // 5: envoy.admin.v3.ClustersConfigDump.dynamic_active_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster
+ 12, // 6: envoy.admin.v3.ClustersConfigDump.dynamic_warming_clusters:type_name -> envoy.admin.v3.ClustersConfigDump.DynamicCluster
+ 13, // 7: envoy.admin.v3.RoutesConfigDump.static_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.StaticRouteConfig
+ 14, // 8: envoy.admin.v3.RoutesConfigDump.dynamic_route_configs:type_name -> envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig
+ 15, // 9: envoy.admin.v3.ScopedRoutesConfigDump.inline_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs
+ 16, // 10: envoy.admin.v3.ScopedRoutesConfigDump.dynamic_scoped_route_configs:type_name -> envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs
+ 17, // 11: envoy.admin.v3.EndpointsConfigDump.static_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig
+ 18, // 12: envoy.admin.v3.EndpointsConfigDump.dynamic_endpoint_configs:type_name -> envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig
+ 19, // 13: envoy.admin.v3.EcdsConfigDump.ecds_filters:type_name -> envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig
+ 20, // 14: envoy.admin.v3.ListenersConfigDump.StaticListener.listener:type_name -> google.protobuf.Any
+ 21, // 15: envoy.admin.v3.ListenersConfigDump.StaticListener.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 16: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.listener:type_name -> google.protobuf.Any
+ 21, // 17: envoy.admin.v3.ListenersConfigDump.DynamicListenerState.last_updated:type_name -> google.protobuf.Timestamp
+ 9, // 18: envoy.admin.v3.ListenersConfigDump.DynamicListener.active_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState
+ 9, // 19: envoy.admin.v3.ListenersConfigDump.DynamicListener.warming_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState
+ 9, // 20: envoy.admin.v3.ListenersConfigDump.DynamicListener.draining_state:type_name -> envoy.admin.v3.ListenersConfigDump.DynamicListenerState
+ 1, // 21: envoy.admin.v3.ListenersConfigDump.DynamicListener.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 22: envoy.admin.v3.ListenersConfigDump.DynamicListener.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 23: envoy.admin.v3.ClustersConfigDump.StaticCluster.cluster:type_name -> google.protobuf.Any
+ 21, // 24: envoy.admin.v3.ClustersConfigDump.StaticCluster.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 25: envoy.admin.v3.ClustersConfigDump.DynamicCluster.cluster:type_name -> google.protobuf.Any
+ 21, // 26: envoy.admin.v3.ClustersConfigDump.DynamicCluster.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 27: envoy.admin.v3.ClustersConfigDump.DynamicCluster.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 28: envoy.admin.v3.ClustersConfigDump.DynamicCluster.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 29: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.route_config:type_name -> google.protobuf.Any
+ 21, // 30: envoy.admin.v3.RoutesConfigDump.StaticRouteConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 31: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.route_config:type_name -> google.protobuf.Any
+ 21, // 32: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 33: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 34: envoy.admin.v3.RoutesConfigDump.DynamicRouteConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 35: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any
+ 21, // 36: envoy.admin.v3.ScopedRoutesConfigDump.InlineScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 37: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.scoped_route_configs:type_name -> google.protobuf.Any
+ 21, // 38: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 39: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 40: envoy.admin.v3.ScopedRoutesConfigDump.DynamicScopedRouteConfigs.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 41: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.endpoint_config:type_name -> google.protobuf.Any
+ 21, // 42: envoy.admin.v3.EndpointsConfigDump.StaticEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 20, // 43: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.endpoint_config:type_name -> google.protobuf.Any
+ 21, // 44: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 45: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 46: envoy.admin.v3.EndpointsConfigDump.DynamicEndpointConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 20, // 47: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.ecds_filter:type_name -> google.protobuf.Any
+ 21, // 48: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.last_updated:type_name -> google.protobuf.Timestamp
+ 1, // 49: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.error_state:type_name -> envoy.admin.v3.UpdateFailureState
+ 0, // 50: envoy.admin.v3.EcdsConfigDump.EcdsFilterConfig.client_status:type_name -> envoy.admin.v3.ClientResourceStatus
+ 51, // [51:51] is the sub-list for method output_type
+ 51, // [51:51] is the sub-list for method input_type
+ 51, // [51:51] is the sub-list for extension type_name
+ 51, // [51:51] is the sub-list for extension extendee
+ 0, // [0:51] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_config_dump_shared_proto_init() }
+func file_envoy_admin_v3_config_dump_shared_proto_init() {
+ if File_envoy_admin_v3_config_dump_shared_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UpdateFailureState); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenersConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClustersConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoutesConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopedRoutesConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EndpointsConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EcdsConfigDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenersConfigDump_StaticListener); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenersConfigDump_DynamicListenerState); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenersConfigDump_DynamicListener); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClustersConfigDump_StaticCluster); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClustersConfigDump_DynamicCluster); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoutesConfigDump_StaticRouteConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RoutesConfigDump_DynamicRouteConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopedRoutesConfigDump_InlineScopedRouteConfigs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopedRoutesConfigDump_DynamicScopedRouteConfigs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EndpointsConfigDump_StaticEndpointConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EndpointsConfigDump_DynamicEndpointConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_config_dump_shared_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*EcdsConfigDump_EcdsFilterConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_config_dump_shared_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 19,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_config_dump_shared_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_config_dump_shared_proto_depIdxs,
+ EnumInfos: file_envoy_admin_v3_config_dump_shared_proto_enumTypes,
+ MessageInfos: file_envoy_admin_v3_config_dump_shared_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_config_dump_shared_proto = out.File
+ file_envoy_admin_v3_config_dump_shared_proto_rawDesc = nil
+ file_envoy_admin_v3_config_dump_shared_proto_goTypes = nil
+ file_envoy_admin_v3_config_dump_shared_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go
new file mode 100644
index 000000000..dd16990ad
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared.pb.validate.go
@@ -0,0 +1,3435 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/config_dump_shared.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on UpdateFailureState with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *UpdateFailureState) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on UpdateFailureState with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// UpdateFailureStateMultiError, or nil if none found.
+func (m *UpdateFailureState) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UpdateFailureState) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetFailedConfiguration()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, UpdateFailureStateValidationError{
+ field: "FailedConfiguration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, UpdateFailureStateValidationError{
+ field: "FailedConfiguration",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetFailedConfiguration()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return UpdateFailureStateValidationError{
+ field: "FailedConfiguration",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdateAttempt()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, UpdateFailureStateValidationError{
+ field: "LastUpdateAttempt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, UpdateFailureStateValidationError{
+ field: "LastUpdateAttempt",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdateAttempt()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return UpdateFailureStateValidationError{
+ field: "LastUpdateAttempt",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Details
+
+ // no validation rules for VersionInfo
+
+ if len(errors) > 0 {
+ return UpdateFailureStateMultiError(errors)
+ }
+
+ return nil
+}
+
+// UpdateFailureStateMultiError is an error wrapping multiple validation errors
+// returned by UpdateFailureState.ValidateAll() if the designated constraints
+// aren't met.
+type UpdateFailureStateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UpdateFailureStateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UpdateFailureStateMultiError) AllErrors() []error { return m }
+
+// UpdateFailureStateValidationError is the validation error returned by
+// UpdateFailureState.Validate if the designated constraints aren't met.
+type UpdateFailureStateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UpdateFailureStateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UpdateFailureStateValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UpdateFailureStateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UpdateFailureStateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UpdateFailureStateValidationError) ErrorName() string {
+ return "UpdateFailureStateValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UpdateFailureStateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUpdateFailureState.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UpdateFailureStateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UpdateFailureStateValidationError{}
+
+// NOTE(review): machine-generated by protoc-gen-validate — comment edits here
+// will be lost when the file is regenerated or re-vendored.
+// Validate checks the field values on ListenersConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ListenersConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListenersConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ListenersConfigDumpMultiError, or nil if none found.
+func (m *ListenersConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate walks every field; with all=true it accumulates each violation
+// into a MultiError, otherwise it returns on the first violation found.
+func (m *ListenersConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ for idx, item := range m.GetStaticListeners() {
+ // keep idx/item referenced even when no per-item rules are generated
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicListeners() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicListeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListenersConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by ListenersConfigDump.ValidateAll() if the designated
+// constraints aren't met.
+type ListenersConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersConfigDumpMultiError) AllErrors() []error { return m }
+
+// ListenersConfigDumpValidationError is the validation error returned by
+// ListenersConfigDump.Validate if the designated constraints aren't met.
+type ListenersConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ // key reports whether the violation applies to a map key rather than its value.
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersConfigDumpValidationError) ErrorName() string {
+ return "ListenersConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListenersConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenersConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersConfigDumpValidationError{}
+
+// compile-time assertion that the error type exposes the full pgv interface
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersConfigDumpValidationError{}
+
+// NOTE(review): machine-generated by protoc-gen-validate — comment edits here
+// will be lost when the file is regenerated or re-vendored.
+// Validate checks the field values on ClustersConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ClustersConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClustersConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ClustersConfigDumpMultiError, or nil if none found.
+func (m *ClustersConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate walks every field; with all=true it accumulates each violation
+// into a MultiError, otherwise it returns on the first violation found.
+func (m *ClustersConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ for idx, item := range m.GetStaticClusters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("StaticClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicActiveClusters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicActiveClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicWarmingClusters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicWarmingClusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ClustersConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClustersConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by ClustersConfigDump.ValidateAll() if the designated constraints
+// aren't met.
+type ClustersConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClustersConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClustersConfigDumpMultiError) AllErrors() []error { return m }
+
+// ClustersConfigDumpValidationError is the validation error returned by
+// ClustersConfigDump.Validate if the designated constraints aren't met.
+type ClustersConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ // key reports whether the violation applies to a map key rather than its value.
+ key bool
+}
+
+// Field function returns field value.
+func (e ClustersConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClustersConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClustersConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClustersConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClustersConfigDumpValidationError) ErrorName() string {
+ return "ClustersConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ClustersConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClustersConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClustersConfigDumpValidationError{}
+
+// compile-time assertion that the error type exposes the full pgv interface
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClustersConfigDumpValidationError{}
+
+// NOTE(review): machine-generated by protoc-gen-validate — comment edits here
+// will be lost when the file is regenerated or re-vendored.
+// Validate checks the field values on RoutesConfigDump with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *RoutesConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RoutesConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RoutesConfigDumpMultiError, or nil if none found.
+func (m *RoutesConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate walks every field; with all=true it accumulates each violation
+// into a MultiError, otherwise it returns on the first violation found.
+func (m *RoutesConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStaticRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("StaticRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("StaticRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("StaticRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return RoutesConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// RoutesConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by RoutesConfigDump.ValidateAll() if the designated constraints
+// aren't met.
+type RoutesConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RoutesConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RoutesConfigDumpMultiError) AllErrors() []error { return m }
+
+// RoutesConfigDumpValidationError is the validation error returned by
+// RoutesConfigDump.Validate if the designated constraints aren't met.
+type RoutesConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ // key reports whether the violation applies to a map key rather than its value.
+ key bool
+}
+
+// Field function returns field value.
+func (e RoutesConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RoutesConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RoutesConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RoutesConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RoutesConfigDumpValidationError) ErrorName() string { return "RoutesConfigDumpValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RoutesConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRoutesConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RoutesConfigDumpValidationError{}
+
+// compile-time assertion that the error type exposes the full pgv interface
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RoutesConfigDumpValidationError{}
+
+// NOTE(review): machine-generated by protoc-gen-validate — comment edits here
+// will be lost when the file is regenerated or re-vendored.
+// Validate checks the field values on ScopedRoutesConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ScopedRoutesConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ScopedRoutesConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ScopedRoutesConfigDumpMultiError, or nil if none found.
+func (m *ScopedRoutesConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate walks every field; with all=true it accumulates each violation
+// into a MultiError, otherwise it returns on the first violation found.
+func (m *ScopedRoutesConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetInlineScopedRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("InlineScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicScopedRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ScopedRoutesConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// ScopedRoutesConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by ScopedRoutesConfigDump.ValidateAll() if the designated
+// constraints aren't met.
+type ScopedRoutesConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ScopedRoutesConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ScopedRoutesConfigDumpMultiError) AllErrors() []error { return m }
+
+// ScopedRoutesConfigDumpValidationError is the validation error returned by
+// ScopedRoutesConfigDump.Validate if the designated constraints aren't met.
+type ScopedRoutesConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ // key reports whether the violation applies to a map key rather than its value.
+ key bool
+}
+
+// Field function returns field value.
+func (e ScopedRoutesConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ScopedRoutesConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ScopedRoutesConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ScopedRoutesConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ScopedRoutesConfigDumpValidationError) ErrorName() string {
+ return "ScopedRoutesConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ScopedRoutesConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sScopedRoutesConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ScopedRoutesConfigDumpValidationError{}
+
+// compile-time assertion that the error type exposes the full pgv interface
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ScopedRoutesConfigDumpValidationError{}
+
+// NOTE(review): machine-generated by protoc-gen-validate — comment edits here
+// will be lost when the file is regenerated or re-vendored.
+// Validate checks the field values on EndpointsConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *EndpointsConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EndpointsConfigDump with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// EndpointsConfigDumpMultiError, or nil if none found.
+func (m *EndpointsConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate walks every field; with all=true it accumulates each violation
+// into a MultiError, otherwise it returns on the first violation found.
+func (m *EndpointsConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStaticEndpointConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("StaticEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetDynamicEndpointConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDumpValidationError{
+ field: fmt.Sprintf("DynamicEndpointConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return EndpointsConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// EndpointsConfigDumpMultiError is an error wrapping multiple validation
+// errors returned by EndpointsConfigDump.ValidateAll() if the designated
+// constraints aren't met.
+type EndpointsConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EndpointsConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EndpointsConfigDumpMultiError) AllErrors() []error { return m }
+
+// EndpointsConfigDumpValidationError is the validation error returned by
+// EndpointsConfigDump.Validate if the designated constraints aren't met.
+type EndpointsConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ // key reports whether the violation applies to a map key rather than its value.
+ key bool
+}
+
+// Field function returns field value.
+func (e EndpointsConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EndpointsConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EndpointsConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EndpointsConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EndpointsConfigDumpValidationError) ErrorName() string {
+ return "EndpointsConfigDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EndpointsConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEndpointsConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EndpointsConfigDumpValidationError{}
+
+// compile-time assertion that the error type exposes the full pgv interface
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EndpointsConfigDumpValidationError{}
+
+// NOTE(review): machine-generated by protoc-gen-validate — comment edits here
+// will be lost when the file is regenerated or re-vendored.
+// Validate checks the field values on EcdsConfigDump with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *EcdsConfigDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EcdsConfigDump with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in EcdsConfigDumpMultiError,
+// or nil if none found.
+func (m *EcdsConfigDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate walks every field; with all=true it accumulates each violation
+// into a MultiError, otherwise it returns on the first violation found.
+func (m *EcdsConfigDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetEcdsFilters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EcdsConfigDumpValidationError{
+ field: fmt.Sprintf("EcdsFilters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EcdsConfigDumpValidationError{
+ field: fmt.Sprintf("EcdsFilters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EcdsConfigDumpValidationError{
+ field: fmt.Sprintf("EcdsFilters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return EcdsConfigDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// EcdsConfigDumpMultiError is an error wrapping multiple validation errors
+// returned by EcdsConfigDump.ValidateAll() if the designated constraints
+// aren't met.
+type EcdsConfigDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EcdsConfigDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EcdsConfigDumpMultiError) AllErrors() []error { return m }
+
+// EcdsConfigDumpValidationError is the validation error returned by
+// EcdsConfigDump.Validate if the designated constraints aren't met.
+type EcdsConfigDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ // key reports whether the violation applies to a map key rather than its value.
+ key bool
+}
+
+// Field function returns field value.
+func (e EcdsConfigDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EcdsConfigDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EcdsConfigDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EcdsConfigDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EcdsConfigDumpValidationError) ErrorName() string { return "EcdsConfigDumpValidationError" }
+
+// Error satisfies the builtin error interface
+func (e EcdsConfigDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEcdsConfigDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EcdsConfigDumpValidationError{}
+
+// compile-time assertion that the error type exposes the full pgv interface
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EcdsConfigDumpValidationError{}
+
+// NOTE(review): machine-generated by protoc-gen-validate — comment edits here
+// will be lost when the file is regenerated or re-vendored.
+// Validate checks the field values on ListenersConfigDump_StaticListener with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *ListenersConfigDump_StaticListener) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListenersConfigDump_StaticListener
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// ListenersConfigDump_StaticListenerMultiError, or nil if none found.
+func (m *ListenersConfigDump_StaticListener) ValidateAll() error {
+ return m.validate(true)
+}
+
+// validate checks the Listener and LastUpdated embedded messages; with
+// all=true every violation is collected, otherwise the first one is returned.
+func (m *ListenersConfigDump_StaticListener) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetListener()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_StaticListenerValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_StaticListenerValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_StaticListenerValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_StaticListenerValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_StaticListenerValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_StaticListenerValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ListenersConfigDump_StaticListenerMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersConfigDump_StaticListenerMultiError is an error wrapping multiple
+// validation errors returned by
+// ListenersConfigDump_StaticListener.ValidateAll() if the designated
+// constraints aren't met.
+type ListenersConfigDump_StaticListenerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersConfigDump_StaticListenerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersConfigDump_StaticListenerMultiError) AllErrors() []error { return m }
+
+// ListenersConfigDump_StaticListenerValidationError is the validation error
+// returned by ListenersConfigDump_StaticListener.Validate if the designated
+// constraints aren't met.
+type ListenersConfigDump_StaticListenerValidationError struct {
+ field string
+ reason string
+ cause error
+ // key reports whether the violation applies to a map key rather than its value.
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersConfigDump_StaticListenerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersConfigDump_StaticListenerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersConfigDump_StaticListenerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersConfigDump_StaticListenerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersConfigDump_StaticListenerValidationError) ErrorName() string {
+ return "ListenersConfigDump_StaticListenerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListenersConfigDump_StaticListenerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenersConfigDump_StaticListener.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersConfigDump_StaticListenerValidationError{}
+
+// compile-time assertion that the error type exposes the full pgv interface
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersConfigDump_StaticListenerValidationError{}
+
+// Validate checks the field values on ListenersConfigDump_DynamicListenerState
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *ListenersConfigDump_DynamicListenerState) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// ListenersConfigDump_DynamicListenerState with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// ListenersConfigDump_DynamicListenerStateMultiError, or nil if none found.
+func (m *ListenersConfigDump_DynamicListenerState) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetListener()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetListener()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "Listener",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerStateValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ListenersConfigDump_DynamicListenerStateMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersConfigDump_DynamicListenerStateMultiError is an error wrapping
+// multiple validation errors returned by
+// ListenersConfigDump_DynamicListenerState.ValidateAll() if the designated
+// constraints aren't met.
+type ListenersConfigDump_DynamicListenerStateMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersConfigDump_DynamicListenerStateMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersConfigDump_DynamicListenerStateMultiError) AllErrors() []error { return m }
+
+// ListenersConfigDump_DynamicListenerStateValidationError is the validation
+// error returned by ListenersConfigDump_DynamicListenerState.Validate if the
+// designated constraints aren't met.
+type ListenersConfigDump_DynamicListenerStateValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersConfigDump_DynamicListenerStateValidationError) ErrorName() string {
+ return "ListenersConfigDump_DynamicListenerStateValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListenersConfigDump_DynamicListenerStateValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenersConfigDump_DynamicListenerState.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersConfigDump_DynamicListenerStateValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersConfigDump_DynamicListenerStateValidationError{}
+
+// Validate checks the field values on ListenersConfigDump_DynamicListener with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *ListenersConfigDump_DynamicListener) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListenersConfigDump_DynamicListener
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// ListenersConfigDump_DynamicListenerMultiError, or nil if none found.
+func (m *ListenersConfigDump_DynamicListener) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListenersConfigDump_DynamicListener) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetActiveState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "ActiveState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "ActiveState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetActiveState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerValidationError{
+ field: "ActiveState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetWarmingState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "WarmingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "WarmingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWarmingState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerValidationError{
+ field: "WarmingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetDrainingState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "DrainingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "DrainingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDrainingState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerValidationError{
+ field: "DrainingState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersConfigDump_DynamicListenerValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersConfigDump_DynamicListenerValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return ListenersConfigDump_DynamicListenerMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersConfigDump_DynamicListenerMultiError is an error wrapping multiple
+// validation errors returned by
+// ListenersConfigDump_DynamicListener.ValidateAll() if the designated
+// constraints aren't met.
+type ListenersConfigDump_DynamicListenerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersConfigDump_DynamicListenerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersConfigDump_DynamicListenerMultiError) AllErrors() []error { return m }
+
+// ListenersConfigDump_DynamicListenerValidationError is the validation error
+// returned by ListenersConfigDump_DynamicListener.Validate if the designated
+// constraints aren't met.
+type ListenersConfigDump_DynamicListenerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersConfigDump_DynamicListenerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersConfigDump_DynamicListenerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersConfigDump_DynamicListenerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersConfigDump_DynamicListenerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersConfigDump_DynamicListenerValidationError) ErrorName() string {
+ return "ListenersConfigDump_DynamicListenerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListenersConfigDump_DynamicListenerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenersConfigDump_DynamicListener.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersConfigDump_DynamicListenerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersConfigDump_DynamicListenerValidationError{}
+
+// Validate checks the field values on ClustersConfigDump_StaticCluster with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *ClustersConfigDump_StaticCluster) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClustersConfigDump_StaticCluster with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// ClustersConfigDump_StaticClusterMultiError, or nil if none found.
+func (m *ClustersConfigDump_StaticCluster) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClustersConfigDump_StaticCluster) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetCluster()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_StaticClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_StaticClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_StaticClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_StaticClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_StaticClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_StaticClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ClustersConfigDump_StaticClusterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClustersConfigDump_StaticClusterMultiError is an error wrapping multiple
+// validation errors returned by
+// ClustersConfigDump_StaticCluster.ValidateAll() if the designated
+// constraints aren't met.
+type ClustersConfigDump_StaticClusterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClustersConfigDump_StaticClusterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClustersConfigDump_StaticClusterMultiError) AllErrors() []error { return m }
+
+// ClustersConfigDump_StaticClusterValidationError is the validation error
+// returned by ClustersConfigDump_StaticCluster.Validate if the designated
+// constraints aren't met.
+type ClustersConfigDump_StaticClusterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClustersConfigDump_StaticClusterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClustersConfigDump_StaticClusterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClustersConfigDump_StaticClusterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClustersConfigDump_StaticClusterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClustersConfigDump_StaticClusterValidationError) ErrorName() string {
+ return "ClustersConfigDump_StaticClusterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ClustersConfigDump_StaticClusterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClustersConfigDump_StaticCluster.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClustersConfigDump_StaticClusterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClustersConfigDump_StaticClusterValidationError{}
+
+// Validate checks the field values on ClustersConfigDump_DynamicCluster with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *ClustersConfigDump_DynamicCluster) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClustersConfigDump_DynamicCluster
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// ClustersConfigDump_DynamicClusterMultiError, or nil if none found.
+func (m *ClustersConfigDump_DynamicCluster) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClustersConfigDump_DynamicCluster) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetCluster()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCluster()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_DynamicClusterValidationError{
+ field: "Cluster",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_DynamicClusterValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClustersConfigDump_DynamicClusterValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClustersConfigDump_DynamicClusterValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return ClustersConfigDump_DynamicClusterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClustersConfigDump_DynamicClusterMultiError is an error wrapping multiple
+// validation errors returned by
+// ClustersConfigDump_DynamicCluster.ValidateAll() if the designated
+// constraints aren't met.
+type ClustersConfigDump_DynamicClusterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClustersConfigDump_DynamicClusterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClustersConfigDump_DynamicClusterMultiError) AllErrors() []error { return m }
+
+// ClustersConfigDump_DynamicClusterValidationError is the validation error
+// returned by ClustersConfigDump_DynamicCluster.Validate if the designated
+// constraints aren't met.
+type ClustersConfigDump_DynamicClusterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClustersConfigDump_DynamicClusterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClustersConfigDump_DynamicClusterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClustersConfigDump_DynamicClusterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClustersConfigDump_DynamicClusterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClustersConfigDump_DynamicClusterValidationError) ErrorName() string {
+ return "ClustersConfigDump_DynamicClusterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ClustersConfigDump_DynamicClusterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClustersConfigDump_DynamicCluster.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClustersConfigDump_DynamicClusterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClustersConfigDump_DynamicClusterValidationError{}
+
+// Validate checks the field values on RoutesConfigDump_StaticRouteConfig with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *RoutesConfigDump_StaticRouteConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RoutesConfigDump_StaticRouteConfig
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// RoutesConfigDump_StaticRouteConfigMultiError, or nil if none found.
+func (m *RoutesConfigDump_StaticRouteConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetRouteConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_StaticRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return RoutesConfigDump_StaticRouteConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// RoutesConfigDump_StaticRouteConfigMultiError is an error wrapping multiple
+// validation errors returned by
+// RoutesConfigDump_StaticRouteConfig.ValidateAll() if the designated
+// constraints aren't met.
+type RoutesConfigDump_StaticRouteConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RoutesConfigDump_StaticRouteConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RoutesConfigDump_StaticRouteConfigMultiError) AllErrors() []error { return m }
+
+// RoutesConfigDump_StaticRouteConfigValidationError is the validation error
+// returned by RoutesConfigDump_StaticRouteConfig.Validate if the designated
+// constraints aren't met.
+type RoutesConfigDump_StaticRouteConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RoutesConfigDump_StaticRouteConfigValidationError) ErrorName() string {
+ return "RoutesConfigDump_StaticRouteConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RoutesConfigDump_StaticRouteConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRoutesConfigDump_StaticRouteConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RoutesConfigDump_StaticRouteConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RoutesConfigDump_StaticRouteConfigValidationError{}
+
+// Validate checks the field values on RoutesConfigDump_DynamicRouteConfig with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the first error encountered is returned, or nil if there are
+// no violations.
+func (m *RoutesConfigDump_DynamicRouteConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RoutesConfigDump_DynamicRouteConfig
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the result is a list of violation errors wrapped in
+// RoutesConfigDump_DynamicRouteConfigMultiError, or nil if none found.
+func (m *RoutesConfigDump_DynamicRouteConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetRouteConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRouteConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "RouteConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RoutesConfigDump_DynamicRouteConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return RoutesConfigDump_DynamicRouteConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// RoutesConfigDump_DynamicRouteConfigMultiError is an error wrapping multiple
+// validation errors returned by
+// RoutesConfigDump_DynamicRouteConfig.ValidateAll() if the designated
+// constraints aren't met.
+type RoutesConfigDump_DynamicRouteConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RoutesConfigDump_DynamicRouteConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RoutesConfigDump_DynamicRouteConfigMultiError) AllErrors() []error { return m }
+
+// RoutesConfigDump_DynamicRouteConfigValidationError is the validation error
+// returned by RoutesConfigDump_DynamicRouteConfig.Validate if the designated
+// constraints aren't met.
+type RoutesConfigDump_DynamicRouteConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) ErrorName() string {
+ return "RoutesConfigDump_DynamicRouteConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RoutesConfigDump_DynamicRouteConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRoutesConfigDump_DynamicRouteConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RoutesConfigDump_DynamicRouteConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RoutesConfigDump_DynamicRouteConfigValidationError{}
+
+// Validate checks the field values on
+// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// ScopedRoutesConfigDump_InlineScopedRouteConfigs with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in
+// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError, or nil if none found.
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ for idx, item := range m.GetScopedRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError(errors)
+ }
+
+ return nil
+}
+
+// ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError is an error
+// wrapping multiple validation errors returned by
+// ScopedRoutesConfigDump_InlineScopedRouteConfigs.ValidateAll() if the
+// designated constraints aren't met.
+type ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ScopedRoutesConfigDump_InlineScopedRouteConfigsMultiError) AllErrors() []error { return m }
+
+// ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError is the
+// validation error returned by
+// ScopedRoutesConfigDump_InlineScopedRouteConfigs.Validate if the designated
+// constraints aren't met.
+type ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) ErrorName() string {
+ return "ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sScopedRoutesConfigDump_InlineScopedRouteConfigs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ScopedRoutesConfigDump_InlineScopedRouteConfigsValidationError{}
+
+// Validate checks the field values on
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigs with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError, or nil if none found.
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ // no validation rules for VersionInfo
+
+ for idx, item := range m.GetScopedRouteConfigs() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: fmt.Sprintf("ScopedRouteConfigs[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError(errors)
+ }
+
+ return nil
+}
+
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError is an error
+// wrapping multiple validation errors returned by
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.ValidateAll() if the
+// designated constraints aren't met.
+type ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ScopedRoutesConfigDump_DynamicScopedRouteConfigsMultiError) AllErrors() []error { return m }
+
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError is the
+// validation error returned by
+// ScopedRoutesConfigDump_DynamicScopedRouteConfigs.Validate if the designated
+// constraints aren't met.
+type ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Field() string {
+ return e.field
+}
+
+// Reason function returns reason value.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Reason() string {
+ return e.reason
+}
+
+// Cause function returns cause value.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Cause() error {
+ return e.cause
+}
+
+// Key function returns key value.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) ErrorName() string {
+ return "ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sScopedRoutesConfigDump_DynamicScopedRouteConfigs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ScopedRoutesConfigDump_DynamicScopedRouteConfigsValidationError{}
+
+// Validate checks the field values on EndpointsConfigDump_StaticEndpointConfig
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *EndpointsConfigDump_StaticEndpointConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// EndpointsConfigDump_StaticEndpointConfig with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// EndpointsConfigDump_StaticEndpointConfigMultiError, or nil if none found.
+func (m *EndpointsConfigDump_StaticEndpointConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetEndpointConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_StaticEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return EndpointsConfigDump_StaticEndpointConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// EndpointsConfigDump_StaticEndpointConfigMultiError is an error wrapping
+// multiple validation errors returned by
+// EndpointsConfigDump_StaticEndpointConfig.ValidateAll() if the designated
+// constraints aren't met.
+type EndpointsConfigDump_StaticEndpointConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EndpointsConfigDump_StaticEndpointConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EndpointsConfigDump_StaticEndpointConfigMultiError) AllErrors() []error { return m }
+
+// EndpointsConfigDump_StaticEndpointConfigValidationError is the validation
+// error returned by EndpointsConfigDump_StaticEndpointConfig.Validate if the
+// designated constraints aren't met.
+type EndpointsConfigDump_StaticEndpointConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) ErrorName() string {
+ return "EndpointsConfigDump_StaticEndpointConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EndpointsConfigDump_StaticEndpointConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEndpointsConfigDump_StaticEndpointConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EndpointsConfigDump_StaticEndpointConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EndpointsConfigDump_StaticEndpointConfigValidationError{}
+
+// Validate checks the field values on
+// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *EndpointsConfigDump_DynamicEndpointConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// EndpointsConfigDump_DynamicEndpointConfig with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// EndpointsConfigDump_DynamicEndpointConfigMultiError, or nil if none found.
+func (m *EndpointsConfigDump_DynamicEndpointConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetEndpointConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEndpointConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "EndpointConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EndpointsConfigDump_DynamicEndpointConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return EndpointsConfigDump_DynamicEndpointConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// EndpointsConfigDump_DynamicEndpointConfigMultiError is an error wrapping
+// multiple validation errors returned by
+// EndpointsConfigDump_DynamicEndpointConfig.ValidateAll() if the designated
+// constraints aren't met.
+type EndpointsConfigDump_DynamicEndpointConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EndpointsConfigDump_DynamicEndpointConfigMultiError) AllErrors() []error { return m }
+
+// EndpointsConfigDump_DynamicEndpointConfigValidationError is the validation
+// error returned by EndpointsConfigDump_DynamicEndpointConfig.Validate if the
+// designated constraints aren't met.
+type EndpointsConfigDump_DynamicEndpointConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) ErrorName() string {
+ return "EndpointsConfigDump_DynamicEndpointConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EndpointsConfigDump_DynamicEndpointConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEndpointsConfigDump_DynamicEndpointConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EndpointsConfigDump_DynamicEndpointConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EndpointsConfigDump_DynamicEndpointConfigValidationError{}
+
+// Validate checks the field values on EcdsConfigDump_EcdsFilterConfig with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *EcdsConfigDump_EcdsFilterConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on EcdsConfigDump_EcdsFilterConfig with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// EcdsConfigDump_EcdsFilterConfigMultiError, or nil if none found.
+func (m *EcdsConfigDump_EcdsFilterConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for VersionInfo
+
+ if all {
+ switch v := interface{}(m.GetEcdsFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "EcdsFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "EcdsFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEcdsFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "EcdsFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLastUpdated()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLastUpdated()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "LastUpdated",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetErrorState()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetErrorState()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return EcdsConfigDump_EcdsFilterConfigValidationError{
+ field: "ErrorState",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for ClientStatus
+
+ if len(errors) > 0 {
+ return EcdsConfigDump_EcdsFilterConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// EcdsConfigDump_EcdsFilterConfigMultiError is an error wrapping multiple
+// validation errors returned by EcdsConfigDump_EcdsFilterConfig.ValidateAll()
+// if the designated constraints aren't met.
+type EcdsConfigDump_EcdsFilterConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m EcdsConfigDump_EcdsFilterConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m EcdsConfigDump_EcdsFilterConfigMultiError) AllErrors() []error { return m }
+
+// EcdsConfigDump_EcdsFilterConfigValidationError is the validation error
+// returned by EcdsConfigDump_EcdsFilterConfig.Validate if the designated
+// constraints aren't met.
+type EcdsConfigDump_EcdsFilterConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) ErrorName() string {
+ return "EcdsConfigDump_EcdsFilterConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e EcdsConfigDump_EcdsFilterConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sEcdsConfigDump_EcdsFilterConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = EcdsConfigDump_EcdsFilterConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = EcdsConfigDump_EcdsFilterConfigValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go
new file mode 100644
index 000000000..934de8568
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_shared_vtproto.pb.go
@@ -0,0 +1,1715 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/config_dump_shared.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+ timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *UpdateFailureState) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UpdateFailureState) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *UpdateFailureState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Details) > 0 {
+ i -= len(m.Details)
+ copy(dAtA[i:], m.Details)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Details)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.LastUpdateAttempt != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdateAttempt).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.FailedConfiguration != nil {
+ size, err := (*anypb.Any)(m.FailedConfiguration).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenersConfigDump_StaticListener) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenersConfigDump_StaticListener) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenersConfigDump_StaticListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Listener != nil {
+ size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Listener != nil {
+ size, err := (*anypb.Any)(m.Listener).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenersConfigDump_DynamicListener) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenersConfigDump_DynamicListener) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenersConfigDump_DynamicListener) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.DrainingState != nil {
+ size, err := m.DrainingState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.WarmingState != nil {
+ size, err := m.WarmingState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.ActiveState != nil {
+ size, err := m.ActiveState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenersConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicListeners) > 0 {
+ for iNdEx := len(m.DynamicListeners) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.StaticListeners) > 0 {
+ for iNdEx := len(m.StaticListeners) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticListeners[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClustersConfigDump_StaticCluster) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClustersConfigDump_StaticCluster) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClustersConfigDump_StaticCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Cluster != nil {
+ size, err := (*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClustersConfigDump_DynamicCluster) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClustersConfigDump_DynamicCluster) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClustersConfigDump_DynamicCluster) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Cluster != nil {
+ size, err := (*anypb.Any)(m.Cluster).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClustersConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClustersConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClustersConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicWarmingClusters) > 0 {
+ for iNdEx := len(m.DynamicWarmingClusters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicWarmingClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.DynamicActiveClusters) > 0 {
+ for iNdEx := len(m.DynamicActiveClusters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicActiveClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.StaticClusters) > 0 {
+ for iNdEx := len(m.StaticClusters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticClusters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.RouteConfig != nil {
+ size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.RouteConfig != nil {
+ size, err := (*anypb.Any)(m.RouteConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicRouteConfigs) > 0 {
+ for iNdEx := len(m.DynamicRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.StaticRouteConfigs) > 0 {
+ for iNdEx := len(m.StaticRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ScopedRouteConfigs) > 0 {
+ for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.ScopedRouteConfigs) > 0 {
+ for iNdEx := len(m.ScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := (*anypb.Any)(m.ScopedRouteConfigs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ScopedRoutesConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ScopedRoutesConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ScopedRoutesConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicScopedRouteConfigs) > 0 {
+ for iNdEx := len(m.DynamicScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.InlineScopedRouteConfigs) > 0 {
+ for iNdEx := len(m.InlineScopedRouteConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.InlineScopedRouteConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.EndpointConfig != nil {
+ size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.EndpointConfig != nil {
+ size, err := (*anypb.Any)(m.EndpointConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EndpointsConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EndpointsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EndpointsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicEndpointConfigs) > 0 {
+ for iNdEx := len(m.DynamicEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.StaticEndpointConfigs) > 0 {
+ for iNdEx := len(m.StaticEndpointConfigs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticEndpointConfigs[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.EcdsFilter != nil {
+ size, err := (*anypb.Any)(m.EcdsFilter).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EcdsConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EcdsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *EcdsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.EcdsFilters) > 0 {
+ for iNdEx := len(m.EcdsFilters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.EcdsFilters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UpdateFailureState) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.FailedConfiguration != nil {
+ l = (*anypb.Any)(m.FailedConfiguration).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdateAttempt != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdateAttempt).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.Details)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenersConfigDump_StaticListener) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Listener != nil {
+ l = (*anypb.Any)(m.Listener).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenersConfigDump_DynamicListenerState) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Listener != nil {
+ l = (*anypb.Any)(m.Listener).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenersConfigDump_DynamicListener) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ActiveState != nil {
+ l = m.ActiveState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.WarmingState != nil {
+ l = m.WarmingState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DrainingState != nil {
+ l = m.DrainingState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenersConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.StaticListeners) > 0 {
+ for _, e := range m.StaticListeners {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicListeners) > 0 {
+ for _, e := range m.DynamicListeners {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClustersConfigDump_StaticCluster) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Cluster != nil {
+ l = (*anypb.Any)(m.Cluster).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClustersConfigDump_DynamicCluster) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Cluster != nil {
+ l = (*anypb.Any)(m.Cluster).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClustersConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.StaticClusters) > 0 {
+ for _, e := range m.StaticClusters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicActiveClusters) > 0 {
+ for _, e := range m.DynamicActiveClusters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicWarmingClusters) > 0 {
+ for _, e := range m.DynamicWarmingClusters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RoutesConfigDump_StaticRouteConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RouteConfig != nil {
+ l = (*anypb.Any)(m.RouteConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RoutesConfigDump_DynamicRouteConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.RouteConfig != nil {
+ l = (*anypb.Any)(m.RouteConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RoutesConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.StaticRouteConfigs) > 0 {
+ for _, e := range m.StaticRouteConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicRouteConfigs) > 0 {
+ for _, e := range m.DynamicRouteConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ScopedRoutesConfigDump_InlineScopedRouteConfigs) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.ScopedRouteConfigs) > 0 {
+ for _, e := range m.ScopedRouteConfigs {
+ l = (*anypb.Any)(e).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ScopedRoutesConfigDump_DynamicScopedRouteConfigs) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.ScopedRouteConfigs) > 0 {
+ for _, e := range m.ScopedRouteConfigs {
+ l = (*anypb.Any)(e).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ScopedRoutesConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.InlineScopedRouteConfigs) > 0 {
+ for _, e := range m.InlineScopedRouteConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicScopedRouteConfigs) > 0 {
+ for _, e := range m.DynamicScopedRouteConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EndpointsConfigDump_StaticEndpointConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.EndpointConfig != nil {
+ l = (*anypb.Any)(m.EndpointConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EndpointsConfigDump_DynamicEndpointConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EndpointConfig != nil {
+ l = (*anypb.Any)(m.EndpointConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EndpointsConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.StaticEndpointConfigs) > 0 {
+ for _, e := range m.StaticEndpointConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicEndpointConfigs) > 0 {
+ for _, e := range m.DynamicEndpointConfigs {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EcdsConfigDump_EcdsFilterConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EcdsFilter != nil {
+ l = (*anypb.Any)(m.EcdsFilter).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *EcdsConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.EcdsFilters) > 0 {
+ for _, e := range m.EcdsFilters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go
new file mode 100644
index 000000000..78e37eec9
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/config_dump_vtproto.pb.go
@@ -0,0 +1,466 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/config_dump.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+ timestamppb "github.com/planetscale/vtprotobuf/types/known/timestamppb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *ConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Configs) > 0 {
+ for iNdEx := len(m.Configs) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := (*anypb.Any)(m.Configs[iNdEx]).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *BootstrapConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *BootstrapConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *BootstrapConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Bootstrap != nil {
+ if vtmsg, ok := interface{}(m.Bootstrap).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Bootstrap)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretsConfigDump_DynamicSecret) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretsConfigDump_DynamicSecret) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SecretsConfigDump_DynamicSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.ClientStatus != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ClientStatus))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.ErrorState != nil {
+ size, err := m.ErrorState.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Secret != nil {
+ size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.VersionInfo) > 0 {
+ i -= len(m.VersionInfo)
+ copy(dAtA[i:], m.VersionInfo)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.VersionInfo)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretsConfigDump_StaticSecret) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretsConfigDump_StaticSecret) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SecretsConfigDump_StaticSecret) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Secret != nil {
+ size, err := (*anypb.Any)(m.Secret).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.LastUpdated != nil {
+ size, err := (*timestamppb.Timestamp)(m.LastUpdated).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SecretsConfigDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SecretsConfigDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SecretsConfigDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.DynamicWarmingSecrets) > 0 {
+ for iNdEx := len(m.DynamicWarmingSecrets) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicWarmingSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.DynamicActiveSecrets) > 0 {
+ for iNdEx := len(m.DynamicActiveSecrets) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.DynamicActiveSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.StaticSecrets) > 0 {
+ for iNdEx := len(m.StaticSecrets) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.StaticSecrets[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Configs) > 0 {
+ for _, e := range m.Configs {
+ l = (*anypb.Any)(e).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *BootstrapConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Bootstrap != nil {
+ if size, ok := interface{}(m.Bootstrap).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Bootstrap)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SecretsConfigDump_DynamicSecret) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.VersionInfo)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Secret != nil {
+ l = (*anypb.Any)(m.Secret).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ErrorState != nil {
+ l = m.ErrorState.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClientStatus != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.ClientStatus))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SecretsConfigDump_StaticSecret) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LastUpdated != nil {
+ l = (*timestamppb.Timestamp)(m.LastUpdated).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Secret != nil {
+ l = (*anypb.Any)(m.Secret).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *SecretsConfigDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.StaticSecrets) > 0 {
+ for _, e := range m.StaticSecrets {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicActiveSecrets) > 0 {
+ for _, e := range m.DynamicActiveSecrets {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.DynamicWarmingSecrets) > 0 {
+ for _, e := range m.DynamicWarmingSecrets {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go
new file mode 100644
index 000000000..632817fac
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.go
@@ -0,0 +1,241 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/init_dump.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Dumps of unready targets of envoy init managers. Envoy's admin fills this message with init managers,
+// which provides the information of their unready targets.
+// The :ref:`/init_dump ` will dump all unready targets information.
+type UnreadyTargetsDumps struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // You can choose specific component to dump unready targets with mask query parameter.
+ // See :ref:`/init_dump?mask={} ` for more information.
+ // The dumps of unready targets of all init managers.
+ UnreadyTargetsDumps []*UnreadyTargetsDumps_UnreadyTargetsDump `protobuf:"bytes,1,rep,name=unready_targets_dumps,json=unreadyTargetsDumps,proto3" json:"unready_targets_dumps,omitempty"`
+}
+
+func (x *UnreadyTargetsDumps) Reset() {
+ *x = UnreadyTargetsDumps{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnreadyTargetsDumps) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnreadyTargetsDumps) ProtoMessage() {}
+
+func (x *UnreadyTargetsDumps) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnreadyTargetsDumps.ProtoReflect.Descriptor instead.
+func (*UnreadyTargetsDumps) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_init_dump_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *UnreadyTargetsDumps) GetUnreadyTargetsDumps() []*UnreadyTargetsDumps_UnreadyTargetsDump {
+ if x != nil {
+ return x.UnreadyTargetsDumps
+ }
+ return nil
+}
+
+// Message of unready targets information of an init manager.
+type UnreadyTargetsDumps_UnreadyTargetsDump struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the init manager. Example: "init_manager_xxx".
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Names of unready targets of the init manager. Example: "target_xxx".
+ TargetNames []string `protobuf:"bytes,2,rep,name=target_names,json=targetNames,proto3" json:"target_names,omitempty"`
+}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) Reset() {
+ *x = UnreadyTargetsDumps_UnreadyTargetsDump{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*UnreadyTargetsDumps_UnreadyTargetsDump) ProtoMessage() {}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_init_dump_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use UnreadyTargetsDumps_UnreadyTargetsDump.ProtoReflect.Descriptor instead.
+func (*UnreadyTargetsDumps_UnreadyTargetsDump) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_init_dump_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *UnreadyTargetsDumps_UnreadyTargetsDump) GetTargetNames() []string {
+ if x != nil {
+ return x.TargetNames
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_init_dump_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_init_dump_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0xce, 0x01, 0x0a, 0x13, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65,
+ 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x6a, 0x0a, 0x15, 0x75, 0x6e, 0x72, 0x65, 0x61,
+ 0x64, 0x79, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x2e, 0x55, 0x6e, 0x72, 0x65,
+ 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x52, 0x13,
+ 0x75, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75,
+ 0x6d, 0x70, 0x73, 0x1a, 0x4b, 0x0a, 0x12, 0x55, 0x6e, 0x72, 0x65, 0x61, 0x64, 0x79, 0x54, 0x61,
+ 0x72, 0x67, 0x65, 0x74, 0x73, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a,
+ 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x73,
+ 0x42, 0x76, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0d, 0x49, 0x6e, 0x69, 0x74, 0x44, 0x75,
+ 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e,
+ 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_init_dump_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_init_dump_proto_rawDescData = file_envoy_admin_v3_init_dump_proto_rawDesc
+)
+
+func file_envoy_admin_v3_init_dump_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_init_dump_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_init_dump_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_init_dump_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_init_dump_proto_rawDescData
+}
+
+var file_envoy_admin_v3_init_dump_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_admin_v3_init_dump_proto_goTypes = []interface{}{
+ (*UnreadyTargetsDumps)(nil), // 0: envoy.admin.v3.UnreadyTargetsDumps
+ (*UnreadyTargetsDumps_UnreadyTargetsDump)(nil), // 1: envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump
+}
+var file_envoy_admin_v3_init_dump_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.UnreadyTargetsDumps.unready_targets_dumps:type_name -> envoy.admin.v3.UnreadyTargetsDumps.UnreadyTargetsDump
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_init_dump_proto_init() }
+func file_envoy_admin_v3_init_dump_proto_init() {
+ if File_envoy_admin_v3_init_dump_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_init_dump_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnreadyTargetsDumps); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_init_dump_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*UnreadyTargetsDumps_UnreadyTargetsDump); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_init_dump_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_init_dump_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_init_dump_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_init_dump_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_init_dump_proto = out.File
+ file_envoy_admin_v3_init_dump_proto_rawDesc = nil
+ file_envoy_admin_v3_init_dump_proto_goTypes = nil
+ file_envoy_admin_v3_init_dump_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go
new file mode 100644
index 000000000..f746a1264
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump.pb.validate.go
@@ -0,0 +1,281 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/init_dump.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on UnreadyTargetsDumps with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *UnreadyTargetsDumps) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on UnreadyTargetsDumps with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// UnreadyTargetsDumpsMultiError, or nil if none found.
+func (m *UnreadyTargetsDumps) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UnreadyTargetsDumps) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetUnreadyTargetsDumps() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, UnreadyTargetsDumpsValidationError{
+ field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, UnreadyTargetsDumpsValidationError{
+ field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return UnreadyTargetsDumpsValidationError{
+ field: fmt.Sprintf("UnreadyTargetsDumps[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return UnreadyTargetsDumpsMultiError(errors)
+ }
+
+ return nil
+}
+
+// UnreadyTargetsDumpsMultiError is an error wrapping multiple validation
+// errors returned by UnreadyTargetsDumps.ValidateAll() if the designated
+// constraints aren't met.
+type UnreadyTargetsDumpsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UnreadyTargetsDumpsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UnreadyTargetsDumpsMultiError) AllErrors() []error { return m }
+
+// UnreadyTargetsDumpsValidationError is the validation error returned by
+// UnreadyTargetsDumps.Validate if the designated constraints aren't met.
+type UnreadyTargetsDumpsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UnreadyTargetsDumpsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UnreadyTargetsDumpsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UnreadyTargetsDumpsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UnreadyTargetsDumpsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UnreadyTargetsDumpsValidationError) ErrorName() string {
+ return "UnreadyTargetsDumpsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UnreadyTargetsDumpsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUnreadyTargetsDumps.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UnreadyTargetsDumpsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UnreadyTargetsDumpsValidationError{}
+
+// Validate checks the field values on UnreadyTargetsDumps_UnreadyTargetsDump
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// UnreadyTargetsDumps_UnreadyTargetsDump with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// UnreadyTargetsDumps_UnreadyTargetsDumpMultiError, or nil if none found.
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if len(errors) > 0 {
+ return UnreadyTargetsDumps_UnreadyTargetsDumpMultiError(errors)
+ }
+
+ return nil
+}
+
+// UnreadyTargetsDumps_UnreadyTargetsDumpMultiError is an error wrapping
+// multiple validation errors returned by
+// UnreadyTargetsDumps_UnreadyTargetsDump.ValidateAll() if the designated
+// constraints aren't met.
+type UnreadyTargetsDumps_UnreadyTargetsDumpMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m UnreadyTargetsDumps_UnreadyTargetsDumpMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m UnreadyTargetsDumps_UnreadyTargetsDumpMultiError) AllErrors() []error { return m }
+
+// UnreadyTargetsDumps_UnreadyTargetsDumpValidationError is the validation
+// error returned by UnreadyTargetsDumps_UnreadyTargetsDump.Validate if the
+// designated constraints aren't met.
+type UnreadyTargetsDumps_UnreadyTargetsDumpValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) ErrorName() string {
+ return "UnreadyTargetsDumps_UnreadyTargetsDumpValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e UnreadyTargetsDumps_UnreadyTargetsDumpValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sUnreadyTargetsDumps_UnreadyTargetsDump.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = UnreadyTargetsDumps_UnreadyTargetsDumpValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = UnreadyTargetsDumps_UnreadyTargetsDumpValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go
new file mode 100644
index 000000000..d957042b8
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/init_dump_vtproto.pb.go
@@ -0,0 +1,149 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/init_dump.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.TargetNames) > 0 {
+ for iNdEx := len(m.TargetNames) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.TargetNames[iNdEx])
+ copy(dAtA[i:], m.TargetNames[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TargetNames[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UnreadyTargetsDumps) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UnreadyTargetsDumps) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *UnreadyTargetsDumps) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.UnreadyTargetsDumps) > 0 {
+ for iNdEx := len(m.UnreadyTargetsDumps) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.UnreadyTargetsDumps[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UnreadyTargetsDumps_UnreadyTargetsDump) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.TargetNames) > 0 {
+ for _, s := range m.TargetNames {
+ l = len(s)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *UnreadyTargetsDumps) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.UnreadyTargetsDumps) > 0 {
+ for _, e := range m.UnreadyTargetsDumps {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go
new file mode 100644
index 000000000..71ab9ed88
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.go
@@ -0,0 +1,268 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/listeners.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Admin endpoint uses this wrapper for “/listeners“ to display listener status information.
+// See :ref:`/listeners ` for more information.
+type Listeners struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // List of listener statuses.
+ ListenerStatuses []*ListenerStatus `protobuf:"bytes,1,rep,name=listener_statuses,json=listenerStatuses,proto3" json:"listener_statuses,omitempty"`
+}
+
+func (x *Listeners) Reset() {
+ *x = Listeners{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_listeners_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Listeners) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Listeners) ProtoMessage() {}
+
+func (x *Listeners) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_listeners_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Listeners.ProtoReflect.Descriptor instead.
+func (*Listeners) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_listeners_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Listeners) GetListenerStatuses() []*ListenerStatus {
+ if x != nil {
+ return x.ListenerStatuses
+ }
+ return nil
+}
+
+// Details an individual listener's current status.
+type ListenerStatus struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the listener
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The actual local address that the listener is listening on. If a listener was configured
+ // to listen on port 0, then this address has the port that was allocated by the OS.
+ LocalAddress *v3.Address `protobuf:"bytes,2,opt,name=local_address,json=localAddress,proto3" json:"local_address,omitempty"`
+ // The additional addresses the listener is listening on as specified via the :ref:`additional_addresses `
+ // configuration.
+ AdditionalLocalAddresses []*v3.Address `protobuf:"bytes,3,rep,name=additional_local_addresses,json=additionalLocalAddresses,proto3" json:"additional_local_addresses,omitempty"`
+}
+
+func (x *ListenerStatus) Reset() {
+ *x = ListenerStatus{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_listeners_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ListenerStatus) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListenerStatus) ProtoMessage() {}
+
+func (x *ListenerStatus) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_listeners_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListenerStatus.ProtoReflect.Descriptor instead.
+func (*ListenerStatus) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_listeners_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ListenerStatus) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *ListenerStatus) GetLocalAddress() *v3.Address {
+ if x != nil {
+ return x.LocalAddress
+ }
+ return nil
+}
+
+func (x *ListenerStatus) GetAdditionalLocalAddresses() []*v3.Address {
+ if x != nil {
+ return x.AdditionalLocalAddresses
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_listeners_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_listeners_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x7e, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x11, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e,
+ 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x10,
+ 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73,
+ 0x3a, 0x24, 0x9a, 0xc5, 0x88, 0x1e, 0x1f, 0x0a, 0x1d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73,
+ 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x22, 0xf0, 0x01, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65,
+ 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
+ 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a,
+ 0x0d, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64, 0x72,
+ 0x65, 0x73, 0x73, 0x52, 0x0c, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x12, 0x5b, 0x0a, 0x1a, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f,
+ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18,
+ 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x52, 0x18, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c,
+ 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x3a, 0x29,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65,
+ 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x77, 0xba, 0x80, 0xc8, 0xd1, 0x06,
+ 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f,
+ 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76,
+ 0x33, 0x42, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_listeners_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_listeners_proto_rawDescData = file_envoy_admin_v3_listeners_proto_rawDesc
+)
+
+func file_envoy_admin_v3_listeners_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_listeners_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_listeners_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_listeners_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_listeners_proto_rawDescData
+}
+
+var file_envoy_admin_v3_listeners_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_admin_v3_listeners_proto_goTypes = []interface{}{
+ (*Listeners)(nil), // 0: envoy.admin.v3.Listeners
+ (*ListenerStatus)(nil), // 1: envoy.admin.v3.ListenerStatus
+ (*v3.Address)(nil), // 2: envoy.config.core.v3.Address
+}
+var file_envoy_admin_v3_listeners_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.Listeners.listener_statuses:type_name -> envoy.admin.v3.ListenerStatus
+ 2, // 1: envoy.admin.v3.ListenerStatus.local_address:type_name -> envoy.config.core.v3.Address
+ 2, // 2: envoy.admin.v3.ListenerStatus.additional_local_addresses:type_name -> envoy.config.core.v3.Address
+ 3, // [3:3] is the sub-list for method output_type
+ 3, // [3:3] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_listeners_proto_init() }
+func file_envoy_admin_v3_listeners_proto_init() {
+ if File_envoy_admin_v3_listeners_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_listeners_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Listeners); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_listeners_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ListenerStatus); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_listeners_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_listeners_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_listeners_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_listeners_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_listeners_proto = out.File
+ file_envoy_admin_v3_listeners_proto_rawDesc = nil
+ file_envoy_admin_v3_listeners_proto_goTypes = nil
+ file_envoy_admin_v3_listeners_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go
new file mode 100644
index 000000000..02cce2639
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners.pb.validate.go
@@ -0,0 +1,335 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/listeners.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Listeners with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Listeners) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Listeners with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ListenersMultiError, or nil
+// if none found.
+func (m *Listeners) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Listeners) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetListenerStatuses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenersValidationError{
+ field: fmt.Sprintf("ListenerStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenersValidationError{
+ field: fmt.Sprintf("ListenerStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenersValidationError{
+ field: fmt.Sprintf("ListenerStatuses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListenersMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenersMultiError is an error wrapping multiple validation errors returned
+// by Listeners.ValidateAll() if the designated constraints aren't met.
+type ListenersMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenersMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenersMultiError) AllErrors() []error { return m }
+
+// ListenersValidationError is the validation error returned by
+// Listeners.Validate if the designated constraints aren't met.
+type ListenersValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenersValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenersValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenersValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenersValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenersValidationError) ErrorName() string { return "ListenersValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ListenersValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListeners.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenersValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenersValidationError{}
+
+// Validate checks the field values on ListenerStatus with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ListenerStatus) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListenerStatus with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ListenerStatusMultiError,
+// or nil if none found.
+func (m *ListenerStatus) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ListenerStatus) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetLocalAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenerStatusValidationError{
+ field: "LocalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenerStatusValidationError{
+ field: "LocalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLocalAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenerStatusValidationError{
+ field: "LocalAddress",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetAdditionalLocalAddresses() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ListenerStatusValidationError{
+ field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ListenerStatusValidationError{
+ field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ListenerStatusValidationError{
+ field: fmt.Sprintf("AdditionalLocalAddresses[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ListenerStatusMultiError(errors)
+ }
+
+ return nil
+}
+
+// ListenerStatusMultiError is an error wrapping multiple validation errors
+// returned by ListenerStatus.ValidateAll() if the designated constraints
+// aren't met.
+type ListenerStatusMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListenerStatusMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListenerStatusMultiError) AllErrors() []error { return m }
+
+// ListenerStatusValidationError is the validation error returned by
+// ListenerStatus.Validate if the designated constraints aren't met.
+type ListenerStatusValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ListenerStatusValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListenerStatusValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListenerStatusValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListenerStatusValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListenerStatusValidationError) ErrorName() string { return "ListenerStatusValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ListenerStatusValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sListenerStatus.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ListenerStatusValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ListenerStatusValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go
new file mode 100644
index 000000000..816437acf
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/listeners_vtproto.pb.go
@@ -0,0 +1,203 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/listeners.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Listeners) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Listeners) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Listeners) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.ListenerStatuses) > 0 {
+ for iNdEx := len(m.ListenerStatuses) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.ListenerStatuses[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ListenerStatus) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ListenerStatus) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ListenerStatus) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.AdditionalLocalAddresses) > 0 {
+ for iNdEx := len(m.AdditionalLocalAddresses) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.AdditionalLocalAddresses[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.AdditionalLocalAddresses[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.LocalAddress != nil {
+ if vtmsg, ok := interface{}(m.LocalAddress).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LocalAddress)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Listeners) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.ListenerStatuses) > 0 {
+ for _, e := range m.ListenerStatuses {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ListenerStatus) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LocalAddress != nil {
+ if size, ok := interface{}(m.LocalAddress).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LocalAddress)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.AdditionalLocalAddresses) > 0 {
+ for _, e := range m.AdditionalLocalAddresses {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go
new file mode 100644
index 000000000..74f0a2d4e
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.go
@@ -0,0 +1,228 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/memory.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Proto representation of the internal memory consumption of an Envoy instance. These represent
+// values extracted from an internal TCMalloc instance. For more information, see the section of the
+// docs entitled ["Generic Tcmalloc Status"](https://gperftools.github.io/gperftools/tcmalloc.html).
+// [#next-free-field: 7]
+type Memory struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of bytes allocated by the heap for Envoy. This is an alias for
+ // “generic.current_allocated_bytes“.
+ Allocated uint64 `protobuf:"varint,1,opt,name=allocated,proto3" json:"allocated,omitempty"`
+ // The number of bytes reserved by the heap but not necessarily allocated. This is an alias for
+ // “generic.heap_size“.
+ HeapSize uint64 `protobuf:"varint,2,opt,name=heap_size,json=heapSize,proto3" json:"heap_size,omitempty"`
+ // The number of bytes in free, unmapped pages in the page heap. These bytes always count towards
+ // virtual memory usage, and depending on the OS, typically do not count towards physical memory
+ // usage. This is an alias for “tcmalloc.pageheap_unmapped_bytes“.
+ PageheapUnmapped uint64 `protobuf:"varint,3,opt,name=pageheap_unmapped,json=pageheapUnmapped,proto3" json:"pageheap_unmapped,omitempty"`
+ // The number of bytes in free, mapped pages in the page heap. These bytes always count towards
+ // virtual memory usage, and unless the underlying memory is swapped out by the OS, they also
+ // count towards physical memory usage. This is an alias for “tcmalloc.pageheap_free_bytes“.
+ PageheapFree uint64 `protobuf:"varint,4,opt,name=pageheap_free,json=pageheapFree,proto3" json:"pageheap_free,omitempty"`
+ // The amount of memory used by the TCMalloc thread caches (for small objects). This is an alias
+ // for “tcmalloc.current_total_thread_cache_bytes“.
+ TotalThreadCache uint64 `protobuf:"varint,5,opt,name=total_thread_cache,json=totalThreadCache,proto3" json:"total_thread_cache,omitempty"`
+ // The number of bytes of the physical memory usage by the allocator. This is an alias for
+ // “generic.total_physical_bytes“.
+ TotalPhysicalBytes uint64 `protobuf:"varint,6,opt,name=total_physical_bytes,json=totalPhysicalBytes,proto3" json:"total_physical_bytes,omitempty"`
+}
+
+func (x *Memory) Reset() {
+ *x = Memory{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_memory_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Memory) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Memory) ProtoMessage() {}
+
+func (x *Memory) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_memory_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Memory.ProtoReflect.Descriptor instead.
+func (*Memory) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_memory_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Memory) GetAllocated() uint64 {
+ if x != nil {
+ return x.Allocated
+ }
+ return 0
+}
+
+func (x *Memory) GetHeapSize() uint64 {
+ if x != nil {
+ return x.HeapSize
+ }
+ return 0
+}
+
+func (x *Memory) GetPageheapUnmapped() uint64 {
+ if x != nil {
+ return x.PageheapUnmapped
+ }
+ return 0
+}
+
+func (x *Memory) GetPageheapFree() uint64 {
+ if x != nil {
+ return x.PageheapFree
+ }
+ return 0
+}
+
+func (x *Memory) GetTotalThreadCache() uint64 {
+ if x != nil {
+ return x.TotalThreadCache
+ }
+ return 0
+}
+
+func (x *Memory) GetTotalPhysicalBytes() uint64 {
+ if x != nil {
+ return x.TotalPhysicalBytes
+ }
+ return 0
+}
+
+var File_envoy_admin_v3_memory_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_memory_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1d, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22,
+ 0x98, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c,
+ 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x70,
+ 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, 0x65, 0x61,
+ 0x70, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61,
+ 0x70, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04,
+ 0x52, 0x10, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, 0x70, 0x55, 0x6e, 0x6d, 0x61, 0x70, 0x70,
+ 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x61, 0x67, 0x65, 0x68, 0x65, 0x61, 0x70, 0x5f, 0x66,
+ 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x70, 0x61, 0x67, 0x65, 0x68,
+ 0x65, 0x61, 0x70, 0x46, 0x72, 0x65, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64,
+ 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70,
+ 0x68, 0x79, 0x73, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x06, 0x20,
+ 0x01, 0x28, 0x04, 0x52, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x68, 0x79, 0x73, 0x69, 0x63,
+ 0x61, 0x6c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x3a, 0x21, 0x9a, 0xc5, 0x88, 0x1e, 0x1c, 0x0a, 0x1a,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c,
+ 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x42, 0x74, 0xba, 0x80, 0xc8, 0xd1,
+ 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x42, 0x0b, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
+ 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74,
+ 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_memory_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_memory_proto_rawDescData = file_envoy_admin_v3_memory_proto_rawDesc
+)
+
+func file_envoy_admin_v3_memory_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_memory_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_memory_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_memory_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_memory_proto_rawDescData
+}
+
+var file_envoy_admin_v3_memory_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_admin_v3_memory_proto_goTypes = []interface{}{
+ (*Memory)(nil), // 0: envoy.admin.v3.Memory
+}
+var file_envoy_admin_v3_memory_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_memory_proto_init() }
+func file_envoy_admin_v3_memory_proto_init() {
+ if File_envoy_admin_v3_memory_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_memory_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Memory); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_memory_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_memory_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_memory_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_memory_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_memory_proto = out.File
+ file_envoy_admin_v3_memory_proto_rawDesc = nil
+ file_envoy_admin_v3_memory_proto_goTypes = nil
+ file_envoy_admin_v3_memory_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go
new file mode 100644
index 000000000..bcb9c1d20
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory.pb.validate.go
@@ -0,0 +1,147 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/memory.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Memory with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Memory) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Memory with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in MemoryMultiError, or nil if none found.
+func (m *Memory) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Memory) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Allocated
+
+ // no validation rules for HeapSize
+
+ // no validation rules for PageheapUnmapped
+
+ // no validation rules for PageheapFree
+
+ // no validation rules for TotalThreadCache
+
+ // no validation rules for TotalPhysicalBytes
+
+ if len(errors) > 0 {
+ return MemoryMultiError(errors)
+ }
+
+ return nil
+}
+
+// MemoryMultiError is an error wrapping multiple validation errors returned by
+// Memory.ValidateAll() if the designated constraints aren't met.
+type MemoryMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MemoryMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MemoryMultiError) AllErrors() []error { return m }
+
+// MemoryValidationError is the validation error returned by Memory.Validate if
+// the designated constraints aren't met.
+type MemoryValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MemoryValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MemoryValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MemoryValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MemoryValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MemoryValidationError) ErrorName() string { return "MemoryValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MemoryValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMemory.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MemoryValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MemoryValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go
new file mode 100644
index 000000000..6e3a23688
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/memory_vtproto.pb.go
@@ -0,0 +1,110 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/memory.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Memory) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Memory) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Memory) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.TotalPhysicalBytes != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalPhysicalBytes))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.TotalThreadCache != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.TotalThreadCache))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.PageheapFree != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapFree))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.PageheapUnmapped != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.PageheapUnmapped))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.HeapSize != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.HeapSize))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Allocated != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Allocated))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Memory) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Allocated != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Allocated))
+ }
+ if m.HeapSize != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.HeapSize))
+ }
+ if m.PageheapUnmapped != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapUnmapped))
+ }
+ if m.PageheapFree != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.PageheapFree))
+ }
+ if m.TotalThreadCache != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalThreadCache))
+ }
+ if m.TotalPhysicalBytes != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.TotalPhysicalBytes))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go
new file mode 100644
index 000000000..21866a3e7
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.go
@@ -0,0 +1,234 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/metrics.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type SimpleMetric_Type int32
+
+const (
+ SimpleMetric_COUNTER SimpleMetric_Type = 0
+ SimpleMetric_GAUGE SimpleMetric_Type = 1
+)
+
+// Enum value maps for SimpleMetric_Type.
+var (
+ SimpleMetric_Type_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ }
+ SimpleMetric_Type_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ }
+)
+
+func (x SimpleMetric_Type) Enum() *SimpleMetric_Type {
+ p := new(SimpleMetric_Type)
+ *p = x
+ return p
+}
+
+func (x SimpleMetric_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SimpleMetric_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (SimpleMetric_Type) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_metrics_proto_enumTypes[0]
+}
+
+func (x SimpleMetric_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SimpleMetric_Type.Descriptor instead.
+func (SimpleMetric_Type) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_metrics_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Proto representation of an Envoy Counter or Gauge value.
+type SimpleMetric struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Type of the metric represented.
+ Type SimpleMetric_Type `protobuf:"varint,1,opt,name=type,proto3,enum=envoy.admin.v3.SimpleMetric_Type" json:"type,omitempty"`
+ // Current metric value.
+ Value uint64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
+ // Name of the metric.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (x *SimpleMetric) Reset() {
+ *x = SimpleMetric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SimpleMetric) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SimpleMetric) ProtoMessage() {}
+
+func (x *SimpleMetric) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SimpleMetric.ProtoReflect.Descriptor instead.
+func (*SimpleMetric) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *SimpleMetric) GetType() SimpleMetric_Type {
+ if x != nil {
+ return x.Type
+ }
+ return SimpleMetric_COUNTER
+}
+
+func (x *SimpleMetric) GetValue() uint64 {
+ if x != nil {
+ return x.Value
+ }
+ return 0
+}
+
+func (x *SimpleMetric) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+var File_envoy_admin_v3_metrics_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x1c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x1d,
+ 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75,
+ 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x22, 0xb8, 0x01, 0x0a, 0x0c, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x12, 0x35, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x21, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x2e, 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x22, 0x1e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f,
+ 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45,
+ 0x10, 0x01, 0x3a, 0x27, 0x9a, 0xc5, 0x88, 0x1e, 0x22, 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53,
+ 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x42, 0x75, 0xba, 0x80, 0xc8,
+ 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70,
+ 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x33, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f,
+ 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_metrics_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_metrics_proto_rawDescData = file_envoy_admin_v3_metrics_proto_rawDesc
+)
+
+func file_envoy_admin_v3_metrics_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_metrics_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_metrics_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_metrics_proto_rawDescData
+}
+
+var file_envoy_admin_v3_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_envoy_admin_v3_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_admin_v3_metrics_proto_goTypes = []interface{}{
+ (SimpleMetric_Type)(0), // 0: envoy.admin.v3.SimpleMetric.Type
+ (*SimpleMetric)(nil), // 1: envoy.admin.v3.SimpleMetric
+}
+var file_envoy_admin_v3_metrics_proto_depIdxs = []int32{
+ 0, // 0: envoy.admin.v3.SimpleMetric.type:type_name -> envoy.admin.v3.SimpleMetric.Type
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_metrics_proto_init() }
+func file_envoy_admin_v3_metrics_proto_init() {
+ if File_envoy_admin_v3_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SimpleMetric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_metrics_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_metrics_proto_depIdxs,
+ EnumInfos: file_envoy_admin_v3_metrics_proto_enumTypes,
+ MessageInfos: file_envoy_admin_v3_metrics_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_metrics_proto = out.File
+ file_envoy_admin_v3_metrics_proto_rawDesc = nil
+ file_envoy_admin_v3_metrics_proto_goTypes = nil
+ file_envoy_admin_v3_metrics_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go
new file mode 100644
index 000000000..903d70e19
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics.pb.validate.go
@@ -0,0 +1,142 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/metrics.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on SimpleMetric with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *SimpleMetric) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on SimpleMetric with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in SimpleMetricMultiError, or
+// nil if none found.
+func (m *SimpleMetric) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *SimpleMetric) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Type
+
+ // no validation rules for Value
+
+ // no validation rules for Name
+
+ if len(errors) > 0 {
+ return SimpleMetricMultiError(errors)
+ }
+
+ return nil
+}
+
+// SimpleMetricMultiError is an error wrapping multiple validation errors
+// returned by SimpleMetric.ValidateAll() if the designated constraints aren't met.
+type SimpleMetricMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m SimpleMetricMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m SimpleMetricMultiError) AllErrors() []error { return m }
+
+// SimpleMetricValidationError is the validation error returned by
+// SimpleMetric.Validate if the designated constraints aren't met.
+type SimpleMetricValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e SimpleMetricValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e SimpleMetricValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e SimpleMetricValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e SimpleMetricValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e SimpleMetricValidationError) ErrorName() string { return "SimpleMetricValidationError" }
+
+// Error satisfies the builtin error interface
+func (e SimpleMetricValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sSimpleMetric.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = SimpleMetricValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = SimpleMetricValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go
new file mode 100644
index 000000000..0c09ae045
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/metrics_vtproto.pb.go
@@ -0,0 +1,89 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/metrics.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *SimpleMetric) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SimpleMetric) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *SimpleMetric) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Value != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Value))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Type != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SimpleMetric) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Type))
+ }
+ if m.Value != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Value))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go
new file mode 100644
index 000000000..d78d94e57
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.go
@@ -0,0 +1,191 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/mutex_stats.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Proto representation of the statistics collected upon absl::Mutex contention, if Envoy is run
+// under :option:`--enable-mutex-tracing`. For more information, see the “absl::Mutex“
+// [docs](https://abseil.io/about/design/mutex#extra-features).
+//
+// *NB*: The wait cycles below are measured by “absl::base_internal::CycleClock“, and may not
+// correspond to core clock frequency. For more information, see the “CycleClock“
+// [docs](https://github.com/abseil/abseil-cpp/blob/master/absl/base/internal/cycleclock.h).
+type MutexStats struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of individual mutex contentions which have occurred since startup.
+ NumContentions uint64 `protobuf:"varint,1,opt,name=num_contentions,json=numContentions,proto3" json:"num_contentions,omitempty"`
+ // The length of the current contention wait cycle.
+ CurrentWaitCycles uint64 `protobuf:"varint,2,opt,name=current_wait_cycles,json=currentWaitCycles,proto3" json:"current_wait_cycles,omitempty"`
+ // The lifetime total of all contention wait cycles.
+ LifetimeWaitCycles uint64 `protobuf:"varint,3,opt,name=lifetime_wait_cycles,json=lifetimeWaitCycles,proto3" json:"lifetime_wait_cycles,omitempty"`
+}
+
+func (x *MutexStats) Reset() {
+ *x = MutexStats{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_mutex_stats_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MutexStats) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MutexStats) ProtoMessage() {}
+
+func (x *MutexStats) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_mutex_stats_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MutexStats.ProtoReflect.Descriptor instead.
+func (*MutexStats) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_mutex_stats_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *MutexStats) GetNumContentions() uint64 {
+ if x != nil {
+ return x.NumContentions
+ }
+ return 0
+}
+
+func (x *MutexStats) GetCurrentWaitCycles() uint64 {
+ if x != nil {
+ return x.CurrentWaitCycles
+ }
+ return 0
+}
+
+func (x *MutexStats) GetLifetimeWaitCycles() uint64 {
+ if x != nil {
+ return x.LifetimeWaitCycles
+ }
+ return 0
+}
+
+var File_envoy_admin_v3_mutex_stats_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_mutex_stats_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x6d, 0x75, 0x74, 0x65, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbe, 0x01, 0x0a, 0x0a, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x53, 0x74,
+ 0x61, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6e, 0x75,
+ 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13,
+ 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x63, 0x79, 0x63,
+ 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x63, 0x75, 0x72, 0x72, 0x65,
+ 0x6e, 0x74, 0x57, 0x61, 0x69, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14,
+ 0x6c, 0x69, 0x66, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x63, 0x79,
+ 0x63, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x6c, 0x69, 0x66, 0x65,
+ 0x74, 0x69, 0x6d, 0x65, 0x57, 0x61, 0x69, 0x74, 0x43, 0x79, 0x63, 0x6c, 0x65, 0x73, 0x3a, 0x25,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x4d, 0x75, 0x74, 0x65, 0x78,
+ 0x53, 0x74, 0x61, 0x74, 0x73, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a,
+ 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x4d,
+ 0x75, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_mutex_stats_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_mutex_stats_proto_rawDescData = file_envoy_admin_v3_mutex_stats_proto_rawDesc
+)
+
+func file_envoy_admin_v3_mutex_stats_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_mutex_stats_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_mutex_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_mutex_stats_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_mutex_stats_proto_rawDescData
+}
+
+var file_envoy_admin_v3_mutex_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_admin_v3_mutex_stats_proto_goTypes = []interface{}{
+ (*MutexStats)(nil), // 0: envoy.admin.v3.MutexStats
+}
+var file_envoy_admin_v3_mutex_stats_proto_depIdxs = []int32{
+ 0, // [0:0] is the sub-list for method output_type
+ 0, // [0:0] is the sub-list for method input_type
+ 0, // [0:0] is the sub-list for extension type_name
+ 0, // [0:0] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_mutex_stats_proto_init() }
+func file_envoy_admin_v3_mutex_stats_proto_init() {
+ if File_envoy_admin_v3_mutex_stats_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_mutex_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MutexStats); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_mutex_stats_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_mutex_stats_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_mutex_stats_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_mutex_stats_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_mutex_stats_proto = out.File
+ file_envoy_admin_v3_mutex_stats_proto_rawDesc = nil
+ file_envoy_admin_v3_mutex_stats_proto_goTypes = nil
+ file_envoy_admin_v3_mutex_stats_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go
new file mode 100644
index 000000000..236524c54
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats.pb.validate.go
@@ -0,0 +1,142 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/mutex_stats.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on MutexStats with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *MutexStats) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MutexStats with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in MutexStatsMultiError, or
+// nil if none found.
+func (m *MutexStats) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MutexStats) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for NumContentions
+
+ // no validation rules for CurrentWaitCycles
+
+ // no validation rules for LifetimeWaitCycles
+
+ if len(errors) > 0 {
+ return MutexStatsMultiError(errors)
+ }
+
+ return nil
+}
+
+// MutexStatsMultiError is an error wrapping multiple validation errors
+// returned by MutexStats.ValidateAll() if the designated constraints aren't met.
+type MutexStatsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MutexStatsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MutexStatsMultiError) AllErrors() []error { return m }
+
+// MutexStatsValidationError is the validation error returned by
+// MutexStats.Validate if the designated constraints aren't met.
+type MutexStatsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MutexStatsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MutexStatsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MutexStatsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MutexStatsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MutexStatsValidationError) ErrorName() string { return "MutexStatsValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MutexStatsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMutexStats.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MutexStatsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MutexStatsValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go
new file mode 100644
index 000000000..4318cbc99
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/mutex_stats_vtproto.pb.go
@@ -0,0 +1,86 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/mutex_stats.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *MutexStats) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MutexStats) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *MutexStats) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LifetimeWaitCycles != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LifetimeWaitCycles))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.CurrentWaitCycles != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.CurrentWaitCycles))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.NumContentions != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.NumContentions))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MutexStats) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NumContentions != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.NumContentions))
+ }
+ if m.CurrentWaitCycles != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.CurrentWaitCycles))
+ }
+ if m.LifetimeWaitCycles != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.LifetimeWaitCycles))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go
new file mode 100644
index 000000000..fa32074d9
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.go
@@ -0,0 +1,987 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/server_info.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ServerInfo_State int32
+
+const (
+ // Server is live and serving traffic.
+ ServerInfo_LIVE ServerInfo_State = 0
+ // Server is draining listeners in response to external health checks failing.
+ ServerInfo_DRAINING ServerInfo_State = 1
+ // Server has not yet completed cluster manager initialization.
+ ServerInfo_PRE_INITIALIZING ServerInfo_State = 2
+ // Server is running the cluster manager initialization callbacks (e.g., RDS).
+ ServerInfo_INITIALIZING ServerInfo_State = 3
+)
+
+// Enum value maps for ServerInfo_State.
+var (
+ ServerInfo_State_name = map[int32]string{
+ 0: "LIVE",
+ 1: "DRAINING",
+ 2: "PRE_INITIALIZING",
+ 3: "INITIALIZING",
+ }
+ ServerInfo_State_value = map[string]int32{
+ "LIVE": 0,
+ "DRAINING": 1,
+ "PRE_INITIALIZING": 2,
+ "INITIALIZING": 3,
+ }
+)
+
+func (x ServerInfo_State) Enum() *ServerInfo_State {
+ p := new(ServerInfo_State)
+ *p = x
+ return p
+}
+
+func (x ServerInfo_State) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ServerInfo_State) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_server_info_proto_enumTypes[0].Descriptor()
+}
+
+func (ServerInfo_State) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_server_info_proto_enumTypes[0]
+}
+
+func (x ServerInfo_State) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ServerInfo_State.Descriptor instead.
+func (ServerInfo_State) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type CommandLineOptions_IpVersion int32
+
+const (
+ CommandLineOptions_v4 CommandLineOptions_IpVersion = 0
+ CommandLineOptions_v6 CommandLineOptions_IpVersion = 1
+)
+
+// Enum value maps for CommandLineOptions_IpVersion.
+var (
+ CommandLineOptions_IpVersion_name = map[int32]string{
+ 0: "v4",
+ 1: "v6",
+ }
+ CommandLineOptions_IpVersion_value = map[string]int32{
+ "v4": 0,
+ "v6": 1,
+ }
+)
+
+func (x CommandLineOptions_IpVersion) Enum() *CommandLineOptions_IpVersion {
+ p := new(CommandLineOptions_IpVersion)
+ *p = x
+ return p
+}
+
+func (x CommandLineOptions_IpVersion) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CommandLineOptions_IpVersion) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_server_info_proto_enumTypes[1].Descriptor()
+}
+
+func (CommandLineOptions_IpVersion) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_server_info_proto_enumTypes[1]
+}
+
+func (x CommandLineOptions_IpVersion) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CommandLineOptions_IpVersion.Descriptor instead.
+func (CommandLineOptions_IpVersion) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 0}
+}
+
+type CommandLineOptions_Mode int32
+
+const (
+ // Validate configs and then serve traffic normally.
+ CommandLineOptions_Serve CommandLineOptions_Mode = 0
+ // Validate configs and exit.
+ CommandLineOptions_Validate CommandLineOptions_Mode = 1
+ // Completely load and initialize the config, and then exit without running the listener loop.
+ CommandLineOptions_InitOnly CommandLineOptions_Mode = 2
+)
+
+// Enum value maps for CommandLineOptions_Mode.
+var (
+ CommandLineOptions_Mode_name = map[int32]string{
+ 0: "Serve",
+ 1: "Validate",
+ 2: "InitOnly",
+ }
+ CommandLineOptions_Mode_value = map[string]int32{
+ "Serve": 0,
+ "Validate": 1,
+ "InitOnly": 2,
+ }
+)
+
+func (x CommandLineOptions_Mode) Enum() *CommandLineOptions_Mode {
+ p := new(CommandLineOptions_Mode)
+ *p = x
+ return p
+}
+
+func (x CommandLineOptions_Mode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CommandLineOptions_Mode) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_server_info_proto_enumTypes[2].Descriptor()
+}
+
+func (CommandLineOptions_Mode) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_server_info_proto_enumTypes[2]
+}
+
+func (x CommandLineOptions_Mode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CommandLineOptions_Mode.Descriptor instead.
+func (CommandLineOptions_Mode) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 1}
+}
+
+type CommandLineOptions_DrainStrategy int32
+
+const (
+ // Gradually discourage connections over the course of the drain period.
+ CommandLineOptions_Gradual CommandLineOptions_DrainStrategy = 0
+ // Discourage all connections for the duration of the drain sequence.
+ CommandLineOptions_Immediate CommandLineOptions_DrainStrategy = 1
+)
+
+// Enum value maps for CommandLineOptions_DrainStrategy.
+var (
+ CommandLineOptions_DrainStrategy_name = map[int32]string{
+ 0: "Gradual",
+ 1: "Immediate",
+ }
+ CommandLineOptions_DrainStrategy_value = map[string]int32{
+ "Gradual": 0,
+ "Immediate": 1,
+ }
+)
+
+func (x CommandLineOptions_DrainStrategy) Enum() *CommandLineOptions_DrainStrategy {
+ p := new(CommandLineOptions_DrainStrategy)
+ *p = x
+ return p
+}
+
+func (x CommandLineOptions_DrainStrategy) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CommandLineOptions_DrainStrategy) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_admin_v3_server_info_proto_enumTypes[3].Descriptor()
+}
+
+func (CommandLineOptions_DrainStrategy) Type() protoreflect.EnumType {
+ return &file_envoy_admin_v3_server_info_proto_enumTypes[3]
+}
+
+func (x CommandLineOptions_DrainStrategy) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CommandLineOptions_DrainStrategy.Descriptor instead.
+func (CommandLineOptions_DrainStrategy) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1, 2}
+}
+
+// Proto representation of the value returned by /server_info, containing
+// server version/server status information.
+// [#next-free-field: 8]
+type ServerInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Server version.
+ Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+ // State of the server.
+ State ServerInfo_State `protobuf:"varint,2,opt,name=state,proto3,enum=envoy.admin.v3.ServerInfo_State" json:"state,omitempty"`
+ // Uptime since current epoch was started.
+ UptimeCurrentEpoch *durationpb.Duration `protobuf:"bytes,3,opt,name=uptime_current_epoch,json=uptimeCurrentEpoch,proto3" json:"uptime_current_epoch,omitempty"`
+ // Uptime since the start of the first epoch.
+ UptimeAllEpochs *durationpb.Duration `protobuf:"bytes,4,opt,name=uptime_all_epochs,json=uptimeAllEpochs,proto3" json:"uptime_all_epochs,omitempty"`
+ // Hot restart version.
+ HotRestartVersion string `protobuf:"bytes,5,opt,name=hot_restart_version,json=hotRestartVersion,proto3" json:"hot_restart_version,omitempty"`
+ // Command line options the server is currently running with.
+ CommandLineOptions *CommandLineOptions `protobuf:"bytes,6,opt,name=command_line_options,json=commandLineOptions,proto3" json:"command_line_options,omitempty"`
+ // Populated node identity of this server.
+ Node *v3.Node `protobuf:"bytes,7,opt,name=node,proto3" json:"node,omitempty"`
+}
+
+func (x *ServerInfo) Reset() {
+ *x = ServerInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_server_info_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ServerInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerInfo) ProtoMessage() {}
+
+func (x *ServerInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_server_info_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerInfo.ProtoReflect.Descriptor instead.
+func (*ServerInfo) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ServerInfo) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *ServerInfo) GetState() ServerInfo_State {
+ if x != nil {
+ return x.State
+ }
+ return ServerInfo_LIVE
+}
+
+func (x *ServerInfo) GetUptimeCurrentEpoch() *durationpb.Duration {
+ if x != nil {
+ return x.UptimeCurrentEpoch
+ }
+ return nil
+}
+
+func (x *ServerInfo) GetUptimeAllEpochs() *durationpb.Duration {
+ if x != nil {
+ return x.UptimeAllEpochs
+ }
+ return nil
+}
+
+func (x *ServerInfo) GetHotRestartVersion() string {
+ if x != nil {
+ return x.HotRestartVersion
+ }
+ return ""
+}
+
+func (x *ServerInfo) GetCommandLineOptions() *CommandLineOptions {
+ if x != nil {
+ return x.CommandLineOptions
+ }
+ return nil
+}
+
+func (x *ServerInfo) GetNode() *v3.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+// [#next-free-field: 42]
+type CommandLineOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // See :option:`--base-id` for details.
+ BaseId uint64 `protobuf:"varint,1,opt,name=base_id,json=baseId,proto3" json:"base_id,omitempty"`
+ // See :option:`--use-dynamic-base-id` for details.
+ UseDynamicBaseId bool `protobuf:"varint,31,opt,name=use_dynamic_base_id,json=useDynamicBaseId,proto3" json:"use_dynamic_base_id,omitempty"`
+ // See :option:`--skip-hot-restart-on-no-parent` for details.
+ SkipHotRestartOnNoParent bool `protobuf:"varint,39,opt,name=skip_hot_restart_on_no_parent,json=skipHotRestartOnNoParent,proto3" json:"skip_hot_restart_on_no_parent,omitempty"`
+ // See :option:`--skip-hot-restart-parent-stats` for details.
+ SkipHotRestartParentStats bool `protobuf:"varint,40,opt,name=skip_hot_restart_parent_stats,json=skipHotRestartParentStats,proto3" json:"skip_hot_restart_parent_stats,omitempty"`
+ // See :option:`--base-id-path` for details.
+ BaseIdPath string `protobuf:"bytes,32,opt,name=base_id_path,json=baseIdPath,proto3" json:"base_id_path,omitempty"`
+ // See :option:`--concurrency` for details.
+ Concurrency uint32 `protobuf:"varint,2,opt,name=concurrency,proto3" json:"concurrency,omitempty"`
+ // See :option:`--config-path` for details.
+ ConfigPath string `protobuf:"bytes,3,opt,name=config_path,json=configPath,proto3" json:"config_path,omitempty"`
+ // See :option:`--config-yaml` for details.
+ ConfigYaml string `protobuf:"bytes,4,opt,name=config_yaml,json=configYaml,proto3" json:"config_yaml,omitempty"`
+ // See :option:`--allow-unknown-static-fields` for details.
+ AllowUnknownStaticFields bool `protobuf:"varint,5,opt,name=allow_unknown_static_fields,json=allowUnknownStaticFields,proto3" json:"allow_unknown_static_fields,omitempty"`
+ // See :option:`--reject-unknown-dynamic-fields` for details.
+ RejectUnknownDynamicFields bool `protobuf:"varint,26,opt,name=reject_unknown_dynamic_fields,json=rejectUnknownDynamicFields,proto3" json:"reject_unknown_dynamic_fields,omitempty"`
+ // See :option:`--ignore-unknown-dynamic-fields` for details.
+ IgnoreUnknownDynamicFields bool `protobuf:"varint,30,opt,name=ignore_unknown_dynamic_fields,json=ignoreUnknownDynamicFields,proto3" json:"ignore_unknown_dynamic_fields,omitempty"`
+ // See :option:`--skip-deprecated-logs` for details.
+ SkipDeprecatedLogs bool `protobuf:"varint,41,opt,name=skip_deprecated_logs,json=skipDeprecatedLogs,proto3" json:"skip_deprecated_logs,omitempty"`
+ // See :option:`--admin-address-path` for details.
+ AdminAddressPath string `protobuf:"bytes,6,opt,name=admin_address_path,json=adminAddressPath,proto3" json:"admin_address_path,omitempty"`
+ // See :option:`--local-address-ip-version` for details.
+ LocalAddressIpVersion CommandLineOptions_IpVersion `protobuf:"varint,7,opt,name=local_address_ip_version,json=localAddressIpVersion,proto3,enum=envoy.admin.v3.CommandLineOptions_IpVersion" json:"local_address_ip_version,omitempty"`
+ // See :option:`--log-level` for details.
+ LogLevel string `protobuf:"bytes,8,opt,name=log_level,json=logLevel,proto3" json:"log_level,omitempty"`
+ // See :option:`--component-log-level` for details.
+ ComponentLogLevel string `protobuf:"bytes,9,opt,name=component_log_level,json=componentLogLevel,proto3" json:"component_log_level,omitempty"`
+ // See :option:`--log-format` for details.
+ LogFormat string `protobuf:"bytes,10,opt,name=log_format,json=logFormat,proto3" json:"log_format,omitempty"`
+ // See :option:`--log-format-escaped` for details.
+ LogFormatEscaped bool `protobuf:"varint,27,opt,name=log_format_escaped,json=logFormatEscaped,proto3" json:"log_format_escaped,omitempty"`
+ // See :option:`--log-path` for details.
+ LogPath string `protobuf:"bytes,11,opt,name=log_path,json=logPath,proto3" json:"log_path,omitempty"`
+ // See :option:`--service-cluster` for details.
+ ServiceCluster string `protobuf:"bytes,13,opt,name=service_cluster,json=serviceCluster,proto3" json:"service_cluster,omitempty"`
+ // See :option:`--service-node` for details.
+ ServiceNode string `protobuf:"bytes,14,opt,name=service_node,json=serviceNode,proto3" json:"service_node,omitempty"`
+ // See :option:`--service-zone` for details.
+ ServiceZone string `protobuf:"bytes,15,opt,name=service_zone,json=serviceZone,proto3" json:"service_zone,omitempty"`
+ // See :option:`--file-flush-interval-msec` for details.
+ FileFlushInterval *durationpb.Duration `protobuf:"bytes,16,opt,name=file_flush_interval,json=fileFlushInterval,proto3" json:"file_flush_interval,omitempty"`
+ // See :option:`--drain-time-s` for details.
+ DrainTime *durationpb.Duration `protobuf:"bytes,17,opt,name=drain_time,json=drainTime,proto3" json:"drain_time,omitempty"`
+ // See :option:`--drain-strategy` for details.
+ DrainStrategy CommandLineOptions_DrainStrategy `protobuf:"varint,33,opt,name=drain_strategy,json=drainStrategy,proto3,enum=envoy.admin.v3.CommandLineOptions_DrainStrategy" json:"drain_strategy,omitempty"`
+ // See :option:`--parent-shutdown-time-s` for details.
+ ParentShutdownTime *durationpb.Duration `protobuf:"bytes,18,opt,name=parent_shutdown_time,json=parentShutdownTime,proto3" json:"parent_shutdown_time,omitempty"`
+ // See :option:`--mode` for details.
+ Mode CommandLineOptions_Mode `protobuf:"varint,19,opt,name=mode,proto3,enum=envoy.admin.v3.CommandLineOptions_Mode" json:"mode,omitempty"`
+ // See :option:`--disable-hot-restart` for details.
+ DisableHotRestart bool `protobuf:"varint,22,opt,name=disable_hot_restart,json=disableHotRestart,proto3" json:"disable_hot_restart,omitempty"`
+ // See :option:`--enable-mutex-tracing` for details.
+ EnableMutexTracing bool `protobuf:"varint,23,opt,name=enable_mutex_tracing,json=enableMutexTracing,proto3" json:"enable_mutex_tracing,omitempty"`
+ // See :option:`--restart-epoch` for details.
+ RestartEpoch uint32 `protobuf:"varint,24,opt,name=restart_epoch,json=restartEpoch,proto3" json:"restart_epoch,omitempty"`
+ // See :option:`--cpuset-threads` for details.
+ CpusetThreads bool `protobuf:"varint,25,opt,name=cpuset_threads,json=cpusetThreads,proto3" json:"cpuset_threads,omitempty"`
+ // See :option:`--disable-extensions` for details.
+ DisabledExtensions []string `protobuf:"bytes,28,rep,name=disabled_extensions,json=disabledExtensions,proto3" json:"disabled_extensions,omitempty"`
+ // See :option:`--enable-fine-grain-logging` for details.
+ EnableFineGrainLogging bool `protobuf:"varint,34,opt,name=enable_fine_grain_logging,json=enableFineGrainLogging,proto3" json:"enable_fine_grain_logging,omitempty"`
+ // See :option:`--socket-path` for details.
+ SocketPath string `protobuf:"bytes,35,opt,name=socket_path,json=socketPath,proto3" json:"socket_path,omitempty"`
+ // See :option:`--socket-mode` for details.
+ SocketMode uint32 `protobuf:"varint,36,opt,name=socket_mode,json=socketMode,proto3" json:"socket_mode,omitempty"`
+ // See :option:`--enable-core-dump` for details.
+ EnableCoreDump bool `protobuf:"varint,37,opt,name=enable_core_dump,json=enableCoreDump,proto3" json:"enable_core_dump,omitempty"`
+ // See :option:`--stats-tag` for details.
+ StatsTag []string `protobuf:"bytes,38,rep,name=stats_tag,json=statsTag,proto3" json:"stats_tag,omitempty"`
+}
+
+func (x *CommandLineOptions) Reset() {
+ *x = CommandLineOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_server_info_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CommandLineOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CommandLineOptions) ProtoMessage() {}
+
+func (x *CommandLineOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_server_info_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CommandLineOptions.ProtoReflect.Descriptor instead.
+func (*CommandLineOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_server_info_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *CommandLineOptions) GetBaseId() uint64 {
+ if x != nil {
+ return x.BaseId
+ }
+ return 0
+}
+
+func (x *CommandLineOptions) GetUseDynamicBaseId() bool {
+ if x != nil {
+ return x.UseDynamicBaseId
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetSkipHotRestartOnNoParent() bool {
+ if x != nil {
+ return x.SkipHotRestartOnNoParent
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetSkipHotRestartParentStats() bool {
+ if x != nil {
+ return x.SkipHotRestartParentStats
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetBaseIdPath() string {
+ if x != nil {
+ return x.BaseIdPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetConcurrency() uint32 {
+ if x != nil {
+ return x.Concurrency
+ }
+ return 0
+}
+
+func (x *CommandLineOptions) GetConfigPath() string {
+ if x != nil {
+ return x.ConfigPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetConfigYaml() string {
+ if x != nil {
+ return x.ConfigYaml
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetAllowUnknownStaticFields() bool {
+ if x != nil {
+ return x.AllowUnknownStaticFields
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetRejectUnknownDynamicFields() bool {
+ if x != nil {
+ return x.RejectUnknownDynamicFields
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetIgnoreUnknownDynamicFields() bool {
+ if x != nil {
+ return x.IgnoreUnknownDynamicFields
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetSkipDeprecatedLogs() bool {
+ if x != nil {
+ return x.SkipDeprecatedLogs
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetAdminAddressPath() string {
+ if x != nil {
+ return x.AdminAddressPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetLocalAddressIpVersion() CommandLineOptions_IpVersion {
+ if x != nil {
+ return x.LocalAddressIpVersion
+ }
+ return CommandLineOptions_v4
+}
+
+func (x *CommandLineOptions) GetLogLevel() string {
+ if x != nil {
+ return x.LogLevel
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetComponentLogLevel() string {
+ if x != nil {
+ return x.ComponentLogLevel
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetLogFormat() string {
+ if x != nil {
+ return x.LogFormat
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetLogFormatEscaped() bool {
+ if x != nil {
+ return x.LogFormatEscaped
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetLogPath() string {
+ if x != nil {
+ return x.LogPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetServiceCluster() string {
+ if x != nil {
+ return x.ServiceCluster
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetServiceNode() string {
+ if x != nil {
+ return x.ServiceNode
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetServiceZone() string {
+ if x != nil {
+ return x.ServiceZone
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetFileFlushInterval() *durationpb.Duration {
+ if x != nil {
+ return x.FileFlushInterval
+ }
+ return nil
+}
+
+func (x *CommandLineOptions) GetDrainTime() *durationpb.Duration {
+ if x != nil {
+ return x.DrainTime
+ }
+ return nil
+}
+
+func (x *CommandLineOptions) GetDrainStrategy() CommandLineOptions_DrainStrategy {
+ if x != nil {
+ return x.DrainStrategy
+ }
+ return CommandLineOptions_Gradual
+}
+
+func (x *CommandLineOptions) GetParentShutdownTime() *durationpb.Duration {
+ if x != nil {
+ return x.ParentShutdownTime
+ }
+ return nil
+}
+
+func (x *CommandLineOptions) GetMode() CommandLineOptions_Mode {
+ if x != nil {
+ return x.Mode
+ }
+ return CommandLineOptions_Serve
+}
+
+func (x *CommandLineOptions) GetDisableHotRestart() bool {
+ if x != nil {
+ return x.DisableHotRestart
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetEnableMutexTracing() bool {
+ if x != nil {
+ return x.EnableMutexTracing
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetRestartEpoch() uint32 {
+ if x != nil {
+ return x.RestartEpoch
+ }
+ return 0
+}
+
+func (x *CommandLineOptions) GetCpusetThreads() bool {
+ if x != nil {
+ return x.CpusetThreads
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetDisabledExtensions() []string {
+ if x != nil {
+ return x.DisabledExtensions
+ }
+ return nil
+}
+
+func (x *CommandLineOptions) GetEnableFineGrainLogging() bool {
+ if x != nil {
+ return x.EnableFineGrainLogging
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetSocketPath() string {
+ if x != nil {
+ return x.SocketPath
+ }
+ return ""
+}
+
+func (x *CommandLineOptions) GetSocketMode() uint32 {
+ if x != nil {
+ return x.SocketMode
+ }
+ return 0
+}
+
+func (x *CommandLineOptions) GetEnableCoreDump() bool {
+ if x != nil {
+ return x.EnableCoreDump
+ }
+ return false
+}
+
+func (x *CommandLineOptions) GetStatsTag() []string {
+ if x != nil {
+ return x.StatsTag
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_server_info_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_server_info_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x98, 0x04, 0x0a, 0x0a, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72,
+ 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36,
+ 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x53,
+ 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52,
+ 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4b, 0x0a, 0x14, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x12, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x45, 0x70,
+ 0x6f, 0x63, 0x68, 0x12, 0x45, 0x0a, 0x11, 0x75, 0x70, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x61, 0x6c,
+ 0x6c, 0x5f, 0x65, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x75, 0x70, 0x74, 0x69, 0x6d,
+ 0x65, 0x41, 0x6c, 0x6c, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x68, 0x6f,
+ 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x54, 0x0a, 0x14, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e,
+ 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65,
+ 0x22, 0x47, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x56,
+ 0x45, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x52, 0x41, 0x49, 0x4e, 0x49, 0x4e, 0x47, 0x10,
+ 0x01, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x52, 0x45, 0x5f, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c,
+ 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x4e, 0x49, 0x54, 0x49,
+ 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20,
+ 0x0a, 0x1e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32,
+ 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
+ 0x22, 0x90, 0x10, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65,
+ 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x17, 0x0a, 0x07, 0x62, 0x61, 0x73, 0x65, 0x5f,
+ 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64,
+ 0x12, 0x2d, 0x0a, 0x13, 0x75, 0x73, 0x65, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f,
+ 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x75,
+ 0x73, 0x65, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x42, 0x61, 0x73, 0x65, 0x49, 0x64, 0x12,
+ 0x3f, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x5f, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x18, 0x27, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74, 0x52,
+ 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x4f, 0x6e, 0x4e, 0x6f, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x12, 0x40, 0x0a, 0x1d, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x68, 0x6f, 0x74, 0x5f, 0x72, 0x65, 0x73,
+ 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x73, 0x18, 0x28, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x73, 0x6b, 0x69, 0x70, 0x48, 0x6f, 0x74,
+ 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61,
+ 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x70, 0x61,
+ 0x74, 0x68, 0x18, 0x20, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x61, 0x73, 0x65, 0x49, 0x64,
+ 0x50, 0x61, 0x74, 0x68, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
+ 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x63, 0x75,
+ 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x5f, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x59, 0x61, 0x6d, 0x6c, 0x12, 0x3d, 0x0a, 0x1b, 0x61, 0x6c, 0x6c, 0x6f,
+ 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63,
+ 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x69,
+ 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x72, 0x65, 0x6a, 0x65, 0x63,
+ 0x74, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69,
+ 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1a,
+ 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x44, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x41, 0x0a, 0x1d, 0x69, 0x67,
+ 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x64, 0x79, 0x6e,
+ 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x1a, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e,
+ 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x30, 0x0a,
+ 0x14, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x5f, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x29, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x73, 0x6b, 0x69,
+ 0x70, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x6f, 0x67, 0x73, 0x12,
+ 0x2c, 0x0a, 0x12, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73,
+ 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x65, 0x0a,
+ 0x18, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x69,
+ 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x2c, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6c,
+ 0x6f, 0x63, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x49, 0x70, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65,
+ 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x12, 0x2e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x6c,
+ 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11,
+ 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x4c, 0x65, 0x76, 0x65,
+ 0x6c, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
+ 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74,
+ 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x5f, 0x65,
+ 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x6f,
+ 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x45, 0x73, 0x63, 0x61, 0x70, 0x65, 0x64, 0x12, 0x19,
+ 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x6e, 0x6f,
+ 0x64, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x5f, 0x7a, 0x6f, 0x6e, 0x65, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5a, 0x6f, 0x6e, 0x65, 0x12, 0x49, 0x0a, 0x13, 0x66, 0x69, 0x6c, 0x65,
+ 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18,
+ 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x11, 0x66, 0x69, 0x6c, 0x65, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72,
+ 0x76, 0x61, 0x6c, 0x12, 0x38, 0x0a, 0x0a, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x09, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x57, 0x0a,
+ 0x0e, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18,
+ 0x21, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64,
+ 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69,
+ 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53,
+ 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0d, 0x64, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74,
+ 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x4b, 0x0a, 0x14, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74,
+ 0x5f, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x12,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x54,
+ 0x69, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65,
+ 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x68, 0x6f, 0x74, 0x5f,
+ 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x64,
+ 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x48, 0x6f, 0x74, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74,
+ 0x12, 0x30, 0x0a, 0x14, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x75, 0x74, 0x65, 0x78,
+ 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x75, 0x74, 0x65, 0x78, 0x54, 0x72, 0x61, 0x63, 0x69,
+ 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x65, 0x70,
+ 0x6f, 0x63, 0x68, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x74, 0x61,
+ 0x72, 0x74, 0x45, 0x70, 0x6f, 0x63, 0x68, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x70, 0x75, 0x73, 0x65,
+ 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0d, 0x63, 0x70, 0x75, 0x73, 0x65, 0x74, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x2f,
+ 0x0a, 0x13, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x64, 0x69, 0x73,
+ 0x61, 0x62, 0x6c, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x39, 0x0a, 0x19, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6e, 0x65, 0x5f, 0x67,
+ 0x72, 0x61, 0x69, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x18, 0x22, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x16, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6e, 0x65, 0x47, 0x72,
+ 0x61, 0x69, 0x6e, 0x4c, 0x6f, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f,
+ 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x23, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73,
+ 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0d,
+ 0x52, 0x0a, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x10,
+ 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70,
+ 0x18, 0x25, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f,
+ 0x72, 0x65, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f,
+ 0x74, 0x61, 0x67, 0x18, 0x26, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x73,
+ 0x54, 0x61, 0x67, 0x22, 0x1b, 0x0a, 0x09, 0x49, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e,
+ 0x12, 0x06, 0x0a, 0x02, 0x76, 0x34, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x76, 0x36, 0x10, 0x01,
+ 0x22, 0x2d, 0x0a, 0x04, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x65, 0x72, 0x76,
+ 0x65, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x10,
+ 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x6e, 0x69, 0x74, 0x4f, 0x6e, 0x6c, 0x79, 0x10, 0x02, 0x22,
+ 0x2b, 0x0a, 0x0d, 0x44, 0x72, 0x61, 0x69, 0x6e, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79,
+ 0x12, 0x0b, 0x0a, 0x07, 0x47, 0x72, 0x61, 0x64, 0x75, 0x61, 0x6c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
+ 0x09, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x10, 0x01, 0x3a, 0x2d, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x28, 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e,
+ 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64,
+ 0x4c, 0x69, 0x6e, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x0c, 0x10,
+ 0x0d, 0x4a, 0x04, 0x08, 0x14, 0x10, 0x15, 0x4a, 0x04, 0x08, 0x15, 0x10, 0x16, 0x4a, 0x04, 0x08,
+ 0x1d, 0x10, 0x1e, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x52, 0x10,
+ 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x62, 0x6a, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x6c, 0x65, 0x6e,
+ 0x52, 0x11, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x42, 0x78, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69,
+ 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x0f, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d,
+ 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c,
+ 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d,
+ 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_server_info_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_server_info_proto_rawDescData = file_envoy_admin_v3_server_info_proto_rawDesc
+)
+
+func file_envoy_admin_v3_server_info_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_server_info_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_server_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_server_info_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_server_info_proto_rawDescData
+}
+
+var file_envoy_admin_v3_server_info_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
+var file_envoy_admin_v3_server_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_envoy_admin_v3_server_info_proto_goTypes = []interface{}{
+ (ServerInfo_State)(0), // 0: envoy.admin.v3.ServerInfo.State
+ (CommandLineOptions_IpVersion)(0), // 1: envoy.admin.v3.CommandLineOptions.IpVersion
+ (CommandLineOptions_Mode)(0), // 2: envoy.admin.v3.CommandLineOptions.Mode
+ (CommandLineOptions_DrainStrategy)(0), // 3: envoy.admin.v3.CommandLineOptions.DrainStrategy
+ (*ServerInfo)(nil), // 4: envoy.admin.v3.ServerInfo
+ (*CommandLineOptions)(nil), // 5: envoy.admin.v3.CommandLineOptions
+ (*durationpb.Duration)(nil), // 6: google.protobuf.Duration
+ (*v3.Node)(nil), // 7: envoy.config.core.v3.Node
+}
+var file_envoy_admin_v3_server_info_proto_depIdxs = []int32{
+ 0, // 0: envoy.admin.v3.ServerInfo.state:type_name -> envoy.admin.v3.ServerInfo.State
+ 6, // 1: envoy.admin.v3.ServerInfo.uptime_current_epoch:type_name -> google.protobuf.Duration
+ 6, // 2: envoy.admin.v3.ServerInfo.uptime_all_epochs:type_name -> google.protobuf.Duration
+ 5, // 3: envoy.admin.v3.ServerInfo.command_line_options:type_name -> envoy.admin.v3.CommandLineOptions
+ 7, // 4: envoy.admin.v3.ServerInfo.node:type_name -> envoy.config.core.v3.Node
+ 1, // 5: envoy.admin.v3.CommandLineOptions.local_address_ip_version:type_name -> envoy.admin.v3.CommandLineOptions.IpVersion
+ 6, // 6: envoy.admin.v3.CommandLineOptions.file_flush_interval:type_name -> google.protobuf.Duration
+ 6, // 7: envoy.admin.v3.CommandLineOptions.drain_time:type_name -> google.protobuf.Duration
+ 3, // 8: envoy.admin.v3.CommandLineOptions.drain_strategy:type_name -> envoy.admin.v3.CommandLineOptions.DrainStrategy
+ 6, // 9: envoy.admin.v3.CommandLineOptions.parent_shutdown_time:type_name -> google.protobuf.Duration
+ 2, // 10: envoy.admin.v3.CommandLineOptions.mode:type_name -> envoy.admin.v3.CommandLineOptions.Mode
+ 11, // [11:11] is the sub-list for method output_type
+ 11, // [11:11] is the sub-list for method input_type
+ 11, // [11:11] is the sub-list for extension type_name
+ 11, // [11:11] is the sub-list for extension extendee
+ 0, // [0:11] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_server_info_proto_init() }
+func file_envoy_admin_v3_server_info_proto_init() {
+ if File_envoy_admin_v3_server_info_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_server_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ServerInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_admin_v3_server_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CommandLineOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_server_info_proto_rawDesc,
+ NumEnums: 4,
+ NumMessages: 2,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_server_info_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_server_info_proto_depIdxs,
+ EnumInfos: file_envoy_admin_v3_server_info_proto_enumTypes,
+ MessageInfos: file_envoy_admin_v3_server_info_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_server_info_proto = out.File
+ file_envoy_admin_v3_server_info_proto_rawDesc = nil
+ file_envoy_admin_v3_server_info_proto_goTypes = nil
+ file_envoy_admin_v3_server_info_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go
new file mode 100644
index 000000000..516156241
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info.pb.validate.go
@@ -0,0 +1,511 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/server_info.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ServerInfo with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ServerInfo) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ServerInfo with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ServerInfoMultiError, or
+// nil if none found.
+func (m *ServerInfo) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ServerInfo) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Version
+
+ // no validation rules for State
+
+ if all {
+ switch v := interface{}(m.GetUptimeCurrentEpoch()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "UptimeCurrentEpoch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "UptimeCurrentEpoch",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUptimeCurrentEpoch()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerInfoValidationError{
+ field: "UptimeCurrentEpoch",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetUptimeAllEpochs()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "UptimeAllEpochs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "UptimeAllEpochs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUptimeAllEpochs()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerInfoValidationError{
+ field: "UptimeAllEpochs",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for HotRestartVersion
+
+ if all {
+ switch v := interface{}(m.GetCommandLineOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "CommandLineOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "CommandLineOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCommandLineOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerInfoValidationError{
+ field: "CommandLineOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetNode()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ServerInfoValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ServerInfoValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ServerInfoMultiError(errors)
+ }
+
+ return nil
+}
+
+// ServerInfoMultiError is an error wrapping multiple validation errors
+// returned by ServerInfo.ValidateAll() if the designated constraints aren't met.
+type ServerInfoMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ServerInfoMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ServerInfoMultiError) AllErrors() []error { return m }
+
+// ServerInfoValidationError is the validation error returned by
+// ServerInfo.Validate if the designated constraints aren't met.
+type ServerInfoValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ServerInfoValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ServerInfoValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ServerInfoValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ServerInfoValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ServerInfoValidationError) ErrorName() string { return "ServerInfoValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ServerInfoValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sServerInfo.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ServerInfoValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ServerInfoValidationError{}
+
+// Validate checks the field values on CommandLineOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CommandLineOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CommandLineOptions with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CommandLineOptionsMultiError, or nil if none found.
+func (m *CommandLineOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CommandLineOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for BaseId
+
+ // no validation rules for UseDynamicBaseId
+
+ // no validation rules for SkipHotRestartOnNoParent
+
+ // no validation rules for SkipHotRestartParentStats
+
+ // no validation rules for BaseIdPath
+
+ // no validation rules for Concurrency
+
+ // no validation rules for ConfigPath
+
+ // no validation rules for ConfigYaml
+
+ // no validation rules for AllowUnknownStaticFields
+
+ // no validation rules for RejectUnknownDynamicFields
+
+ // no validation rules for IgnoreUnknownDynamicFields
+
+ // no validation rules for SkipDeprecatedLogs
+
+ // no validation rules for AdminAddressPath
+
+ // no validation rules for LocalAddressIpVersion
+
+ // no validation rules for LogLevel
+
+ // no validation rules for ComponentLogLevel
+
+ // no validation rules for LogFormat
+
+ // no validation rules for LogFormatEscaped
+
+ // no validation rules for LogPath
+
+ // no validation rules for ServiceCluster
+
+ // no validation rules for ServiceNode
+
+ // no validation rules for ServiceZone
+
+ if all {
+ switch v := interface{}(m.GetFileFlushInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "FileFlushInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "FileFlushInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetFileFlushInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CommandLineOptionsValidationError{
+ field: "FileFlushInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetDrainTime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "DrainTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "DrainTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDrainTime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CommandLineOptionsValidationError{
+ field: "DrainTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for DrainStrategy
+
+ if all {
+ switch v := interface{}(m.GetParentShutdownTime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "ParentShutdownTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CommandLineOptionsValidationError{
+ field: "ParentShutdownTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetParentShutdownTime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CommandLineOptionsValidationError{
+ field: "ParentShutdownTime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for Mode
+
+ // no validation rules for DisableHotRestart
+
+ // no validation rules for EnableMutexTracing
+
+ // no validation rules for RestartEpoch
+
+ // no validation rules for CpusetThreads
+
+ // no validation rules for EnableFineGrainLogging
+
+ // no validation rules for SocketPath
+
+ // no validation rules for SocketMode
+
+ // no validation rules for EnableCoreDump
+
+ if len(errors) > 0 {
+ return CommandLineOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// CommandLineOptionsMultiError is an error wrapping multiple validation errors
+// returned by CommandLineOptions.ValidateAll() if the designated constraints
+// aren't met.
+type CommandLineOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CommandLineOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CommandLineOptionsMultiError) AllErrors() []error { return m }
+
+// CommandLineOptionsValidationError is the validation error returned by
+// CommandLineOptions.Validate if the designated constraints aren't met.
+type CommandLineOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CommandLineOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CommandLineOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CommandLineOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CommandLineOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CommandLineOptionsValidationError) ErrorName() string {
+ return "CommandLineOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CommandLineOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCommandLineOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CommandLineOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CommandLineOptionsValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go
new file mode 100644
index 000000000..ca7e4ede3
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/server_info_vtproto.pb.go
@@ -0,0 +1,686 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/server_info.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *ServerInfo) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ServerInfo) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ServerInfo) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Node != nil {
+ if vtmsg, ok := interface{}(m.Node).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Node)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.CommandLineOptions != nil {
+ size, err := m.CommandLineOptions.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.HotRestartVersion) > 0 {
+ i -= len(m.HotRestartVersion)
+ copy(dAtA[i:], m.HotRestartVersion)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HotRestartVersion)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.UptimeAllEpochs != nil {
+ size, err := (*durationpb.Duration)(m.UptimeAllEpochs).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.UptimeCurrentEpoch != nil {
+ size, err := (*durationpb.Duration)(m.UptimeCurrentEpoch).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.State != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.State))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CommandLineOptions) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CommandLineOptions) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CommandLineOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.SkipDeprecatedLogs {
+ i--
+ if m.SkipDeprecatedLogs {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xc8
+ }
+ if m.SkipHotRestartParentStats {
+ i--
+ if m.SkipHotRestartParentStats {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xc0
+ }
+ if m.SkipHotRestartOnNoParent {
+ i--
+ if m.SkipHotRestartOnNoParent {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xb8
+ }
+ if len(m.StatsTag) > 0 {
+ for iNdEx := len(m.StatsTag) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.StatsTag[iNdEx])
+ copy(dAtA[i:], m.StatsTag[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StatsTag[iNdEx])))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xb2
+ }
+ }
+ if m.EnableCoreDump {
+ i--
+ if m.EnableCoreDump {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xa8
+ }
+ if m.SocketMode != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SocketMode))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xa0
+ }
+ if len(m.SocketPath) > 0 {
+ i -= len(m.SocketPath)
+ copy(dAtA[i:], m.SocketPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SocketPath)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.EnableFineGrainLogging {
+ i--
+ if m.EnableFineGrainLogging {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x90
+ }
+ if m.DrainStrategy != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.DrainStrategy))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x88
+ }
+ if len(m.BaseIdPath) > 0 {
+ i -= len(m.BaseIdPath)
+ copy(dAtA[i:], m.BaseIdPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.BaseIdPath)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x82
+ }
+ if m.UseDynamicBaseId {
+ i--
+ if m.UseDynamicBaseId {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xf8
+ }
+ if m.IgnoreUnknownDynamicFields {
+ i--
+ if m.IgnoreUnknownDynamicFields {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xf0
+ }
+ if len(m.DisabledExtensions) > 0 {
+ for iNdEx := len(m.DisabledExtensions) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.DisabledExtensions[iNdEx])
+ copy(dAtA[i:], m.DisabledExtensions[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DisabledExtensions[iNdEx])))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xe2
+ }
+ }
+ if m.LogFormatEscaped {
+ i--
+ if m.LogFormatEscaped {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xd8
+ }
+ if m.RejectUnknownDynamicFields {
+ i--
+ if m.RejectUnknownDynamicFields {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xd0
+ }
+ if m.CpusetThreads {
+ i--
+ if m.CpusetThreads {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc8
+ }
+ if m.RestartEpoch != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.RestartEpoch))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc0
+ }
+ if m.EnableMutexTracing {
+ i--
+ if m.EnableMutexTracing {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xb8
+ }
+ if m.DisableHotRestart {
+ i--
+ if m.DisableHotRestart {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xb0
+ }
+ if m.Mode != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Mode))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x98
+ }
+ if m.ParentShutdownTime != nil {
+ size, err := (*durationpb.Duration)(m.ParentShutdownTime).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x92
+ }
+ if m.DrainTime != nil {
+ size, err := (*durationpb.Duration)(m.DrainTime).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ if m.FileFlushInterval != nil {
+ size, err := (*durationpb.Duration)(m.FileFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x82
+ }
+ if len(m.ServiceZone) > 0 {
+ i -= len(m.ServiceZone)
+ copy(dAtA[i:], m.ServiceZone)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceZone)))
+ i--
+ dAtA[i] = 0x7a
+ }
+ if len(m.ServiceNode) > 0 {
+ i -= len(m.ServiceNode)
+ copy(dAtA[i:], m.ServiceNode)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceNode)))
+ i--
+ dAtA[i] = 0x72
+ }
+ if len(m.ServiceCluster) > 0 {
+ i -= len(m.ServiceCluster)
+ copy(dAtA[i:], m.ServiceCluster)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ServiceCluster)))
+ i--
+ dAtA[i] = 0x6a
+ }
+ if len(m.LogPath) > 0 {
+ i -= len(m.LogPath)
+ copy(dAtA[i:], m.LogPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogPath)))
+ i--
+ dAtA[i] = 0x5a
+ }
+ if len(m.LogFormat) > 0 {
+ i -= len(m.LogFormat)
+ copy(dAtA[i:], m.LogFormat)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogFormat)))
+ i--
+ dAtA[i] = 0x52
+ }
+ if len(m.ComponentLogLevel) > 0 {
+ i -= len(m.ComponentLogLevel)
+ copy(dAtA[i:], m.ComponentLogLevel)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ComponentLogLevel)))
+ i--
+ dAtA[i] = 0x4a
+ }
+ if len(m.LogLevel) > 0 {
+ i -= len(m.LogLevel)
+ copy(dAtA[i:], m.LogLevel)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LogLevel)))
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.LocalAddressIpVersion != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LocalAddressIpVersion))
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.AdminAddressPath) > 0 {
+ i -= len(m.AdminAddressPath)
+ copy(dAtA[i:], m.AdminAddressPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AdminAddressPath)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.AllowUnknownStaticFields {
+ i--
+ if m.AllowUnknownStaticFields {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if len(m.ConfigYaml) > 0 {
+ i -= len(m.ConfigYaml)
+ copy(dAtA[i:], m.ConfigYaml)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigYaml)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.ConfigPath) > 0 {
+ i -= len(m.ConfigPath)
+ copy(dAtA[i:], m.ConfigPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigPath)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Concurrency != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Concurrency))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.BaseId != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BaseId))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ServerInfo) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.State != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.State))
+ }
+ if m.UptimeCurrentEpoch != nil {
+ l = (*durationpb.Duration)(m.UptimeCurrentEpoch).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.UptimeAllEpochs != nil {
+ l = (*durationpb.Duration)(m.UptimeAllEpochs).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.HotRestartVersion)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.CommandLineOptions != nil {
+ l = m.CommandLineOptions.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Node != nil {
+ if size, ok := interface{}(m.Node).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Node)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CommandLineOptions) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.BaseId != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.BaseId))
+ }
+ if m.Concurrency != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Concurrency))
+ }
+ l = len(m.ConfigPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ConfigYaml)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.AllowUnknownStaticFields {
+ n += 2
+ }
+ l = len(m.AdminAddressPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LocalAddressIpVersion != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.LocalAddressIpVersion))
+ }
+ l = len(m.LogLevel)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ComponentLogLevel)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.LogFormat)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.LogPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ServiceCluster)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ServiceNode)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ServiceZone)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.FileFlushInterval != nil {
+ l = (*durationpb.Duration)(m.FileFlushInterval).SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DrainTime != nil {
+ l = (*durationpb.Duration)(m.DrainTime).SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ParentShutdownTime != nil {
+ l = (*durationpb.Duration)(m.ParentShutdownTime).SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Mode != 0 {
+ n += 2 + protohelpers.SizeOfVarint(uint64(m.Mode))
+ }
+ if m.DisableHotRestart {
+ n += 3
+ }
+ if m.EnableMutexTracing {
+ n += 3
+ }
+ if m.RestartEpoch != 0 {
+ n += 2 + protohelpers.SizeOfVarint(uint64(m.RestartEpoch))
+ }
+ if m.CpusetThreads {
+ n += 3
+ }
+ if m.RejectUnknownDynamicFields {
+ n += 3
+ }
+ if m.LogFormatEscaped {
+ n += 3
+ }
+ if len(m.DisabledExtensions) > 0 {
+ for _, s := range m.DisabledExtensions {
+ l = len(s)
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.IgnoreUnknownDynamicFields {
+ n += 3
+ }
+ if m.UseDynamicBaseId {
+ n += 3
+ }
+ l = len(m.BaseIdPath)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DrainStrategy != 0 {
+ n += 2 + protohelpers.SizeOfVarint(uint64(m.DrainStrategy))
+ }
+ if m.EnableFineGrainLogging {
+ n += 3
+ }
+ l = len(m.SocketPath)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.SocketMode != 0 {
+ n += 2 + protohelpers.SizeOfVarint(uint64(m.SocketMode))
+ }
+ if m.EnableCoreDump {
+ n += 3
+ }
+ if len(m.StatsTag) > 0 {
+ for _, s := range m.StatsTag {
+ l = len(s)
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.SkipHotRestartOnNoParent {
+ n += 3
+ }
+ if m.SkipHotRestartParentStats {
+ n += 3
+ }
+ if m.SkipDeprecatedLogs {
+ n += 3
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go
new file mode 100644
index 000000000..71c429162
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.go
@@ -0,0 +1,182 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/admin/v3/tap.proto
+
+package adminv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/tap/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The /tap admin request body that is used to configure an active tap session.
+type TapRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The opaque configuration ID used to match the configuration to a loaded extension.
+ // A tap extension configures a similar opaque ID that is used to match.
+ ConfigId string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"`
+ // The tap configuration to load.
+ TapConfig *v3.TapConfig `protobuf:"bytes,2,opt,name=tap_config,json=tapConfig,proto3" json:"tap_config,omitempty"`
+}
+
+func (x *TapRequest) Reset() {
+ *x = TapRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_admin_v3_tap_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TapRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TapRequest) ProtoMessage() {}
+
+func (x *TapRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_admin_v3_tap_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TapRequest.ProtoReflect.Descriptor instead.
+func (*TapRequest) Descriptor() ([]byte, []int) {
+ return file_envoy_admin_v3_tap_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TapRequest) GetConfigId() string {
+ if x != nil {
+ return x.ConfigId
+ }
+ return ""
+}
+
+func (x *TapRequest) GetTapConfig() *v3.TapConfig {
+ if x != nil {
+ return x.TapConfig
+ }
+ return nil
+}
+
+var File_envoy_admin_v3_tap_proto protoreflect.FileDescriptor
+
+var file_envoy_admin_v3_tap_proto_rawDesc = []byte{
+ 0x0a, 0x18, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33,
+ 0x2f, 0x74, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x33, 0x1a, 0x20, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x74, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x70, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02,
+ 0x10, 0x01, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x49, 0x64, 0x12, 0x47, 0x0a, 0x0a,
+ 0x74, 0x61, 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x74, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x61, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x09, 0x74, 0x61, 0x70, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x25, 0x9a, 0xc5, 0x88, 0x1e, 0x20, 0x0a, 0x1e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x61, 0x6c, 0x70, 0x68,
+ 0x61, 0x2e, 0x54, 0x61, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x71, 0xba, 0x80,
+ 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x1c, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x64, 0x6d, 0x69,
+ 0x6e, 0x2e, 0x76, 0x33, 0x42, 0x08, 0x54, 0x61, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
+ 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x76, 0x33, 0x62,
+ 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_admin_v3_tap_proto_rawDescOnce sync.Once
+ file_envoy_admin_v3_tap_proto_rawDescData = file_envoy_admin_v3_tap_proto_rawDesc
+)
+
+func file_envoy_admin_v3_tap_proto_rawDescGZIP() []byte {
+ file_envoy_admin_v3_tap_proto_rawDescOnce.Do(func() {
+ file_envoy_admin_v3_tap_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_admin_v3_tap_proto_rawDescData)
+ })
+ return file_envoy_admin_v3_tap_proto_rawDescData
+}
+
+var file_envoy_admin_v3_tap_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_admin_v3_tap_proto_goTypes = []interface{}{
+ (*TapRequest)(nil), // 0: envoy.admin.v3.TapRequest
+ (*v3.TapConfig)(nil), // 1: envoy.config.tap.v3.TapConfig
+}
+var file_envoy_admin_v3_tap_proto_depIdxs = []int32{
+ 1, // 0: envoy.admin.v3.TapRequest.tap_config:type_name -> envoy.config.tap.v3.TapConfig
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_envoy_admin_v3_tap_proto_init() }
+func file_envoy_admin_v3_tap_proto_init() {
+ if File_envoy_admin_v3_tap_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_admin_v3_tap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TapRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_admin_v3_tap_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_admin_v3_tap_proto_goTypes,
+ DependencyIndexes: file_envoy_admin_v3_tap_proto_depIdxs,
+ MessageInfos: file_envoy_admin_v3_tap_proto_msgTypes,
+ }.Build()
+ File_envoy_admin_v3_tap_proto = out.File
+ file_envoy_admin_v3_tap_proto_rawDesc = nil
+ file_envoy_admin_v3_tap_proto_goTypes = nil
+ file_envoy_admin_v3_tap_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go
new file mode 100644
index 000000000..d524f2aef
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap.pb.validate.go
@@ -0,0 +1,187 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/admin/v3/tap.proto
+
+package adminv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on TapRequest with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *TapRequest) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TapRequest with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in TapRequestMultiError, or
+// nil if none found.
+func (m *TapRequest) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TapRequest) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetConfigId()) < 1 {
+ err := TapRequestValidationError{
+ field: "ConfigId",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetTapConfig() == nil {
+ err := TapRequestValidationError{
+ field: "TapConfig",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTapConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, TapRequestValidationError{
+ field: "TapConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, TapRequestValidationError{
+ field: "TapConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTapConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return TapRequestValidationError{
+ field: "TapConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return TapRequestMultiError(errors)
+ }
+
+ return nil
+}
+
+// TapRequestMultiError is an error wrapping multiple validation errors
+// returned by TapRequest.ValidateAll() if the designated constraints aren't met.
+type TapRequestMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TapRequestMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TapRequestMultiError) AllErrors() []error { return m }
+
+// TapRequestValidationError is the validation error returned by
+// TapRequest.Validate if the designated constraints aren't met.
+type TapRequestValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TapRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TapRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TapRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TapRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TapRequestValidationError) ErrorName() string { return "TapRequestValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TapRequestValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTapRequest.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TapRequestValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TapRequestValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go
new file mode 100644
index 000000000..4524bfb4f
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/admin/v3/tap_vtproto.pb.go
@@ -0,0 +1,106 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/admin/v3/tap.proto
+
+package adminv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *TapRequest) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TapRequest) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *TapRequest) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.TapConfig != nil {
+ if vtmsg, ok := interface{}(m.TapConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.TapConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.ConfigId) > 0 {
+ i -= len(m.ConfigId)
+ copy(dAtA[i:], m.ConfigId)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ConfigId)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TapRequest) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.ConfigId)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.TapConfig != nil {
+ if size, ok := interface{}(m.TapConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.TapConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go
new file mode 100644
index 000000000..d748e467a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.go
@@ -0,0 +1,159 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/annotations/deprecation.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_envoy_annotations_deprecation_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 189503207,
+ Name: "envoy.annotations.disallowed_by_default",
+ Tag: "varint,189503207,opt,name=disallowed_by_default",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 157299826,
+ Name: "envoy.annotations.deprecated_at_minor_version",
+ Tag: "bytes,157299826,opt,name=deprecated_at_minor_version",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*bool)(nil),
+ Field: 70100853,
+ Name: "envoy.annotations.disallowed_by_default_enum",
+ Tag: "varint,70100853,opt,name=disallowed_by_default_enum",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumValueOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 181198657,
+ Name: "envoy.annotations.deprecated_at_minor_version_enum",
+ Tag: "bytes,181198657,opt,name=deprecated_at_minor_version_enum",
+ Filename: "envoy/annotations/deprecation.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // optional bool disallowed_by_default = 189503207;
+ E_DisallowedByDefault = &file_envoy_annotations_deprecation_proto_extTypes[0]
+ // The API major and minor version on which the field was deprecated
+ // (e.g., "3.5" for major version 3 and minor version 5).
+ //
+ // optional string deprecated_at_minor_version = 157299826;
+ E_DeprecatedAtMinorVersion = &file_envoy_annotations_deprecation_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.EnumValueOptions.
+var (
+ // optional bool disallowed_by_default_enum = 70100853;
+ E_DisallowedByDefaultEnum = &file_envoy_annotations_deprecation_proto_extTypes[2]
+ // The API major and minor version on which the enum value was deprecated
+ // (e.g., "3.5" for major version 3 and minor version 5).
+ //
+ // optional string deprecated_at_minor_version_enum = 181198657;
+ E_DeprecatedAtMinorVersionEnum = &file_envoy_annotations_deprecation_proto_extTypes[3]
+)
+
+var File_envoy_annotations_deprecation_proto protoreflect.FileDescriptor
+
+var file_envoy_annotations_deprecation_proto_rawDesc = []byte{
+ 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x54, 0x0a, 0x15, 0x64, 0x69,
+ 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xe7, 0xad, 0xae, 0x5a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x64, 0x69, 0x73,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x3a, 0x5f, 0x0a, 0x1b, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12,
+ 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf2,
+ 0xe8, 0x80, 0x4b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x18, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
+ 0x74, 0x65, 0x64, 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f,
+ 0x6e, 0x3a, 0x61, 0x0a, 0x1a, 0x64, 0x69, 0x73, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f,
+ 0x62, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12,
+ 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0xf5, 0xce, 0xb6, 0x21, 0x20, 0x01, 0x28, 0x08, 0x52, 0x17, 0x64, 0x69, 0x73,
+ 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x42, 0x79, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x6c, 0x0a, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
+ 0x65, 0x64, 0x5f, 0x61, 0x74, 0x5f, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xbe, 0xb3, 0x56,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x1c, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
+ 0x41, 0x74, 0x4d, 0x69, 0x6e, 0x6f, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x45, 0x6e,
+ 0x75, 0x6d, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63,
+ 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_envoy_annotations_deprecation_proto_goTypes = []interface{}{
+ (*descriptorpb.FieldOptions)(nil), // 0: google.protobuf.FieldOptions
+ (*descriptorpb.EnumValueOptions)(nil), // 1: google.protobuf.EnumValueOptions
+}
+var file_envoy_annotations_deprecation_proto_depIdxs = []int32{
+ 0, // 0: envoy.annotations.disallowed_by_default:extendee -> google.protobuf.FieldOptions
+ 0, // 1: envoy.annotations.deprecated_at_minor_version:extendee -> google.protobuf.FieldOptions
+ 1, // 2: envoy.annotations.disallowed_by_default_enum:extendee -> google.protobuf.EnumValueOptions
+ 1, // 3: envoy.annotations.deprecated_at_minor_version_enum:extendee -> google.protobuf.EnumValueOptions
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 4, // [4:4] is the sub-list for extension type_name
+ 0, // [0:4] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_annotations_deprecation_proto_init() }
+func file_envoy_annotations_deprecation_proto_init() {
+ if File_envoy_annotations_deprecation_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_annotations_deprecation_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 4,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_annotations_deprecation_proto_goTypes,
+ DependencyIndexes: file_envoy_annotations_deprecation_proto_depIdxs,
+ ExtensionInfos: file_envoy_annotations_deprecation_proto_extTypes,
+ }.Build()
+ File_envoy_annotations_deprecation_proto = out.File
+ file_envoy_annotations_deprecation_proto_rawDesc = nil
+ file_envoy_annotations_deprecation_proto_goTypes = nil
+ file_envoy_annotations_deprecation_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go
new file mode 100644
index 000000000..be58aa524
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/deprecation.pb.validate.go
@@ -0,0 +1,37 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/annotations/deprecation.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go
new file mode 100644
index 000000000..7ec2d7c31
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/annotations/resource.proto
+
+package annotations
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ResourceAnnotation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Annotation for xDS services that indicates the fully-qualified Protobuf type for the resource
+ // type.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+}
+
+func (x *ResourceAnnotation) Reset() {
+ *x = ResourceAnnotation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_annotations_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceAnnotation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceAnnotation) ProtoMessage() {}
+
+func (x *ResourceAnnotation) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_annotations_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceAnnotation.ProtoReflect.Descriptor instead.
+func (*ResourceAnnotation) Descriptor() ([]byte, []int) {
+ return file_envoy_annotations_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ResourceAnnotation) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+var file_envoy_annotations_resource_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*ResourceAnnotation)(nil),
+ Field: 265073217,
+ Name: "envoy.annotations.resource",
+ Tag: "bytes,265073217,opt,name=resource",
+ Filename: "envoy/annotations/resource.proto",
+ },
+}
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // optional envoy.annotations.ResourceAnnotation resource = 265073217;
+ E_Resource = &file_envoy_annotations_resource_proto_extTypes[0]
+)
+
+var File_envoy_annotations_resource_proto protoreflect.FileDescriptor
+
+var file_envoy_annotations_resource_proto_rawDesc = []byte{
+ 0x0a, 0x20, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x12, 0x11, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
+ 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x28, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x3a, 0x65, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1f, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1,
+ 0xe4, 0xb2, 0x7e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x3a, 0x5a, 0x38, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61,
+ 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_annotations_resource_proto_rawDescOnce sync.Once
+ file_envoy_annotations_resource_proto_rawDescData = file_envoy_annotations_resource_proto_rawDesc
+)
+
+func file_envoy_annotations_resource_proto_rawDescGZIP() []byte {
+ file_envoy_annotations_resource_proto_rawDescOnce.Do(func() {
+ file_envoy_annotations_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_annotations_resource_proto_rawDescData)
+ })
+ return file_envoy_annotations_resource_proto_rawDescData
+}
+
+var file_envoy_annotations_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_envoy_annotations_resource_proto_goTypes = []interface{}{
+ (*ResourceAnnotation)(nil), // 0: envoy.annotations.ResourceAnnotation
+ (*descriptorpb.ServiceOptions)(nil), // 1: google.protobuf.ServiceOptions
+}
+var file_envoy_annotations_resource_proto_depIdxs = []int32{
+ 1, // 0: envoy.annotations.resource:extendee -> google.protobuf.ServiceOptions
+ 0, // 1: envoy.annotations.resource:type_name -> envoy.annotations.ResourceAnnotation
+ 2, // [2:2] is the sub-list for method output_type
+ 2, // [2:2] is the sub-list for method input_type
+ 1, // [1:2] is the sub-list for extension type_name
+ 0, // [0:1] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_envoy_annotations_resource_proto_init() }
+func file_envoy_annotations_resource_proto_init() {
+ if File_envoy_annotations_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_annotations_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceAnnotation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_annotations_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_annotations_resource_proto_goTypes,
+ DependencyIndexes: file_envoy_annotations_resource_proto_depIdxs,
+ MessageInfos: file_envoy_annotations_resource_proto_msgTypes,
+ ExtensionInfos: file_envoy_annotations_resource_proto_extTypes,
+ }.Build()
+ File_envoy_annotations_resource_proto = out.File
+ file_envoy_annotations_resource_proto_rawDesc = nil
+ file_envoy_annotations_resource_proto_goTypes = nil
+ file_envoy_annotations_resource_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go
new file mode 100644
index 000000000..2929a5813
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource.pb.validate.go
@@ -0,0 +1,141 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/annotations/resource.proto
+
+package annotations
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on ResourceAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ResourceAnnotation) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResourceAnnotation with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResourceAnnotationMultiError, or nil if none found.
+func (m *ResourceAnnotation) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResourceAnnotation) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Type
+
+ if len(errors) > 0 {
+ return ResourceAnnotationMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResourceAnnotationMultiError is an error wrapping multiple validation errors
+// returned by ResourceAnnotation.ValidateAll() if the designated constraints
+// aren't met.
+type ResourceAnnotationMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResourceAnnotationMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResourceAnnotationMultiError) AllErrors() []error { return m }
+
+// ResourceAnnotationValidationError is the validation error returned by
+// ResourceAnnotation.Validate if the designated constraints aren't met.
+type ResourceAnnotationValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResourceAnnotationValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResourceAnnotationValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResourceAnnotationValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResourceAnnotationValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResourceAnnotationValidationError) ErrorName() string {
+ return "ResourceAnnotationValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResourceAnnotationValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResourceAnnotation.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResourceAnnotationValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResourceAnnotationValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go
new file mode 100644
index 000000000..324cb0916
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/annotations/resource_vtproto.pb.go
@@ -0,0 +1,73 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/annotations/resource.proto
+
+package annotations
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *ResourceAnnotation) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResourceAnnotation) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ResourceAnnotation) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Type) > 0 {
+ i -= len(m.Type)
+ copy(dAtA[i:], m.Type)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Type)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResourceAnnotation) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Type)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go
new file mode 100644
index 000000000..f434e6d40
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.go
@@ -0,0 +1,1926 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/config/accesslog/v3/accesslog.proto
+
+package accesslogv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v32 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
+ v34 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
+ v33 "github.com/envoyproxy/go-control-plane/envoy/type/matcher/v3"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ComparisonFilter_Op int32
+
+const (
+ // =
+ ComparisonFilter_EQ ComparisonFilter_Op = 0
+ // >=
+ ComparisonFilter_GE ComparisonFilter_Op = 1
+ // <=
+ ComparisonFilter_LE ComparisonFilter_Op = 2
+)
+
+// Enum value maps for ComparisonFilter_Op.
+var (
+ ComparisonFilter_Op_name = map[int32]string{
+ 0: "EQ",
+ 1: "GE",
+ 2: "LE",
+ }
+ ComparisonFilter_Op_value = map[string]int32{
+ "EQ": 0,
+ "GE": 1,
+ "LE": 2,
+ }
+)
+
+func (x ComparisonFilter_Op) Enum() *ComparisonFilter_Op {
+ p := new(ComparisonFilter_Op)
+ *p = x
+ return p
+}
+
+func (x ComparisonFilter_Op) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (ComparisonFilter_Op) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[0].Descriptor()
+}
+
+func (ComparisonFilter_Op) Type() protoreflect.EnumType {
+ return &file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[0]
+}
+
+func (x ComparisonFilter_Op) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use ComparisonFilter_Op.Descriptor instead.
+func (ComparisonFilter_Op) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2, 0}
+}
+
+type GrpcStatusFilter_Status int32
+
+const (
+ GrpcStatusFilter_OK GrpcStatusFilter_Status = 0
+ GrpcStatusFilter_CANCELED GrpcStatusFilter_Status = 1
+ GrpcStatusFilter_UNKNOWN GrpcStatusFilter_Status = 2
+ GrpcStatusFilter_INVALID_ARGUMENT GrpcStatusFilter_Status = 3
+ GrpcStatusFilter_DEADLINE_EXCEEDED GrpcStatusFilter_Status = 4
+ GrpcStatusFilter_NOT_FOUND GrpcStatusFilter_Status = 5
+ GrpcStatusFilter_ALREADY_EXISTS GrpcStatusFilter_Status = 6
+ GrpcStatusFilter_PERMISSION_DENIED GrpcStatusFilter_Status = 7
+ GrpcStatusFilter_RESOURCE_EXHAUSTED GrpcStatusFilter_Status = 8
+ GrpcStatusFilter_FAILED_PRECONDITION GrpcStatusFilter_Status = 9
+ GrpcStatusFilter_ABORTED GrpcStatusFilter_Status = 10
+ GrpcStatusFilter_OUT_OF_RANGE GrpcStatusFilter_Status = 11
+ GrpcStatusFilter_UNIMPLEMENTED GrpcStatusFilter_Status = 12
+ GrpcStatusFilter_INTERNAL GrpcStatusFilter_Status = 13
+ GrpcStatusFilter_UNAVAILABLE GrpcStatusFilter_Status = 14
+ GrpcStatusFilter_DATA_LOSS GrpcStatusFilter_Status = 15
+ GrpcStatusFilter_UNAUTHENTICATED GrpcStatusFilter_Status = 16
+)
+
+// Enum value maps for GrpcStatusFilter_Status.
+var (
+ GrpcStatusFilter_Status_name = map[int32]string{
+ 0: "OK",
+ 1: "CANCELED",
+ 2: "UNKNOWN",
+ 3: "INVALID_ARGUMENT",
+ 4: "DEADLINE_EXCEEDED",
+ 5: "NOT_FOUND",
+ 6: "ALREADY_EXISTS",
+ 7: "PERMISSION_DENIED",
+ 8: "RESOURCE_EXHAUSTED",
+ 9: "FAILED_PRECONDITION",
+ 10: "ABORTED",
+ 11: "OUT_OF_RANGE",
+ 12: "UNIMPLEMENTED",
+ 13: "INTERNAL",
+ 14: "UNAVAILABLE",
+ 15: "DATA_LOSS",
+ 16: "UNAUTHENTICATED",
+ }
+ GrpcStatusFilter_Status_value = map[string]int32{
+ "OK": 0,
+ "CANCELED": 1,
+ "UNKNOWN": 2,
+ "INVALID_ARGUMENT": 3,
+ "DEADLINE_EXCEEDED": 4,
+ "NOT_FOUND": 5,
+ "ALREADY_EXISTS": 6,
+ "PERMISSION_DENIED": 7,
+ "RESOURCE_EXHAUSTED": 8,
+ "FAILED_PRECONDITION": 9,
+ "ABORTED": 10,
+ "OUT_OF_RANGE": 11,
+ "UNIMPLEMENTED": 12,
+ "INTERNAL": 13,
+ "UNAVAILABLE": 14,
+ "DATA_LOSS": 15,
+ "UNAUTHENTICATED": 16,
+ }
+)
+
+func (x GrpcStatusFilter_Status) Enum() *GrpcStatusFilter_Status {
+ p := new(GrpcStatusFilter_Status)
+ *p = x
+ return p
+}
+
+func (x GrpcStatusFilter_Status) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GrpcStatusFilter_Status) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[1].Descriptor()
+}
+
+func (GrpcStatusFilter_Status) Type() protoreflect.EnumType {
+ return &file_envoy_config_accesslog_v3_accesslog_proto_enumTypes[1]
+}
+
+func (x GrpcStatusFilter_Status) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcStatusFilter_Status.Descriptor instead.
+func (GrpcStatusFilter_Status) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{12, 0}
+}
+
+type AccessLog struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the access log extension configuration.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Filter which is used to determine if the access log needs to be written.
+ Filter *AccessLogFilter `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"`
+ // Custom configuration that must be set according to the access logger extension being instantiated.
+ // [#extension-category: envoy.access_loggers]
+ //
+ // Types that are assignable to ConfigType:
+ //
+ // *AccessLog_TypedConfig
+ ConfigType isAccessLog_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *AccessLog) Reset() {
+ *x = AccessLog{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AccessLog) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AccessLog) ProtoMessage() {}
+
+func (x *AccessLog) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AccessLog.ProtoReflect.Descriptor instead.
+func (*AccessLog) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *AccessLog) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *AccessLog) GetFilter() *AccessLogFilter {
+ if x != nil {
+ return x.Filter
+ }
+ return nil
+}
+
+func (m *AccessLog) GetConfigType() isAccessLog_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *AccessLog) GetTypedConfig() *anypb.Any {
+ if x, ok := x.GetConfigType().(*AccessLog_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+type isAccessLog_ConfigType interface {
+ isAccessLog_ConfigType()
+}
+
+type AccessLog_TypedConfig struct {
+ TypedConfig *anypb.Any `protobuf:"bytes,4,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+func (*AccessLog_TypedConfig) isAccessLog_ConfigType() {}
+
+// [#next-free-field: 14]
+type AccessLogFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to FilterSpecifier:
+ //
+ // *AccessLogFilter_StatusCodeFilter
+ // *AccessLogFilter_DurationFilter
+ // *AccessLogFilter_NotHealthCheckFilter
+ // *AccessLogFilter_TraceableFilter
+ // *AccessLogFilter_RuntimeFilter
+ // *AccessLogFilter_AndFilter
+ // *AccessLogFilter_OrFilter
+ // *AccessLogFilter_HeaderFilter
+ // *AccessLogFilter_ResponseFlagFilter
+ // *AccessLogFilter_GrpcStatusFilter
+ // *AccessLogFilter_ExtensionFilter
+ // *AccessLogFilter_MetadataFilter
+ // *AccessLogFilter_LogTypeFilter
+ FilterSpecifier isAccessLogFilter_FilterSpecifier `protobuf_oneof:"filter_specifier"`
+}
+
+func (x *AccessLogFilter) Reset() {
+ *x = AccessLogFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AccessLogFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AccessLogFilter) ProtoMessage() {}
+
+func (x *AccessLogFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AccessLogFilter.ProtoReflect.Descriptor instead.
+func (*AccessLogFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{1}
+}
+
+func (m *AccessLogFilter) GetFilterSpecifier() isAccessLogFilter_FilterSpecifier {
+ if m != nil {
+ return m.FilterSpecifier
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetStatusCodeFilter() *StatusCodeFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_StatusCodeFilter); ok {
+ return x.StatusCodeFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetDurationFilter() *DurationFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_DurationFilter); ok {
+ return x.DurationFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetNotHealthCheckFilter() *NotHealthCheckFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_NotHealthCheckFilter); ok {
+ return x.NotHealthCheckFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetTraceableFilter() *TraceableFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_TraceableFilter); ok {
+ return x.TraceableFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetRuntimeFilter() *RuntimeFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_RuntimeFilter); ok {
+ return x.RuntimeFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetAndFilter() *AndFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_AndFilter); ok {
+ return x.AndFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetOrFilter() *OrFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_OrFilter); ok {
+ return x.OrFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetHeaderFilter() *HeaderFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_HeaderFilter); ok {
+ return x.HeaderFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetResponseFlagFilter() *ResponseFlagFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_ResponseFlagFilter); ok {
+ return x.ResponseFlagFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetGrpcStatusFilter() *GrpcStatusFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_GrpcStatusFilter); ok {
+ return x.GrpcStatusFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetExtensionFilter() *ExtensionFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_ExtensionFilter); ok {
+ return x.ExtensionFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetMetadataFilter() *MetadataFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_MetadataFilter); ok {
+ return x.MetadataFilter
+ }
+ return nil
+}
+
+func (x *AccessLogFilter) GetLogTypeFilter() *LogTypeFilter {
+ if x, ok := x.GetFilterSpecifier().(*AccessLogFilter_LogTypeFilter); ok {
+ return x.LogTypeFilter
+ }
+ return nil
+}
+
+type isAccessLogFilter_FilterSpecifier interface {
+ isAccessLogFilter_FilterSpecifier()
+}
+
+type AccessLogFilter_StatusCodeFilter struct {
+ // Status code filter.
+ StatusCodeFilter *StatusCodeFilter `protobuf:"bytes,1,opt,name=status_code_filter,json=statusCodeFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_DurationFilter struct {
+ // Duration filter.
+ DurationFilter *DurationFilter `protobuf:"bytes,2,opt,name=duration_filter,json=durationFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_NotHealthCheckFilter struct {
+ // Not health check filter.
+ NotHealthCheckFilter *NotHealthCheckFilter `protobuf:"bytes,3,opt,name=not_health_check_filter,json=notHealthCheckFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_TraceableFilter struct {
+ // Traceable filter.
+ TraceableFilter *TraceableFilter `protobuf:"bytes,4,opt,name=traceable_filter,json=traceableFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_RuntimeFilter struct {
+ // Runtime filter.
+ RuntimeFilter *RuntimeFilter `protobuf:"bytes,5,opt,name=runtime_filter,json=runtimeFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_AndFilter struct {
+ // And filter.
+ AndFilter *AndFilter `protobuf:"bytes,6,opt,name=and_filter,json=andFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_OrFilter struct {
+ // Or filter.
+ OrFilter *OrFilter `protobuf:"bytes,7,opt,name=or_filter,json=orFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_HeaderFilter struct {
+ // Header filter.
+ HeaderFilter *HeaderFilter `protobuf:"bytes,8,opt,name=header_filter,json=headerFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_ResponseFlagFilter struct {
+ // Response flag filter.
+ ResponseFlagFilter *ResponseFlagFilter `protobuf:"bytes,9,opt,name=response_flag_filter,json=responseFlagFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_GrpcStatusFilter struct {
+ // gRPC status filter.
+ GrpcStatusFilter *GrpcStatusFilter `protobuf:"bytes,10,opt,name=grpc_status_filter,json=grpcStatusFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_ExtensionFilter struct {
+ // Extension filter.
+ // [#extension-category: envoy.access_loggers.extension_filters]
+ ExtensionFilter *ExtensionFilter `protobuf:"bytes,11,opt,name=extension_filter,json=extensionFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_MetadataFilter struct {
+ // Metadata Filter
+ MetadataFilter *MetadataFilter `protobuf:"bytes,12,opt,name=metadata_filter,json=metadataFilter,proto3,oneof"`
+}
+
+type AccessLogFilter_LogTypeFilter struct {
+ // Log Type Filter
+ LogTypeFilter *LogTypeFilter `protobuf:"bytes,13,opt,name=log_type_filter,json=logTypeFilter,proto3,oneof"`
+}
+
+func (*AccessLogFilter_StatusCodeFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_DurationFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_NotHealthCheckFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_TraceableFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_RuntimeFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_AndFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_OrFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_HeaderFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_ResponseFlagFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_GrpcStatusFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_ExtensionFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_MetadataFilter) isAccessLogFilter_FilterSpecifier() {}
+
+func (*AccessLogFilter_LogTypeFilter) isAccessLogFilter_FilterSpecifier() {}
+
+// Filter on an integer comparison.
+type ComparisonFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Comparison operator.
+ Op ComparisonFilter_Op `protobuf:"varint,1,opt,name=op,proto3,enum=envoy.config.accesslog.v3.ComparisonFilter_Op" json:"op,omitempty"`
+ // Value to compare against.
+ Value *v3.RuntimeUInt32 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *ComparisonFilter) Reset() {
+ *x = ComparisonFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ComparisonFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ComparisonFilter) ProtoMessage() {}
+
+func (x *ComparisonFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ComparisonFilter.ProtoReflect.Descriptor instead.
+func (*ComparisonFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ComparisonFilter) GetOp() ComparisonFilter_Op {
+ if x != nil {
+ return x.Op
+ }
+ return ComparisonFilter_EQ
+}
+
+func (x *ComparisonFilter) GetValue() *v3.RuntimeUInt32 {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// Filters on HTTP response/status code.
+type StatusCodeFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Comparison.
+ Comparison *ComparisonFilter `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"`
+}
+
+func (x *StatusCodeFilter) Reset() {
+ *x = StatusCodeFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *StatusCodeFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*StatusCodeFilter) ProtoMessage() {}
+
+func (x *StatusCodeFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use StatusCodeFilter.ProtoReflect.Descriptor instead.
+func (*StatusCodeFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *StatusCodeFilter) GetComparison() *ComparisonFilter {
+ if x != nil {
+ return x.Comparison
+ }
+ return nil
+}
+
+// Filters based on the duration of the request or stream, in milliseconds.
+// For end of stream access logs, the total duration of the stream will be used.
+// For :ref:`periodic access logs`,
+// the duration of the stream at the time of log recording will be used.
+type DurationFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Comparison.
+ Comparison *ComparisonFilter `protobuf:"bytes,1,opt,name=comparison,proto3" json:"comparison,omitempty"`
+}
+
+func (x *DurationFilter) Reset() {
+ *x = DurationFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DurationFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DurationFilter) ProtoMessage() {}
+
+func (x *DurationFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DurationFilter.ProtoReflect.Descriptor instead.
+func (*DurationFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *DurationFilter) GetComparison() *ComparisonFilter {
+ if x != nil {
+ return x.Comparison
+ }
+ return nil
+}
+
+// Filters for requests that are not health check requests. A health check
+// request is marked by the health check filter.
+type NotHealthCheckFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *NotHealthCheckFilter) Reset() {
+ *x = NotHealthCheckFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *NotHealthCheckFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NotHealthCheckFilter) ProtoMessage() {}
+
+func (x *NotHealthCheckFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use NotHealthCheckFilter.ProtoReflect.Descriptor instead.
+func (*NotHealthCheckFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{5}
+}
+
+// Filters for requests that are traceable. See the tracing overview for more
+// information on how a request becomes traceable.
+type TraceableFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *TraceableFilter) Reset() {
+ *x = TraceableFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TraceableFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TraceableFilter) ProtoMessage() {}
+
+func (x *TraceableFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TraceableFilter.ProtoReflect.Descriptor instead.
+func (*TraceableFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{6}
+}
+
+// Filters requests based on runtime-configurable sampling rates.
+type RuntimeFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies a key used to look up a custom sampling rate from the runtime configuration. If a value is found for this
+ // key, it will override the default sampling rate specified in “percent_sampled“.
+ RuntimeKey string `protobuf:"bytes,1,opt,name=runtime_key,json=runtimeKey,proto3" json:"runtime_key,omitempty"`
+ // Defines the default sampling percentage when no runtime override is present. If not specified, the default is
+ // **0%** (with a denominator of 100).
+ PercentSampled *v31.FractionalPercent `protobuf:"bytes,2,opt,name=percent_sampled,json=percentSampled,proto3" json:"percent_sampled,omitempty"`
+ // Controls how sampling decisions are made.
+ //
+ // - Default behavior (“false“):
+ //
+ // - Uses the :ref:`x-request-id` as a consistent sampling pivot.
+ // - When :ref:`x-request-id` is present, sampling will be consistent
+ // across multiple hosts based on both the “runtime_key“ and
+ // :ref:`x-request-id`.
+ // - Useful for tracking related requests across a distributed system.
+ //
+ // - When set to “true“ or :ref:`x-request-id` is missing:
+ //
+ // - Sampling decisions are made randomly based only on the “runtime_key“.
+ // - Useful in complex filter configurations (like nested
+ // :ref:`AndFilter`/
+ // :ref:`OrFilter` blocks) where independent probability
+ // calculations are desired.
+ // - Can be used to implement logging kill switches with predictable probability distributions.
+ UseIndependentRandomness bool `protobuf:"varint,3,opt,name=use_independent_randomness,json=useIndependentRandomness,proto3" json:"use_independent_randomness,omitempty"`
+}
+
+func (x *RuntimeFilter) Reset() {
+ *x = RuntimeFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeFilter) ProtoMessage() {}
+
+func (x *RuntimeFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeFilter.ProtoReflect.Descriptor instead.
+func (*RuntimeFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *RuntimeFilter) GetRuntimeKey() string {
+ if x != nil {
+ return x.RuntimeKey
+ }
+ return ""
+}
+
+func (x *RuntimeFilter) GetPercentSampled() *v31.FractionalPercent {
+ if x != nil {
+ return x.PercentSampled
+ }
+ return nil
+}
+
+func (x *RuntimeFilter) GetUseIndependentRandomness() bool {
+ if x != nil {
+ return x.UseIndependentRandomness
+ }
+ return false
+}
+
+// Performs a logical “and” operation on the result of each filter in filters.
+// Filters are evaluated sequentially and if one of them returns false, the
+// filter returns false immediately.
+type AndFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Filters []*AccessLogFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+}
+
+func (x *AndFilter) Reset() {
+ *x = AndFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AndFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AndFilter) ProtoMessage() {}
+
+func (x *AndFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AndFilter.ProtoReflect.Descriptor instead.
+func (*AndFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *AndFilter) GetFilters() []*AccessLogFilter {
+ if x != nil {
+ return x.Filters
+ }
+ return nil
+}
+
+// Performs a logical “or” operation on the result of each individual filter.
+// Filters are evaluated sequentially and if one of them returns true, the
+// filter returns true immediately.
+type OrFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Filters []*AccessLogFilter `protobuf:"bytes,2,rep,name=filters,proto3" json:"filters,omitempty"`
+}
+
+func (x *OrFilter) Reset() {
+ *x = OrFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *OrFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*OrFilter) ProtoMessage() {}
+
+func (x *OrFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use OrFilter.ProtoReflect.Descriptor instead.
+func (*OrFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *OrFilter) GetFilters() []*AccessLogFilter {
+ if x != nil {
+ return x.Filters
+ }
+ return nil
+}
+
+// Filters requests based on the presence or value of a request header.
+type HeaderFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Only requests with a header which matches the specified HeaderMatcher will
+ // pass the filter check.
+ Header *v32.HeaderMatcher `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+}
+
+func (x *HeaderFilter) Reset() {
+ *x = HeaderFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *HeaderFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderFilter) ProtoMessage() {}
+
+func (x *HeaderFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HeaderFilter.ProtoReflect.Descriptor instead.
+func (*HeaderFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *HeaderFilter) GetHeader() *v32.HeaderMatcher {
+ if x != nil {
+ return x.Header
+ }
+ return nil
+}
+
+// Filters requests that received responses with an Envoy response flag set.
+// A list of the response flags can be found
+// in the access log formatter
+// :ref:`documentation`.
+type ResponseFlagFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Only responses with the any of the flags listed in this field will be
+ // logged. This field is optional. If it is not specified, then any response
+ // flag will pass the filter check.
+ Flags []string `protobuf:"bytes,1,rep,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *ResponseFlagFilter) Reset() {
+ *x = ResponseFlagFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResponseFlagFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResponseFlagFilter) ProtoMessage() {}
+
+func (x *ResponseFlagFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResponseFlagFilter.ProtoReflect.Descriptor instead.
+func (*ResponseFlagFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *ResponseFlagFilter) GetFlags() []string {
+ if x != nil {
+ return x.Flags
+ }
+ return nil
+}
+
+// Filters gRPC requests based on their response status. If a gRPC status is not
+// provided, the filter will infer the status from the HTTP status code.
+type GrpcStatusFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Logs only responses that have any one of the gRPC statuses in this field.
+ Statuses []GrpcStatusFilter_Status `protobuf:"varint,1,rep,packed,name=statuses,proto3,enum=envoy.config.accesslog.v3.GrpcStatusFilter_Status" json:"statuses,omitempty"`
+ // If included and set to true, the filter will instead block all responses
+ // with a gRPC status or inferred gRPC status enumerated in statuses, and
+ // allow all other responses.
+ Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"`
+}
+
+func (x *GrpcStatusFilter) Reset() {
+ *x = GrpcStatusFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *GrpcStatusFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcStatusFilter) ProtoMessage() {}
+
+func (x *GrpcStatusFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcStatusFilter.ProtoReflect.Descriptor instead.
+func (*GrpcStatusFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *GrpcStatusFilter) GetStatuses() []GrpcStatusFilter_Status {
+ if x != nil {
+ return x.Statuses
+ }
+ return nil
+}
+
+func (x *GrpcStatusFilter) GetExclude() bool {
+ if x != nil {
+ return x.Exclude
+ }
+ return false
+}
+
+// Filters based on matching dynamic metadata.
+// If the matcher path and key correspond to an existing key in dynamic
+// metadata, the request is logged only if the matcher value is equal to the
+// metadata value. If the matcher path and key *do not* correspond to an
+// existing key in dynamic metadata, the request is logged only if
+// match_if_key_not_found is "true" or unset.
+type MetadataFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Matcher to check metadata for specified value. For example, to match on the
+ // access_log_hint metadata, set the filter to "envoy.common" and the path to
+ // "access_log_hint", and the value to "true".
+ Matcher *v33.MetadataMatcher `protobuf:"bytes,1,opt,name=matcher,proto3" json:"matcher,omitempty"`
+ // Default result if the key does not exist in dynamic metadata: if unset or
+ // true, then log; if false, then don't log.
+ MatchIfKeyNotFound *wrapperspb.BoolValue `protobuf:"bytes,2,opt,name=match_if_key_not_found,json=matchIfKeyNotFound,proto3" json:"match_if_key_not_found,omitempty"`
+}
+
+func (x *MetadataFilter) Reset() {
+ *x = MetadataFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetadataFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataFilter) ProtoMessage() {}
+
+func (x *MetadataFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataFilter.ProtoReflect.Descriptor instead.
+func (*MetadataFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *MetadataFilter) GetMatcher() *v33.MetadataMatcher {
+ if x != nil {
+ return x.Matcher
+ }
+ return nil
+}
+
+func (x *MetadataFilter) GetMatchIfKeyNotFound() *wrapperspb.BoolValue {
+ if x != nil {
+ return x.MatchIfKeyNotFound
+ }
+ return nil
+}
+
+// Filters based on access log type.
+type LogTypeFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Logs only records which their type is one of the types defined in this field.
+ Types []v34.AccessLogType `protobuf:"varint,1,rep,packed,name=types,proto3,enum=envoy.data.accesslog.v3.AccessLogType" json:"types,omitempty"`
+ // If this field is set to true, the filter will instead block all records
+ // with a access log type in types field, and allow all other records.
+ Exclude bool `protobuf:"varint,2,opt,name=exclude,proto3" json:"exclude,omitempty"`
+}
+
+func (x *LogTypeFilter) Reset() {
+ *x = LogTypeFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LogTypeFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LogTypeFilter) ProtoMessage() {}
+
+func (x *LogTypeFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LogTypeFilter.ProtoReflect.Descriptor instead.
+func (*LogTypeFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{14}
+}
+
+func (x *LogTypeFilter) GetTypes() []v34.AccessLogType {
+ if x != nil {
+ return x.Types
+ }
+ return nil
+}
+
+func (x *LogTypeFilter) GetExclude() bool {
+ if x != nil {
+ return x.Exclude
+ }
+ return false
+}
+
+// Extension filter is statically registered at runtime.
+type ExtensionFilter struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the filter implementation to instantiate. The name must
+ // match a statically registered filter.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Custom configuration that depends on the filter being instantiated.
+ //
+ // Types that are assignable to ConfigType:
+ //
+ // *ExtensionFilter_TypedConfig
+ ConfigType isExtensionFilter_ConfigType `protobuf_oneof:"config_type"`
+}
+
+func (x *ExtensionFilter) Reset() {
+ *x = ExtensionFilter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExtensionFilter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionFilter) ProtoMessage() {}
+
+func (x *ExtensionFilter) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionFilter.ProtoReflect.Descriptor instead.
+func (*ExtensionFilter) Descriptor() ([]byte, []int) {
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP(), []int{15}
+}
+
+func (x *ExtensionFilter) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *ExtensionFilter) GetConfigType() isExtensionFilter_ConfigType {
+ if m != nil {
+ return m.ConfigType
+ }
+ return nil
+}
+
+func (x *ExtensionFilter) GetTypedConfig() *anypb.Any {
+ if x, ok := x.GetConfigType().(*ExtensionFilter_TypedConfig); ok {
+ return x.TypedConfig
+ }
+ return nil
+}
+
+type isExtensionFilter_ConfigType interface {
+ isExtensionFilter_ConfigType()
+}
+
+type ExtensionFilter_TypedConfig struct {
+ TypedConfig *anypb.Any `protobuf:"bytes,3,opt,name=typed_config,json=typedConfig,proto3,oneof"`
+}
+
+func (*ExtensionFilter_TypedConfig) isExtensionFilter_ConfigType() {}
+
+var File_envoy_config_accesslog_v3_accesslog_proto protoreflect.FileDescriptor
+
+var file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72,
+ 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x64, 0x61, 0x74,
+ 0x61, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x2f, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x6d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72,
+ 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64,
+ 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70,
+ 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17,
+ 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xee, 0x01, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c,
+ 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x39, 0x0a,
+ 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a,
+ 0x2a, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76,
+ 0x32, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x42, 0x0d, 0x0a, 0x0b, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04,
+ 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xca, 0x09, 0x0a, 0x0f, 0x41, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12,
+ 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6f, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x64, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x44,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52,
+ 0x0e, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12,
+ 0x68, 0x0a, 0x17, 0x6e, 0x6f, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x63, 0x68,
+ 0x65, 0x63, 0x6b, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x74,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x48, 0x00, 0x52, 0x14, 0x6e, 0x6f, 0x74, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68,
+ 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x57, 0x0a, 0x10, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e,
+ 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48,
+ 0x00, 0x52, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x12, 0x51, 0x0a, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x45, 0x0a, 0x0a, 0x61, 0x6e, 0x64, 0x5f, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c,
+ 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48,
+ 0x00, 0x52, 0x09, 0x61, 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x09,
+ 0x6f, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x72, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x08, 0x6f, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x4e, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67,
+ 0x2e, 0x76, 0x33, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x48, 0x00, 0x52, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x61, 0x0a, 0x14, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x66, 0x6c, 0x61,
+ 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52,
+ 0x12, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x12, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2b, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10,
+ 0x67, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72,
+ 0x12, 0x57, 0x0a, 0x10, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x54, 0x0a, 0x0f, 0x6d, 0x65, 0x74,
+ 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52,
+ 0x0e, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12,
+ 0x52, 0x0a, 0x0f, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f,
+ 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x6c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x17, 0x0a, 0x10,
+ 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72,
+ 0x12, 0x03, 0xf8, 0x42, 0x01, 0x22, 0xf9, 0x01, 0x0a, 0x10, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72,
+ 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x02, 0x6f, 0x70,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x2e, 0x4f, 0x70, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01,
+ 0x52, 0x02, 0x6f, 0x70, 0x12, 0x43, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02,
+ 0x10, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x1c, 0x0a, 0x02, 0x4f, 0x70, 0x12,
+ 0x06, 0x0a, 0x02, 0x45, 0x51, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x45, 0x10, 0x01, 0x12,
+ 0x06, 0x0a, 0x02, 0x4c, 0x45, 0x10, 0x02, 0x3a, 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x22, 0xa3, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72,
+ 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f,
+ 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x3a, 0x38, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64,
+ 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x01, 0x0a, 0x0e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0a, 0x63, 0x6f,
+ 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61,
+ 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05,
+ 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f,
+ 0x6e, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x54, 0x0a, 0x14, 0x4e, 0x6f, 0x74,
+ 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x3a, 0x3c, 0x9a, 0xc5, 0x88, 0x1e, 0x37, 0x0a, 0x35, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x74, 0x48, 0x65,
+ 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22,
+ 0x4a, 0x0a, 0x0f, 0x54, 0x72, 0x61, 0x63, 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x72, 0x61, 0x63,
+ 0x65, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xf9, 0x01, 0x0a, 0x0d,
+ 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x28, 0x0a,
+ 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e,
+ 0x74, 0x69, 0x6d, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x0f, 0x70, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x65, 0x72, 0x63, 0x65,
+ 0x6e, 0x74, 0x52, 0x0e, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c,
+ 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x1a, 0x75, 0x73, 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x70, 0x65,
+ 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x5f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x75, 0x73, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x70,
+ 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x74, 0x52, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x6e, 0x65, 0x73, 0x73,
+ 0x3a, 0x35, 0x9a, 0xc5, 0x88, 0x1e, 0x30, 0x0a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d,
+ 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8e, 0x01, 0x0a, 0x09, 0x41, 0x6e, 0x64, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x31, 0x9a, 0xc5, 0x88, 0x1e, 0x2c, 0x0a, 0x2a, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x41,
+ 0x6e, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x08, 0x4f, 0x72, 0x46,
+ 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73,
+ 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e,
+ 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x74,
+ 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x07, 0x66, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x30, 0x9a, 0xc5, 0x88, 0x1e, 0x2b, 0x0a, 0x29, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65,
+ 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4f,
+ 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x8c, 0x01, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x42, 0x08,
+ 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x3a, 0x34, 0x9a, 0xc5, 0x88, 0x1e, 0x2f, 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xfa, 0x01, 0x0a, 0x12, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0xa7, 0x01,
+ 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x90, 0x01,
+ 0xfa, 0x42, 0x8c, 0x01, 0x92, 0x01, 0x88, 0x01, 0x22, 0x85, 0x01, 0x72, 0x82, 0x01, 0x52, 0x02,
+ 0x4c, 0x48, 0x52, 0x02, 0x55, 0x48, 0x52, 0x02, 0x55, 0x54, 0x52, 0x02, 0x4c, 0x52, 0x52, 0x02,
+ 0x55, 0x52, 0x52, 0x02, 0x55, 0x46, 0x52, 0x02, 0x55, 0x43, 0x52, 0x02, 0x55, 0x4f, 0x52, 0x02,
+ 0x4e, 0x52, 0x52, 0x02, 0x44, 0x49, 0x52, 0x02, 0x46, 0x49, 0x52, 0x02, 0x52, 0x4c, 0x52, 0x04,
+ 0x55, 0x41, 0x45, 0x58, 0x52, 0x04, 0x52, 0x4c, 0x53, 0x45, 0x52, 0x02, 0x44, 0x43, 0x52, 0x03,
+ 0x55, 0x52, 0x58, 0x52, 0x02, 0x53, 0x49, 0x52, 0x02, 0x49, 0x48, 0x52, 0x03, 0x44, 0x50, 0x45,
+ 0x52, 0x05, 0x55, 0x4d, 0x53, 0x44, 0x52, 0x52, 0x04, 0x52, 0x46, 0x43, 0x46, 0x52, 0x04, 0x4e,
+ 0x46, 0x43, 0x46, 0x52, 0x02, 0x44, 0x54, 0x52, 0x03, 0x55, 0x50, 0x45, 0x52, 0x02, 0x4e, 0x43,
+ 0x52, 0x02, 0x4f, 0x4d, 0x52, 0x02, 0x44, 0x46, 0x52, 0x02, 0x44, 0x4f, 0x52, 0x02, 0x44, 0x52,
+ 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32,
+ 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x46, 0x6c, 0x61, 0x67, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x22, 0x80, 0x04, 0x0a, 0x10, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5d, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x0d,
+ 0xfa, 0x42, 0x0a, 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x08, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75,
+ 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64,
+ 0x65, 0x22, 0xb8, 0x02, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x06, 0x0a, 0x02,
+ 0x4f, 0x4b, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x45, 0x44,
+ 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12,
+ 0x14, 0x0a, 0x10, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x41, 0x52, 0x47, 0x55, 0x4d,
+ 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x44, 0x45, 0x41, 0x44, 0x4c, 0x49, 0x4e,
+ 0x45, 0x5f, 0x45, 0x58, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09,
+ 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x41,
+ 0x4c, 0x52, 0x45, 0x41, 0x44, 0x59, 0x5f, 0x45, 0x58, 0x49, 0x53, 0x54, 0x53, 0x10, 0x06, 0x12,
+ 0x15, 0x0a, 0x11, 0x50, 0x45, 0x52, 0x4d, 0x49, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x44, 0x45,
+ 0x4e, 0x49, 0x45, 0x44, 0x10, 0x07, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52,
+ 0x43, 0x45, 0x5f, 0x45, 0x58, 0x48, 0x41, 0x55, 0x53, 0x54, 0x45, 0x44, 0x10, 0x08, 0x12, 0x17,
+ 0x0a, 0x13, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x43, 0x4f, 0x4e, 0x44,
+ 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54,
+ 0x45, 0x44, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x52,
+ 0x41, 0x4e, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x11, 0x0a, 0x0d, 0x55, 0x4e, 0x49, 0x4d, 0x50, 0x4c,
+ 0x45, 0x4d, 0x45, 0x4e, 0x54, 0x45, 0x44, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x54,
+ 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x0d, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x4e, 0x41, 0x56, 0x41,
+ 0x49, 0x4c, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x0e, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x41, 0x54, 0x41,
+ 0x5f, 0x4c, 0x4f, 0x53, 0x53, 0x10, 0x0f, 0x12, 0x13, 0x0a, 0x0f, 0x55, 0x4e, 0x41, 0x55, 0x54,
+ 0x48, 0x45, 0x4e, 0x54, 0x49, 0x43, 0x41, 0x54, 0x45, 0x44, 0x10, 0x10, 0x3a, 0x38, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c,
+ 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x72, 0x70, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
+ 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xda, 0x01, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x61, 0x64,
+ 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x07, 0x6d, 0x61, 0x74,
+ 0x63, 0x68, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x61, 0x74, 0x63, 0x68,
+ 0x65, 0x72, 0x52, 0x07, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x16, 0x6d,
+ 0x61, 0x74, 0x63, 0x68, 0x5f, 0x69, 0x66, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
+ 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f,
+ 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x49, 0x66,
+ 0x4b, 0x65, 0x79, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x3a, 0x36, 0x9a, 0xc5, 0x88,
+ 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f,
+ 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x46, 0x69, 0x6c,
+ 0x74, 0x65, 0x72, 0x22, 0x76, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x46, 0x69,
+ 0x6c, 0x74, 0x65, 0x72, 0x12, 0x4b, 0x0a, 0x05, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x64, 0x61, 0x74, 0x61,
+ 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63,
+ 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x54, 0x79, 0x70, 0x65, 0x42, 0x0d, 0xfa, 0x42, 0x0a,
+ 0x92, 0x01, 0x07, 0x22, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x74, 0x79, 0x70, 0x65,
+ 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x07, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x22, 0xb6, 0x01, 0x0a, 0x0f,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12,
+ 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
+ 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0c, 0x74, 0x79, 0x70, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48,
+ 0x00, 0x52, 0x0b, 0x74, 0x79, 0x70, 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x37,
+ 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a,
+ 0x27, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
+ 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78,
+ 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61,
+ 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x33, 0x3b, 0x61, 0x63, 0x63,
+ 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDescOnce sync.Once
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDescData = file_envoy_config_accesslog_v3_accesslog_proto_rawDesc
+)
+
+func file_envoy_config_accesslog_v3_accesslog_proto_rawDescGZIP() []byte {
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDescOnce.Do(func() {
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_accesslog_v3_accesslog_proto_rawDescData)
+ })
+ return file_envoy_config_accesslog_v3_accesslog_proto_rawDescData
+}
+
+var file_envoy_config_accesslog_v3_accesslog_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_envoy_config_accesslog_v3_accesslog_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_envoy_config_accesslog_v3_accesslog_proto_goTypes = []interface{}{
+ (ComparisonFilter_Op)(0), // 0: envoy.config.accesslog.v3.ComparisonFilter.Op
+ (GrpcStatusFilter_Status)(0), // 1: envoy.config.accesslog.v3.GrpcStatusFilter.Status
+ (*AccessLog)(nil), // 2: envoy.config.accesslog.v3.AccessLog
+ (*AccessLogFilter)(nil), // 3: envoy.config.accesslog.v3.AccessLogFilter
+ (*ComparisonFilter)(nil), // 4: envoy.config.accesslog.v3.ComparisonFilter
+ (*StatusCodeFilter)(nil), // 5: envoy.config.accesslog.v3.StatusCodeFilter
+ (*DurationFilter)(nil), // 6: envoy.config.accesslog.v3.DurationFilter
+ (*NotHealthCheckFilter)(nil), // 7: envoy.config.accesslog.v3.NotHealthCheckFilter
+ (*TraceableFilter)(nil), // 8: envoy.config.accesslog.v3.TraceableFilter
+ (*RuntimeFilter)(nil), // 9: envoy.config.accesslog.v3.RuntimeFilter
+ (*AndFilter)(nil), // 10: envoy.config.accesslog.v3.AndFilter
+ (*OrFilter)(nil), // 11: envoy.config.accesslog.v3.OrFilter
+ (*HeaderFilter)(nil), // 12: envoy.config.accesslog.v3.HeaderFilter
+ (*ResponseFlagFilter)(nil), // 13: envoy.config.accesslog.v3.ResponseFlagFilter
+ (*GrpcStatusFilter)(nil), // 14: envoy.config.accesslog.v3.GrpcStatusFilter
+ (*MetadataFilter)(nil), // 15: envoy.config.accesslog.v3.MetadataFilter
+ (*LogTypeFilter)(nil), // 16: envoy.config.accesslog.v3.LogTypeFilter
+ (*ExtensionFilter)(nil), // 17: envoy.config.accesslog.v3.ExtensionFilter
+ (*anypb.Any)(nil), // 18: google.protobuf.Any
+ (*v3.RuntimeUInt32)(nil), // 19: envoy.config.core.v3.RuntimeUInt32
+ (*v31.FractionalPercent)(nil), // 20: envoy.type.v3.FractionalPercent
+ (*v32.HeaderMatcher)(nil), // 21: envoy.config.route.v3.HeaderMatcher
+ (*v33.MetadataMatcher)(nil), // 22: envoy.type.matcher.v3.MetadataMatcher
+ (*wrapperspb.BoolValue)(nil), // 23: google.protobuf.BoolValue
+ (v34.AccessLogType)(0), // 24: envoy.data.accesslog.v3.AccessLogType
+}
+var file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = []int32{
+ 3, // 0: envoy.config.accesslog.v3.AccessLog.filter:type_name -> envoy.config.accesslog.v3.AccessLogFilter
+ 18, // 1: envoy.config.accesslog.v3.AccessLog.typed_config:type_name -> google.protobuf.Any
+ 5, // 2: envoy.config.accesslog.v3.AccessLogFilter.status_code_filter:type_name -> envoy.config.accesslog.v3.StatusCodeFilter
+ 6, // 3: envoy.config.accesslog.v3.AccessLogFilter.duration_filter:type_name -> envoy.config.accesslog.v3.DurationFilter
+ 7, // 4: envoy.config.accesslog.v3.AccessLogFilter.not_health_check_filter:type_name -> envoy.config.accesslog.v3.NotHealthCheckFilter
+ 8, // 5: envoy.config.accesslog.v3.AccessLogFilter.traceable_filter:type_name -> envoy.config.accesslog.v3.TraceableFilter
+ 9, // 6: envoy.config.accesslog.v3.AccessLogFilter.runtime_filter:type_name -> envoy.config.accesslog.v3.RuntimeFilter
+ 10, // 7: envoy.config.accesslog.v3.AccessLogFilter.and_filter:type_name -> envoy.config.accesslog.v3.AndFilter
+ 11, // 8: envoy.config.accesslog.v3.AccessLogFilter.or_filter:type_name -> envoy.config.accesslog.v3.OrFilter
+ 12, // 9: envoy.config.accesslog.v3.AccessLogFilter.header_filter:type_name -> envoy.config.accesslog.v3.HeaderFilter
+ 13, // 10: envoy.config.accesslog.v3.AccessLogFilter.response_flag_filter:type_name -> envoy.config.accesslog.v3.ResponseFlagFilter
+ 14, // 11: envoy.config.accesslog.v3.AccessLogFilter.grpc_status_filter:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter
+ 17, // 12: envoy.config.accesslog.v3.AccessLogFilter.extension_filter:type_name -> envoy.config.accesslog.v3.ExtensionFilter
+ 15, // 13: envoy.config.accesslog.v3.AccessLogFilter.metadata_filter:type_name -> envoy.config.accesslog.v3.MetadataFilter
+ 16, // 14: envoy.config.accesslog.v3.AccessLogFilter.log_type_filter:type_name -> envoy.config.accesslog.v3.LogTypeFilter
+ 0, // 15: envoy.config.accesslog.v3.ComparisonFilter.op:type_name -> envoy.config.accesslog.v3.ComparisonFilter.Op
+ 19, // 16: envoy.config.accesslog.v3.ComparisonFilter.value:type_name -> envoy.config.core.v3.RuntimeUInt32
+ 4, // 17: envoy.config.accesslog.v3.StatusCodeFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter
+ 4, // 18: envoy.config.accesslog.v3.DurationFilter.comparison:type_name -> envoy.config.accesslog.v3.ComparisonFilter
+ 20, // 19: envoy.config.accesslog.v3.RuntimeFilter.percent_sampled:type_name -> envoy.type.v3.FractionalPercent
+ 3, // 20: envoy.config.accesslog.v3.AndFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter
+ 3, // 21: envoy.config.accesslog.v3.OrFilter.filters:type_name -> envoy.config.accesslog.v3.AccessLogFilter
+ 21, // 22: envoy.config.accesslog.v3.HeaderFilter.header:type_name -> envoy.config.route.v3.HeaderMatcher
+ 1, // 23: envoy.config.accesslog.v3.GrpcStatusFilter.statuses:type_name -> envoy.config.accesslog.v3.GrpcStatusFilter.Status
+ 22, // 24: envoy.config.accesslog.v3.MetadataFilter.matcher:type_name -> envoy.type.matcher.v3.MetadataMatcher
+ 23, // 25: envoy.config.accesslog.v3.MetadataFilter.match_if_key_not_found:type_name -> google.protobuf.BoolValue
+ 24, // 26: envoy.config.accesslog.v3.LogTypeFilter.types:type_name -> envoy.data.accesslog.v3.AccessLogType
+ 18, // 27: envoy.config.accesslog.v3.ExtensionFilter.typed_config:type_name -> google.protobuf.Any
+ 28, // [28:28] is the sub-list for method output_type
+ 28, // [28:28] is the sub-list for method input_type
+ 28, // [28:28] is the sub-list for extension type_name
+ 28, // [28:28] is the sub-list for extension extendee
+ 0, // [0:28] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_accesslog_v3_accesslog_proto_init() }
+func file_envoy_config_accesslog_v3_accesslog_proto_init() {
+ if File_envoy_config_accesslog_v3_accesslog_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AccessLog); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AccessLogFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ComparisonFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*StatusCodeFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DurationFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*NotHealthCheckFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TraceableFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AndFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*OrFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*HeaderFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResponseFlagFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*GrpcStatusFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetadataFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LogTypeFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExtensionFilter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*AccessLog_TypedConfig)(nil),
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[1].OneofWrappers = []interface{}{
+ (*AccessLogFilter_StatusCodeFilter)(nil),
+ (*AccessLogFilter_DurationFilter)(nil),
+ (*AccessLogFilter_NotHealthCheckFilter)(nil),
+ (*AccessLogFilter_TraceableFilter)(nil),
+ (*AccessLogFilter_RuntimeFilter)(nil),
+ (*AccessLogFilter_AndFilter)(nil),
+ (*AccessLogFilter_OrFilter)(nil),
+ (*AccessLogFilter_HeaderFilter)(nil),
+ (*AccessLogFilter_ResponseFlagFilter)(nil),
+ (*AccessLogFilter_GrpcStatusFilter)(nil),
+ (*AccessLogFilter_ExtensionFilter)(nil),
+ (*AccessLogFilter_MetadataFilter)(nil),
+ (*AccessLogFilter_LogTypeFilter)(nil),
+ }
+ file_envoy_config_accesslog_v3_accesslog_proto_msgTypes[15].OneofWrappers = []interface{}{
+ (*ExtensionFilter_TypedConfig)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_accesslog_v3_accesslog_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_accesslog_v3_accesslog_proto_goTypes,
+ DependencyIndexes: file_envoy_config_accesslog_v3_accesslog_proto_depIdxs,
+ EnumInfos: file_envoy_config_accesslog_v3_accesslog_proto_enumTypes,
+ MessageInfos: file_envoy_config_accesslog_v3_accesslog_proto_msgTypes,
+ }.Build()
+ File_envoy_config_accesslog_v3_accesslog_proto = out.File
+ file_envoy_config_accesslog_v3_accesslog_proto_rawDesc = nil
+ file_envoy_config_accesslog_v3_accesslog_proto_goTypes = nil
+ file_envoy_config_accesslog_v3_accesslog_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go
new file mode 100644
index 000000000..746f6f2c4
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog.pb.validate.go
@@ -0,0 +1,2773 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/accesslog/v3/accesslog.proto
+
+package accesslogv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+
+ v3 "github.com/envoyproxy/go-control-plane/envoy/data/accesslog/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+
+ _ = v3.AccessLogType(0)
+)
+
+// Validate checks the field values on AccessLog with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *AccessLog) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AccessLog with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in AccessLogMultiError, or nil
+// if none found.
+func (m *AccessLog) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AccessLog) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogValidationError{
+ field: "Filter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogValidationError{
+ field: "Filter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogValidationError{
+ field: "Filter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ switch v := m.ConfigType.(type) {
+ case *AccessLog_TypedConfig:
+ if v == nil {
+ err := AccessLogValidationError{
+ field: "ConfigType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return AccessLogMultiError(errors)
+ }
+
+ return nil
+}
+
+// AccessLogMultiError is an error wrapping multiple validation errors returned
+// by AccessLog.ValidateAll() if the designated constraints aren't met.
+type AccessLogMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AccessLogMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AccessLogMultiError) AllErrors() []error { return m }
+
+// AccessLogValidationError is the validation error returned by
+// AccessLog.Validate if the designated constraints aren't met.
+type AccessLogValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AccessLogValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AccessLogValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AccessLogValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AccessLogValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AccessLogValidationError) ErrorName() string { return "AccessLogValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AccessLogValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAccessLog.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AccessLogValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AccessLogValidationError{}
+
+// Validate checks the field values on AccessLogFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *AccessLogFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AccessLogFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// AccessLogFilterMultiError, or nil if none found.
+func (m *AccessLogFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AccessLogFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofFilterSpecifierPresent := false
+ switch v := m.FilterSpecifier.(type) {
+ case *AccessLogFilter_StatusCodeFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetStatusCodeFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "StatusCodeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "StatusCodeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStatusCodeFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "StatusCodeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_DurationFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetDurationFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "DurationFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "DurationFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDurationFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "DurationFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_NotHealthCheckFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetNotHealthCheckFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "NotHealthCheckFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "NotHealthCheckFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNotHealthCheckFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "NotHealthCheckFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_TraceableFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetTraceableFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "TraceableFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "TraceableFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTraceableFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "TraceableFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_RuntimeFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetRuntimeFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "RuntimeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "RuntimeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRuntimeFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "RuntimeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_AndFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetAndFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "AndFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "AndFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAndFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "AndFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_OrFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetOrFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "OrFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "OrFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOrFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "OrFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_HeaderFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetHeaderFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "HeaderFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "HeaderFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHeaderFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "HeaderFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_ResponseFlagFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetResponseFlagFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "ResponseFlagFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "ResponseFlagFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetResponseFlagFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "ResponseFlagFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_GrpcStatusFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetGrpcStatusFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "GrpcStatusFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "GrpcStatusFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGrpcStatusFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "GrpcStatusFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_ExtensionFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetExtensionFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "ExtensionFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "ExtensionFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetExtensionFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "ExtensionFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_MetadataFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetMetadataFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "MetadataFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "MetadataFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMetadataFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "MetadataFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *AccessLogFilter_LogTypeFilter:
+ if v == nil {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofFilterSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetLogTypeFilter()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "LogTypeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AccessLogFilterValidationError{
+ field: "LogTypeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLogTypeFilter()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AccessLogFilterValidationError{
+ field: "LogTypeFilter",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofFilterSpecifierPresent {
+ err := AccessLogFilterValidationError{
+ field: "FilterSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return AccessLogFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// AccessLogFilterMultiError is an error wrapping multiple validation errors
+// returned by AccessLogFilter.ValidateAll() if the designated constraints
+// aren't met.
+type AccessLogFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AccessLogFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AccessLogFilterMultiError) AllErrors() []error { return m }
+
+// AccessLogFilterValidationError is the validation error returned by
+// AccessLogFilter.Validate if the designated constraints aren't met.
+type AccessLogFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AccessLogFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AccessLogFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AccessLogFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AccessLogFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AccessLogFilterValidationError) ErrorName() string { return "AccessLogFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AccessLogFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAccessLogFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AccessLogFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AccessLogFilterValidationError{}
+
+// Validate checks the field values on ComparisonFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ComparisonFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ComparisonFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ComparisonFilterMultiError, or nil if none found.
+func (m *ComparisonFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ComparisonFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := ComparisonFilter_Op_name[int32(m.GetOp())]; !ok {
+ err := ComparisonFilterValidationError{
+ field: "Op",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetValue() == nil {
+ err := ComparisonFilterValidationError{
+ field: "Value",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetValue()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ComparisonFilterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ComparisonFilterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ComparisonFilterValidationError{
+ field: "Value",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ComparisonFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ComparisonFilterMultiError is an error wrapping multiple validation errors
+// returned by ComparisonFilter.ValidateAll() if the designated constraints
+// aren't met.
+type ComparisonFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ComparisonFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ComparisonFilterMultiError) AllErrors() []error { return m }
+
+// ComparisonFilterValidationError is the validation error returned by
+// ComparisonFilter.Validate if the designated constraints aren't met.
+type ComparisonFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ComparisonFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ComparisonFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ComparisonFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ComparisonFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ComparisonFilterValidationError) ErrorName() string { return "ComparisonFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ComparisonFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sComparisonFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ComparisonFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ComparisonFilterValidationError{}
+
+// Validate checks the field values on StatusCodeFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *StatusCodeFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on StatusCodeFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// StatusCodeFilterMultiError, or nil if none found.
+func (m *StatusCodeFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *StatusCodeFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetComparison() == nil {
+ err := StatusCodeFilterValidationError{
+ field: "Comparison",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetComparison()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, StatusCodeFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, StatusCodeFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetComparison()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return StatusCodeFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return StatusCodeFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// StatusCodeFilterMultiError is an error wrapping multiple validation errors
+// returned by StatusCodeFilter.ValidateAll() if the designated constraints
+// aren't met.
+type StatusCodeFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m StatusCodeFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m StatusCodeFilterMultiError) AllErrors() []error { return m }
+
+// StatusCodeFilterValidationError is the validation error returned by
+// StatusCodeFilter.Validate if the designated constraints aren't met.
+type StatusCodeFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e StatusCodeFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e StatusCodeFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e StatusCodeFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e StatusCodeFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e StatusCodeFilterValidationError) ErrorName() string { return "StatusCodeFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e StatusCodeFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sStatusCodeFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = StatusCodeFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = StatusCodeFilterValidationError{}
+
+// Validate checks the field values on DurationFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *DurationFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on DurationFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in DurationFilterMultiError,
+// or nil if none found.
+func (m *DurationFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *DurationFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetComparison() == nil {
+ err := DurationFilterValidationError{
+ field: "Comparison",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetComparison()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, DurationFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, DurationFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetComparison()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return DurationFilterValidationError{
+ field: "Comparison",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return DurationFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// DurationFilterMultiError is an error wrapping multiple validation errors
+// returned by DurationFilter.ValidateAll() if the designated constraints
+// aren't met.
+type DurationFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m DurationFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m DurationFilterMultiError) AllErrors() []error { return m }
+
+// DurationFilterValidationError is the validation error returned by
+// DurationFilter.Validate if the designated constraints aren't met.
+type DurationFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e DurationFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e DurationFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e DurationFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e DurationFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e DurationFilterValidationError) ErrorName() string { return "DurationFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e DurationFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sDurationFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = DurationFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = DurationFilterValidationError{}
+
+// Validate checks the field values on NotHealthCheckFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *NotHealthCheckFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on NotHealthCheckFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// NotHealthCheckFilterMultiError, or nil if none found.
+func (m *NotHealthCheckFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *NotHealthCheckFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return NotHealthCheckFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// NotHealthCheckFilterMultiError is an error wrapping multiple validation
+// errors returned by NotHealthCheckFilter.ValidateAll() if the designated
+// constraints aren't met.
+type NotHealthCheckFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m NotHealthCheckFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m NotHealthCheckFilterMultiError) AllErrors() []error { return m }
+
+// NotHealthCheckFilterValidationError is the validation error returned by
+// NotHealthCheckFilter.Validate if the designated constraints aren't met.
+type NotHealthCheckFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e NotHealthCheckFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e NotHealthCheckFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e NotHealthCheckFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e NotHealthCheckFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e NotHealthCheckFilterValidationError) ErrorName() string {
+ return "NotHealthCheckFilterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e NotHealthCheckFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sNotHealthCheckFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = NotHealthCheckFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = NotHealthCheckFilterValidationError{}
+
+// Validate checks the field values on TraceableFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *TraceableFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on TraceableFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// TraceableFilterMultiError, or nil if none found.
+func (m *TraceableFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *TraceableFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return TraceableFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// TraceableFilterMultiError is an error wrapping multiple validation errors
+// returned by TraceableFilter.ValidateAll() if the designated constraints
+// aren't met.
+type TraceableFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m TraceableFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m TraceableFilterMultiError) AllErrors() []error { return m }
+
+// TraceableFilterValidationError is the validation error returned by
+// TraceableFilter.Validate if the designated constraints aren't met.
+type TraceableFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e TraceableFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e TraceableFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e TraceableFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e TraceableFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e TraceableFilterValidationError) ErrorName() string { return "TraceableFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e TraceableFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sTraceableFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = TraceableFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = TraceableFilterValidationError{}
+
+// Validate checks the field values on RuntimeFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *RuntimeFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in RuntimeFilterMultiError, or
+// nil if none found.
+func (m *RuntimeFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetRuntimeKey()) < 1 {
+ err := RuntimeFilterValidationError{
+ field: "RuntimeKey",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetPercentSampled()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeFilterValidationError{
+ field: "PercentSampled",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeFilterValidationError{
+ field: "PercentSampled",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetPercentSampled()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeFilterValidationError{
+ field: "PercentSampled",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for UseIndependentRandomness
+
+ if len(errors) > 0 {
+ return RuntimeFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeFilterMultiError is an error wrapping multiple validation errors
+// returned by RuntimeFilter.ValidateAll() if the designated constraints
+// aren't met.
+type RuntimeFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeFilterMultiError) AllErrors() []error { return m }
+
+// RuntimeFilterValidationError is the validation error returned by
+// RuntimeFilter.Validate if the designated constraints aren't met.
+type RuntimeFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeFilterValidationError) ErrorName() string { return "RuntimeFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimeFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeFilterValidationError{}
+
+// Validate checks the field values on AndFilter with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *AndFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on AndFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in AndFilterMultiError, or nil
+// if none found.
+func (m *AndFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *AndFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetFilters()) < 2 {
+ err := AndFilterValidationError{
+ field: "Filters",
+ reason: "value must contain at least 2 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetFilters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AndFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AndFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AndFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return AndFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// AndFilterMultiError is an error wrapping multiple validation errors returned
+// by AndFilter.ValidateAll() if the designated constraints aren't met.
+type AndFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AndFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AndFilterMultiError) AllErrors() []error { return m }
+
+// AndFilterValidationError is the validation error returned by
+// AndFilter.Validate if the designated constraints aren't met.
+type AndFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AndFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AndFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AndFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AndFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AndFilterValidationError) ErrorName() string { return "AndFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AndFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAndFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AndFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AndFilterValidationError{}
+
+// Validate checks the field values on OrFilter with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *OrFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on OrFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in OrFilterMultiError, or nil
+// if none found.
+func (m *OrFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *OrFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(m.GetFilters()) < 2 {
+ err := OrFilterValidationError{
+ field: "Filters",
+ reason: "value must contain at least 2 item(s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ for idx, item := range m.GetFilters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, OrFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, OrFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return OrFilterValidationError{
+ field: fmt.Sprintf("Filters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return OrFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// OrFilterMultiError is an error wrapping multiple validation errors returned
+// by OrFilter.ValidateAll() if the designated constraints aren't met.
+type OrFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m OrFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m OrFilterMultiError) AllErrors() []error { return m }
+
+// OrFilterValidationError is the validation error returned by
+// OrFilter.Validate if the designated constraints aren't met.
+type OrFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e OrFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e OrFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e OrFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e OrFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e OrFilterValidationError) ErrorName() string { return "OrFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e OrFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sOrFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = OrFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = OrFilterValidationError{}
+
+// Validate checks the field values on HeaderFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *HeaderFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on HeaderFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in HeaderFilterMultiError, or
+// nil if none found.
+func (m *HeaderFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *HeaderFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if m.GetHeader() == nil {
+ err := HeaderFilterValidationError{
+ field: "Header",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetHeader()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, HeaderFilterValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, HeaderFilterValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHeader()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return HeaderFilterValidationError{
+ field: "Header",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return HeaderFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// HeaderFilterMultiError is an error wrapping multiple validation errors
+// returned by HeaderFilter.ValidateAll() if the designated constraints aren't met.
+type HeaderFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m HeaderFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m HeaderFilterMultiError) AllErrors() []error { return m }
+
+// HeaderFilterValidationError is the validation error returned by
+// HeaderFilter.Validate if the designated constraints aren't met.
+type HeaderFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e HeaderFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e HeaderFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e HeaderFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e HeaderFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e HeaderFilterValidationError) ErrorName() string { return "HeaderFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e HeaderFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sHeaderFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = HeaderFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = HeaderFilterValidationError{}
+
+// Validate checks the field values on ResponseFlagFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ResponseFlagFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ResponseFlagFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ResponseFlagFilterMultiError, or nil if none found.
+func (m *ResponseFlagFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ResponseFlagFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetFlags() {
+ _, _ = idx, item
+
+ if _, ok := _ResponseFlagFilter_Flags_InLookup[item]; !ok {
+ err := ResponseFlagFilterValidationError{
+ field: fmt.Sprintf("Flags[%v]", idx),
+ reason: "value must be in list [LH UH UT LR UR UF UC UO NR DI FI RL UAEX RLSE DC URX SI IH DPE UMSDR RFCF NFCF DT UPE NC OM DF DO DR]",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return ResponseFlagFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ResponseFlagFilterMultiError is an error wrapping multiple validation errors
+// returned by ResponseFlagFilter.ValidateAll() if the designated constraints
+// aren't met.
+type ResponseFlagFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ResponseFlagFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ResponseFlagFilterMultiError) AllErrors() []error { return m }
+
+// ResponseFlagFilterValidationError is the validation error returned by
+// ResponseFlagFilter.Validate if the designated constraints aren't met.
+type ResponseFlagFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ResponseFlagFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ResponseFlagFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ResponseFlagFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ResponseFlagFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ResponseFlagFilterValidationError) ErrorName() string {
+ return "ResponseFlagFilterValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ResponseFlagFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sResponseFlagFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ResponseFlagFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ResponseFlagFilterValidationError{}
+
+var _ResponseFlagFilter_Flags_InLookup = map[string]struct{}{
+ "LH": {},
+ "UH": {},
+ "UT": {},
+ "LR": {},
+ "UR": {},
+ "UF": {},
+ "UC": {},
+ "UO": {},
+ "NR": {},
+ "DI": {},
+ "FI": {},
+ "RL": {},
+ "UAEX": {},
+ "RLSE": {},
+ "DC": {},
+ "URX": {},
+ "SI": {},
+ "IH": {},
+ "DPE": {},
+ "UMSDR": {},
+ "RFCF": {},
+ "NFCF": {},
+ "DT": {},
+ "UPE": {},
+ "NC": {},
+ "OM": {},
+ "DF": {},
+ "DO": {},
+ "DR": {},
+}
+
+// Validate checks the field values on GrpcStatusFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *GrpcStatusFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on GrpcStatusFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// GrpcStatusFilterMultiError, or nil if none found.
+func (m *GrpcStatusFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *GrpcStatusFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetStatuses() {
+ _, _ = idx, item
+
+ if _, ok := GrpcStatusFilter_Status_name[int32(item)]; !ok {
+ err := GrpcStatusFilterValidationError{
+ field: fmt.Sprintf("Statuses[%v]", idx),
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ // no validation rules for Exclude
+
+ if len(errors) > 0 {
+ return GrpcStatusFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// GrpcStatusFilterMultiError is an error wrapping multiple validation errors
+// returned by GrpcStatusFilter.ValidateAll() if the designated constraints
+// aren't met.
+type GrpcStatusFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m GrpcStatusFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m GrpcStatusFilterMultiError) AllErrors() []error { return m }
+
+// GrpcStatusFilterValidationError is the validation error returned by
+// GrpcStatusFilter.Validate if the designated constraints aren't met.
+type GrpcStatusFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e GrpcStatusFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e GrpcStatusFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e GrpcStatusFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e GrpcStatusFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e GrpcStatusFilterValidationError) ErrorName() string { return "GrpcStatusFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e GrpcStatusFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sGrpcStatusFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = GrpcStatusFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = GrpcStatusFilterValidationError{}
+
+// Validate checks the field values on MetadataFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *MetadataFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MetadataFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in MetadataFilterMultiError,
+// or nil if none found.
+func (m *MetadataFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MetadataFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetMatcher()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MetadataFilterValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MetadataFilterValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatcher()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MetadataFilterValidationError{
+ field: "Matcher",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMatchIfKeyNotFound()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MetadataFilterValidationError{
+ field: "MatchIfKeyNotFound",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MetadataFilterValidationError{
+ field: "MatchIfKeyNotFound",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMatchIfKeyNotFound()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MetadataFilterValidationError{
+ field: "MatchIfKeyNotFound",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return MetadataFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// MetadataFilterMultiError is an error wrapping multiple validation errors
+// returned by MetadataFilter.ValidateAll() if the designated constraints
+// aren't met.
+type MetadataFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MetadataFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MetadataFilterMultiError) AllErrors() []error { return m }
+
+// MetadataFilterValidationError is the validation error returned by
+// MetadataFilter.Validate if the designated constraints aren't met.
+type MetadataFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MetadataFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MetadataFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MetadataFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MetadataFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MetadataFilterValidationError) ErrorName() string { return "MetadataFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e MetadataFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMetadataFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MetadataFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MetadataFilterValidationError{}
+
+// Validate checks the field values on LogTypeFilter with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *LogTypeFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on LogTypeFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in LogTypeFilterMultiError, or
+// nil if none found.
+func (m *LogTypeFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *LogTypeFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetTypes() {
+ _, _ = idx, item
+
+ if _, ok := v3.AccessLogType_name[int32(item)]; !ok {
+ err := LogTypeFilterValidationError{
+ field: fmt.Sprintf("Types[%v]", idx),
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+
+ // no validation rules for Exclude
+
+ if len(errors) > 0 {
+ return LogTypeFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// LogTypeFilterMultiError is an error wrapping multiple validation errors
+// returned by LogTypeFilter.ValidateAll() if the designated constraints
+// aren't met.
+type LogTypeFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m LogTypeFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m LogTypeFilterMultiError) AllErrors() []error { return m }
+
+// LogTypeFilterValidationError is the validation error returned by
+// LogTypeFilter.Validate if the designated constraints aren't met.
+type LogTypeFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e LogTypeFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e LogTypeFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e LogTypeFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e LogTypeFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e LogTypeFilterValidationError) ErrorName() string { return "LogTypeFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e LogTypeFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sLogTypeFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = LogTypeFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = LogTypeFilterValidationError{}
+
+// Validate checks the field values on ExtensionFilter with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *ExtensionFilter) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ExtensionFilter with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// ExtensionFilterMultiError, or nil if none found.
+func (m *ExtensionFilter) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ExtensionFilter) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ switch v := m.ConfigType.(type) {
+ case *ExtensionFilter_TypedConfig:
+ if v == nil {
+ err := ExtensionFilterValidationError{
+ field: "ConfigType",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ExtensionFilterValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ExtensionFilterValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ExtensionFilterValidationError{
+ field: "TypedConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return ExtensionFilterMultiError(errors)
+ }
+
+ return nil
+}
+
+// ExtensionFilterMultiError is an error wrapping multiple validation errors
+// returned by ExtensionFilter.ValidateAll() if the designated constraints
+// aren't met.
+type ExtensionFilterMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ExtensionFilterMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ExtensionFilterMultiError) AllErrors() []error { return m }
+
+// ExtensionFilterValidationError is the validation error returned by
+// ExtensionFilter.Validate if the designated constraints aren't met.
+type ExtensionFilterValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ExtensionFilterValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ExtensionFilterValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ExtensionFilterValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ExtensionFilterValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ExtensionFilterValidationError) ErrorName() string { return "ExtensionFilterValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ExtensionFilterValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sExtensionFilter.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ExtensionFilterValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ExtensionFilterValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go
new file mode 100644
index 000000000..e75bf014a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3/accesslog_vtproto.pb.go
@@ -0,0 +1,1751 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/accesslog/v3/accesslog.proto
+
+package accesslogv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ anypb "github.com/planetscale/vtprotobuf/types/known/anypb"
+ wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *AccessLog) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AccessLog) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.ConfigType.(*AccessLog_TypedConfig); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if m.Filter != nil {
+ size, err := m.Filter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AccessLog_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLog_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.TypedConfig != nil {
+ size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AccessLogFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_LogTypeFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_MetadataFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ExtensionFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_GrpcStatusFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_ResponseFlagFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_HeaderFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_OrFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_AndFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_RuntimeFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_TraceableFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_NotHealthCheckFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_DurationFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.FilterSpecifier.(*AccessLogFilter_StatusCodeFilter); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AccessLogFilter_StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.StatusCodeFilter != nil {
+ size, err := m.StatusCodeFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.DurationFilter != nil {
+ size, err := m.DurationFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.NotHealthCheckFilter != nil {
+ size, err := m.NotHealthCheckFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.TraceableFilter != nil {
+ size, err := m.TraceableFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.RuntimeFilter != nil {
+ size, err := m.RuntimeFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x2a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AndFilter != nil {
+ size, err := m.AndFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x32
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.OrFilter != nil {
+ size, err := m.OrFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x3a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.HeaderFilter != nil {
+ size, err := m.HeaderFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x42
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x42
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ResponseFlagFilter != nil {
+ size, err := m.ResponseFlagFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x4a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x4a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.GrpcStatusFilter != nil {
+ size, err := m.GrpcStatusFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x52
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x52
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ExtensionFilter != nil {
+ size, err := m.ExtensionFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x5a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x5a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.MetadataFilter != nil {
+ size, err := m.MetadataFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x62
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x62
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLogFilter_LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AccessLogFilter_LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.LogTypeFilter != nil {
+ size, err := m.LogTypeFilter.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x6a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x6a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *ComparisonFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ComparisonFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ComparisonFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Value != nil {
+ if vtmsg, ok := interface{}(m.Value).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Value)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Op != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Op))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatusCodeFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusCodeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *StatusCodeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Comparison != nil {
+ size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DurationFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DurationFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *DurationFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Comparison != nil {
+ size, err := m.Comparison.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *NotHealthCheckFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *NotHealthCheckFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *NotHealthCheckFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TraceableFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TraceableFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *TraceableFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.UseIndependentRandomness {
+ i--
+ if m.UseIndependentRandomness {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.PercentSampled != nil {
+ if vtmsg, ok := interface{}(m.PercentSampled).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.PercentSampled)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.RuntimeKey) > 0 {
+ i -= len(m.RuntimeKey)
+ copy(dAtA[i:], m.RuntimeKey)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.RuntimeKey)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AndFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AndFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *AndFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Filters) > 0 {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *OrFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *OrFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *OrFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Filters) > 0 {
+ for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Filters[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HeaderFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HeaderFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *HeaderFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Header != nil {
+ if vtmsg, ok := interface{}(m.Header).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Header)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResponseFlagFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResponseFlagFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ResponseFlagFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Flags) > 0 {
+ for iNdEx := len(m.Flags) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Flags[iNdEx])
+ copy(dAtA[i:], m.Flags[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Flags[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *GrpcStatusFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *GrpcStatusFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *GrpcStatusFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Exclude {
+ i--
+ if m.Exclude {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Statuses) > 0 {
+ var pksize2 int
+ for _, num := range m.Statuses {
+ pksize2 += protohelpers.SizeOfVarint(uint64(num))
+ }
+ i -= pksize2
+ j1 := i
+ for _, num1 := range m.Statuses {
+ num := uint64(num1)
+ for num >= 1<<7 {
+ dAtA[j1] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j1++
+ }
+ dAtA[j1] = uint8(num)
+ j1++
+ }
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MetadataFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MetadataFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *MetadataFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MatchIfKeyNotFound != nil {
+ size, err := (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Matcher != nil {
+ if vtmsg, ok := interface{}(m.Matcher).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Matcher)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LogTypeFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LogTypeFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *LogTypeFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Exclude {
+ i--
+ if m.Exclude {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Types) > 0 {
+ var pksize2 int
+ for _, num := range m.Types {
+ pksize2 += protohelpers.SizeOfVarint(uint64(num))
+ }
+ i -= pksize2
+ j1 := i
+ for _, num1 := range m.Types {
+ num := uint64(num1)
+ for num >= 1<<7 {
+ dAtA[j1] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j1++
+ }
+ dAtA[j1] = uint8(num)
+ j1++
+ }
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(pksize2))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ExtensionFilter) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ExtensionFilter) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ExtensionFilter) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.ConfigType.(*ExtensionFilter_TypedConfig); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ExtensionFilter_TypedConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ExtensionFilter_TypedConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.TypedConfig != nil {
+ size, err := (*anypb.Any)(m.TypedConfig).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *AccessLog) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Filter != nil {
+ l = m.Filter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *AccessLog_TypedConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TypedConfig != nil {
+ l = (*anypb.Any)(m.TypedConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if vtmsg, ok := m.FilterSpecifier.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *AccessLogFilter_StatusCodeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.StatusCodeFilter != nil {
+ l = m.StatusCodeFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_DurationFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DurationFilter != nil {
+ l = m.DurationFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_NotHealthCheckFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NotHealthCheckFilter != nil {
+ l = m.NotHealthCheckFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_TraceableFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TraceableFilter != nil {
+ l = m.TraceableFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_RuntimeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RuntimeFilter != nil {
+ l = m.RuntimeFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_AndFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AndFilter != nil {
+ l = m.AndFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_OrFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.OrFilter != nil {
+ l = m.OrFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_HeaderFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.HeaderFilter != nil {
+ l = m.HeaderFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_ResponseFlagFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ResponseFlagFilter != nil {
+ l = m.ResponseFlagFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_GrpcStatusFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.GrpcStatusFilter != nil {
+ l = m.GrpcStatusFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_ExtensionFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ExtensionFilter != nil {
+ l = m.ExtensionFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_MetadataFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MetadataFilter != nil {
+ l = m.MetadataFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *AccessLogFilter_LogTypeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LogTypeFilter != nil {
+ l = m.LogTypeFilter.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *ComparisonFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Op != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Op))
+ }
+ if m.Value != nil {
+ if size, ok := interface{}(m.Value).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Value)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *StatusCodeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Comparison != nil {
+ l = m.Comparison.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *DurationFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Comparison != nil {
+ l = m.Comparison.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *NotHealthCheckFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *TraceableFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.RuntimeKey)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.PercentSampled != nil {
+ if size, ok := interface{}(m.PercentSampled).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.PercentSampled)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.UseIndependentRandomness {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *AndFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Filters) > 0 {
+ for _, e := range m.Filters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *OrFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Filters) > 0 {
+ for _, e := range m.Filters {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *HeaderFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ if size, ok := interface{}(m.Header).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Header)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ResponseFlagFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Flags) > 0 {
+ for _, s := range m.Flags {
+ l = len(s)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *GrpcStatusFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Statuses) > 0 {
+ l = 0
+ for _, e := range m.Statuses {
+ l += protohelpers.SizeOfVarint(uint64(e))
+ }
+ n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l
+ }
+ if m.Exclude {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *MetadataFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Matcher != nil {
+ if size, ok := interface{}(m.Matcher).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Matcher)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MatchIfKeyNotFound != nil {
+ l = (*wrapperspb.BoolValue)(m.MatchIfKeyNotFound).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *LogTypeFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Types) > 0 {
+ l = 0
+ for _, e := range m.Types {
+ l += protohelpers.SizeOfVarint(uint64(e))
+ }
+ n += 1 + protohelpers.SizeOfVarint(uint64(l)) + l
+ }
+ if m.Exclude {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ExtensionFilter) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if vtmsg, ok := m.ConfigType.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ExtensionFilter_TypedConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TypedConfig != nil {
+ l = (*anypb.Any)(m.TypedConfig).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go
new file mode 100644
index 000000000..a8522cc1a
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.go
@@ -0,0 +1,3310 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/config/bootstrap/v3/bootstrap.proto
+
+package bootstrapv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ v34 "github.com/envoyproxy/go-control-plane/envoy/config/accesslog/v3"
+ v37 "github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v36 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/config/metrics/v3"
+ v33 "github.com/envoyproxy/go-control-plane/envoy/config/overload/v3"
+ v32 "github.com/envoyproxy/go-control-plane/envoy/config/trace/v3"
+ v38 "github.com/envoyproxy/go-control-plane/envoy/extensions/transport_sockets/tls/v3"
+ v35 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The events are fired in this order: KILL, MULTIKILL, MEGAMISS, MISS.
+// Within an event type, actions execute in the order they are configured.
+// For KILL/MULTIKILL there is a default PANIC that will run after the
+// registered actions and kills the process if it wasn't already killed.
+// It might be useful to specify several debug actions, and possibly an
+// alternate FATAL action.
+type Watchdog_WatchdogAction_WatchdogEvent int32
+
+const (
+ Watchdog_WatchdogAction_UNKNOWN Watchdog_WatchdogAction_WatchdogEvent = 0
+ Watchdog_WatchdogAction_KILL Watchdog_WatchdogAction_WatchdogEvent = 1
+ Watchdog_WatchdogAction_MULTIKILL Watchdog_WatchdogAction_WatchdogEvent = 2
+ Watchdog_WatchdogAction_MEGAMISS Watchdog_WatchdogAction_WatchdogEvent = 3
+ Watchdog_WatchdogAction_MISS Watchdog_WatchdogAction_WatchdogEvent = 4
+)
+
+// Enum value maps for Watchdog_WatchdogAction_WatchdogEvent.
+var (
+ Watchdog_WatchdogAction_WatchdogEvent_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "KILL",
+ 2: "MULTIKILL",
+ 3: "MEGAMISS",
+ 4: "MISS",
+ }
+ Watchdog_WatchdogAction_WatchdogEvent_value = map[string]int32{
+ "UNKNOWN": 0,
+ "KILL": 1,
+ "MULTIKILL": 2,
+ "MEGAMISS": 3,
+ "MISS": 4,
+ }
+)
+
+func (x Watchdog_WatchdogAction_WatchdogEvent) Enum() *Watchdog_WatchdogAction_WatchdogEvent {
+ p := new(Watchdog_WatchdogAction_WatchdogEvent)
+ *p = x
+ return p
+}
+
+func (x Watchdog_WatchdogAction_WatchdogEvent) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Watchdog_WatchdogAction_WatchdogEvent) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[0].Descriptor()
+}
+
+func (Watchdog_WatchdogAction_WatchdogEvent) Type() protoreflect.EnumType {
+ return &file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[0]
+}
+
+func (x Watchdog_WatchdogAction_WatchdogEvent) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Watchdog_WatchdogAction_WatchdogEvent.Descriptor instead.
+func (Watchdog_WatchdogAction_WatchdogEvent) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4, 0, 0}
+}
+
+type CustomInlineHeader_InlineHeaderType int32
+
+const (
+ CustomInlineHeader_REQUEST_HEADER CustomInlineHeader_InlineHeaderType = 0
+ CustomInlineHeader_REQUEST_TRAILER CustomInlineHeader_InlineHeaderType = 1
+ CustomInlineHeader_RESPONSE_HEADER CustomInlineHeader_InlineHeaderType = 2
+ CustomInlineHeader_RESPONSE_TRAILER CustomInlineHeader_InlineHeaderType = 3
+)
+
+// Enum value maps for CustomInlineHeader_InlineHeaderType.
+var (
+ CustomInlineHeader_InlineHeaderType_name = map[int32]string{
+ 0: "REQUEST_HEADER",
+ 1: "REQUEST_TRAILER",
+ 2: "RESPONSE_HEADER",
+ 3: "RESPONSE_TRAILER",
+ }
+ CustomInlineHeader_InlineHeaderType_value = map[string]int32{
+ "REQUEST_HEADER": 0,
+ "REQUEST_TRAILER": 1,
+ "RESPONSE_HEADER": 2,
+ "RESPONSE_TRAILER": 3,
+ }
+)
+
+func (x CustomInlineHeader_InlineHeaderType) Enum() *CustomInlineHeader_InlineHeaderType {
+ p := new(CustomInlineHeader_InlineHeaderType)
+ *p = x
+ return p
+}
+
+func (x CustomInlineHeader_InlineHeaderType) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (CustomInlineHeader_InlineHeaderType) Descriptor() protoreflect.EnumDescriptor {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[1].Descriptor()
+}
+
+func (CustomInlineHeader_InlineHeaderType) Type() protoreflect.EnumType {
+ return &file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes[1]
+}
+
+func (x CustomInlineHeader_InlineHeaderType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use CustomInlineHeader_InlineHeaderType.Descriptor instead.
+func (CustomInlineHeader_InlineHeaderType) EnumDescriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{9, 0}
+}
+
+// Bootstrap :ref:`configuration overview `.
+// [#next-free-field: 42]
+type Bootstrap struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Node identity to present to the management server and for instance
+ // identification purposes (e.g. in generated headers).
+ Node *v3.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // A list of :ref:`Node ` field names
+ // that will be included in the context parameters of the effective
+ // xdstp:// URL that is sent in a discovery request when resource
+ // locators are used for LDS/CDS. Any non-string field will have its JSON
+ // encoding set as the context parameter value, with the exception of
+ // metadata, which will be flattened (see example below). The supported field
+ // names are:
+ // - "cluster"
+ // - "id"
+ // - "locality.region"
+ // - "locality.sub_zone"
+ // - "locality.zone"
+ // - "metadata"
+ // - "user_agent_build_version.metadata"
+ // - "user_agent_build_version.version"
+ // - "user_agent_name"
+ // - "user_agent_version"
+ //
+ // The node context parameters act as a base layer dictionary for the context
+ // parameters (i.e. more specific resource specific context parameters will
+ // override). Field names will be prefixed with “udpa.node.” when included in
+ // context parameters.
+ //
+ // For example, if node_context_params is “["user_agent_name", "metadata"]“,
+ // the implied context parameters might be::
+ //
+ // node.user_agent_name: "envoy"
+ // node.metadata.foo: "{\"bar\": \"baz\"}"
+ // node.metadata.some: "42"
+ // node.metadata.thing: "\"thing\""
+ //
+ // [#not-implemented-hide:]
+ NodeContextParams []string `protobuf:"bytes,26,rep,name=node_context_params,json=nodeContextParams,proto3" json:"node_context_params,omitempty"`
+ // Statically specified resources.
+ StaticResources *Bootstrap_StaticResources `protobuf:"bytes,2,opt,name=static_resources,json=staticResources,proto3" json:"static_resources,omitempty"`
+ // xDS configuration sources.
+ DynamicResources *Bootstrap_DynamicResources `protobuf:"bytes,3,opt,name=dynamic_resources,json=dynamicResources,proto3" json:"dynamic_resources,omitempty"`
+ // Configuration for the cluster manager which owns all upstream clusters
+ // within the server.
+ ClusterManager *ClusterManager `protobuf:"bytes,4,opt,name=cluster_manager,json=clusterManager,proto3" json:"cluster_manager,omitempty"`
+ // Health discovery service config option.
+ // (:ref:`core.ApiConfigSource `)
+ HdsConfig *v3.ApiConfigSource `protobuf:"bytes,14,opt,name=hds_config,json=hdsConfig,proto3" json:"hds_config,omitempty"`
+ // Optional file system path to search for startup flag files.
+ FlagsPath string `protobuf:"bytes,5,opt,name=flags_path,json=flagsPath,proto3" json:"flags_path,omitempty"`
+ // Optional set of stats sinks.
+ StatsSinks []*v31.StatsSink `protobuf:"bytes,6,rep,name=stats_sinks,json=statsSinks,proto3" json:"stats_sinks,omitempty"`
+ // Options to control behaviors of deferred creation compatible stats.
+ DeferredStatOptions *Bootstrap_DeferredStatOptions `protobuf:"bytes,39,opt,name=deferred_stat_options,json=deferredStatOptions,proto3" json:"deferred_stat_options,omitempty"`
+ // Configuration for internal processing of stats.
+ StatsConfig *v31.StatsConfig `protobuf:"bytes,13,opt,name=stats_config,json=statsConfig,proto3" json:"stats_config,omitempty"`
+ // Optional duration between flushes to configured stats sinks. For
+ // performance reasons Envoy latches counters and only flushes counters and
+ // gauges at a periodic interval. If not specified the default is 5000ms (5
+ // seconds). Only one of “stats_flush_interval“ or “stats_flush_on_admin“
+ // can be set.
+ // Duration must be at least 1ms and at most 5 min.
+ StatsFlushInterval *durationpb.Duration `protobuf:"bytes,7,opt,name=stats_flush_interval,json=statsFlushInterval,proto3" json:"stats_flush_interval,omitempty"`
+ // Types that are assignable to StatsFlush:
+ //
+ // *Bootstrap_StatsFlushOnAdmin
+ StatsFlush isBootstrap_StatsFlush `protobuf_oneof:"stats_flush"`
+ // Optional watchdog configuration.
+ // This is for a single watchdog configuration for the entire system.
+ // Deprecated in favor of “watchdogs“ which has finer granularity.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ Watchdog *Watchdog `protobuf:"bytes,8,opt,name=watchdog,proto3" json:"watchdog,omitempty"`
+ // Optional watchdogs configuration.
+ // This is used for specifying different watchdogs for the different subsystems.
+ // [#extension-category: envoy.guarddog_actions]
+ Watchdogs *Watchdogs `protobuf:"bytes,27,opt,name=watchdogs,proto3" json:"watchdogs,omitempty"`
+ // Configuration for an external tracing provider.
+ //
+ // .. attention::
+ //
+ // This field has been deprecated in favor of :ref:`HttpConnectionManager.Tracing.provider
+ // `.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ Tracing *v32.Tracing `protobuf:"bytes,9,opt,name=tracing,proto3" json:"tracing,omitempty"`
+ // Configuration for the runtime configuration provider. If not
+ // specified, a “null” provider will be used which will result in all defaults
+ // being used.
+ LayeredRuntime *LayeredRuntime `protobuf:"bytes,17,opt,name=layered_runtime,json=layeredRuntime,proto3" json:"layered_runtime,omitempty"`
+ // Configuration for the local administration HTTP server.
+ Admin *Admin `protobuf:"bytes,12,opt,name=admin,proto3" json:"admin,omitempty"`
+ // Optional overload manager configuration.
+ OverloadManager *v33.OverloadManager `protobuf:"bytes,15,opt,name=overload_manager,json=overloadManager,proto3" json:"overload_manager,omitempty"`
+ // Enable :ref:`stats for event dispatcher `, defaults to false.
+ // Note that this records a value for each iteration of the event loop on every thread. This
+ // should normally be minimal overhead, but when using
+ // :ref:`statsd `, it will send each observed value
+ // over the wire individually because the statsd protocol doesn't have any way to represent a
+ // histogram summary. Be aware that this can be a very large volume of data.
+ EnableDispatcherStats bool `protobuf:"varint,16,opt,name=enable_dispatcher_stats,json=enableDispatcherStats,proto3" json:"enable_dispatcher_stats,omitempty"`
+ // Optional string which will be used in lieu of x-envoy in prefixing headers.
+ //
+ // For example, if this string is present and set to X-Foo, then x-envoy-retry-on will be
+ // transformed into x-foo-retry-on etc.
+ //
+ // Note this applies to the headers Envoy will generate, the headers Envoy will sanitize, and the
+ // headers Envoy will trust for core code and core extensions only. Be VERY careful making
+ // changes to this string, especially in multi-layer Envoy deployments or deployments using
+ // extensions which are not upstream.
+ HeaderPrefix string `protobuf:"bytes,18,opt,name=header_prefix,json=headerPrefix,proto3" json:"header_prefix,omitempty"`
+ // Optional proxy version which will be used to set the value of :ref:`server.version statistic
+ // ` if specified. Envoy will not process this value, it will be sent as is to
+ // :ref:`stats sinks `.
+ StatsServerVersionOverride *wrapperspb.UInt64Value `protobuf:"bytes,19,opt,name=stats_server_version_override,json=statsServerVersionOverride,proto3" json:"stats_server_version_override,omitempty"`
+ // Always use TCP queries instead of UDP queries for DNS lookups.
+ // This may be overridden on a per-cluster basis in cds_config,
+ // when :ref:`dns_resolvers ` and
+ // :ref:`use_tcp_for_dns_lookups ` are
+ // specified.
+ // This field is deprecated in favor of “dns_resolution_config“
+ // which aggregates all of the DNS resolver configuration in a single message.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ UseTcpForDnsLookups bool `protobuf:"varint,20,opt,name=use_tcp_for_dns_lookups,json=useTcpForDnsLookups,proto3" json:"use_tcp_for_dns_lookups,omitempty"`
+ // DNS resolution configuration which includes the underlying dns resolver addresses and options.
+ // This may be overridden on a per-cluster basis in cds_config, when
+ // :ref:`dns_resolution_config `
+ // is specified.
+ // This field is deprecated in favor of
+ // :ref:`typed_dns_resolver_config `.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ DnsResolutionConfig *v3.DnsResolutionConfig `protobuf:"bytes,30,opt,name=dns_resolution_config,json=dnsResolutionConfig,proto3" json:"dns_resolution_config,omitempty"`
+ // DNS resolver type configuration extension. This extension can be used to configure c-ares, apple,
+ // or any other DNS resolver types and the related parameters.
+ // For example, an object of
+ // :ref:`CaresDnsResolverConfig `
+ // can be packed into this “typed_dns_resolver_config“. This configuration replaces the
+ // :ref:`dns_resolution_config `
+ // configuration.
+ // During the transition period when both “dns_resolution_config“ and “typed_dns_resolver_config“ exists,
+ // when “typed_dns_resolver_config“ is in place, Envoy will use it and ignore “dns_resolution_config“.
+ // When “typed_dns_resolver_config“ is missing, the default behavior is in place.
+ // [#extension-category: envoy.network.dns_resolver]
+ TypedDnsResolverConfig *v3.TypedExtensionConfig `protobuf:"bytes,31,opt,name=typed_dns_resolver_config,json=typedDnsResolverConfig,proto3" json:"typed_dns_resolver_config,omitempty"`
+ // Specifies optional bootstrap extensions to be instantiated at startup time.
+ // Each item contains extension specific configuration.
+ // [#extension-category: envoy.bootstrap]
+ BootstrapExtensions []*v3.TypedExtensionConfig `protobuf:"bytes,21,rep,name=bootstrap_extensions,json=bootstrapExtensions,proto3" json:"bootstrap_extensions,omitempty"`
+ // Specifies optional extensions instantiated at startup time and
+ // invoked during crash time on the request that caused the crash.
+ FatalActions []*FatalAction `protobuf:"bytes,28,rep,name=fatal_actions,json=fatalActions,proto3" json:"fatal_actions,omitempty"`
+ // Configuration sources that will participate in
+ // xdstp:// URL authority resolution. The algorithm is as
+ // follows:
+ // 1. The authority field is taken from the xdstp:// URL, call
+ // this “resource_authority“.
+ // 2. “resource_authority“ is compared against the authorities in any peer
+ // “ConfigSource“. The peer “ConfigSource“ is the configuration source
+ // message which would have been used unconditionally for resolution
+ // with opaque resource names. If there is a match with an authority, the
+ // peer “ConfigSource“ message is used.
+ // 3. “resource_authority“ is compared sequentially with the authorities in
+ // each configuration source in “config_sources“. The first “ConfigSource“
+ // to match wins.
+ // 4. As a fallback, if no configuration source matches, then
+ // “default_config_source“ is used.
+ // 5. If “default_config_source“ is not specified, resolution fails.
+ //
+ // [#not-implemented-hide:]
+ ConfigSources []*v3.ConfigSource `protobuf:"bytes,22,rep,name=config_sources,json=configSources,proto3" json:"config_sources,omitempty"`
+ // Default configuration source for xdstp:// URLs if all
+ // other resolution fails.
+ // [#not-implemented-hide:]
+ DefaultConfigSource *v3.ConfigSource `protobuf:"bytes,23,opt,name=default_config_source,json=defaultConfigSource,proto3" json:"default_config_source,omitempty"`
+ // Optional overriding of default socket interface. The value must be the name of one of the
+ // socket interface factories initialized through a bootstrap extension
+ DefaultSocketInterface string `protobuf:"bytes,24,opt,name=default_socket_interface,json=defaultSocketInterface,proto3" json:"default_socket_interface,omitempty"`
+ // Global map of CertificateProvider instances. These instances are referred to by name in the
+ // :ref:`CommonTlsContext.CertificateProviderInstance.instance_name
+ // `
+ // field.
+ // [#not-implemented-hide:]
+ CertificateProviderInstances map[string]*v3.TypedExtensionConfig `protobuf:"bytes,25,rep,name=certificate_provider_instances,json=certificateProviderInstances,proto3" json:"certificate_provider_instances,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // Specifies a set of headers that need to be registered as inline header. This configuration
+ // allows users to customize the inline headers on-demand at Envoy startup without modifying
+ // Envoy's source code.
+ //
+ // Note that the 'set-cookie' header cannot be registered as inline header.
+ InlineHeaders []*CustomInlineHeader `protobuf:"bytes,32,rep,name=inline_headers,json=inlineHeaders,proto3" json:"inline_headers,omitempty"`
+ // Optional path to a file with performance tracing data created by "Perfetto" SDK in binary
+ // ProtoBuf format. The default value is "envoy.pftrace".
+ PerfTracingFilePath string `protobuf:"bytes,33,opt,name=perf_tracing_file_path,json=perfTracingFilePath,proto3" json:"perf_tracing_file_path,omitempty"`
+ // Optional overriding of default regex engine.
+ // If the value is not specified, Google RE2 will be used by default.
+ // [#extension-category: envoy.regex_engines]
+ DefaultRegexEngine *v3.TypedExtensionConfig `protobuf:"bytes,34,opt,name=default_regex_engine,json=defaultRegexEngine,proto3" json:"default_regex_engine,omitempty"`
+ // Optional XdsResourcesDelegate configuration, which allows plugging custom logic into both
+ // fetch and load events during xDS processing.
+ // If a value is not specified, no XdsResourcesDelegate will be used.
+ // TODO(abeyad): Add public-facing documentation.
+ // [#not-implemented-hide:]
+ XdsDelegateExtension *v3.TypedExtensionConfig `protobuf:"bytes,35,opt,name=xds_delegate_extension,json=xdsDelegateExtension,proto3" json:"xds_delegate_extension,omitempty"`
+ // Optional XdsConfigTracker configuration, which allows tracking xDS responses in external components,
+ // e.g., external tracer or monitor. It provides the process point when receive, ingest, or fail to
+ // process xDS resources and messages. If a value is not specified, no XdsConfigTracker will be used.
+ //
+ // .. note::
+ //
+ // There are no in-repo extensions currently, and the :repo:`XdsConfigTracker `
+ // interface should be implemented before using.
+ // See :repo:`xds_config_tracker_integration_test `
+ // for an example usage of the interface.
+ XdsConfigTrackerExtension *v3.TypedExtensionConfig `protobuf:"bytes,36,opt,name=xds_config_tracker_extension,json=xdsConfigTrackerExtension,proto3" json:"xds_config_tracker_extension,omitempty"`
+ // [#not-implemented-hide:]
+ // This controls the type of listener manager configured for Envoy. Currently
+ // Envoy only supports ListenerManager for this field and Envoy Mobile
+ // supports ApiListenerManager.
+ ListenerManager *v3.TypedExtensionConfig `protobuf:"bytes,37,opt,name=listener_manager,json=listenerManager,proto3" json:"listener_manager,omitempty"`
+ // Optional application log configuration.
+ ApplicationLogConfig *Bootstrap_ApplicationLogConfig `protobuf:"bytes,38,opt,name=application_log_config,json=applicationLogConfig,proto3" json:"application_log_config,omitempty"`
+ // Optional gRPC async manager config.
+ GrpcAsyncClientManagerConfig *Bootstrap_GrpcAsyncClientManagerConfig `protobuf:"bytes,40,opt,name=grpc_async_client_manager_config,json=grpcAsyncClientManagerConfig,proto3" json:"grpc_async_client_manager_config,omitempty"`
+ // Optional configuration for memory allocation manager.
+ // Memory releasing is only supported for `tcmalloc allocator `_.
+ MemoryAllocatorManager *MemoryAllocatorManager `protobuf:"bytes,41,opt,name=memory_allocator_manager,json=memoryAllocatorManager,proto3" json:"memory_allocator_manager,omitempty"`
+}
+
+func (x *Bootstrap) Reset() {
+ *x = Bootstrap{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap) ProtoMessage() {}
+
+func (x *Bootstrap) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap.ProtoReflect.Descriptor instead.
+func (*Bootstrap) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Bootstrap) GetNode() *v3.Node {
+ if x != nil {
+ return x.Node
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetNodeContextParams() []string {
+ if x != nil {
+ return x.NodeContextParams
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetStaticResources() *Bootstrap_StaticResources {
+ if x != nil {
+ return x.StaticResources
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetDynamicResources() *Bootstrap_DynamicResources {
+ if x != nil {
+ return x.DynamicResources
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetClusterManager() *ClusterManager {
+ if x != nil {
+ return x.ClusterManager
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetHdsConfig() *v3.ApiConfigSource {
+ if x != nil {
+ return x.HdsConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetFlagsPath() string {
+ if x != nil {
+ return x.FlagsPath
+ }
+ return ""
+}
+
+func (x *Bootstrap) GetStatsSinks() []*v31.StatsSink {
+ if x != nil {
+ return x.StatsSinks
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetDeferredStatOptions() *Bootstrap_DeferredStatOptions {
+ if x != nil {
+ return x.DeferredStatOptions
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetStatsConfig() *v31.StatsConfig {
+ if x != nil {
+ return x.StatsConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetStatsFlushInterval() *durationpb.Duration {
+ if x != nil {
+ return x.StatsFlushInterval
+ }
+ return nil
+}
+
+func (m *Bootstrap) GetStatsFlush() isBootstrap_StatsFlush {
+ if m != nil {
+ return m.StatsFlush
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetStatsFlushOnAdmin() bool {
+ if x, ok := x.GetStatsFlush().(*Bootstrap_StatsFlushOnAdmin); ok {
+ return x.StatsFlushOnAdmin
+ }
+ return false
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Bootstrap) GetWatchdog() *Watchdog {
+ if x != nil {
+ return x.Watchdog
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetWatchdogs() *Watchdogs {
+ if x != nil {
+ return x.Watchdogs
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Bootstrap) GetTracing() *v32.Tracing {
+ if x != nil {
+ return x.Tracing
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetLayeredRuntime() *LayeredRuntime {
+ if x != nil {
+ return x.LayeredRuntime
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetAdmin() *Admin {
+ if x != nil {
+ return x.Admin
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetOverloadManager() *v33.OverloadManager {
+ if x != nil {
+ return x.OverloadManager
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetEnableDispatcherStats() bool {
+ if x != nil {
+ return x.EnableDispatcherStats
+ }
+ return false
+}
+
+func (x *Bootstrap) GetHeaderPrefix() string {
+ if x != nil {
+ return x.HeaderPrefix
+ }
+ return ""
+}
+
+func (x *Bootstrap) GetStatsServerVersionOverride() *wrapperspb.UInt64Value {
+ if x != nil {
+ return x.StatsServerVersionOverride
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Bootstrap) GetUseTcpForDnsLookups() bool {
+ if x != nil {
+ return x.UseTcpForDnsLookups
+ }
+ return false
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Bootstrap) GetDnsResolutionConfig() *v3.DnsResolutionConfig {
+ if x != nil {
+ return x.DnsResolutionConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetTypedDnsResolverConfig() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.TypedDnsResolverConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetBootstrapExtensions() []*v3.TypedExtensionConfig {
+ if x != nil {
+ return x.BootstrapExtensions
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetFatalActions() []*FatalAction {
+ if x != nil {
+ return x.FatalActions
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetConfigSources() []*v3.ConfigSource {
+ if x != nil {
+ return x.ConfigSources
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetDefaultConfigSource() *v3.ConfigSource {
+ if x != nil {
+ return x.DefaultConfigSource
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetDefaultSocketInterface() string {
+ if x != nil {
+ return x.DefaultSocketInterface
+ }
+ return ""
+}
+
+func (x *Bootstrap) GetCertificateProviderInstances() map[string]*v3.TypedExtensionConfig {
+ if x != nil {
+ return x.CertificateProviderInstances
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetInlineHeaders() []*CustomInlineHeader {
+ if x != nil {
+ return x.InlineHeaders
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetPerfTracingFilePath() string {
+ if x != nil {
+ return x.PerfTracingFilePath
+ }
+ return ""
+}
+
+func (x *Bootstrap) GetDefaultRegexEngine() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.DefaultRegexEngine
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetXdsDelegateExtension() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.XdsDelegateExtension
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetXdsConfigTrackerExtension() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.XdsConfigTrackerExtension
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetListenerManager() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.ListenerManager
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetApplicationLogConfig() *Bootstrap_ApplicationLogConfig {
+ if x != nil {
+ return x.ApplicationLogConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetGrpcAsyncClientManagerConfig() *Bootstrap_GrpcAsyncClientManagerConfig {
+ if x != nil {
+ return x.GrpcAsyncClientManagerConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap) GetMemoryAllocatorManager() *MemoryAllocatorManager {
+ if x != nil {
+ return x.MemoryAllocatorManager
+ }
+ return nil
+}
+
+type isBootstrap_StatsFlush interface {
+ isBootstrap_StatsFlush()
+}
+
+type Bootstrap_StatsFlushOnAdmin struct {
+ // Flush stats to sinks only when queried for on the admin interface. If set,
+ // a flush timer is not created. Only one of “stats_flush_on_admin“ or
+ // “stats_flush_interval“ can be set.
+ StatsFlushOnAdmin bool `protobuf:"varint,29,opt,name=stats_flush_on_admin,json=statsFlushOnAdmin,proto3,oneof"`
+}
+
+func (*Bootstrap_StatsFlushOnAdmin) isBootstrap_StatsFlush() {}
+
+// Administration interface :ref:`operations documentation
+// `.
+// [#next-free-field: 7]
+type Admin struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Configuration for :ref:`access logs `
+ // emitted by the administration server.
+ AccessLog []*v34.AccessLog `protobuf:"bytes,5,rep,name=access_log,json=accessLog,proto3" json:"access_log,omitempty"`
+ // The path to write the access log for the administration server. If no
+ // access log is desired specify ‘/dev/null’. This is only required if
+ // :ref:`address ` is set.
+ // Deprecated in favor of “access_log“ which offers more options.
+ //
+ // Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+ AccessLogPath string `protobuf:"bytes,1,opt,name=access_log_path,json=accessLogPath,proto3" json:"access_log_path,omitempty"`
+ // The cpu profiler output path for the administration server. If no profile
+ // path is specified, the default is ‘/var/log/envoy/envoy.prof’.
+ ProfilePath string `protobuf:"bytes,2,opt,name=profile_path,json=profilePath,proto3" json:"profile_path,omitempty"`
+ // The TCP address that the administration server will listen on.
+ // If not specified, Envoy will not start an administration server.
+ Address *v3.Address `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"`
+ // Additional socket options that may not be present in Envoy source code or
+ // precompiled binaries.
+ SocketOptions []*v3.SocketOption `protobuf:"bytes,4,rep,name=socket_options,json=socketOptions,proto3" json:"socket_options,omitempty"`
+ // Indicates whether :ref:`global_downstream_max_connections `
+ // should apply to the admin interface or not.
+ IgnoreGlobalConnLimit bool `protobuf:"varint,6,opt,name=ignore_global_conn_limit,json=ignoreGlobalConnLimit,proto3" json:"ignore_global_conn_limit,omitempty"`
+}
+
+func (x *Admin) Reset() {
+ *x = Admin{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Admin) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Admin) ProtoMessage() {}
+
+func (x *Admin) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Admin.ProtoReflect.Descriptor instead.
+func (*Admin) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Admin) GetAccessLog() []*v34.AccessLog {
+ if x != nil {
+ return x.AccessLog
+ }
+ return nil
+}
+
+// Deprecated: Marked as deprecated in envoy/config/bootstrap/v3/bootstrap.proto.
+func (x *Admin) GetAccessLogPath() string {
+ if x != nil {
+ return x.AccessLogPath
+ }
+ return ""
+}
+
+func (x *Admin) GetProfilePath() string {
+ if x != nil {
+ return x.ProfilePath
+ }
+ return ""
+}
+
+func (x *Admin) GetAddress() *v3.Address {
+ if x != nil {
+ return x.Address
+ }
+ return nil
+}
+
+func (x *Admin) GetSocketOptions() []*v3.SocketOption {
+ if x != nil {
+ return x.SocketOptions
+ }
+ return nil
+}
+
+func (x *Admin) GetIgnoreGlobalConnLimit() bool {
+ if x != nil {
+ return x.IgnoreGlobalConnLimit
+ }
+ return false
+}
+
+// Cluster manager :ref:`architecture overview `.
+// [#next-free-field: 6]
+type ClusterManager struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Name of the local cluster (i.e., the cluster that owns the Envoy running
+ // this configuration). In order to enable :ref:`zone aware routing
+ // ` this option must be set.
+ // If “local_cluster_name“ is defined then :ref:`clusters
+ // ` must be defined in the :ref:`Bootstrap
+ // static cluster resources
+ // `. This is unrelated to
+ // the :option:`--service-cluster` option which does not `affect zone aware
+ // routing `_.
+ LocalClusterName string `protobuf:"bytes,1,opt,name=local_cluster_name,json=localClusterName,proto3" json:"local_cluster_name,omitempty"`
+ // Optional global configuration for outlier detection.
+ OutlierDetection *ClusterManager_OutlierDetection `protobuf:"bytes,2,opt,name=outlier_detection,json=outlierDetection,proto3" json:"outlier_detection,omitempty"`
+ // Optional configuration used to bind newly established upstream connections.
+ // This may be overridden on a per-cluster basis by upstream_bind_config in the cds_config.
+ UpstreamBindConfig *v3.BindConfig `protobuf:"bytes,3,opt,name=upstream_bind_config,json=upstreamBindConfig,proto3" json:"upstream_bind_config,omitempty"`
+ // A management server endpoint to stream load stats to via
+ // “StreamLoadStats“. This must have :ref:`api_type
+ // ` :ref:`GRPC
+ // `.
+ LoadStatsConfig *v3.ApiConfigSource `protobuf:"bytes,4,opt,name=load_stats_config,json=loadStatsConfig,proto3" json:"load_stats_config,omitempty"`
+ // Whether the ClusterManager will create clusters on the worker threads
+ // inline during requests. This will save memory and CPU cycles in cases where
+ // there are lots of inactive clusters and > 1 worker thread.
+ EnableDeferredClusterCreation bool `protobuf:"varint,5,opt,name=enable_deferred_cluster_creation,json=enableDeferredClusterCreation,proto3" json:"enable_deferred_cluster_creation,omitempty"`
+}
+
+func (x *ClusterManager) Reset() {
+ *x = ClusterManager{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClusterManager) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClusterManager) ProtoMessage() {}
+
+func (x *ClusterManager) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClusterManager.ProtoReflect.Descriptor instead.
+func (*ClusterManager) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ClusterManager) GetLocalClusterName() string {
+ if x != nil {
+ return x.LocalClusterName
+ }
+ return ""
+}
+
+func (x *ClusterManager) GetOutlierDetection() *ClusterManager_OutlierDetection {
+ if x != nil {
+ return x.OutlierDetection
+ }
+ return nil
+}
+
+func (x *ClusterManager) GetUpstreamBindConfig() *v3.BindConfig {
+ if x != nil {
+ return x.UpstreamBindConfig
+ }
+ return nil
+}
+
+func (x *ClusterManager) GetLoadStatsConfig() *v3.ApiConfigSource {
+ if x != nil {
+ return x.LoadStatsConfig
+ }
+ return nil
+}
+
+func (x *ClusterManager) GetEnableDeferredClusterCreation() bool {
+ if x != nil {
+ return x.EnableDeferredClusterCreation
+ }
+ return false
+}
+
+// Allows you to specify different watchdog configs for different subsystems.
+// This allows finer tuned policies for the watchdog. If a subsystem is omitted
+// the default values for that system will be used.
+type Watchdogs struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Watchdog for the main thread.
+ MainThreadWatchdog *Watchdog `protobuf:"bytes,1,opt,name=main_thread_watchdog,json=mainThreadWatchdog,proto3" json:"main_thread_watchdog,omitempty"`
+ // Watchdog for the worker threads.
+ WorkerWatchdog *Watchdog `protobuf:"bytes,2,opt,name=worker_watchdog,json=workerWatchdog,proto3" json:"worker_watchdog,omitempty"`
+}
+
+func (x *Watchdogs) Reset() {
+ *x = Watchdogs{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Watchdogs) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Watchdogs) ProtoMessage() {}
+
+func (x *Watchdogs) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Watchdogs.ProtoReflect.Descriptor instead.
+func (*Watchdogs) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Watchdogs) GetMainThreadWatchdog() *Watchdog {
+ if x != nil {
+ return x.MainThreadWatchdog
+ }
+ return nil
+}
+
+func (x *Watchdogs) GetWorkerWatchdog() *Watchdog {
+ if x != nil {
+ return x.WorkerWatchdog
+ }
+ return nil
+}
+
+// Envoy process watchdog configuration. When configured, this monitors for
+// nonresponsive threads and kills the process after the configured thresholds.
+// See the :ref:`watchdog documentation ` for more information.
+// [#next-free-field: 8]
+type Watchdog struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Register actions that will fire on given WatchDog events.
+ // See “WatchDogAction“ for priority of events.
+ Actions []*Watchdog_WatchdogAction `protobuf:"bytes,7,rep,name=actions,proto3" json:"actions,omitempty"`
+ // The duration after which Envoy counts a nonresponsive thread in the
+ // “watchdog_miss“ statistic. If not specified the default is 200ms.
+ MissTimeout *durationpb.Duration `protobuf:"bytes,1,opt,name=miss_timeout,json=missTimeout,proto3" json:"miss_timeout,omitempty"`
+ // The duration after which Envoy counts a nonresponsive thread in the
+ // “watchdog_mega_miss“ statistic. If not specified the default is
+ // 1000ms.
+ MegamissTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=megamiss_timeout,json=megamissTimeout,proto3" json:"megamiss_timeout,omitempty"`
+ // If a watched thread has been nonresponsive for this duration, assume a
+ // programming error and kill the entire Envoy process. Set to 0 to disable
+ // kill behavior. If not specified the default is 0 (disabled).
+ KillTimeout *durationpb.Duration `protobuf:"bytes,3,opt,name=kill_timeout,json=killTimeout,proto3" json:"kill_timeout,omitempty"`
+ // Defines the maximum jitter used to adjust the “kill_timeout“ if “kill_timeout“ is
+ // enabled. Enabling this feature would help to reduce risk of synchronized
+ // watchdog kill events across proxies due to external triggers. Set to 0 to
+ // disable. If not specified the default is 0 (disabled).
+ MaxKillTimeoutJitter *durationpb.Duration `protobuf:"bytes,6,opt,name=max_kill_timeout_jitter,json=maxKillTimeoutJitter,proto3" json:"max_kill_timeout_jitter,omitempty"`
+ // If “max(2, ceil(registered_threads * Fraction(*multikill_threshold*)))“
+ // threads have been nonresponsive for at least this duration kill the entire
+ // Envoy process. Set to 0 to disable this behavior. If not specified the
+ // default is 0 (disabled).
+ MultikillTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=multikill_timeout,json=multikillTimeout,proto3" json:"multikill_timeout,omitempty"`
+ // Sets the threshold for “multikill_timeout“ in terms of the percentage of
+ // nonresponsive threads required for the “multikill_timeout“.
+ // If not specified the default is 0.
+ MultikillThreshold *v35.Percent `protobuf:"bytes,5,opt,name=multikill_threshold,json=multikillThreshold,proto3" json:"multikill_threshold,omitempty"`
+}
+
+func (x *Watchdog) Reset() {
+ *x = Watchdog{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Watchdog) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Watchdog) ProtoMessage() {}
+
+func (x *Watchdog) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Watchdog.ProtoReflect.Descriptor instead.
+func (*Watchdog) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Watchdog) GetActions() []*Watchdog_WatchdogAction {
+ if x != nil {
+ return x.Actions
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMissTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.MissTimeout
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMegamissTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.MegamissTimeout
+ }
+ return nil
+}
+
+func (x *Watchdog) GetKillTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.KillTimeout
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMaxKillTimeoutJitter() *durationpb.Duration {
+ if x != nil {
+ return x.MaxKillTimeoutJitter
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMultikillTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.MultikillTimeout
+ }
+ return nil
+}
+
+func (x *Watchdog) GetMultikillThreshold() *v35.Percent {
+ if x != nil {
+ return x.MultikillThreshold
+ }
+ return nil
+}
+
+// Fatal actions to run while crashing. Actions can be safe (meaning they are
+// async-signal safe) or unsafe. We run all safe actions before we run unsafe actions.
+// If using an unsafe action that could get stuck or deadlock, it important to
+// have an out of band system to terminate the process.
+//
+// The interface for the extension is “Envoy::Server::Configuration::FatalAction“.
+// “FatalAction“ extensions live in the “envoy.extensions.fatal_actions“ API
+// namespace.
+type FatalAction struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Extension specific configuration for the action. It's expected to conform
+ // to the “Envoy::Server::Configuration::FatalAction“ interface.
+ Config *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+}
+
+func (x *FatalAction) Reset() {
+ *x = FatalAction{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FatalAction) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FatalAction) ProtoMessage() {}
+
+func (x *FatalAction) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FatalAction.ProtoReflect.Descriptor instead.
+func (*FatalAction) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *FatalAction) GetConfig() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+// Runtime :ref:`configuration overview ` (deprecated).
+type Runtime struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The implementation assumes that the file system tree is accessed via a
+ // symbolic link. An atomic link swap is used when a new tree should be
+ // switched to. This parameter specifies the path to the symbolic link. Envoy
+ // will watch the location for changes and reload the file system tree when
+ // they happen. If this parameter is not set, there will be no disk based
+ // runtime.
+ SymlinkRoot string `protobuf:"bytes,1,opt,name=symlink_root,json=symlinkRoot,proto3" json:"symlink_root,omitempty"`
+ // Specifies the subdirectory to load within the root directory. This is
+ // useful if multiple systems share the same delivery mechanism. Envoy
+ // configuration elements can be contained in a dedicated subdirectory.
+ Subdirectory string `protobuf:"bytes,2,opt,name=subdirectory,proto3" json:"subdirectory,omitempty"`
+ // Specifies an optional subdirectory to load within the root directory. If
+ // specified and the directory exists, configuration values within this
+ // directory will override those found in the primary subdirectory. This is
+ // useful when Envoy is deployed across many different types of servers.
+ // Sometimes it is useful to have a per service cluster directory for runtime
+ // configuration. See below for exactly how the override directory is used.
+ OverrideSubdirectory string `protobuf:"bytes,3,opt,name=override_subdirectory,json=overrideSubdirectory,proto3" json:"override_subdirectory,omitempty"`
+ // Static base runtime. This will be :ref:`overridden
+ // ` by other runtime layers, e.g.
+ // disk or admin. This follows the :ref:`runtime protobuf JSON representation
+ // encoding `.
+ Base *structpb.Struct `protobuf:"bytes,4,opt,name=base,proto3" json:"base,omitempty"`
+}
+
+func (x *Runtime) Reset() {
+ *x = Runtime{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Runtime) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Runtime) ProtoMessage() {}
+
+func (x *Runtime) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Runtime.ProtoReflect.Descriptor instead.
+func (*Runtime) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Runtime) GetSymlinkRoot() string {
+ if x != nil {
+ return x.SymlinkRoot
+ }
+ return ""
+}
+
+func (x *Runtime) GetSubdirectory() string {
+ if x != nil {
+ return x.Subdirectory
+ }
+ return ""
+}
+
+func (x *Runtime) GetOverrideSubdirectory() string {
+ if x != nil {
+ return x.OverrideSubdirectory
+ }
+ return ""
+}
+
+func (x *Runtime) GetBase() *structpb.Struct {
+ if x != nil {
+ return x.Base
+ }
+ return nil
+}
+
+// [#next-free-field: 6]
+type RuntimeLayer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Descriptive name for the runtime layer. This is only used for the runtime
+ // :http:get:`/runtime` output.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Types that are assignable to LayerSpecifier:
+ //
+ // *RuntimeLayer_StaticLayer
+ // *RuntimeLayer_DiskLayer_
+ // *RuntimeLayer_AdminLayer_
+ // *RuntimeLayer_RtdsLayer_
+ LayerSpecifier isRuntimeLayer_LayerSpecifier `protobuf_oneof:"layer_specifier"`
+}
+
+func (x *RuntimeLayer) Reset() {
+ *x = RuntimeLayer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeLayer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeLayer) ProtoMessage() {}
+
+func (x *RuntimeLayer) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeLayer.ProtoReflect.Descriptor instead.
+func (*RuntimeLayer) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *RuntimeLayer) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (m *RuntimeLayer) GetLayerSpecifier() isRuntimeLayer_LayerSpecifier {
+ if m != nil {
+ return m.LayerSpecifier
+ }
+ return nil
+}
+
+func (x *RuntimeLayer) GetStaticLayer() *structpb.Struct {
+ if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_StaticLayer); ok {
+ return x.StaticLayer
+ }
+ return nil
+}
+
+func (x *RuntimeLayer) GetDiskLayer() *RuntimeLayer_DiskLayer {
+ if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_DiskLayer_); ok {
+ return x.DiskLayer
+ }
+ return nil
+}
+
+func (x *RuntimeLayer) GetAdminLayer() *RuntimeLayer_AdminLayer {
+ if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_AdminLayer_); ok {
+ return x.AdminLayer
+ }
+ return nil
+}
+
+func (x *RuntimeLayer) GetRtdsLayer() *RuntimeLayer_RtdsLayer {
+ if x, ok := x.GetLayerSpecifier().(*RuntimeLayer_RtdsLayer_); ok {
+ return x.RtdsLayer
+ }
+ return nil
+}
+
+type isRuntimeLayer_LayerSpecifier interface {
+ isRuntimeLayer_LayerSpecifier()
+}
+
+type RuntimeLayer_StaticLayer struct {
+ // :ref:`Static runtime ` layer.
+ // This follows the :ref:`runtime protobuf JSON representation encoding
+ // `. Unlike static xDS resources, this static
+ // layer is overridable by later layers in the runtime virtual filesystem.
+ StaticLayer *structpb.Struct `protobuf:"bytes,2,opt,name=static_layer,json=staticLayer,proto3,oneof"`
+}
+
+type RuntimeLayer_DiskLayer_ struct {
+ DiskLayer *RuntimeLayer_DiskLayer `protobuf:"bytes,3,opt,name=disk_layer,json=diskLayer,proto3,oneof"`
+}
+
+type RuntimeLayer_AdminLayer_ struct {
+ AdminLayer *RuntimeLayer_AdminLayer `protobuf:"bytes,4,opt,name=admin_layer,json=adminLayer,proto3,oneof"`
+}
+
+type RuntimeLayer_RtdsLayer_ struct {
+ RtdsLayer *RuntimeLayer_RtdsLayer `protobuf:"bytes,5,opt,name=rtds_layer,json=rtdsLayer,proto3,oneof"`
+}
+
+func (*RuntimeLayer_StaticLayer) isRuntimeLayer_LayerSpecifier() {}
+
+func (*RuntimeLayer_DiskLayer_) isRuntimeLayer_LayerSpecifier() {}
+
+func (*RuntimeLayer_AdminLayer_) isRuntimeLayer_LayerSpecifier() {}
+
+func (*RuntimeLayer_RtdsLayer_) isRuntimeLayer_LayerSpecifier() {}
+
+// Runtime :ref:`configuration overview `.
+type LayeredRuntime struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The :ref:`layers ` of the runtime. This is ordered
+ // such that later layers in the list overlay earlier entries.
+ Layers []*RuntimeLayer `protobuf:"bytes,1,rep,name=layers,proto3" json:"layers,omitempty"`
+}
+
+func (x *LayeredRuntime) Reset() {
+ *x = LayeredRuntime{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LayeredRuntime) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LayeredRuntime) ProtoMessage() {}
+
+func (x *LayeredRuntime) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LayeredRuntime.ProtoReflect.Descriptor instead.
+func (*LayeredRuntime) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *LayeredRuntime) GetLayers() []*RuntimeLayer {
+ if x != nil {
+ return x.Layers
+ }
+ return nil
+}
+
+// Used to specify the header that needs to be registered as an inline header.
+//
+// If request or response contain multiple headers with the same name and the header
+// name is registered as an inline header. Then multiple headers will be folded
+// into one, and multiple header values will be concatenated by a suitable delimiter.
+// The delimiter is generally a comma.
+//
+// For example, if 'foo' is registered as an inline header, and the headers contains
+// the following two headers:
+//
+// .. code-block:: text
+//
+// foo: bar
+// foo: eep
+//
+// Then they will eventually be folded into:
+//
+// .. code-block:: text
+//
+// foo: bar, eep
+//
+// Inline headers provide O(1) search performance, but each inline header imposes
+// an additional memory overhead on all instances of the corresponding type of
+// HeaderMap or TrailerMap.
+type CustomInlineHeader struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the header that is expected to be set as the inline header.
+ InlineHeaderName string `protobuf:"bytes,1,opt,name=inline_header_name,json=inlineHeaderName,proto3" json:"inline_header_name,omitempty"`
+ // The type of the header that is expected to be set as the inline header.
+ InlineHeaderType CustomInlineHeader_InlineHeaderType `protobuf:"varint,2,opt,name=inline_header_type,json=inlineHeaderType,proto3,enum=envoy.config.bootstrap.v3.CustomInlineHeader_InlineHeaderType" json:"inline_header_type,omitempty"`
+}
+
+func (x *CustomInlineHeader) Reset() {
+ *x = CustomInlineHeader{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CustomInlineHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CustomInlineHeader) ProtoMessage() {}
+
+func (x *CustomInlineHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CustomInlineHeader.ProtoReflect.Descriptor instead.
+func (*CustomInlineHeader) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *CustomInlineHeader) GetInlineHeaderName() string {
+ if x != nil {
+ return x.InlineHeaderName
+ }
+ return ""
+}
+
+func (x *CustomInlineHeader) GetInlineHeaderType() CustomInlineHeader_InlineHeaderType {
+ if x != nil {
+ return x.InlineHeaderType
+ }
+ return CustomInlineHeader_REQUEST_HEADER
+}
+
+type MemoryAllocatorManager struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Configures tcmalloc to perform background release of free memory in amount of bytes per “memory_release_interval“ interval.
+ // If equals to “0“, no memory release will occur. Defaults to “0“.
+ BytesToRelease uint64 `protobuf:"varint,1,opt,name=bytes_to_release,json=bytesToRelease,proto3" json:"bytes_to_release,omitempty"`
+ // Interval in milliseconds for memory releasing. If specified, during every
+ // interval Envoy will try to release “bytes_to_release“ of free memory back to operating system for reuse.
+ // Defaults to 1000 milliseconds.
+ MemoryReleaseInterval *durationpb.Duration `protobuf:"bytes,2,opt,name=memory_release_interval,json=memoryReleaseInterval,proto3" json:"memory_release_interval,omitempty"`
+}
+
+func (x *MemoryAllocatorManager) Reset() {
+ *x = MemoryAllocatorManager{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MemoryAllocatorManager) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MemoryAllocatorManager) ProtoMessage() {}
+
+func (x *MemoryAllocatorManager) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MemoryAllocatorManager.ProtoReflect.Descriptor instead.
+func (*MemoryAllocatorManager) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *MemoryAllocatorManager) GetBytesToRelease() uint64 {
+ if x != nil {
+ return x.BytesToRelease
+ }
+ return 0
+}
+
+func (x *MemoryAllocatorManager) GetMemoryReleaseInterval() *durationpb.Duration {
+ if x != nil {
+ return x.MemoryReleaseInterval
+ }
+ return nil
+}
+
+type Bootstrap_StaticResources struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Static :ref:`Listeners `. These listeners are
+ // available regardless of LDS configuration.
+ Listeners []*v36.Listener `protobuf:"bytes,1,rep,name=listeners,proto3" json:"listeners,omitempty"`
+ // If a network based configuration source is specified for :ref:`cds_config
+ // `, it's necessary
+ // to have some initial cluster definitions available to allow Envoy to know
+ // how to speak to the management server. These cluster definitions may not
+ // use :ref:`EDS ` (i.e. they should be static
+ // IP or DNS-based).
+ Clusters []*v37.Cluster `protobuf:"bytes,2,rep,name=clusters,proto3" json:"clusters,omitempty"`
+ // These static secrets can be used by :ref:`SdsSecretConfig
+ // `
+ Secrets []*v38.Secret `protobuf:"bytes,3,rep,name=secrets,proto3" json:"secrets,omitempty"`
+}
+
+func (x *Bootstrap_StaticResources) Reset() {
+ *x = Bootstrap_StaticResources{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_StaticResources) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_StaticResources) ProtoMessage() {}
+
+func (x *Bootstrap_StaticResources) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_StaticResources.ProtoReflect.Descriptor instead.
+func (*Bootstrap_StaticResources) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *Bootstrap_StaticResources) GetListeners() []*v36.Listener {
+ if x != nil {
+ return x.Listeners
+ }
+ return nil
+}
+
+func (x *Bootstrap_StaticResources) GetClusters() []*v37.Cluster {
+ if x != nil {
+ return x.Clusters
+ }
+ return nil
+}
+
+func (x *Bootstrap_StaticResources) GetSecrets() []*v38.Secret {
+ if x != nil {
+ return x.Secrets
+ }
+ return nil
+}
+
+// [#next-free-field: 7]
+type Bootstrap_DynamicResources struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // All :ref:`Listeners ` are provided by a single
+ // :ref:`LDS ` configuration source.
+ LdsConfig *v3.ConfigSource `protobuf:"bytes,1,opt,name=lds_config,json=ldsConfig,proto3" json:"lds_config,omitempty"`
+ // xdstp:// resource locator for listener collection.
+ // [#not-implemented-hide:]
+ LdsResourcesLocator string `protobuf:"bytes,5,opt,name=lds_resources_locator,json=ldsResourcesLocator,proto3" json:"lds_resources_locator,omitempty"`
+ // All post-bootstrap :ref:`Cluster ` definitions are
+ // provided by a single :ref:`CDS `
+ // configuration source.
+ CdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=cds_config,json=cdsConfig,proto3" json:"cds_config,omitempty"`
+ // xdstp:// resource locator for cluster collection.
+ // [#not-implemented-hide:]
+ CdsResourcesLocator string `protobuf:"bytes,6,opt,name=cds_resources_locator,json=cdsResourcesLocator,proto3" json:"cds_resources_locator,omitempty"`
+ // A single :ref:`ADS ` source may be optionally
+ // specified. This must have :ref:`api_type
+ // ` :ref:`GRPC
+ // `. Only
+ // :ref:`ConfigSources ` that have
+ // the :ref:`ads ` field set will be
+ // streamed on the ADS channel.
+ AdsConfig *v3.ApiConfigSource `protobuf:"bytes,3,opt,name=ads_config,json=adsConfig,proto3" json:"ads_config,omitempty"`
+}
+
+func (x *Bootstrap_DynamicResources) Reset() {
+ *x = Bootstrap_DynamicResources{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_DynamicResources) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_DynamicResources) ProtoMessage() {}
+
+func (x *Bootstrap_DynamicResources) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_DynamicResources.ProtoReflect.Descriptor instead.
+func (*Bootstrap_DynamicResources) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 1}
+}
+
+func (x *Bootstrap_DynamicResources) GetLdsConfig() *v3.ConfigSource {
+ if x != nil {
+ return x.LdsConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap_DynamicResources) GetLdsResourcesLocator() string {
+ if x != nil {
+ return x.LdsResourcesLocator
+ }
+ return ""
+}
+
+func (x *Bootstrap_DynamicResources) GetCdsConfig() *v3.ConfigSource {
+ if x != nil {
+ return x.CdsConfig
+ }
+ return nil
+}
+
+func (x *Bootstrap_DynamicResources) GetCdsResourcesLocator() string {
+ if x != nil {
+ return x.CdsResourcesLocator
+ }
+ return ""
+}
+
+func (x *Bootstrap_DynamicResources) GetAdsConfig() *v3.ApiConfigSource {
+ if x != nil {
+ return x.AdsConfig
+ }
+ return nil
+}
+
+type Bootstrap_ApplicationLogConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional field to set the application logs format. If this field is set, it will override
+ // the default log format. Setting both this field and :option:`--log-format` command line
+ // option is not allowed, and will cause a bootstrap error.
+ LogFormat *Bootstrap_ApplicationLogConfig_LogFormat `protobuf:"bytes,1,opt,name=log_format,json=logFormat,proto3" json:"log_format,omitempty"`
+}
+
+func (x *Bootstrap_ApplicationLogConfig) Reset() {
+ *x = Bootstrap_ApplicationLogConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_ApplicationLogConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_ApplicationLogConfig) ProtoMessage() {}
+
+func (x *Bootstrap_ApplicationLogConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_ApplicationLogConfig.ProtoReflect.Descriptor instead.
+func (*Bootstrap_ApplicationLogConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 2}
+}
+
+func (x *Bootstrap_ApplicationLogConfig) GetLogFormat() *Bootstrap_ApplicationLogConfig_LogFormat {
+ if x != nil {
+ return x.LogFormat
+ }
+ return nil
+}
+
+type Bootstrap_DeferredStatOptions struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // When the flag is enabled, Envoy will lazily initialize a subset of the stats (see below).
+ // This will save memory and CPU cycles when creating the objects that own these stats, if those
+ // stats are never referenced throughout the lifetime of the process. However, it will incur additional
+ // memory overhead for these objects, and a small increase of CPU usage when a at least one of the stats
+ // is updated for the first time.
+ // Groups of stats that will be lazily initialized:
+ // - Cluster traffic stats: a subgroup of the :ref:`cluster statistics `
+ // that are used when requests are routed to the cluster.
+ EnableDeferredCreationStats bool `protobuf:"varint,1,opt,name=enable_deferred_creation_stats,json=enableDeferredCreationStats,proto3" json:"enable_deferred_creation_stats,omitempty"`
+}
+
+func (x *Bootstrap_DeferredStatOptions) Reset() {
+ *x = Bootstrap_DeferredStatOptions{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_DeferredStatOptions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_DeferredStatOptions) ProtoMessage() {}
+
+func (x *Bootstrap_DeferredStatOptions) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_DeferredStatOptions.ProtoReflect.Descriptor instead.
+func (*Bootstrap_DeferredStatOptions) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 3}
+}
+
+func (x *Bootstrap_DeferredStatOptions) GetEnableDeferredCreationStats() bool {
+ if x != nil {
+ return x.EnableDeferredCreationStats
+ }
+ return false
+}
+
+type Bootstrap_GrpcAsyncClientManagerConfig struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Optional field to set the expiration time for the cached gRPC client object.
+ // The minimal value is 5s and the default is 50s.
+ MaxCachedEntryIdleDuration *durationpb.Duration `protobuf:"bytes,1,opt,name=max_cached_entry_idle_duration,json=maxCachedEntryIdleDuration,proto3" json:"max_cached_entry_idle_duration,omitempty"`
+}
+
+func (x *Bootstrap_GrpcAsyncClientManagerConfig) Reset() {
+ *x = Bootstrap_GrpcAsyncClientManagerConfig{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_GrpcAsyncClientManagerConfig) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_GrpcAsyncClientManagerConfig) ProtoMessage() {}
+
+func (x *Bootstrap_GrpcAsyncClientManagerConfig) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_GrpcAsyncClientManagerConfig.ProtoReflect.Descriptor instead.
+func (*Bootstrap_GrpcAsyncClientManagerConfig) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 4}
+}
+
+func (x *Bootstrap_GrpcAsyncClientManagerConfig) GetMaxCachedEntryIdleDuration() *durationpb.Duration {
+ if x != nil {
+ return x.MaxCachedEntryIdleDuration
+ }
+ return nil
+}
+
+type Bootstrap_ApplicationLogConfig_LogFormat struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Types that are assignable to LogFormat:
+ //
+ // *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat
+ // *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat
+ LogFormat isBootstrap_ApplicationLogConfig_LogFormat_LogFormat `protobuf_oneof:"log_format"`
+}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) Reset() {
+ *x = Bootstrap_ApplicationLogConfig_LogFormat{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bootstrap_ApplicationLogConfig_LogFormat) ProtoMessage() {}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bootstrap_ApplicationLogConfig_LogFormat.ProtoReflect.Descriptor instead.
+func (*Bootstrap_ApplicationLogConfig_LogFormat) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{0, 2, 0}
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) GetLogFormat() isBootstrap_ApplicationLogConfig_LogFormat_LogFormat {
+ if m != nil {
+ return m.LogFormat
+ }
+ return nil
+}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetJsonFormat() *structpb.Struct {
+ if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok {
+ return x.JsonFormat
+ }
+ return nil
+}
+
+func (x *Bootstrap_ApplicationLogConfig_LogFormat) GetTextFormat() string {
+ if x, ok := x.GetLogFormat().(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok {
+ return x.TextFormat
+ }
+ return ""
+}
+
+type isBootstrap_ApplicationLogConfig_LogFormat_LogFormat interface {
+ isBootstrap_ApplicationLogConfig_LogFormat_LogFormat()
+}
+
+type Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat struct {
+ // Flush application logs in JSON format. The configured JSON struct can
+ // support all the format flags specified in the :option:`--log-format`
+ // command line options section, except for the “%v“ and “%_“ flags.
+ JsonFormat *structpb.Struct `protobuf:"bytes,1,opt,name=json_format,json=jsonFormat,proto3,oneof"`
+}
+
+type Bootstrap_ApplicationLogConfig_LogFormat_TextFormat struct {
+ // Flush application log in a format defined by a string. The text format
+ // can support all the format flags specified in the :option:`--log-format`
+ // command line option section.
+ TextFormat string `protobuf:"bytes,2,opt,name=text_format,json=textFormat,proto3,oneof"`
+}
+
+func (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() {
+}
+
+func (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) isBootstrap_ApplicationLogConfig_LogFormat_LogFormat() {
+}
+
+type ClusterManager_OutlierDetection struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies the path to the outlier event log.
+ EventLogPath string `protobuf:"bytes,1,opt,name=event_log_path,json=eventLogPath,proto3" json:"event_log_path,omitempty"`
+ // [#not-implemented-hide:]
+ // The gRPC service for the outlier detection event service.
+ // If empty, outlier detection events won't be sent to a remote endpoint.
+ EventService *v3.EventServiceConfig `protobuf:"bytes,2,opt,name=event_service,json=eventService,proto3" json:"event_service,omitempty"`
+}
+
+func (x *ClusterManager_OutlierDetection) Reset() {
+ *x = ClusterManager_OutlierDetection{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ClusterManager_OutlierDetection) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClusterManager_OutlierDetection) ProtoMessage() {}
+
+func (x *ClusterManager_OutlierDetection) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClusterManager_OutlierDetection.ProtoReflect.Descriptor instead.
+func (*ClusterManager_OutlierDetection) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{2, 0}
+}
+
+func (x *ClusterManager_OutlierDetection) GetEventLogPath() string {
+ if x != nil {
+ return x.EventLogPath
+ }
+ return ""
+}
+
+func (x *ClusterManager_OutlierDetection) GetEventService() *v3.EventServiceConfig {
+ if x != nil {
+ return x.EventService
+ }
+ return nil
+}
+
+type Watchdog_WatchdogAction struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Extension specific configuration for the action.
+ Config *v3.TypedExtensionConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+ Event Watchdog_WatchdogAction_WatchdogEvent `protobuf:"varint,2,opt,name=event,proto3,enum=envoy.config.bootstrap.v3.Watchdog_WatchdogAction_WatchdogEvent" json:"event,omitempty"`
+}
+
+func (x *Watchdog_WatchdogAction) Reset() {
+ *x = Watchdog_WatchdogAction{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Watchdog_WatchdogAction) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Watchdog_WatchdogAction) ProtoMessage() {}
+
+func (x *Watchdog_WatchdogAction) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Watchdog_WatchdogAction.ProtoReflect.Descriptor instead.
+func (*Watchdog_WatchdogAction) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *Watchdog_WatchdogAction) GetConfig() *v3.TypedExtensionConfig {
+ if x != nil {
+ return x.Config
+ }
+ return nil
+}
+
+func (x *Watchdog_WatchdogAction) GetEvent() Watchdog_WatchdogAction_WatchdogEvent {
+ if x != nil {
+ return x.Event
+ }
+ return Watchdog_WatchdogAction_UNKNOWN
+}
+
+// :ref:`Disk runtime ` layer.
+type RuntimeLayer_DiskLayer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The implementation assumes that the file system tree is accessed via a
+ // symbolic link. An atomic link swap is used when a new tree should be
+ // switched to. This parameter specifies the path to the symbolic link.
+ // Envoy will watch the location for changes and reload the file system tree
+ // when they happen. See documentation on runtime :ref:`atomicity
+ // ` for further details on how reloads are
+ // treated.
+ SymlinkRoot string `protobuf:"bytes,1,opt,name=symlink_root,json=symlinkRoot,proto3" json:"symlink_root,omitempty"`
+ // Specifies the subdirectory to load within the root directory. This is
+ // useful if multiple systems share the same delivery mechanism. Envoy
+ // configuration elements can be contained in a dedicated subdirectory.
+ Subdirectory string `protobuf:"bytes,3,opt,name=subdirectory,proto3" json:"subdirectory,omitempty"`
+ // :ref:`Append ` the
+ // service cluster to the path under symlink root.
+ AppendServiceCluster bool `protobuf:"varint,2,opt,name=append_service_cluster,json=appendServiceCluster,proto3" json:"append_service_cluster,omitempty"`
+}
+
+func (x *RuntimeLayer_DiskLayer) Reset() {
+ *x = RuntimeLayer_DiskLayer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeLayer_DiskLayer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeLayer_DiskLayer) ProtoMessage() {}
+
+func (x *RuntimeLayer_DiskLayer) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeLayer_DiskLayer.ProtoReflect.Descriptor instead.
+func (*RuntimeLayer_DiskLayer) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 0}
+}
+
+func (x *RuntimeLayer_DiskLayer) GetSymlinkRoot() string {
+ if x != nil {
+ return x.SymlinkRoot
+ }
+ return ""
+}
+
+func (x *RuntimeLayer_DiskLayer) GetSubdirectory() string {
+ if x != nil {
+ return x.Subdirectory
+ }
+ return ""
+}
+
+func (x *RuntimeLayer_DiskLayer) GetAppendServiceCluster() bool {
+ if x != nil {
+ return x.AppendServiceCluster
+ }
+ return false
+}
+
+// :ref:`Admin console runtime ` layer.
+type RuntimeLayer_AdminLayer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+}
+
+func (x *RuntimeLayer_AdminLayer) Reset() {
+ *x = RuntimeLayer_AdminLayer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeLayer_AdminLayer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeLayer_AdminLayer) ProtoMessage() {}
+
+func (x *RuntimeLayer_AdminLayer) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeLayer_AdminLayer.ProtoReflect.Descriptor instead.
+func (*RuntimeLayer_AdminLayer) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 1}
+}
+
+// :ref:`Runtime Discovery Service (RTDS) ` layer.
+type RuntimeLayer_RtdsLayer struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Resource to subscribe to at “rtds_config“ for the RTDS layer.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // RTDS configuration source.
+ RtdsConfig *v3.ConfigSource `protobuf:"bytes,2,opt,name=rtds_config,json=rtdsConfig,proto3" json:"rtds_config,omitempty"`
+}
+
+func (x *RuntimeLayer_RtdsLayer) Reset() {
+ *x = RuntimeLayer_RtdsLayer{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RuntimeLayer_RtdsLayer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RuntimeLayer_RtdsLayer) ProtoMessage() {}
+
+func (x *RuntimeLayer_RtdsLayer) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RuntimeLayer_RtdsLayer.ProtoReflect.Descriptor instead.
+func (*RuntimeLayer_RtdsLayer) Descriptor() ([]byte, []int) {
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP(), []int{7, 2}
+}
+
+func (x *RuntimeLayer_RtdsLayer) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *RuntimeLayer_RtdsLayer) GetRtdsConfig() *v3.ConfigSource {
+ if x != nil {
+ return x.RtdsConfig
+ }
+ return nil
+}
+
+var File_envoy_config_bootstrap_v3_bootstrap_proto protoreflect.FileDescriptor
+
+var file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = []byte{
+ 0x0a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62,
+ 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x2f, 0x62, 0x6f, 0x6f, 0x74,
+ 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x1a, 0x29, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2f, 0x76,
+ 0x33, 0x2f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x1a, 0x25, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f,
+ 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x22, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x61,
+ 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f,
+ 0x76, 0x33, 0x2f, 0x62, 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65,
+ 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65, 0x76,
+ 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x65,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x1a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6c, 0x69, 0x73, 0x74,
+ 0x65, 0x6e, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2f, 0x76, 0x33, 0x2f,
+ 0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f,
+ 0x61, 0x64, 0x2f, 0x76, 0x33, 0x2f, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x68, 0x74, 0x74, 0x70,
+ 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x36, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f,
+ 0x74, 0x72, 0x61, 0x6e, 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74,
+ 0x73, 0x2f, 0x74, 0x6c, 0x73, 0x2f, 0x76, 0x33, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x74, 0x79, 0x70,
+ 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x1a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69,
+ 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x22, 0x99, 0x24, 0x0a, 0x09, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x12,
+ 0x2e, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72,
+ 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x12,
+ 0x2e, 0x0a, 0x13, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x5f,
+ 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x6e, 0x6f,
+ 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12,
+ 0x5f, 0x0a, 0x10, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e,
+ 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52,
+ 0x0f, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x12, 0x62, 0x0a, 0x11, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73,
+ 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61,
+ 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x73, 0x52, 0x10, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x0f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f,
+ 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x0e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x68, 0x64, 0x73, 0x5f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x52, 0x09, 0x68, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x50, 0x61, 0x74, 0x68, 0x12, 0x43, 0x0a,
+ 0x0b, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x74, 0x61,
+ 0x74, 0x73, 0x53, 0x69, 0x6e, 0x6b, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x69, 0x6e,
+ 0x6b, 0x73, 0x12, 0x6c, 0x0a, 0x15, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x73,
+ 0x74, 0x61, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x27, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x38, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64,
+ 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x64, 0x65, 0x66,
+ 0x65, 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x12, 0x47, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x76, 0x33,
+ 0x2e, 0x53, 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0b, 0x73, 0x74,
+ 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x74, 0x61,
+ 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x42, 0x24, 0xfa, 0x42, 0x0e, 0xaa, 0x01, 0x0b, 0x1a, 0x03, 0x08, 0xac, 0x02, 0x32,
+ 0x04, 0x10, 0xc0, 0x84, 0x3d, 0xf2, 0x98, 0xfe, 0x8f, 0x05, 0x0d, 0x12, 0x0b, 0x73, 0x74, 0x61,
+ 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x12, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46,
+ 0x6c, 0x75, 0x73, 0x68, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x3a, 0x0a, 0x14,
+ 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x5f, 0x6f, 0x6e, 0x5f, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x6a,
+ 0x02, 0x08, 0x01, 0x48, 0x00, 0x52, 0x11, 0x73, 0x74, 0x61, 0x74, 0x73, 0x46, 0x6c, 0x75, 0x73,
+ 0x68, 0x4f, 0x6e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x4c, 0x0a, 0x08, 0x77, 0x61, 0x74, 0x63,
+ 0x68, 0x64, 0x6f, 0x67, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x42,
+ 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x08, 0x77, 0x61,
+ 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x42, 0x0a, 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64,
+ 0x6f, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x52,
+ 0x09, 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x45, 0x0a, 0x07, 0x74, 0x72,
+ 0x61, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86,
+ 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e,
+ 0x67, 0x12, 0x52, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x75, 0x6e,
+ 0x74, 0x69, 0x6d, 0x65, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x52, 0x0e, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x36, 0x0a, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x18, 0x0c,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33,
+ 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x52, 0x05, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x5f, 0x0a,
+ 0x10, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x2e,
+ 0x76, 0x33, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x42, 0x09, 0x8a, 0x93, 0xb7, 0x2a, 0x04, 0x08, 0x01, 0x10, 0x01, 0x52, 0x0f, 0x6f,
+ 0x76, 0x65, 0x72, 0x6c, 0x6f, 0x61, 0x64, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x36,
+ 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63,
+ 0x68, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x69, 0x73, 0x70, 0x61, 0x74, 0x63, 0x68, 0x65,
+ 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x12, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x68,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x5f, 0x0a, 0x1d, 0x73,
+ 0x74, 0x61, 0x74, 0x73, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x18, 0x13, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x52, 0x1a, 0x73, 0x74, 0x61, 0x74, 0x73, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x56, 0x65, 0x72,
+ 0x73, 0x69, 0x6f, 0x6e, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x12, 0x41, 0x0a, 0x17,
+ 0x75, 0x73, 0x65, 0x5f, 0x74, 0x63, 0x70, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x64, 0x6e, 0x73, 0x5f,
+ 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x0b, 0x92,
+ 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x75, 0x73, 0x65, 0x54,
+ 0x63, 0x70, 0x46, 0x6f, 0x72, 0x44, 0x6e, 0x73, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x73, 0x12,
+ 0x6a, 0x0a, 0x15, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04,
+ 0x03, 0x33, 0x2e, 0x30, 0x18, 0x01, 0x52, 0x13, 0x64, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c,
+ 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x65, 0x0a, 0x19, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x5f, 0x64, 0x6e, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65,
+ 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f,
+ 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x16, 0x74, 0x79, 0x70, 0x65,
+ 0x64, 0x44, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x12, 0x5d, 0x0a, 0x14, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x5f,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x13, 0x62, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x12, 0x4b, 0x0a, 0x0d, 0x66, 0x61, 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x1c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61,
+ 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x0c, 0x66, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49,
+ 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x56, 0x0a, 0x15, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x13, 0x64, 0x65,
+ 0x66, 0x61, 0x75, 0x6c, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x73, 0x6f, 0x63,
+ 0x6b, 0x65, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x18, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x16, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x53, 0x6f, 0x63, 0x6b,
+ 0x65, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x8c, 0x01, 0x0a, 0x1e,
+ 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x76,
+ 0x69, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x19,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x46, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33,
+ 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x43, 0x65, 0x72, 0x74, 0x69,
+ 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e,
+ 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x1c, 0x63, 0x65,
+ 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65,
+ 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x54, 0x0a, 0x0e, 0x69, 0x6e,
+ 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x20, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x43,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x52, 0x0d, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x12, 0x33, 0x0a, 0x16, 0x70, 0x65, 0x72, 0x66, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67,
+ 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x21, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x13, 0x70, 0x65, 0x72, 0x66, 0x54, 0x72, 0x61, 0x63, 0x69, 0x6e, 0x67, 0x46, 0x69, 0x6c,
+ 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x5c, 0x0a, 0x14, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
+ 0x5f, 0x72, 0x65, 0x67, 0x65, 0x78, 0x5f, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x18, 0x22, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x12, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x67, 0x65, 0x78, 0x45, 0x6e, 0x67,
+ 0x69, 0x6e, 0x65, 0x12, 0x60, 0x0a, 0x16, 0x78, 0x64, 0x73, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x67,
+ 0x61, 0x74, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x23, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64,
+ 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x14, 0x78, 0x64, 0x73, 0x44, 0x65, 0x6c, 0x65, 0x67, 0x61, 0x74, 0x65, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x6b, 0x0a, 0x1c, 0x78, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e,
+ 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
+ 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x19, 0x78, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x55, 0x0a, 0x10, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x5f, 0x6d,
+ 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x25, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65,
+ 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0f, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x6f, 0x0a, 0x16, 0x61, 0x70, 0x70,
+ 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e,
+ 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x20, 0x67,
+ 0x72, 0x70, 0x63, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
+ 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18,
+ 0x28, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x47, 0x72, 0x70, 0x63,
+ 0x41, 0x73, 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x1c, 0x67, 0x72, 0x70, 0x63, 0x41, 0x73,
+ 0x79, 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x6b, 0x0a, 0x18, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79,
+ 0x5f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x6d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x72, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61,
+ 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x52, 0x16, 0x6d, 0x65, 0x6d,
+ 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e, 0x61,
+ 0x67, 0x65, 0x72, 0x1a, 0x9a, 0x02, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x65,
+ 0x6e, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x52, 0x09,
+ 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x08, 0x63, 0x6c, 0x75,
+ 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74,
+ 0x65, 0x72, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x08, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x72, 0x65,
+ 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x74, 0x72, 0x61, 0x6e,
+ 0x73, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x2e, 0x74, 0x6c,
+ 0x73, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x07, 0x73, 0x65, 0x63,
+ 0x72, 0x65, 0x74, 0x73, 0x3a, 0x3a, 0x9a, 0xc5, 0x88, 0x1e, 0x35, 0x0a, 0x33, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x1a, 0x89, 0x03, 0x0a, 0x10, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x41, 0x0a, 0x0a, 0x6c, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x6c,
+ 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x6c, 0x64, 0x73, 0x5f,
+ 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f,
+ 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6c, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x41, 0x0a, 0x0a,
+ 0x63, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x52, 0x09, 0x63, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12,
+ 0x32, 0x0a, 0x15, 0x63, 0x64, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73,
+ 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13,
+ 0x63, 0x64, 0x73, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4c, 0x6f, 0x63, 0x61,
+ 0x74, 0x6f, 0x72, 0x12, 0x44, 0x0a, 0x0a, 0x61, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41,
+ 0x70, 0x69, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x09,
+ 0x61, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x3b, 0x9a, 0xc5, 0x88, 0x1e, 0x36,
+ 0x0a, 0x34, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62,
+ 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x6f, 0x6f, 0x74,
+ 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x1a, 0xf9, 0x01, 0x0a,
+ 0x14, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x62, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e,
+ 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x43, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x09,
+ 0x6c, 0x6f, 0x67, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x1a, 0x7d, 0x0a, 0x09, 0x4c, 0x6f, 0x67,
+ 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x3a, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74,
+ 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x6a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d,
+ 0x61, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x74, 0x65, 0x78, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x74, 0x65, 0x78, 0x74, 0x46,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x11, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x12, 0x03, 0xf8, 0x42, 0x01, 0x1a, 0x5a, 0x0a, 0x13, 0x44, 0x65, 0x66, 0x65,
+ 0x72, 0x72, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x43, 0x0a, 0x1e, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72,
+ 0x65, 0x64, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1b, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44,
+ 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x74, 0x61, 0x74, 0x73, 0x1a, 0x89, 0x01, 0x0a, 0x1c, 0x47, 0x72, 0x70, 0x63, 0x41, 0x73, 0x79,
+ 0x6e, 0x63, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x43,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x69, 0x0a, 0x1e, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x61, 0x63,
+ 0x68, 0x65, 0x64, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x64,
+ 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x0a, 0xfa, 0x42, 0x07, 0xaa, 0x01, 0x04,
+ 0x32, 0x02, 0x08, 0x05, 0x52, 0x1a, 0x6d, 0x61, 0x78, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x49, 0x64, 0x6c, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x1a, 0x7b, 0x0a, 0x21, 0x43, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x50,
+ 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79,
+ 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x2a, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x25, 0x0a, 0x23, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e,
+ 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x74, 0x61,
+ 0x74, 0x73, 0x5f, 0x66, 0x6c, 0x75, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x4a, 0x04,
+ 0x08, 0x0b, 0x10, 0x0c, 0x52, 0x07, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0x89, 0x03,
+ 0x0a, 0x05, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0x43, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x61, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x6c, 0x6f, 0x67, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f,
+ 0x67, 0x52, 0x09, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x33, 0x0a, 0x0f,
+ 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0b, 0x92, 0xc7, 0x86, 0xd8, 0x04, 0x03, 0x33, 0x2e, 0x30,
+ 0x18, 0x01, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74,
+ 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65,
+ 0x50, 0x61, 0x74, 0x68, 0x12, 0x37, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x64, 0x64,
+ 0x72, 0x65, 0x73, 0x73, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x49, 0x0a,
+ 0x0e, 0x73, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x6f, 0x63,
+ 0x6b, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x6f, 0x63, 0x6b, 0x65,
+ 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x18, 0x69, 0x67, 0x6e, 0x6f,
+ 0x72, 0x65, 0x5f, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x5f, 0x6c,
+ 0x69, 0x6d, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, 0x69, 0x67, 0x6e, 0x6f,
+ 0x72, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x43, 0x6f, 0x6e, 0x6e, 0x4c, 0x69, 0x6d, 0x69,
+ 0x74, 0x3a, 0x26, 0x9a, 0xc5, 0x88, 0x1e, 0x21, 0x0a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x22, 0x94, 0x05, 0x0a, 0x0e, 0x43, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x12, 0x2c, 0x0a, 0x12,
+ 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x43,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x67, 0x0a, 0x11, 0x6f, 0x75,
+ 0x74, 0x6c, 0x69, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x2e, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x10, 0x6f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x52, 0x0a, 0x14, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f,
+ 0x62, 0x69, 0x6e, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x42, 0x69, 0x6e, 0x64, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x52, 0x12, 0x75, 0x70, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, 0x69, 0x6e,
+ 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x51, 0x0a, 0x11, 0x6c, 0x6f, 0x61, 0x64, 0x5f,
+ 0x73, 0x74, 0x61, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x41, 0x70, 0x69, 0x43, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x0f, 0x6c, 0x6f, 0x61, 0x64, 0x53,
+ 0x74, 0x61, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x47, 0x0a, 0x20, 0x65, 0x6e,
+ 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x65, 0x66, 0x65, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x1d, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x66, 0x65,
+ 0x72, 0x72, 0x65, 0x64, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x1a, 0xc9, 0x01, 0x0a, 0x10, 0x4f, 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44,
+ 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0e, 0x65, 0x76, 0x65, 0x6e,
+ 0x74, 0x5f, 0x6c, 0x6f, 0x67, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x4c, 0x6f, 0x67, 0x50, 0x61, 0x74, 0x68, 0x12, 0x4d,
+ 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52,
+ 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x40, 0x9a,
+ 0xc5, 0x88, 0x1e, 0x3b, 0x0a, 0x39, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e,
+ 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x4f,
+ 0x75, 0x74, 0x6c, 0x69, 0x65, 0x72, 0x44, 0x65, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x3a,
+ 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72,
+ 0x22, 0xb0, 0x01, 0x0a, 0x09, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x73, 0x12, 0x55,
+ 0x0a, 0x14, 0x6d, 0x61, 0x69, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x77, 0x61,
+ 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74,
+ 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f,
+ 0x67, 0x52, 0x12, 0x6d, 0x61, 0x69, 0x6e, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x57, 0x61, 0x74,
+ 0x63, 0x68, 0x64, 0x6f, 0x67, 0x12, 0x4c, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f,
+ 0x77, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68,
+ 0x64, 0x6f, 0x67, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x57, 0x61, 0x74, 0x63, 0x68,
+ 0x64, 0x6f, 0x67, 0x22, 0xba, 0x06, 0x0a, 0x08, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67,
+ 0x12, 0x4c, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x57, 0x61,
+ 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c,
+ 0x0a, 0x0c, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0b, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x44, 0x0a, 0x10,
+ 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x0f, 0x6d, 0x65, 0x67, 0x61, 0x6d, 0x69, 0x73, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x12, 0x3c, 0x0a, 0x0c, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f,
+ 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x12, 0x5a, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x5f, 0x6a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x08, 0xfa, 0x42,
+ 0x05, 0xaa, 0x01, 0x02, 0x32, 0x00, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4b, 0x69, 0x6c, 0x6c, 0x54,
+ 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x4a, 0x69, 0x74, 0x74, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x11,
+ 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x52, 0x10, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x69, 0x6d,
+ 0x65, 0x6f, 0x75, 0x74, 0x12, 0x47, 0x0a, 0x13, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x6b, 0x69, 0x6c,
+ 0x6c, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76,
+ 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52, 0x12, 0x6d, 0x75, 0x6c, 0x74, 0x69,
+ 0x6b, 0x69, 0x6c, 0x6c, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x1a, 0x85, 0x02,
+ 0x0a, 0x0e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x12, 0x60, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0e, 0x32, 0x40, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e,
+ 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f,
+ 0x67, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67,
+ 0x45, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x0d, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64,
+ 0x6f, 0x67, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f,
+ 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x01, 0x12, 0x0d,
+ 0x0a, 0x09, 0x4d, 0x55, 0x4c, 0x54, 0x49, 0x4b, 0x49, 0x4c, 0x4c, 0x10, 0x02, 0x12, 0x0c, 0x0a,
+ 0x08, 0x4d, 0x45, 0x47, 0x41, 0x4d, 0x49, 0x53, 0x53, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x4d,
+ 0x49, 0x53, 0x53, 0x10, 0x04, 0x3a, 0x29, 0x9a, 0xc5, 0x88, 0x1e, 0x24, 0x0a, 0x22, 0x65, 0x6e,
+ 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73,
+ 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x74, 0x63, 0x68, 0x64, 0x6f, 0x67,
+ 0x22, 0x51, 0x0a, 0x0b, 0x46, 0x61, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12,
+ 0x42, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2a, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63,
+ 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e,
+ 0x66, 0x69, 0x67, 0x22, 0xdc, 0x01, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12,
+ 0x21, 0x0a, 0x0c, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f,
+ 0x6f, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x33, 0x0a, 0x15, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69,
+ 0x64, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x53,
+ 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x2b, 0x0a, 0x04, 0x62,
+ 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75,
+ 0x63, 0x74, 0x52, 0x04, 0x62, 0x61, 0x73, 0x65, 0x3a, 0x28, 0x9a, 0xc5, 0x88, 0x1e, 0x23, 0x0a,
+ 0x21, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x22, 0xdb, 0x06, 0x0a, 0x0c, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61,
+ 0x79, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x3c, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48,
+ 0x00, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52,
+ 0x0a, 0x0a, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69,
+ 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52,
+ 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b,
+ 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x64, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79,
+ 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x61, 0x79, 0x65,
+ 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72,
+ 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0a, 0x61,
+ 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x52, 0x0a, 0x0a, 0x72, 0x74, 0x64,
+ 0x73, 0x5f, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f,
+ 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d,
+ 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72,
+ 0x48, 0x00, 0x52, 0x09, 0x72, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0xc1, 0x01,
+ 0x0a, 0x09, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x73,
+ 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x22,
+ 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f,
+ 0x72, 0x79, 0x12, 0x34, 0x0a, 0x16, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x5f, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x14, 0x61, 0x70, 0x70, 0x65, 0x6e, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a,
+ 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f,
+ 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
+ 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x4c, 0x61, 0x79, 0x65,
+ 0x72, 0x1a, 0x46, 0x0a, 0x0a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a,
+ 0x38, 0x9a, 0xc5, 0x88, 0x1e, 0x33, 0x0a, 0x31, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e, 0x41,
+ 0x64, 0x6d, 0x69, 0x6e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x1a, 0x9d, 0x01, 0x0a, 0x09, 0x52, 0x74,
+ 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x43, 0x0a, 0x0b, 0x72,
+ 0x74, 0x64, 0x73, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x52, 0x0a, 0x72, 0x74, 0x64, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x3a, 0x37, 0x9a, 0xc5, 0x88, 0x1e, 0x32, 0x0a, 0x30, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63,
+ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e,
+ 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x2e,
+ 0x52, 0x74, 0x64, 0x73, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x3a, 0x2d, 0x9a, 0xc5, 0x88, 0x1e, 0x28,
+ 0x0a, 0x26, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62,
+ 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x75, 0x6e, 0x74,
+ 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x42, 0x16, 0x0a, 0x0f, 0x6c, 0x61, 0x79, 0x65,
+ 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x03, 0xf8, 0x42, 0x01,
+ 0x22, 0x82, 0x01, 0x0a, 0x0e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75, 0x6e, 0x74,
+ 0x69, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x06, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66,
+ 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76, 0x33, 0x2e,
+ 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x52, 0x06, 0x6c, 0x61,
+ 0x79, 0x65, 0x72, 0x73, 0x3a, 0x2f, 0x9a, 0xc5, 0x88, 0x1e, 0x2a, 0x0a, 0x28, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x61, 0x79, 0x65, 0x72, 0x65, 0x64, 0x52, 0x75,
+ 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x22, 0xb1, 0x02, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d,
+ 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x3b, 0x0a, 0x12,
+ 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61,
+ 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x0d, 0xfa, 0x42, 0x0a, 0x72, 0x08, 0x10,
+ 0x01, 0xc8, 0x01, 0x00, 0xc0, 0x01, 0x01, 0x52, 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x76, 0x0a, 0x12, 0x69, 0x6e, 0x6c,
+ 0x69, 0x6e, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f,
+ 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x2e, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x54, 0x79, 0x70, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52,
+ 0x10, 0x69, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x54, 0x79, 0x70,
+ 0x65, 0x22, 0x66, 0x0a, 0x10, 0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54,
+ 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x52, 0x45, 0x51,
+ 0x55, 0x45, 0x53, 0x54, 0x5f, 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x01, 0x12, 0x13,
+ 0x0a, 0x0f, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45,
+ 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x5f,
+ 0x54, 0x52, 0x41, 0x49, 0x4c, 0x45, 0x52, 0x10, 0x03, 0x22, 0x95, 0x01, 0x0a, 0x16, 0x4d, 0x65,
+ 0x6d, 0x6f, 0x72, 0x79, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x4d, 0x61, 0x6e,
+ 0x61, 0x67, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x74, 0x6f,
+ 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e,
+ 0x62, 0x79, 0x74, 0x65, 0x73, 0x54, 0x6f, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x12, 0x51,
+ 0x0a, 0x17, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65,
+ 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
+ 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x15, 0x6d, 0x65, 0x6d, 0x6f,
+ 0x72, 0x79, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61,
+ 0x6c, 0x42, 0x91, 0x01, 0xba, 0x80, 0xc8, 0xd1, 0x06, 0x02, 0x10, 0x02, 0x0a, 0x27, 0x69, 0x6f,
+ 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72,
+ 0x61, 0x70, 0x2e, 0x76, 0x33, 0x42, 0x0e, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70,
+ 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x4c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67,
+ 0x6f, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f,
+ 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x62, 0x6f, 0x6f,
+ 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x2f, 0x76, 0x33, 0x3b, 0x62, 0x6f, 0x6f, 0x74, 0x73, 0x74,
+ 0x72, 0x61, 0x70, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescOnce sync.Once
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData = file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc
+)
+
+func file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescGZIP() []byte {
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescOnce.Do(func() {
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData)
+ })
+ return file_envoy_config_bootstrap_v3_bootstrap_proto_rawDescData
+}
+
+var file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes = make([]protoimpl.MessageInfo, 23)
+var file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = []interface{}{
+ (Watchdog_WatchdogAction_WatchdogEvent)(0), // 0: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent
+ (CustomInlineHeader_InlineHeaderType)(0), // 1: envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType
+ (*Bootstrap)(nil), // 2: envoy.config.bootstrap.v3.Bootstrap
+ (*Admin)(nil), // 3: envoy.config.bootstrap.v3.Admin
+ (*ClusterManager)(nil), // 4: envoy.config.bootstrap.v3.ClusterManager
+ (*Watchdogs)(nil), // 5: envoy.config.bootstrap.v3.Watchdogs
+ (*Watchdog)(nil), // 6: envoy.config.bootstrap.v3.Watchdog
+ (*FatalAction)(nil), // 7: envoy.config.bootstrap.v3.FatalAction
+ (*Runtime)(nil), // 8: envoy.config.bootstrap.v3.Runtime
+ (*RuntimeLayer)(nil), // 9: envoy.config.bootstrap.v3.RuntimeLayer
+ (*LayeredRuntime)(nil), // 10: envoy.config.bootstrap.v3.LayeredRuntime
+ (*CustomInlineHeader)(nil), // 11: envoy.config.bootstrap.v3.CustomInlineHeader
+ (*MemoryAllocatorManager)(nil), // 12: envoy.config.bootstrap.v3.MemoryAllocatorManager
+ (*Bootstrap_StaticResources)(nil), // 13: envoy.config.bootstrap.v3.Bootstrap.StaticResources
+ (*Bootstrap_DynamicResources)(nil), // 14: envoy.config.bootstrap.v3.Bootstrap.DynamicResources
+ (*Bootstrap_ApplicationLogConfig)(nil), // 15: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig
+ (*Bootstrap_DeferredStatOptions)(nil), // 16: envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions
+ (*Bootstrap_GrpcAsyncClientManagerConfig)(nil), // 17: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig
+ nil, // 18: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry
+ (*Bootstrap_ApplicationLogConfig_LogFormat)(nil), // 19: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat
+ (*ClusterManager_OutlierDetection)(nil), // 20: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection
+ (*Watchdog_WatchdogAction)(nil), // 21: envoy.config.bootstrap.v3.Watchdog.WatchdogAction
+ (*RuntimeLayer_DiskLayer)(nil), // 22: envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer
+ (*RuntimeLayer_AdminLayer)(nil), // 23: envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer
+ (*RuntimeLayer_RtdsLayer)(nil), // 24: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer
+ (*v3.Node)(nil), // 25: envoy.config.core.v3.Node
+ (*v3.ApiConfigSource)(nil), // 26: envoy.config.core.v3.ApiConfigSource
+ (*v31.StatsSink)(nil), // 27: envoy.config.metrics.v3.StatsSink
+ (*v31.StatsConfig)(nil), // 28: envoy.config.metrics.v3.StatsConfig
+ (*durationpb.Duration)(nil), // 29: google.protobuf.Duration
+ (*v32.Tracing)(nil), // 30: envoy.config.trace.v3.Tracing
+ (*v33.OverloadManager)(nil), // 31: envoy.config.overload.v3.OverloadManager
+ (*wrapperspb.UInt64Value)(nil), // 32: google.protobuf.UInt64Value
+ (*v3.DnsResolutionConfig)(nil), // 33: envoy.config.core.v3.DnsResolutionConfig
+ (*v3.TypedExtensionConfig)(nil), // 34: envoy.config.core.v3.TypedExtensionConfig
+ (*v3.ConfigSource)(nil), // 35: envoy.config.core.v3.ConfigSource
+ (*v34.AccessLog)(nil), // 36: envoy.config.accesslog.v3.AccessLog
+ (*v3.Address)(nil), // 37: envoy.config.core.v3.Address
+ (*v3.SocketOption)(nil), // 38: envoy.config.core.v3.SocketOption
+ (*v3.BindConfig)(nil), // 39: envoy.config.core.v3.BindConfig
+ (*v35.Percent)(nil), // 40: envoy.type.v3.Percent
+ (*structpb.Struct)(nil), // 41: google.protobuf.Struct
+ (*v36.Listener)(nil), // 42: envoy.config.listener.v3.Listener
+ (*v37.Cluster)(nil), // 43: envoy.config.cluster.v3.Cluster
+ (*v38.Secret)(nil), // 44: envoy.extensions.transport_sockets.tls.v3.Secret
+ (*v3.EventServiceConfig)(nil), // 45: envoy.config.core.v3.EventServiceConfig
+}
+var file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = []int32{
+ 25, // 0: envoy.config.bootstrap.v3.Bootstrap.node:type_name -> envoy.config.core.v3.Node
+ 13, // 1: envoy.config.bootstrap.v3.Bootstrap.static_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.StaticResources
+ 14, // 2: envoy.config.bootstrap.v3.Bootstrap.dynamic_resources:type_name -> envoy.config.bootstrap.v3.Bootstrap.DynamicResources
+ 4, // 3: envoy.config.bootstrap.v3.Bootstrap.cluster_manager:type_name -> envoy.config.bootstrap.v3.ClusterManager
+ 26, // 4: envoy.config.bootstrap.v3.Bootstrap.hds_config:type_name -> envoy.config.core.v3.ApiConfigSource
+ 27, // 5: envoy.config.bootstrap.v3.Bootstrap.stats_sinks:type_name -> envoy.config.metrics.v3.StatsSink
+ 16, // 6: envoy.config.bootstrap.v3.Bootstrap.deferred_stat_options:type_name -> envoy.config.bootstrap.v3.Bootstrap.DeferredStatOptions
+ 28, // 7: envoy.config.bootstrap.v3.Bootstrap.stats_config:type_name -> envoy.config.metrics.v3.StatsConfig
+ 29, // 8: envoy.config.bootstrap.v3.Bootstrap.stats_flush_interval:type_name -> google.protobuf.Duration
+ 6, // 9: envoy.config.bootstrap.v3.Bootstrap.watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog
+ 5, // 10: envoy.config.bootstrap.v3.Bootstrap.watchdogs:type_name -> envoy.config.bootstrap.v3.Watchdogs
+ 30, // 11: envoy.config.bootstrap.v3.Bootstrap.tracing:type_name -> envoy.config.trace.v3.Tracing
+ 10, // 12: envoy.config.bootstrap.v3.Bootstrap.layered_runtime:type_name -> envoy.config.bootstrap.v3.LayeredRuntime
+ 3, // 13: envoy.config.bootstrap.v3.Bootstrap.admin:type_name -> envoy.config.bootstrap.v3.Admin
+ 31, // 14: envoy.config.bootstrap.v3.Bootstrap.overload_manager:type_name -> envoy.config.overload.v3.OverloadManager
+ 32, // 15: envoy.config.bootstrap.v3.Bootstrap.stats_server_version_override:type_name -> google.protobuf.UInt64Value
+ 33, // 16: envoy.config.bootstrap.v3.Bootstrap.dns_resolution_config:type_name -> envoy.config.core.v3.DnsResolutionConfig
+ 34, // 17: envoy.config.bootstrap.v3.Bootstrap.typed_dns_resolver_config:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 34, // 18: envoy.config.bootstrap.v3.Bootstrap.bootstrap_extensions:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 7, // 19: envoy.config.bootstrap.v3.Bootstrap.fatal_actions:type_name -> envoy.config.bootstrap.v3.FatalAction
+ 35, // 20: envoy.config.bootstrap.v3.Bootstrap.config_sources:type_name -> envoy.config.core.v3.ConfigSource
+ 35, // 21: envoy.config.bootstrap.v3.Bootstrap.default_config_source:type_name -> envoy.config.core.v3.ConfigSource
+ 18, // 22: envoy.config.bootstrap.v3.Bootstrap.certificate_provider_instances:type_name -> envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry
+ 11, // 23: envoy.config.bootstrap.v3.Bootstrap.inline_headers:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader
+ 34, // 24: envoy.config.bootstrap.v3.Bootstrap.default_regex_engine:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 34, // 25: envoy.config.bootstrap.v3.Bootstrap.xds_delegate_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 34, // 26: envoy.config.bootstrap.v3.Bootstrap.xds_config_tracker_extension:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 34, // 27: envoy.config.bootstrap.v3.Bootstrap.listener_manager:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 15, // 28: envoy.config.bootstrap.v3.Bootstrap.application_log_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig
+ 17, // 29: envoy.config.bootstrap.v3.Bootstrap.grpc_async_client_manager_config:type_name -> envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig
+ 12, // 30: envoy.config.bootstrap.v3.Bootstrap.memory_allocator_manager:type_name -> envoy.config.bootstrap.v3.MemoryAllocatorManager
+ 36, // 31: envoy.config.bootstrap.v3.Admin.access_log:type_name -> envoy.config.accesslog.v3.AccessLog
+ 37, // 32: envoy.config.bootstrap.v3.Admin.address:type_name -> envoy.config.core.v3.Address
+ 38, // 33: envoy.config.bootstrap.v3.Admin.socket_options:type_name -> envoy.config.core.v3.SocketOption
+ 20, // 34: envoy.config.bootstrap.v3.ClusterManager.outlier_detection:type_name -> envoy.config.bootstrap.v3.ClusterManager.OutlierDetection
+ 39, // 35: envoy.config.bootstrap.v3.ClusterManager.upstream_bind_config:type_name -> envoy.config.core.v3.BindConfig
+ 26, // 36: envoy.config.bootstrap.v3.ClusterManager.load_stats_config:type_name -> envoy.config.core.v3.ApiConfigSource
+ 6, // 37: envoy.config.bootstrap.v3.Watchdogs.main_thread_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog
+ 6, // 38: envoy.config.bootstrap.v3.Watchdogs.worker_watchdog:type_name -> envoy.config.bootstrap.v3.Watchdog
+ 21, // 39: envoy.config.bootstrap.v3.Watchdog.actions:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction
+ 29, // 40: envoy.config.bootstrap.v3.Watchdog.miss_timeout:type_name -> google.protobuf.Duration
+ 29, // 41: envoy.config.bootstrap.v3.Watchdog.megamiss_timeout:type_name -> google.protobuf.Duration
+ 29, // 42: envoy.config.bootstrap.v3.Watchdog.kill_timeout:type_name -> google.protobuf.Duration
+ 29, // 43: envoy.config.bootstrap.v3.Watchdog.max_kill_timeout_jitter:type_name -> google.protobuf.Duration
+ 29, // 44: envoy.config.bootstrap.v3.Watchdog.multikill_timeout:type_name -> google.protobuf.Duration
+ 40, // 45: envoy.config.bootstrap.v3.Watchdog.multikill_threshold:type_name -> envoy.type.v3.Percent
+ 34, // 46: envoy.config.bootstrap.v3.FatalAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 41, // 47: envoy.config.bootstrap.v3.Runtime.base:type_name -> google.protobuf.Struct
+ 41, // 48: envoy.config.bootstrap.v3.RuntimeLayer.static_layer:type_name -> google.protobuf.Struct
+ 22, // 49: envoy.config.bootstrap.v3.RuntimeLayer.disk_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.DiskLayer
+ 23, // 50: envoy.config.bootstrap.v3.RuntimeLayer.admin_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.AdminLayer
+ 24, // 51: envoy.config.bootstrap.v3.RuntimeLayer.rtds_layer:type_name -> envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer
+ 9, // 52: envoy.config.bootstrap.v3.LayeredRuntime.layers:type_name -> envoy.config.bootstrap.v3.RuntimeLayer
+ 1, // 53: envoy.config.bootstrap.v3.CustomInlineHeader.inline_header_type:type_name -> envoy.config.bootstrap.v3.CustomInlineHeader.InlineHeaderType
+ 29, // 54: envoy.config.bootstrap.v3.MemoryAllocatorManager.memory_release_interval:type_name -> google.protobuf.Duration
+ 42, // 55: envoy.config.bootstrap.v3.Bootstrap.StaticResources.listeners:type_name -> envoy.config.listener.v3.Listener
+ 43, // 56: envoy.config.bootstrap.v3.Bootstrap.StaticResources.clusters:type_name -> envoy.config.cluster.v3.Cluster
+ 44, // 57: envoy.config.bootstrap.v3.Bootstrap.StaticResources.secrets:type_name -> envoy.extensions.transport_sockets.tls.v3.Secret
+ 35, // 58: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.lds_config:type_name -> envoy.config.core.v3.ConfigSource
+ 35, // 59: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.cds_config:type_name -> envoy.config.core.v3.ConfigSource
+ 26, // 60: envoy.config.bootstrap.v3.Bootstrap.DynamicResources.ads_config:type_name -> envoy.config.core.v3.ApiConfigSource
+ 19, // 61: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.log_format:type_name -> envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat
+ 29, // 62: envoy.config.bootstrap.v3.Bootstrap.GrpcAsyncClientManagerConfig.max_cached_entry_idle_duration:type_name -> google.protobuf.Duration
+ 34, // 63: envoy.config.bootstrap.v3.Bootstrap.CertificateProviderInstancesEntry.value:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 41, // 64: envoy.config.bootstrap.v3.Bootstrap.ApplicationLogConfig.LogFormat.json_format:type_name -> google.protobuf.Struct
+ 45, // 65: envoy.config.bootstrap.v3.ClusterManager.OutlierDetection.event_service:type_name -> envoy.config.core.v3.EventServiceConfig
+ 34, // 66: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.config:type_name -> envoy.config.core.v3.TypedExtensionConfig
+ 0, // 67: envoy.config.bootstrap.v3.Watchdog.WatchdogAction.event:type_name -> envoy.config.bootstrap.v3.Watchdog.WatchdogAction.WatchdogEvent
+ 35, // 68: envoy.config.bootstrap.v3.RuntimeLayer.RtdsLayer.rtds_config:type_name -> envoy.config.core.v3.ConfigSource
+ 69, // [69:69] is the sub-list for method output_type
+ 69, // [69:69] is the sub-list for method input_type
+ 69, // [69:69] is the sub-list for extension type_name
+ 69, // [69:69] is the sub-list for extension extendee
+ 0, // [0:69] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_bootstrap_v3_bootstrap_proto_init() }
+func file_envoy_config_bootstrap_v3_bootstrap_proto_init() {
+ if File_envoy_config_bootstrap_v3_bootstrap_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Admin); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClusterManager); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Watchdogs); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Watchdog); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FatalAction); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Runtime); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeLayer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LayeredRuntime); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CustomInlineHeader); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MemoryAllocatorManager); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_StaticResources); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_DynamicResources); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_ApplicationLogConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_DeferredStatOptions); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_GrpcAsyncClientManagerConfig); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bootstrap_ApplicationLogConfig_LogFormat); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ClusterManager_OutlierDetection); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Watchdog_WatchdogAction); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeLayer_DiskLayer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeLayer_AdminLayer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RuntimeLayer_RtdsLayer); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*Bootstrap_StatsFlushOnAdmin)(nil),
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[7].OneofWrappers = []interface{}{
+ (*RuntimeLayer_StaticLayer)(nil),
+ (*RuntimeLayer_DiskLayer_)(nil),
+ (*RuntimeLayer_AdminLayer_)(nil),
+ (*RuntimeLayer_RtdsLayer_)(nil),
+ }
+ file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes[17].OneofWrappers = []interface{}{
+ (*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat)(nil),
+ (*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc,
+ NumEnums: 2,
+ NumMessages: 23,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes,
+ DependencyIndexes: file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs,
+ EnumInfos: file_envoy_config_bootstrap_v3_bootstrap_proto_enumTypes,
+ MessageInfos: file_envoy_config_bootstrap_v3_bootstrap_proto_msgTypes,
+ }.Build()
+ File_envoy_config_bootstrap_v3_bootstrap_proto = out.File
+ file_envoy_config_bootstrap_v3_bootstrap_proto_rawDesc = nil
+ file_envoy_config_bootstrap_v3_bootstrap_proto_goTypes = nil
+ file_envoy_config_bootstrap_v3_bootstrap_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go
new file mode 100644
index 000000000..55724c095
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap.pb.validate.go
@@ -0,0 +1,4501 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/bootstrap/v3/bootstrap.proto
+
+package bootstrapv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+)
+
+// Validate checks the field values on Bootstrap with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in BootstrapMultiError, or nil
+// if none found.
+func (m *Bootstrap) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetNode()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetNode()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Node",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetStaticResources()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StaticResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StaticResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStaticResources()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "StaticResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetDynamicResources()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DynamicResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DynamicResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDynamicResources()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DynamicResources",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetClusterManager()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ClusterManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ClusterManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetClusterManager()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "ClusterManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetHdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "HdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "HdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetHdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "HdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for FlagsPath
+
+ for idx, item := range m.GetStatsSinks() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("StatsSinks[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("StatsSinks[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("StatsSinks[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetDeferredStatOptions()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DeferredStatOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DeferredStatOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDeferredStatOptions()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DeferredStatOptions",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetStatsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStatsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "StatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetStatsFlushInterval(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = BootstrapValidationError{
+ field: "StatsFlushInterval",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ lt := time.Duration(300*time.Second + 0*time.Nanosecond)
+ gte := time.Duration(0*time.Second + 1000000*time.Nanosecond)
+
+ if dur < gte || dur >= lt {
+ err := BootstrapValidationError{
+ field: "StatsFlushInterval",
+ reason: "value must be inside range [1ms, 5m0s)",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetWatchdog()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Watchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Watchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWatchdog()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Watchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetWatchdogs()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Watchdogs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Watchdogs",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWatchdogs()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Watchdogs",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetTracing()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Tracing",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Tracing",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTracing()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Tracing",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLayeredRuntime()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "LayeredRuntime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "LayeredRuntime",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLayeredRuntime()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "LayeredRuntime",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetAdmin()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Admin",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "Admin",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAdmin()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "Admin",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetOverloadManager()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "OverloadManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "OverloadManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOverloadManager()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "OverloadManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for EnableDispatcherStats
+
+ // no validation rules for HeaderPrefix
+
+ if all {
+ switch v := interface{}(m.GetStatsServerVersionOverride()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StatsServerVersionOverride",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "StatsServerVersionOverride",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStatsServerVersionOverride()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "StatsServerVersionOverride",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for UseTcpForDnsLookups
+
+ if all {
+ switch v := interface{}(m.GetDnsResolutionConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DnsResolutionConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DnsResolutionConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDnsResolutionConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DnsResolutionConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetTypedDnsResolverConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "TypedDnsResolverConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "TypedDnsResolverConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetTypedDnsResolverConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "TypedDnsResolverConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetBootstrapExtensions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("BootstrapExtensions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("BootstrapExtensions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("BootstrapExtensions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetFatalActions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("FatalActions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("FatalActions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("FatalActions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetConfigSources() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("ConfigSources[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("ConfigSources[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("ConfigSources[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetDefaultConfigSource()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DefaultConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DefaultConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDefaultConfigSource()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DefaultConfigSource",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for DefaultSocketInterface
+
+ {
+ sorted_keys := make([]string, len(m.GetCertificateProviderInstances()))
+ i := 0
+ for key := range m.GetCertificateProviderInstances() {
+ sorted_keys[i] = key
+ i++
+ }
+ sort.Slice(sorted_keys, func(i, j int) bool { return sorted_keys[i] < sorted_keys[j] })
+ for _, key := range sorted_keys {
+ val := m.GetCertificateProviderInstances()[key]
+ _ = val
+
+ // no validation rules for CertificateProviderInstances[key]
+
+ if all {
+ switch v := interface{}(val).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("CertificateProviderInstances[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("CertificateProviderInstances[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(val).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("CertificateProviderInstances[%v]", key),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+ }
+
+ for idx, item := range m.GetInlineHeaders() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("InlineHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: fmt.Sprintf("InlineHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: fmt.Sprintf("InlineHeaders[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for PerfTracingFilePath
+
+ if all {
+ switch v := interface{}(m.GetDefaultRegexEngine()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DefaultRegexEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "DefaultRegexEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDefaultRegexEngine()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "DefaultRegexEngine",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetXdsDelegateExtension()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "XdsDelegateExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "XdsDelegateExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetXdsDelegateExtension()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "XdsDelegateExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetXdsConfigTrackerExtension()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "XdsConfigTrackerExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "XdsConfigTrackerExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetXdsConfigTrackerExtension()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "XdsConfigTrackerExtension",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetListenerManager()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ListenerManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ListenerManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetListenerManager()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "ListenerManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetApplicationLogConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ApplicationLogConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "ApplicationLogConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetApplicationLogConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "ApplicationLogConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetGrpcAsyncClientManagerConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "GrpcAsyncClientManagerConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "GrpcAsyncClientManagerConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetGrpcAsyncClientManagerConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "GrpcAsyncClientManagerConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMemoryAllocatorManager()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "MemoryAllocatorManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, BootstrapValidationError{
+ field: "MemoryAllocatorManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMemoryAllocatorManager()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return BootstrapValidationError{
+ field: "MemoryAllocatorManager",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ switch v := m.StatsFlush.(type) {
+ case *Bootstrap_StatsFlushOnAdmin:
+ if v == nil {
+ err := BootstrapValidationError{
+ field: "StatsFlush",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if m.GetStatsFlushOnAdmin() != true {
+ err := BootstrapValidationError{
+ field: "StatsFlushOnAdmin",
+ reason: "value must equal true",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+
+ if len(errors) > 0 {
+ return BootstrapMultiError(errors)
+ }
+
+ return nil
+}
+
+// BootstrapMultiError is an error wrapping multiple validation errors returned
+// by Bootstrap.ValidateAll() if the designated constraints aren't met.
+type BootstrapMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m BootstrapMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m BootstrapMultiError) AllErrors() []error { return m }
+
+// BootstrapValidationError is the validation error returned by
+// Bootstrap.Validate if the designated constraints aren't met.
+type BootstrapValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e BootstrapValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e BootstrapValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e BootstrapValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e BootstrapValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e BootstrapValidationError) ErrorName() string { return "BootstrapValidationError" }
+
+// Error satisfies the builtin error interface
+func (e BootstrapValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = BootstrapValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = BootstrapValidationError{}
+
+// Validate checks the field values on Admin with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Admin) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Admin with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in AdminMultiError, or nil if none found.
+func (m *Admin) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Admin) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetAccessLog() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: fmt.Sprintf("AccessLog[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: fmt.Sprintf("AccessLog[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AdminValidationError{
+ field: fmt.Sprintf("AccessLog[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for AccessLogPath
+
+ // no validation rules for ProfilePath
+
+ if all {
+ switch v := interface{}(m.GetAddress()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAddress()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AdminValidationError{
+ field: "Address",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ for idx, item := range m.GetSocketOptions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, AdminValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return AdminValidationError{
+ field: fmt.Sprintf("SocketOptions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ // no validation rules for IgnoreGlobalConnLimit
+
+ if len(errors) > 0 {
+ return AdminMultiError(errors)
+ }
+
+ return nil
+}
+
+// AdminMultiError is an error wrapping multiple validation errors returned by
+// Admin.ValidateAll() if the designated constraints aren't met.
+type AdminMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m AdminMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m AdminMultiError) AllErrors() []error { return m }
+
+// AdminValidationError is the validation error returned by Admin.Validate if
+// the designated constraints aren't met.
+type AdminValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e AdminValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e AdminValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e AdminValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e AdminValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e AdminValidationError) ErrorName() string { return "AdminValidationError" }
+
+// Error satisfies the builtin error interface
+func (e AdminValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sAdmin.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = AdminValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = AdminValidationError{}
+
+// Validate checks the field values on ClusterManager with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *ClusterManager) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClusterManager with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in ClusterManagerMultiError,
+// or nil if none found.
+func (m *ClusterManager) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClusterManager) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for LocalClusterName
+
+ if all {
+ switch v := interface{}(m.GetOutlierDetection()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "OutlierDetection",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "OutlierDetection",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetOutlierDetection()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterManagerValidationError{
+ field: "OutlierDetection",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetUpstreamBindConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "UpstreamBindConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "UpstreamBindConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetUpstreamBindConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterManagerValidationError{
+ field: "UpstreamBindConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetLoadStatsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "LoadStatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterManagerValidationError{
+ field: "LoadStatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLoadStatsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterManagerValidationError{
+ field: "LoadStatsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for EnableDeferredClusterCreation
+
+ if len(errors) > 0 {
+ return ClusterManagerMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClusterManagerMultiError is an error wrapping multiple validation errors
+// returned by ClusterManager.ValidateAll() if the designated constraints
+// aren't met.
+type ClusterManagerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClusterManagerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClusterManagerMultiError) AllErrors() []error { return m }
+
+// ClusterManagerValidationError is the validation error returned by
+// ClusterManager.Validate if the designated constraints aren't met.
+type ClusterManagerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClusterManagerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClusterManagerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClusterManagerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClusterManagerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClusterManagerValidationError) ErrorName() string { return "ClusterManagerValidationError" }
+
+// Error satisfies the builtin error interface
+func (e ClusterManagerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClusterManager.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClusterManagerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClusterManagerValidationError{}
+
+// Validate checks the field values on Watchdogs with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Watchdogs) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Watchdogs with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in WatchdogsMultiError, or nil
+// if none found.
+func (m *Watchdogs) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Watchdogs) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetMainThreadWatchdog()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogsValidationError{
+ field: "MainThreadWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogsValidationError{
+ field: "MainThreadWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMainThreadWatchdog()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogsValidationError{
+ field: "MainThreadWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetWorkerWatchdog()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogsValidationError{
+ field: "WorkerWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogsValidationError{
+ field: "WorkerWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetWorkerWatchdog()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogsValidationError{
+ field: "WorkerWatchdog",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return WatchdogsMultiError(errors)
+ }
+
+ return nil
+}
+
+// WatchdogsMultiError is an error wrapping multiple validation errors returned
+// by Watchdogs.ValidateAll() if the designated constraints aren't met.
+type WatchdogsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m WatchdogsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m WatchdogsMultiError) AllErrors() []error { return m }
+
+// WatchdogsValidationError is the validation error returned by
+// Watchdogs.Validate if the designated constraints aren't met.
+type WatchdogsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e WatchdogsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e WatchdogsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e WatchdogsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e WatchdogsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e WatchdogsValidationError) ErrorName() string { return "WatchdogsValidationError" }
+
+// Error satisfies the builtin error interface
+func (e WatchdogsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWatchdogs.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = WatchdogsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = WatchdogsValidationError{}
+
+// Validate checks the field values on Watchdog with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Watchdog) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Watchdog with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in WatchdogMultiError, or nil
+// if none found.
+func (m *Watchdog) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Watchdog) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetActions() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: fmt.Sprintf("Actions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: fmt.Sprintf("Actions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: fmt.Sprintf("Actions[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if all {
+ switch v := interface{}(m.GetMissTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMissTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "MissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMegamissTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MegamissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MegamissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMegamissTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "MegamissTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetKillTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "KillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "KillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetKillTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "KillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if d := m.GetMaxKillTimeoutJitter(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = WatchdogValidationError{
+ field: "MaxKillTimeoutJitter",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(0*time.Second + 0*time.Nanosecond)
+
+ if dur < gte {
+ err := WatchdogValidationError{
+ field: "MaxKillTimeoutJitter",
+ reason: "value must be greater than or equal to 0s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMultikillTimeout()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MultikillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MultikillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMultikillTimeout()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "MultikillTimeout",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMultikillThreshold()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MultikillThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, WatchdogValidationError{
+ field: "MultikillThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMultikillThreshold()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return WatchdogValidationError{
+ field: "MultikillThreshold",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return WatchdogMultiError(errors)
+ }
+
+ return nil
+}
+
+// WatchdogMultiError is an error wrapping multiple validation errors returned
+// by Watchdog.ValidateAll() if the designated constraints aren't met.
+type WatchdogMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m WatchdogMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m WatchdogMultiError) AllErrors() []error { return m }
+
+// WatchdogValidationError is the validation error returned by
+// Watchdog.Validate if the designated constraints aren't met.
+type WatchdogValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e WatchdogValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e WatchdogValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e WatchdogValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e WatchdogValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e WatchdogValidationError) ErrorName() string { return "WatchdogValidationError" }
+
+// Error satisfies the builtin error interface
+func (e WatchdogValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWatchdog.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = WatchdogValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = WatchdogValidationError{}
+
+// Validate checks the field values on FatalAction with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *FatalAction) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on FatalAction with the rules defined in
+// the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in FatalActionMultiError, or
+// nil if none found.
+func (m *FatalAction) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *FatalAction) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, FatalActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, FatalActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return FatalActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return FatalActionMultiError(errors)
+ }
+
+ return nil
+}
+
+// FatalActionMultiError is an error wrapping multiple validation errors
+// returned by FatalAction.ValidateAll() if the designated constraints aren't met.
+type FatalActionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m FatalActionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m FatalActionMultiError) AllErrors() []error { return m }
+
+// FatalActionValidationError is the validation error returned by
+// FatalAction.Validate if the designated constraints aren't met.
+type FatalActionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e FatalActionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e FatalActionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e FatalActionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e FatalActionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e FatalActionValidationError) ErrorName() string { return "FatalActionValidationError" }
+
+// Error satisfies the builtin error interface
+func (e FatalActionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sFatalAction.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = FatalActionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = FatalActionValidationError{}
+
+// Validate checks the field values on Runtime with the rules defined in the
+// proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *Runtime) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Runtime with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in RuntimeMultiError, or nil if none found.
+func (m *Runtime) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Runtime) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for SymlinkRoot
+
+ // no validation rules for Subdirectory
+
+ // no validation rules for OverrideSubdirectory
+
+ if all {
+ switch v := interface{}(m.GetBase()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeValidationError{
+ field: "Base",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeValidationError{
+ field: "Base",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetBase()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeValidationError{
+ field: "Base",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return RuntimeMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeMultiError is an error wrapping multiple validation errors returned
+// by Runtime.ValidateAll() if the designated constraints aren't met.
+type RuntimeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeMultiError) AllErrors() []error { return m }
+
+// RuntimeValidationError is the validation error returned by Runtime.Validate
+// if the designated constraints aren't met.
+type RuntimeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeValidationError) ErrorName() string { return "RuntimeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntime.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeValidationError{}
+
+// Validate checks the field values on RuntimeLayer with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *RuntimeLayer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeLayer with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in RuntimeLayerMultiError, or
+// nil if none found.
+func (m *RuntimeLayer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeLayer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetName()) < 1 {
+ err := RuntimeLayerValidationError{
+ field: "Name",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ oneofLayerSpecifierPresent := false
+ switch v := m.LayerSpecifier.(type) {
+ case *RuntimeLayer_StaticLayer:
+ if v == nil {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLayerSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetStaticLayer()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "StaticLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "StaticLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetStaticLayer()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayerValidationError{
+ field: "StaticLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *RuntimeLayer_DiskLayer_:
+ if v == nil {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLayerSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetDiskLayer()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "DiskLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "DiskLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetDiskLayer()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayerValidationError{
+ field: "DiskLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *RuntimeLayer_AdminLayer_:
+ if v == nil {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLayerSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetAdminLayer()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "AdminLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "AdminLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAdminLayer()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayerValidationError{
+ field: "AdminLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *RuntimeLayer_RtdsLayer_:
+ if v == nil {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLayerSpecifierPresent = true
+
+ if all {
+ switch v := interface{}(m.GetRtdsLayer()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "RtdsLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayerValidationError{
+ field: "RtdsLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRtdsLayer()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayerValidationError{
+ field: "RtdsLayer",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofLayerSpecifierPresent {
+ err := RuntimeLayerValidationError{
+ field: "LayerSpecifier",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return RuntimeLayerMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeLayerMultiError is an error wrapping multiple validation errors
+// returned by RuntimeLayer.ValidateAll() if the designated constraints aren't met.
+type RuntimeLayerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeLayerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeLayerMultiError) AllErrors() []error { return m }
+
+// RuntimeLayerValidationError is the validation error returned by
+// RuntimeLayer.Validate if the designated constraints aren't met.
+type RuntimeLayerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeLayerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeLayerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeLayerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeLayerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeLayerValidationError) ErrorName() string { return "RuntimeLayerValidationError" }
+
+// Error satisfies the builtin error interface
+func (e RuntimeLayerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeLayer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeLayerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeLayerValidationError{}
+
+// Validate checks the field values on LayeredRuntime with the rules defined in
+// the proto definition for this message. If any rules are violated, the first
+// error encountered is returned, or nil if there are no violations.
+func (m *LayeredRuntime) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on LayeredRuntime with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// result is a list of violation errors wrapped in LayeredRuntimeMultiError,
+// or nil if none found.
+func (m *LayeredRuntime) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *LayeredRuntime) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetLayers() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, LayeredRuntimeValidationError{
+ field: fmt.Sprintf("Layers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, LayeredRuntimeValidationError{
+ field: fmt.Sprintf("Layers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return LayeredRuntimeValidationError{
+ field: fmt.Sprintf("Layers[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return LayeredRuntimeMultiError(errors)
+ }
+
+ return nil
+}
+
+// LayeredRuntimeMultiError is an error wrapping multiple validation errors
+// returned by LayeredRuntime.ValidateAll() if the designated constraints
+// aren't met.
+type LayeredRuntimeMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m LayeredRuntimeMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m LayeredRuntimeMultiError) AllErrors() []error { return m }
+
+// LayeredRuntimeValidationError is the validation error returned by
+// LayeredRuntime.Validate if the designated constraints aren't met.
+type LayeredRuntimeValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e LayeredRuntimeValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e LayeredRuntimeValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e LayeredRuntimeValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e LayeredRuntimeValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e LayeredRuntimeValidationError) ErrorName() string { return "LayeredRuntimeValidationError" }
+
+// Error satisfies the builtin error interface
+func (e LayeredRuntimeValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sLayeredRuntime.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = LayeredRuntimeValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = LayeredRuntimeValidationError{}
+
+// Validate checks the field values on CustomInlineHeader with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CustomInlineHeader) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CustomInlineHeader with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CustomInlineHeaderMultiError, or nil if none found.
+func (m *CustomInlineHeader) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CustomInlineHeader) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if utf8.RuneCountInString(m.GetInlineHeaderName()) < 1 {
+ err := CustomInlineHeaderValidationError{
+ field: "InlineHeaderName",
+ reason: "value length must be at least 1 runes",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if !_CustomInlineHeader_InlineHeaderName_Pattern.MatchString(m.GetInlineHeaderName()) {
+ err := CustomInlineHeaderValidationError{
+ field: "InlineHeaderName",
+ reason: "value does not match regex pattern \"^[^\\x00\\n\\r]*$\"",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if _, ok := CustomInlineHeader_InlineHeaderType_name[int32(m.GetInlineHeaderType())]; !ok {
+ err := CustomInlineHeaderValidationError{
+ field: "InlineHeaderType",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return CustomInlineHeaderMultiError(errors)
+ }
+
+ return nil
+}
+
+// CustomInlineHeaderMultiError is an error wrapping multiple validation errors
+// returned by CustomInlineHeader.ValidateAll() if the designated constraints
+// aren't met.
+type CustomInlineHeaderMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CustomInlineHeaderMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CustomInlineHeaderMultiError) AllErrors() []error { return m }
+
+// CustomInlineHeaderValidationError is the validation error returned by
+// CustomInlineHeader.Validate if the designated constraints aren't met.
+type CustomInlineHeaderValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CustomInlineHeaderValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CustomInlineHeaderValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CustomInlineHeaderValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CustomInlineHeaderValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CustomInlineHeaderValidationError) ErrorName() string {
+ return "CustomInlineHeaderValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CustomInlineHeaderValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCustomInlineHeader.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CustomInlineHeaderValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CustomInlineHeaderValidationError{}
+
+var _CustomInlineHeader_InlineHeaderName_Pattern = regexp.MustCompile("^[^\x00\n\r]*$")
+
+// Validate checks the field values on MemoryAllocatorManager with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *MemoryAllocatorManager) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on MemoryAllocatorManager with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// MemoryAllocatorManagerMultiError, or nil if none found.
+func (m *MemoryAllocatorManager) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *MemoryAllocatorManager) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for BytesToRelease
+
+ if all {
+ switch v := interface{}(m.GetMemoryReleaseInterval()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, MemoryAllocatorManagerValidationError{
+ field: "MemoryReleaseInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, MemoryAllocatorManagerValidationError{
+ field: "MemoryReleaseInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMemoryReleaseInterval()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return MemoryAllocatorManagerValidationError{
+ field: "MemoryReleaseInterval",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return MemoryAllocatorManagerMultiError(errors)
+ }
+
+ return nil
+}
+
+// MemoryAllocatorManagerMultiError is an error wrapping multiple validation
+// errors returned by MemoryAllocatorManager.ValidateAll() if the designated
+// constraints aren't met.
+type MemoryAllocatorManagerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m MemoryAllocatorManagerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m MemoryAllocatorManagerMultiError) AllErrors() []error { return m }
+
+// MemoryAllocatorManagerValidationError is the validation error returned by
+// MemoryAllocatorManager.Validate if the designated constraints aren't met.
+type MemoryAllocatorManagerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e MemoryAllocatorManagerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e MemoryAllocatorManagerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e MemoryAllocatorManagerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e MemoryAllocatorManagerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e MemoryAllocatorManagerValidationError) ErrorName() string {
+ return "MemoryAllocatorManagerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e MemoryAllocatorManagerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sMemoryAllocatorManager.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = MemoryAllocatorManagerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = MemoryAllocatorManagerValidationError{}
+
+// Validate checks the field values on Bootstrap_StaticResources with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap_StaticResources) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap_StaticResources with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Bootstrap_StaticResourcesMultiError, or nil if none found.
+func (m *Bootstrap_StaticResources) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_StaticResources) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetListeners() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Listeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Listeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Listeners[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetClusters() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Clusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Clusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Clusters[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetSecrets() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Secrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Secrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_StaticResourcesValidationError{
+ field: fmt.Sprintf("Secrets[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_StaticResourcesMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_StaticResourcesMultiError is an error wrapping multiple validation
+// errors returned by Bootstrap_StaticResources.ValidateAll() if the
+// designated constraints aren't met.
+type Bootstrap_StaticResourcesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_StaticResourcesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_StaticResourcesMultiError) AllErrors() []error { return m }
+
+// Bootstrap_StaticResourcesValidationError is the validation error returned by
+// Bootstrap_StaticResources.Validate if the designated constraints aren't met.
+type Bootstrap_StaticResourcesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_StaticResourcesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_StaticResourcesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_StaticResourcesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_StaticResourcesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_StaticResourcesValidationError) ErrorName() string {
+ return "Bootstrap_StaticResourcesValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_StaticResourcesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_StaticResources.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_StaticResourcesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_StaticResourcesValidationError{}
+
+// Validate checks the field values on Bootstrap_DynamicResources with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap_DynamicResources) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap_DynamicResources with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Bootstrap_DynamicResourcesMultiError, or nil if none found.
+func (m *Bootstrap_DynamicResources) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_DynamicResources) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetLdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "LdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "LdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_DynamicResourcesValidationError{
+ field: "LdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for LdsResourcesLocator
+
+ if all {
+ switch v := interface{}(m.GetCdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "CdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "CdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetCdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_DynamicResourcesValidationError{
+ field: "CdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for CdsResourcesLocator
+
+ if all {
+ switch v := interface{}(m.GetAdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "AdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_DynamicResourcesValidationError{
+ field: "AdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetAdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_DynamicResourcesValidationError{
+ field: "AdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_DynamicResourcesMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_DynamicResourcesMultiError is an error wrapping multiple
+// validation errors returned by Bootstrap_DynamicResources.ValidateAll() if
+// the designated constraints aren't met.
+type Bootstrap_DynamicResourcesMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_DynamicResourcesMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_DynamicResourcesMultiError) AllErrors() []error { return m }
+
+// Bootstrap_DynamicResourcesValidationError is the validation error returned
+// by Bootstrap_DynamicResources.Validate if the designated constraints aren't met.
+type Bootstrap_DynamicResourcesValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_DynamicResourcesValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_DynamicResourcesValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_DynamicResourcesValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_DynamicResourcesValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_DynamicResourcesValidationError) ErrorName() string {
+ return "Bootstrap_DynamicResourcesValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_DynamicResourcesValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_DynamicResources.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_DynamicResourcesValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_DynamicResourcesValidationError{}
+
+// Validate checks the field values on Bootstrap_ApplicationLogConfig with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap_ApplicationLogConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap_ApplicationLogConfig with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Bootstrap_ApplicationLogConfigMultiError, or nil if none found.
+func (m *Bootstrap_ApplicationLogConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_ApplicationLogConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetLogFormat()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_ApplicationLogConfigValidationError{
+ field: "LogFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_ApplicationLogConfigValidationError{
+ field: "LogFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetLogFormat()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_ApplicationLogConfigValidationError{
+ field: "LogFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_ApplicationLogConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_ApplicationLogConfigMultiError is an error wrapping multiple
+// validation errors returned by Bootstrap_ApplicationLogConfig.ValidateAll()
+// if the designated constraints aren't met.
+type Bootstrap_ApplicationLogConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_ApplicationLogConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_ApplicationLogConfigMultiError) AllErrors() []error { return m }
+
+// Bootstrap_ApplicationLogConfigValidationError is the validation error
+// returned by Bootstrap_ApplicationLogConfig.Validate if the designated
+// constraints aren't met.
+type Bootstrap_ApplicationLogConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_ApplicationLogConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_ApplicationLogConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_ApplicationLogConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_ApplicationLogConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_ApplicationLogConfigValidationError) ErrorName() string {
+ return "Bootstrap_ApplicationLogConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_ApplicationLogConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_ApplicationLogConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_ApplicationLogConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_ApplicationLogConfigValidationError{}
+
+// Validate checks the field values on Bootstrap_DeferredStatOptions with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Bootstrap_DeferredStatOptions) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Bootstrap_DeferredStatOptions with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// Bootstrap_DeferredStatOptionsMultiError, or nil if none found.
+func (m *Bootstrap_DeferredStatOptions) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_DeferredStatOptions) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for EnableDeferredCreationStats
+
+ if len(errors) > 0 {
+ return Bootstrap_DeferredStatOptionsMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_DeferredStatOptionsMultiError is an error wrapping multiple
+// validation errors returned by Bootstrap_DeferredStatOptions.ValidateAll()
+// if the designated constraints aren't met.
+type Bootstrap_DeferredStatOptionsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_DeferredStatOptionsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_DeferredStatOptionsMultiError) AllErrors() []error { return m }
+
+// Bootstrap_DeferredStatOptionsValidationError is the validation error
+// returned by Bootstrap_DeferredStatOptions.Validate if the designated
+// constraints aren't met.
+type Bootstrap_DeferredStatOptionsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_DeferredStatOptionsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_DeferredStatOptionsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_DeferredStatOptionsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_DeferredStatOptionsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_DeferredStatOptionsValidationError) ErrorName() string {
+ return "Bootstrap_DeferredStatOptionsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_DeferredStatOptionsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_DeferredStatOptions.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_DeferredStatOptionsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_DeferredStatOptionsValidationError{}
+
+// Validate checks the field values on Bootstrap_GrpcAsyncClientManagerConfig
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Bootstrap_GrpcAsyncClientManagerConfig with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// Bootstrap_GrpcAsyncClientManagerConfigMultiError, or nil if none found.
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if d := m.GetMaxCachedEntryIdleDuration(); d != nil {
+ dur, err := d.AsDuration(), d.CheckValid()
+ if err != nil {
+ err = Bootstrap_GrpcAsyncClientManagerConfigValidationError{
+ field: "MaxCachedEntryIdleDuration",
+ reason: "value is not a valid duration",
+ cause: err,
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ } else {
+
+ gte := time.Duration(5*time.Second + 0*time.Nanosecond)
+
+ if dur < gte {
+ err := Bootstrap_GrpcAsyncClientManagerConfigValidationError{
+ field: "MaxCachedEntryIdleDuration",
+ reason: "value must be greater than or equal to 5s",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ }
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_GrpcAsyncClientManagerConfigMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_GrpcAsyncClientManagerConfigMultiError is an error wrapping
+// multiple validation errors returned by
+// Bootstrap_GrpcAsyncClientManagerConfig.ValidateAll() if the designated
+// constraints aren't met.
+type Bootstrap_GrpcAsyncClientManagerConfigMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_GrpcAsyncClientManagerConfigMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_GrpcAsyncClientManagerConfigMultiError) AllErrors() []error { return m }
+
+// Bootstrap_GrpcAsyncClientManagerConfigValidationError is the validation
+// error returned by Bootstrap_GrpcAsyncClientManagerConfig.Validate if the
+// designated constraints aren't met.
+type Bootstrap_GrpcAsyncClientManagerConfigValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) ErrorName() string {
+ return "Bootstrap_GrpcAsyncClientManagerConfigValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_GrpcAsyncClientManagerConfigValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_GrpcAsyncClientManagerConfig.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_GrpcAsyncClientManagerConfigValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_GrpcAsyncClientManagerConfigValidationError{}
+
+// Validate checks the field values on Bootstrap_ApplicationLogConfig_LogFormat
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// Bootstrap_ApplicationLogConfig_LogFormat with the rules defined in the
+// proto definition for this message. If any rules are violated, the result is
+// a list of violation errors wrapped in
+// Bootstrap_ApplicationLogConfig_LogFormatMultiError, or nil if none found.
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ oneofLogFormatPresent := false
+ switch v := m.LogFormat.(type) {
+ case *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat:
+ if v == nil {
+ err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "LogFormat",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLogFormatPresent = true
+
+ if all {
+ switch v := interface{}(m.GetJsonFormat()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetJsonFormat()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "JsonFormat",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ case *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat:
+ if v == nil {
+ err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "LogFormat",
+ reason: "oneof value cannot be a typed-nil",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+ oneofLogFormatPresent = true
+ // no validation rules for TextFormat
+ default:
+ _ = v // ensures v is used
+ }
+ if !oneofLogFormatPresent {
+ err := Bootstrap_ApplicationLogConfig_LogFormatValidationError{
+ field: "LogFormat",
+ reason: "value is required",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Bootstrap_ApplicationLogConfig_LogFormatMultiError(errors)
+ }
+
+ return nil
+}
+
+// Bootstrap_ApplicationLogConfig_LogFormatMultiError is an error wrapping
+// multiple validation errors returned by
+// Bootstrap_ApplicationLogConfig_LogFormat.ValidateAll() if the designated
+// constraints aren't met.
+type Bootstrap_ApplicationLogConfig_LogFormatMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Bootstrap_ApplicationLogConfig_LogFormatMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Bootstrap_ApplicationLogConfig_LogFormatMultiError) AllErrors() []error { return m }
+
+// Bootstrap_ApplicationLogConfig_LogFormatValidationError is the validation
+// error returned by Bootstrap_ApplicationLogConfig_LogFormat.Validate if the
+// designated constraints aren't met.
+type Bootstrap_ApplicationLogConfig_LogFormatValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) ErrorName() string {
+ return "Bootstrap_ApplicationLogConfig_LogFormatValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Bootstrap_ApplicationLogConfig_LogFormatValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sBootstrap_ApplicationLogConfig_LogFormat.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Bootstrap_ApplicationLogConfig_LogFormatValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Bootstrap_ApplicationLogConfig_LogFormatValidationError{}
+
+// Validate checks the field values on ClusterManager_OutlierDetection with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ClusterManager_OutlierDetection) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on ClusterManager_OutlierDetection with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// ClusterManager_OutlierDetectionMultiError, or nil if none found.
+func (m *ClusterManager_OutlierDetection) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *ClusterManager_OutlierDetection) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for EventLogPath
+
+ if all {
+ switch v := interface{}(m.GetEventService()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, ClusterManager_OutlierDetectionValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, ClusterManager_OutlierDetectionValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetEventService()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return ClusterManager_OutlierDetectionValidationError{
+ field: "EventService",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return ClusterManager_OutlierDetectionMultiError(errors)
+ }
+
+ return nil
+}
+
+// ClusterManager_OutlierDetectionMultiError is an error wrapping multiple
+// validation errors returned by ClusterManager_OutlierDetection.ValidateAll()
+// if the designated constraints aren't met.
+type ClusterManager_OutlierDetectionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ClusterManager_OutlierDetectionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ClusterManager_OutlierDetectionMultiError) AllErrors() []error { return m }
+
+// ClusterManager_OutlierDetectionValidationError is the validation error
+// returned by ClusterManager_OutlierDetection.Validate if the designated
+// constraints aren't met.
+type ClusterManager_OutlierDetectionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e ClusterManager_OutlierDetectionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ClusterManager_OutlierDetectionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ClusterManager_OutlierDetectionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ClusterManager_OutlierDetectionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ClusterManager_OutlierDetectionValidationError) ErrorName() string {
+ return "ClusterManager_OutlierDetectionValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ClusterManager_OutlierDetectionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sClusterManager_OutlierDetection.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = ClusterManager_OutlierDetectionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = ClusterManager_OutlierDetectionValidationError{}
+
+// Validate checks the field values on Watchdog_WatchdogAction with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *Watchdog_WatchdogAction) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on Watchdog_WatchdogAction with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// Watchdog_WatchdogActionMultiError, or nil if none found.
+func (m *Watchdog_WatchdogAction) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *Watchdog_WatchdogAction) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, Watchdog_WatchdogActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, Watchdog_WatchdogActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return Watchdog_WatchdogActionValidationError{
+ field: "Config",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if _, ok := Watchdog_WatchdogAction_WatchdogEvent_name[int32(m.GetEvent())]; !ok {
+ err := Watchdog_WatchdogActionValidationError{
+ field: "Event",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return Watchdog_WatchdogActionMultiError(errors)
+ }
+
+ return nil
+}
+
+// Watchdog_WatchdogActionMultiError is an error wrapping multiple validation
+// errors returned by Watchdog_WatchdogAction.ValidateAll() if the designated
+// constraints aren't met.
+type Watchdog_WatchdogActionMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m Watchdog_WatchdogActionMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m Watchdog_WatchdogActionMultiError) AllErrors() []error { return m }
+
+// Watchdog_WatchdogActionValidationError is the validation error returned by
+// Watchdog_WatchdogAction.Validate if the designated constraints aren't met.
+type Watchdog_WatchdogActionValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e Watchdog_WatchdogActionValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e Watchdog_WatchdogActionValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e Watchdog_WatchdogActionValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e Watchdog_WatchdogActionValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e Watchdog_WatchdogActionValidationError) ErrorName() string {
+ return "Watchdog_WatchdogActionValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e Watchdog_WatchdogActionValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sWatchdog_WatchdogAction.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = Watchdog_WatchdogActionValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = Watchdog_WatchdogActionValidationError{}
+
+// Validate checks the field values on RuntimeLayer_DiskLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RuntimeLayer_DiskLayer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeLayer_DiskLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RuntimeLayer_DiskLayerMultiError, or nil if none found.
+func (m *RuntimeLayer_DiskLayer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeLayer_DiskLayer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for SymlinkRoot
+
+ // no validation rules for Subdirectory
+
+ // no validation rules for AppendServiceCluster
+
+ if len(errors) > 0 {
+ return RuntimeLayer_DiskLayerMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeLayer_DiskLayerMultiError is an error wrapping multiple validation
+// errors returned by RuntimeLayer_DiskLayer.ValidateAll() if the designated
+// constraints aren't met.
+type RuntimeLayer_DiskLayerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeLayer_DiskLayerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeLayer_DiskLayerMultiError) AllErrors() []error { return m }
+
+// RuntimeLayer_DiskLayerValidationError is the validation error returned by
+// RuntimeLayer_DiskLayer.Validate if the designated constraints aren't met.
+type RuntimeLayer_DiskLayerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeLayer_DiskLayerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeLayer_DiskLayerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeLayer_DiskLayerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeLayer_DiskLayerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeLayer_DiskLayerValidationError) ErrorName() string {
+ return "RuntimeLayer_DiskLayerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeLayer_DiskLayerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeLayer_DiskLayer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeLayer_DiskLayerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeLayer_DiskLayerValidationError{}
+
+// Validate checks the field values on RuntimeLayer_AdminLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RuntimeLayer_AdminLayer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeLayer_AdminLayer with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RuntimeLayer_AdminLayerMultiError, or nil if none found.
+func (m *RuntimeLayer_AdminLayer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeLayer_AdminLayer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if len(errors) > 0 {
+ return RuntimeLayer_AdminLayerMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeLayer_AdminLayerMultiError is an error wrapping multiple validation
+// errors returned by RuntimeLayer_AdminLayer.ValidateAll() if the designated
+// constraints aren't met.
+type RuntimeLayer_AdminLayerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeLayer_AdminLayerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeLayer_AdminLayerMultiError) AllErrors() []error { return m }
+
+// RuntimeLayer_AdminLayerValidationError is the validation error returned by
+// RuntimeLayer_AdminLayer.Validate if the designated constraints aren't met.
+type RuntimeLayer_AdminLayerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeLayer_AdminLayerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeLayer_AdminLayerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeLayer_AdminLayerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeLayer_AdminLayerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeLayer_AdminLayerValidationError) ErrorName() string {
+ return "RuntimeLayer_AdminLayerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeLayer_AdminLayerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeLayer_AdminLayer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeLayer_AdminLayerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeLayer_AdminLayerValidationError{}
+
+// Validate checks the field values on RuntimeLayer_RtdsLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *RuntimeLayer_RtdsLayer) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on RuntimeLayer_RtdsLayer with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// RuntimeLayer_RtdsLayerMultiError, or nil if none found.
+func (m *RuntimeLayer_RtdsLayer) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *RuntimeLayer_RtdsLayer) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ // no validation rules for Name
+
+ if all {
+ switch v := interface{}(m.GetRtdsConfig()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, RuntimeLayer_RtdsLayerValidationError{
+ field: "RtdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, RuntimeLayer_RtdsLayerValidationError{
+ field: "RtdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRtdsConfig()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return RuntimeLayer_RtdsLayerValidationError{
+ field: "RtdsConfig",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return RuntimeLayer_RtdsLayerMultiError(errors)
+ }
+
+ return nil
+}
+
+// RuntimeLayer_RtdsLayerMultiError is an error wrapping multiple validation
+// errors returned by RuntimeLayer_RtdsLayer.ValidateAll() if the designated
+// constraints aren't met.
+type RuntimeLayer_RtdsLayerMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m RuntimeLayer_RtdsLayerMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m RuntimeLayer_RtdsLayerMultiError) AllErrors() []error { return m }
+
+// RuntimeLayer_RtdsLayerValidationError is the validation error returned by
+// RuntimeLayer_RtdsLayer.Validate if the designated constraints aren't met.
+type RuntimeLayer_RtdsLayerValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e RuntimeLayer_RtdsLayerValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e RuntimeLayer_RtdsLayerValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e RuntimeLayer_RtdsLayerValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e RuntimeLayer_RtdsLayerValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e RuntimeLayer_RtdsLayerValidationError) ErrorName() string {
+ return "RuntimeLayer_RtdsLayerValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e RuntimeLayer_RtdsLayerValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sRuntimeLayer_RtdsLayer.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = RuntimeLayer_RtdsLayerValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = RuntimeLayer_RtdsLayerValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go
new file mode 100644
index 000000000..51e10e0e0
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/bootstrap/v3/bootstrap_vtproto.pb.go
@@ -0,0 +1,3128 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/bootstrap/v3/bootstrap.proto
+
+package bootstrapv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ durationpb "github.com/planetscale/vtprotobuf/types/known/durationpb"
+ structpb "github.com/planetscale/vtprotobuf/types/known/structpb"
+ wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *Bootstrap_StaticResources) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_StaticResources) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_StaticResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Secrets) > 0 {
+ for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.Secrets[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Secrets[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Clusters) > 0 {
+ for iNdEx := len(m.Clusters) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.Clusters[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Clusters[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Listeners) > 0 {
+ for iNdEx := len(m.Listeners) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.Listeners[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Listeners[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_DynamicResources) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_DynamicResources) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_DynamicResources) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.CdsResourcesLocator) > 0 {
+ i -= len(m.CdsResourcesLocator)
+ copy(dAtA[i:], m.CdsResourcesLocator)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CdsResourcesLocator)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if len(m.LdsResourcesLocator) > 0 {
+ i -= len(m.LdsResourcesLocator)
+ copy(dAtA[i:], m.LdsResourcesLocator)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LdsResourcesLocator)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.AdsConfig != nil {
+ if vtmsg, ok := interface{}(m.AdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.AdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.CdsConfig != nil {
+ if vtmsg, ok := interface{}(m.CdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.CdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.LdsConfig != nil {
+ if vtmsg, ok := interface{}(m.LdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_TextFormat); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.LogFormat.(*Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.JsonFormat != nil {
+ size, err := (*structpb.Struct)(m.JsonFormat).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i -= len(m.TextFormat)
+ copy(dAtA[i:], m.TextFormat)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.TextFormat)))
+ i--
+ dAtA[i] = 0x12
+ return len(dAtA) - i, nil
+}
+func (m *Bootstrap_ApplicationLogConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_ApplicationLogConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_ApplicationLogConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.LogFormat != nil {
+ size, err := m.LogFormat.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_DeferredStatOptions) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_DeferredStatOptions) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_DeferredStatOptions) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.EnableDeferredCreationStats {
+ i--
+ if m.EnableDeferredCreationStats {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MaxCachedEntryIdleDuration != nil {
+ size, err := (*durationpb.Duration)(m.MaxCachedEntryIdleDuration).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Bootstrap) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MemoryAllocatorManager != nil {
+ size, err := m.MemoryAllocatorManager.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xca
+ }
+ if m.GrpcAsyncClientManagerConfig != nil {
+ size, err := m.GrpcAsyncClientManagerConfig.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xc2
+ }
+ if m.DeferredStatOptions != nil {
+ size, err := m.DeferredStatOptions.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xba
+ }
+ if m.ApplicationLogConfig != nil {
+ size, err := m.ApplicationLogConfig.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xb2
+ }
+ if m.ListenerManager != nil {
+ if vtmsg, ok := interface{}(m.ListenerManager).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.ListenerManager)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xaa
+ }
+ if m.XdsConfigTrackerExtension != nil {
+ if vtmsg, ok := interface{}(m.XdsConfigTrackerExtension).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.XdsConfigTrackerExtension)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0xa2
+ }
+ if m.XdsDelegateExtension != nil {
+ if vtmsg, ok := interface{}(m.XdsDelegateExtension).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.XdsDelegateExtension)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.DefaultRegexEngine != nil {
+ if vtmsg, ok := interface{}(m.DefaultRegexEngine).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.DefaultRegexEngine)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x92
+ }
+ if len(m.PerfTracingFilePath) > 0 {
+ i -= len(m.PerfTracingFilePath)
+ copy(dAtA[i:], m.PerfTracingFilePath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.PerfTracingFilePath)))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x8a
+ }
+ if len(m.InlineHeaders) > 0 {
+ for iNdEx := len(m.InlineHeaders) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.InlineHeaders[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2
+ i--
+ dAtA[i] = 0x82
+ }
+ }
+ if m.TypedDnsResolverConfig != nil {
+ if vtmsg, ok := interface{}(m.TypedDnsResolverConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.TypedDnsResolverConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xfa
+ }
+ if m.DnsResolutionConfig != nil {
+ if vtmsg, ok := interface{}(m.DnsResolutionConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.DnsResolutionConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xf2
+ }
+ if msg, ok := m.StatsFlush.(*Bootstrap_StatsFlushOnAdmin); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if len(m.FatalActions) > 0 {
+ for iNdEx := len(m.FatalActions) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.FatalActions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xe2
+ }
+ }
+ if m.Watchdogs != nil {
+ size, err := m.Watchdogs.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xda
+ }
+ if len(m.NodeContextParams) > 0 {
+ for iNdEx := len(m.NodeContextParams) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.NodeContextParams[iNdEx])
+ copy(dAtA[i:], m.NodeContextParams[iNdEx])
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.NodeContextParams[iNdEx])))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xd2
+ }
+ }
+ if len(m.CertificateProviderInstances) > 0 {
+ for k := range m.CertificateProviderInstances {
+ v := m.CertificateProviderInstances[k]
+ baseI := i
+ if vtmsg, ok := interface{}(v).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(v)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ i -= len(k)
+ copy(dAtA[i:], k)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k)))
+ i--
+ dAtA[i] = 0xa
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xca
+ }
+ }
+ if len(m.DefaultSocketInterface) > 0 {
+ i -= len(m.DefaultSocketInterface)
+ copy(dAtA[i:], m.DefaultSocketInterface)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.DefaultSocketInterface)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xc2
+ }
+ if m.DefaultConfigSource != nil {
+ if vtmsg, ok := interface{}(m.DefaultConfigSource).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.DefaultConfigSource)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xba
+ }
+ if len(m.ConfigSources) > 0 {
+ for iNdEx := len(m.ConfigSources) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.ConfigSources[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.ConfigSources[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xb2
+ }
+ }
+ if len(m.BootstrapExtensions) > 0 {
+ for iNdEx := len(m.BootstrapExtensions) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.BootstrapExtensions[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.BootstrapExtensions[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xaa
+ }
+ }
+ if m.UseTcpForDnsLookups {
+ i--
+ if m.UseTcpForDnsLookups {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xa0
+ }
+ if m.StatsServerVersionOverride != nil {
+ size, err := (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x9a
+ }
+ if len(m.HeaderPrefix) > 0 {
+ i -= len(m.HeaderPrefix)
+ copy(dAtA[i:], m.HeaderPrefix)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.HeaderPrefix)))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x92
+ }
+ if m.LayeredRuntime != nil {
+ size, err := m.LayeredRuntime.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x8a
+ }
+ if m.EnableDispatcherStats {
+ i--
+ if m.EnableDispatcherStats {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x80
+ }
+ if m.OverloadManager != nil {
+ if vtmsg, ok := interface{}(m.OverloadManager).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.OverloadManager)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x7a
+ }
+ if m.HdsConfig != nil {
+ if vtmsg, ok := interface{}(m.HdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.HdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ if m.StatsConfig != nil {
+ if vtmsg, ok := interface{}(m.StatsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.StatsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x6a
+ }
+ if m.Admin != nil {
+ size, err := m.Admin.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x62
+ }
+ if m.Tracing != nil {
+ if vtmsg, ok := interface{}(m.Tracing).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Tracing)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.Watchdog != nil {
+ size, err := m.Watchdog.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.StatsFlushInterval != nil {
+ size, err := (*durationpb.Duration)(m.StatsFlushInterval).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if len(m.StatsSinks) > 0 {
+ for iNdEx := len(m.StatsSinks) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.StatsSinks[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.StatsSinks[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ }
+ if len(m.FlagsPath) > 0 {
+ i -= len(m.FlagsPath)
+ copy(dAtA[i:], m.FlagsPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.FlagsPath)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.ClusterManager != nil {
+ size, err := m.ClusterManager.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.DynamicResources != nil {
+ size, err := m.DynamicResources.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.StaticResources != nil {
+ size, err := m.StaticResources.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Node != nil {
+ if vtmsg, ok := interface{}(m.Node).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Node)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_StatsFlushOnAdmin) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Bootstrap_StatsFlushOnAdmin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i--
+ if m.StatsFlushOnAdmin {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0xe8
+ return len(dAtA) - i, nil
+}
+func (m *Admin) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Admin) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Admin) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.IgnoreGlobalConnLimit {
+ i--
+ if m.IgnoreGlobalConnLimit {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.AccessLog) > 0 {
+ for iNdEx := len(m.AccessLog) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.AccessLog[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.AccessLog[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if len(m.SocketOptions) > 0 {
+ for iNdEx := len(m.SocketOptions) - 1; iNdEx >= 0; iNdEx-- {
+ if vtmsg, ok := interface{}(m.SocketOptions[iNdEx]).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.SocketOptions[iNdEx])
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if m.Address != nil {
+ if vtmsg, ok := interface{}(m.Address).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Address)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.ProfilePath) > 0 {
+ i -= len(m.ProfilePath)
+ copy(dAtA[i:], m.ProfilePath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.ProfilePath)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.AccessLogPath) > 0 {
+ i -= len(m.AccessLogPath)
+ copy(dAtA[i:], m.AccessLogPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.AccessLogPath)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterManager_OutlierDetection) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterManager_OutlierDetection) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClusterManager_OutlierDetection) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.EventService != nil {
+ if vtmsg, ok := interface{}(m.EventService).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.EventService)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.EventLogPath) > 0 {
+ i -= len(m.EventLogPath)
+ copy(dAtA[i:], m.EventLogPath)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.EventLogPath)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterManager) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterManager) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *ClusterManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.EnableDeferredClusterCreation {
+ i--
+ if m.EnableDeferredClusterCreation {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.LoadStatsConfig != nil {
+ if vtmsg, ok := interface{}(m.LoadStatsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.LoadStatsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.UpstreamBindConfig != nil {
+ if vtmsg, ok := interface{}(m.UpstreamBindConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.UpstreamBindConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.OutlierDetection != nil {
+ size, err := m.OutlierDetection.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.LocalClusterName) > 0 {
+ i -= len(m.LocalClusterName)
+ copy(dAtA[i:], m.LocalClusterName)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.LocalClusterName)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Watchdogs) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Watchdogs) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Watchdogs) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.WorkerWatchdog != nil {
+ size, err := m.WorkerWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MainThreadWatchdog != nil {
+ size, err := m.MainThreadWatchdog.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Watchdog_WatchdogAction) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Watchdog_WatchdogAction) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Watchdog_WatchdogAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Event != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Event))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Config != nil {
+ if vtmsg, ok := interface{}(m.Config).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Config)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Watchdog) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Watchdog) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Watchdog) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Actions) > 0 {
+ for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Actions[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ if m.MaxKillTimeoutJitter != nil {
+ size, err := (*durationpb.Duration)(m.MaxKillTimeoutJitter).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.MultikillThreshold != nil {
+ if vtmsg, ok := interface{}(m.MultikillThreshold).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.MultikillThreshold)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.MultikillTimeout != nil {
+ size, err := (*durationpb.Duration)(m.MultikillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.KillTimeout != nil {
+ size, err := (*durationpb.Duration)(m.KillTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.MegamissTimeout != nil {
+ size, err := (*durationpb.Duration)(m.MegamissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.MissTimeout != nil {
+ size, err := (*durationpb.Duration)(m.MissTimeout).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *FatalAction) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *FatalAction) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *FatalAction) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Config != nil {
+ if vtmsg, ok := interface{}(m.Config).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.Config)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Runtime) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Runtime) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *Runtime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.Base != nil {
+ size, err := (*structpb.Struct)(m.Base).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.OverrideSubdirectory) > 0 {
+ i -= len(m.OverrideSubdirectory)
+ copy(dAtA[i:], m.OverrideSubdirectory)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.OverrideSubdirectory)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Subdirectory) > 0 {
+ i -= len(m.Subdirectory)
+ copy(dAtA[i:], m.Subdirectory)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.SymlinkRoot) > 0 {
+ i -= len(m.SymlinkRoot)
+ copy(dAtA[i:], m.SymlinkRoot)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer_DiskLayer) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeLayer_DiskLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_DiskLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Subdirectory) > 0 {
+ i -= len(m.Subdirectory)
+ copy(dAtA[i:], m.Subdirectory)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Subdirectory)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.AppendServiceCluster {
+ i--
+ if m.AppendServiceCluster {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.SymlinkRoot) > 0 {
+ i -= len(m.SymlinkRoot)
+ copy(dAtA[i:], m.SymlinkRoot)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.SymlinkRoot)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer_AdminLayer) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeLayer_AdminLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_AdminLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer_RtdsLayer) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeLayer_RtdsLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_RtdsLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.RtdsConfig != nil {
+ if vtmsg, ok := interface{}(m.RtdsConfig).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.RtdsConfig)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RuntimeLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if msg, ok := m.LayerSpecifier.(*RuntimeLayer_RtdsLayer_); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.LayerSpecifier.(*RuntimeLayer_AdminLayer_); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.LayerSpecifier.(*RuntimeLayer_DiskLayer_); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if msg, ok := m.LayerSpecifier.(*RuntimeLayer_StaticLayer); ok {
+ size, err := msg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RuntimeLayer_StaticLayer) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_StaticLayer) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.StaticLayer != nil {
+ size, err := (*structpb.Struct)(m.StaticLayer).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RuntimeLayer_DiskLayer_) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_DiskLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.DiskLayer != nil {
+ size, err := m.DiskLayer.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RuntimeLayer_AdminLayer_) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_AdminLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.AdminLayer != nil {
+ size, err := m.AdminLayer.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RuntimeLayer_RtdsLayer_) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *RuntimeLayer_RtdsLayer_) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.RtdsLayer != nil {
+ size, err := m.RtdsLayer.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ } else {
+ i = protohelpers.EncodeVarint(dAtA, i, 0)
+ i--
+ dAtA[i] = 0x2a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *LayeredRuntime) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LayeredRuntime) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *LayeredRuntime) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.Layers) > 0 {
+ for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Layers[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CustomInlineHeader) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CustomInlineHeader) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CustomInlineHeader) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.InlineHeaderType != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.InlineHeaderType))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.InlineHeaderName) > 0 {
+ i -= len(m.InlineHeaderName)
+ copy(dAtA[i:], m.InlineHeaderName)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.InlineHeaderName)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemoryAllocatorManager) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemoryAllocatorManager) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *MemoryAllocatorManager) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MemoryReleaseInterval != nil {
+ size, err := (*durationpb.Duration)(m.MemoryReleaseInterval).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.BytesToRelease != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.BytesToRelease))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Bootstrap_StaticResources) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Listeners) > 0 {
+ for _, e := range m.Listeners {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.Clusters) > 0 {
+ for _, e := range m.Clusters {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.Secrets) > 0 {
+ for _, e := range m.Secrets {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_DynamicResources) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LdsConfig != nil {
+ if size, ok := interface{}(m.LdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.CdsConfig != nil {
+ if size, ok := interface{}(m.CdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.CdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.AdsConfig != nil {
+ if size, ok := interface{}(m.AdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.AdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.LdsResourcesLocator)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.CdsResourcesLocator)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if vtmsg, ok := m.LogFormat.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_JsonFormat) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.JsonFormat != nil {
+ l = (*structpb.Struct)(m.JsonFormat).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *Bootstrap_ApplicationLogConfig_LogFormat_TextFormat) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.TextFormat)
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ return n
+}
+func (m *Bootstrap_ApplicationLogConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LogFormat != nil {
+ l = m.LogFormat.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_DeferredStatOptions) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.EnableDeferredCreationStats {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_GrpcAsyncClientManagerConfig) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MaxCachedEntryIdleDuration != nil {
+ l = (*durationpb.Duration)(m.MaxCachedEntryIdleDuration).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Node != nil {
+ if size, ok := interface{}(m.Node).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Node)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.StaticResources != nil {
+ l = m.StaticResources.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DynamicResources != nil {
+ l = m.DynamicResources.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ClusterManager != nil {
+ l = m.ClusterManager.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.FlagsPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.StatsSinks) > 0 {
+ for _, e := range m.StatsSinks {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.StatsFlushInterval != nil {
+ l = (*durationpb.Duration)(m.StatsFlushInterval).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Watchdog != nil {
+ l = m.Watchdog.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Tracing != nil {
+ if size, ok := interface{}(m.Tracing).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Tracing)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Admin != nil {
+ l = m.Admin.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.StatsConfig != nil {
+ if size, ok := interface{}(m.StatsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.StatsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.HdsConfig != nil {
+ if size, ok := interface{}(m.HdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.HdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.OverloadManager != nil {
+ if size, ok := interface{}(m.OverloadManager).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.OverloadManager)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EnableDispatcherStats {
+ n += 3
+ }
+ if m.LayeredRuntime != nil {
+ l = m.LayeredRuntime.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.HeaderPrefix)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.StatsServerVersionOverride != nil {
+ l = (*wrapperspb.UInt64Value)(m.StatsServerVersionOverride).SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.UseTcpForDnsLookups {
+ n += 3
+ }
+ if len(m.BootstrapExtensions) > 0 {
+ for _, e := range m.BootstrapExtensions {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.ConfigSources) > 0 {
+ for _, e := range m.ConfigSources {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.DefaultConfigSource != nil {
+ if size, ok := interface{}(m.DefaultConfigSource).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.DefaultConfigSource)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.DefaultSocketInterface)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.CertificateProviderInstances) > 0 {
+ for k, v := range m.CertificateProviderInstances {
+ _ = k
+ _ = v
+ l = 0
+ if v != nil {
+ if size, ok := interface{}(v).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(v)
+ }
+ }
+ l += 1 + protohelpers.SizeOfVarint(uint64(l))
+ mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l
+ n += mapEntrySize + 2 + protohelpers.SizeOfVarint(uint64(mapEntrySize))
+ }
+ }
+ if len(m.NodeContextParams) > 0 {
+ for _, s := range m.NodeContextParams {
+ l = len(s)
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.Watchdogs != nil {
+ l = m.Watchdogs.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.FatalActions) > 0 {
+ for _, e := range m.FatalActions {
+ l = e.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if vtmsg, ok := m.StatsFlush.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ if m.DnsResolutionConfig != nil {
+ if size, ok := interface{}(m.DnsResolutionConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.DnsResolutionConfig)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.TypedDnsResolverConfig != nil {
+ if size, ok := interface{}(m.TypedDnsResolverConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.TypedDnsResolverConfig)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.InlineHeaders) > 0 {
+ for _, e := range m.InlineHeaders {
+ l = e.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ l = len(m.PerfTracingFilePath)
+ if l > 0 {
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DefaultRegexEngine != nil {
+ if size, ok := interface{}(m.DefaultRegexEngine).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.DefaultRegexEngine)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.XdsDelegateExtension != nil {
+ if size, ok := interface{}(m.XdsDelegateExtension).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.XdsDelegateExtension)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.XdsConfigTrackerExtension != nil {
+ if size, ok := interface{}(m.XdsConfigTrackerExtension).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.XdsConfigTrackerExtension)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ListenerManager != nil {
+ if size, ok := interface{}(m.ListenerManager).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.ListenerManager)
+ }
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.ApplicationLogConfig != nil {
+ l = m.ApplicationLogConfig.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.DeferredStatOptions != nil {
+ l = m.DeferredStatOptions.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.GrpcAsyncClientManagerConfig != nil {
+ l = m.GrpcAsyncClientManagerConfig.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MemoryAllocatorManager != nil {
+ l = m.MemoryAllocatorManager.SizeVT()
+ n += 2 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Bootstrap_StatsFlushOnAdmin) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 3
+ return n
+}
+func (m *Admin) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.AccessLogPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.ProfilePath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Address != nil {
+ if size, ok := interface{}(m.Address).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Address)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.SocketOptions) > 0 {
+ for _, e := range m.SocketOptions {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.AccessLog) > 0 {
+ for _, e := range m.AccessLog {
+ if size, ok := interface{}(e).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(e)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if m.IgnoreGlobalConnLimit {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClusterManager_OutlierDetection) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.EventLogPath)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EventService != nil {
+ if size, ok := interface{}(m.EventService).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.EventService)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *ClusterManager) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.LocalClusterName)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.OutlierDetection != nil {
+ l = m.OutlierDetection.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.UpstreamBindConfig != nil {
+ if size, ok := interface{}(m.UpstreamBindConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.UpstreamBindConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.LoadStatsConfig != nil {
+ if size, ok := interface{}(m.LoadStatsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.LoadStatsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.EnableDeferredClusterCreation {
+ n += 2
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Watchdogs) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MainThreadWatchdog != nil {
+ l = m.MainThreadWatchdog.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.WorkerWatchdog != nil {
+ l = m.WorkerWatchdog.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Watchdog_WatchdogAction) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Config != nil {
+ if size, ok := interface{}(m.Config).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Config)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Event != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Event))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Watchdog) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MissTimeout != nil {
+ l = (*durationpb.Duration)(m.MissTimeout).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MegamissTimeout != nil {
+ l = (*durationpb.Duration)(m.MegamissTimeout).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.KillTimeout != nil {
+ l = (*durationpb.Duration)(m.KillTimeout).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MultikillTimeout != nil {
+ l = (*durationpb.Duration)(m.MultikillTimeout).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MultikillThreshold != nil {
+ if size, ok := interface{}(m.MultikillThreshold).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.MultikillThreshold)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MaxKillTimeoutJitter != nil {
+ l = (*durationpb.Duration)(m.MaxKillTimeoutJitter).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if len(m.Actions) > 0 {
+ for _, e := range m.Actions {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *FatalAction) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Config != nil {
+ if size, ok := interface{}(m.Config).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.Config)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *Runtime) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SymlinkRoot)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.Subdirectory)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ l = len(m.OverrideSubdirectory)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.Base != nil {
+ l = (*structpb.Struct)(m.Base).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer_DiskLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.SymlinkRoot)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.AppendServiceCluster {
+ n += 2
+ }
+ l = len(m.Subdirectory)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer_AdminLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer_RtdsLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.RtdsConfig != nil {
+ if size, ok := interface{}(m.RtdsConfig).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.RtdsConfig)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if vtmsg, ok := m.LayerSpecifier.(interface{ SizeVT() int }); ok {
+ n += vtmsg.SizeVT()
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *RuntimeLayer_StaticLayer) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.StaticLayer != nil {
+ l = (*structpb.Struct)(m.StaticLayer).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *RuntimeLayer_DiskLayer_) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.DiskLayer != nil {
+ l = m.DiskLayer.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *RuntimeLayer_AdminLayer_) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.AdminLayer != nil {
+ l = m.AdminLayer.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *RuntimeLayer_RtdsLayer_) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RtdsLayer != nil {
+ l = m.RtdsLayer.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ } else {
+ n += 2
+ }
+ return n
+}
+func (m *LayeredRuntime) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Layers) > 0 {
+ for _, e := range m.Layers {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CustomInlineHeader) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.InlineHeaderName)
+ if l > 0 {
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.InlineHeaderType != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.InlineHeaderType))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *MemoryAllocatorManager) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.BytesToRelease != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.BytesToRelease))
+ }
+ if m.MemoryReleaseInterval != nil {
+ l = (*durationpb.Duration)(m.MemoryReleaseInterval).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go
new file mode 100644
index 000000000..e70e27b1f
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.go
@@ -0,0 +1,507 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/config/cluster/v3/circuit_breaker.proto
+
+package clusterv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// :ref:`Circuit breaking` settings can be
+// specified individually for each defined priority.
+type CircuitBreakers struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // If multiple :ref:`Thresholds`
+ // are defined with the same :ref:`RoutingPriority`,
+ // the first one in the list is used. If no Thresholds is defined for a given
+ // :ref:`RoutingPriority`, the default values
+ // are used.
+ Thresholds []*CircuitBreakers_Thresholds `protobuf:"bytes,1,rep,name=thresholds,proto3" json:"thresholds,omitempty"`
+ // Optional per-host limits which apply to each individual host in a cluster.
+ //
+ // .. note::
+ //
+ // currently only the :ref:`max_connections
+ // ` field is supported for per-host limits.
+ //
+ // If multiple per-host :ref:`Thresholds`
+ // are defined with the same :ref:`RoutingPriority`,
+ // the first one in the list is used. If no per-host Thresholds are defined for a given
+ // :ref:`RoutingPriority`,
+ // the cluster will not have per-host limits.
+ PerHostThresholds []*CircuitBreakers_Thresholds `protobuf:"bytes,2,rep,name=per_host_thresholds,json=perHostThresholds,proto3" json:"per_host_thresholds,omitempty"`
+}
+
+func (x *CircuitBreakers) Reset() {
+ *x = CircuitBreakers{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CircuitBreakers) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CircuitBreakers) ProtoMessage() {}
+
+func (x *CircuitBreakers) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CircuitBreakers.ProtoReflect.Descriptor instead.
+func (*CircuitBreakers) Descriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *CircuitBreakers) GetThresholds() []*CircuitBreakers_Thresholds {
+ if x != nil {
+ return x.Thresholds
+ }
+ return nil
+}
+
+func (x *CircuitBreakers) GetPerHostThresholds() []*CircuitBreakers_Thresholds {
+ if x != nil {
+ return x.PerHostThresholds
+ }
+ return nil
+}
+
+// A Thresholds defines CircuitBreaker settings for a
+// :ref:`RoutingPriority`.
+// [#next-free-field: 9]
+type CircuitBreakers_Thresholds struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The :ref:`RoutingPriority`
+ // the specified CircuitBreaker settings apply to.
+ Priority v3.RoutingPriority `protobuf:"varint,1,opt,name=priority,proto3,enum=envoy.config.core.v3.RoutingPriority" json:"priority,omitempty"`
+ // The maximum number of connections that Envoy will make to the upstream
+ // cluster. If not specified, the default is 1024.
+ MaxConnections *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=max_connections,json=maxConnections,proto3" json:"max_connections,omitempty"`
+ // The maximum number of pending requests that Envoy will allow to the
+ // upstream cluster. If not specified, the default is 1024.
+ // This limit is applied as a connection limit for non-HTTP traffic.
+ MaxPendingRequests *wrapperspb.UInt32Value `protobuf:"bytes,3,opt,name=max_pending_requests,json=maxPendingRequests,proto3" json:"max_pending_requests,omitempty"`
+ // The maximum number of parallel requests that Envoy will make to the
+ // upstream cluster. If not specified, the default is 1024.
+ // This limit does not apply to non-HTTP traffic.
+ MaxRequests *wrapperspb.UInt32Value `protobuf:"bytes,4,opt,name=max_requests,json=maxRequests,proto3" json:"max_requests,omitempty"`
+ // The maximum number of parallel retries that Envoy will allow to the
+ // upstream cluster. If not specified, the default is 3.
+ MaxRetries *wrapperspb.UInt32Value `protobuf:"bytes,5,opt,name=max_retries,json=maxRetries,proto3" json:"max_retries,omitempty"`
+ // Specifies a limit on concurrent retries in relation to the number of active requests. This
+ // parameter is optional.
+ //
+ // .. note::
+ //
+ // If this field is set, the retry budget will override any configured retry circuit
+ // breaker.
+ RetryBudget *CircuitBreakers_Thresholds_RetryBudget `protobuf:"bytes,8,opt,name=retry_budget,json=retryBudget,proto3" json:"retry_budget,omitempty"`
+ // If track_remaining is true, then stats will be published that expose
+ // the number of resources remaining until the circuit breakers open. If
+ // not specified, the default is false.
+ //
+ // .. note::
+ //
+ // If a retry budget is used in lieu of the max_retries circuit breaker,
+ // the remaining retry resources remaining will not be tracked.
+ TrackRemaining bool `protobuf:"varint,6,opt,name=track_remaining,json=trackRemaining,proto3" json:"track_remaining,omitempty"`
+ // The maximum number of connection pools per cluster that Envoy will concurrently support at
+ // once. If not specified, the default is unlimited. Set this for clusters which create a
+ // large number of connection pools. See
+ // :ref:`Circuit Breaking ` for
+ // more details.
+ MaxConnectionPools *wrapperspb.UInt32Value `protobuf:"bytes,7,opt,name=max_connection_pools,json=maxConnectionPools,proto3" json:"max_connection_pools,omitempty"`
+}
+
+func (x *CircuitBreakers_Thresholds) Reset() {
+ *x = CircuitBreakers_Thresholds{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CircuitBreakers_Thresholds) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CircuitBreakers_Thresholds) ProtoMessage() {}
+
+func (x *CircuitBreakers_Thresholds) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CircuitBreakers_Thresholds.ProtoReflect.Descriptor instead.
+func (*CircuitBreakers_Thresholds) Descriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0, 0}
+}
+
+func (x *CircuitBreakers_Thresholds) GetPriority() v3.RoutingPriority {
+ if x != nil {
+ return x.Priority
+ }
+ return v3.RoutingPriority(0)
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxConnections() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxConnections
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxPendingRequests() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxPendingRequests
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxRequests() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxRequests
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxRetries() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxRetries
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetRetryBudget() *CircuitBreakers_Thresholds_RetryBudget {
+ if x != nil {
+ return x.RetryBudget
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds) GetTrackRemaining() bool {
+ if x != nil {
+ return x.TrackRemaining
+ }
+ return false
+}
+
+func (x *CircuitBreakers_Thresholds) GetMaxConnectionPools() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MaxConnectionPools
+ }
+ return nil
+}
+
+type CircuitBreakers_Thresholds_RetryBudget struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Specifies the limit on concurrent retries as a percentage of the sum of active requests and
+ // active pending requests. For example, if there are 100 active requests and the
+ // budget_percent is set to 25, there may be 25 active retries.
+ //
+ // This parameter is optional. Defaults to 20%.
+ BudgetPercent *v31.Percent `protobuf:"bytes,1,opt,name=budget_percent,json=budgetPercent,proto3" json:"budget_percent,omitempty"`
+ // Specifies the minimum retry concurrency allowed for the retry budget. The limit on the
+ // number of active retries may never go below this number.
+ //
+ // This parameter is optional. Defaults to 3.
+ MinRetryConcurrency *wrapperspb.UInt32Value `protobuf:"bytes,2,opt,name=min_retry_concurrency,json=minRetryConcurrency,proto3" json:"min_retry_concurrency,omitempty"`
+}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) Reset() {
+ *x = CircuitBreakers_Thresholds_RetryBudget{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*CircuitBreakers_Thresholds_RetryBudget) ProtoMessage() {}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) ProtoReflect() protoreflect.Message {
+ mi := &file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use CircuitBreakers_Thresholds_RetryBudget.ProtoReflect.Descriptor instead.
+func (*CircuitBreakers_Thresholds_RetryBudget) Descriptor() ([]byte, []int) {
+ return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP(), []int{0, 0, 0}
+}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) GetBudgetPercent() *v31.Percent {
+ if x != nil {
+ return x.BudgetPercent
+ }
+ return nil
+}
+
+func (x *CircuitBreakers_Thresholds_RetryBudget) GetMinRetryConcurrency() *wrapperspb.UInt32Value {
+ if x != nil {
+ return x.MinRetryConcurrency
+ }
+ return nil
+}
+
+var File_envoy_config_cluster_v3_circuit_breaker_proto protoreflect.FileDescriptor
+
+var file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc = []byte{
+ 0x0a, 0x2d, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63,
+ 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x33, 0x2f, 0x63, 0x69, 0x72, 0x63, 0x75, 0x69,
+ 0x74, 0x5f, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
+ 0x17, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c,
+ 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x1a, 0x1f, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2f,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x62,
+ 0x61, 0x73, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2f, 0x74, 0x79, 0x70, 0x65, 0x2f, 0x76, 0x33, 0x2f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e,
+ 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x21, 0x75, 0x64, 0x70, 0x61, 0x2f, 0x61, 0x6e, 0x6e, 0x6f,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69,
+ 0x6e, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
+ 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x22, 0xe5, 0x08, 0x0a, 0x0f, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65,
+ 0x61, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x53, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f,
+ 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f,
+ 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x2e, 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b,
+ 0x65, 0x72, 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x52, 0x0a,
+ 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x63, 0x0a, 0x13, 0x70, 0x65,
+ 0x72, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64,
+ 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76,
+ 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72,
+ 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x52, 0x11, 0x70, 0x65,
+ 0x72, 0x48, 0x6f, 0x73, 0x74, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x1a,
+ 0xea, 0x06, 0x0a, 0x0a, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x12, 0x4b,
+ 0x0a, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x25, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e,
+ 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x50,
+ 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10,
+ 0x01, 0x52, 0x08, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x45, 0x0a, 0x0f, 0x6d,
+ 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x12, 0x4e, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x65, 0x6e, 0x64, 0x69, 0x6e,
+ 0x67, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12,
+ 0x6d, 0x61, 0x78, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x73, 0x12, 0x3f, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x0b, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x69,
+ 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x49, 0x6e, 0x74, 0x33,
+ 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x74, 0x72, 0x69,
+ 0x65, 0x73, 0x12, 0x62, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x62, 0x75, 0x64, 0x67,
+ 0x65, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e,
+ 0x76, 0x33, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65,
+ 0x72, 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x2e, 0x52, 0x65,
+ 0x74, 0x72, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79,
+ 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f,
+ 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0e, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x52, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x12,
+ 0x4e, 0x0a, 0x14, 0x6d, 0x61, 0x78, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f,
+ 0x6e, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x12, 0x6d, 0x61, 0x78,
+ 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6f, 0x6c, 0x73, 0x1a,
+ 0xe2, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x12,
+ 0x3d, 0x0a, 0x0e, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x65, 0x72, 0x63, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e,
+ 0x74, 0x79, 0x70, 0x65, 0x2e, 0x76, 0x33, 0x2e, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x52,
+ 0x0d, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x50, 0x65, 0x72, 0x63, 0x65, 0x6e, 0x74, 0x12, 0x50,
+ 0x0a, 0x15, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6e, 0x63,
+ 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x55, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x13, 0x6d, 0x69, 0x6e,
+ 0x52, 0x65, 0x74, 0x72, 0x79, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x79,
+ 0x3a, 0x42, 0x9a, 0xc5, 0x88, 0x1e, 0x3d, 0x0a, 0x3b, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x69,
+ 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x2e, 0x54, 0x68,
+ 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x2e, 0x52, 0x65, 0x74, 0x72, 0x79, 0x42, 0x75,
+ 0x64, 0x67, 0x65, 0x74, 0x3a, 0x36, 0x9a, 0xc5, 0x88, 0x1e, 0x31, 0x0a, 0x2f, 0x65, 0x6e, 0x76,
+ 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72,
+ 0x73, 0x2e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x3a, 0x2b, 0x9a, 0xc5,
+ 0x88, 0x1e, 0x26, 0x0a, 0x24, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76,
+ 0x32, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x69, 0x72, 0x63, 0x75, 0x69,
+ 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x73, 0x42, 0x90, 0x01, 0xba, 0x80, 0xc8, 0xd1,
+ 0x06, 0x02, 0x10, 0x02, 0x0a, 0x25, 0x69, 0x6f, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x2e, 0x65, 0x6e, 0x76, 0x6f, 0x79, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+ 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x33, 0x42, 0x13, 0x43, 0x69, 0x72,
+ 0x63, 0x75, 0x69, 0x74, 0x42, 0x72, 0x65, 0x61, 0x6b, 0x65, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
+ 0x50, 0x01, 0x5a, 0x48, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x65,
+ 0x6e, 0x76, 0x6f, 0x79, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2f, 0x67, 0x6f, 0x2d, 0x63, 0x6f, 0x6e,
+ 0x74, 0x72, 0x6f, 0x6c, 0x2d, 0x70, 0x6c, 0x61, 0x6e, 0x65, 0x2f, 0x65, 0x6e, 0x76, 0x6f, 0x79,
+ 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2f,
+ 0x76, 0x33, 0x3b, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x76, 0x33, 0x62, 0x06, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescOnce sync.Once
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData = file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc
+)
+
+func file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescGZIP() []byte {
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescOnce.Do(func() {
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData = protoimpl.X.CompressGZIP(file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData)
+ })
+ return file_envoy_config_cluster_v3_circuit_breaker_proto_rawDescData
+}
+
+var file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = []interface{}{
+ (*CircuitBreakers)(nil), // 0: envoy.config.cluster.v3.CircuitBreakers
+ (*CircuitBreakers_Thresholds)(nil), // 1: envoy.config.cluster.v3.CircuitBreakers.Thresholds
+ (*CircuitBreakers_Thresholds_RetryBudget)(nil), // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget
+ (v3.RoutingPriority)(0), // 3: envoy.config.core.v3.RoutingPriority
+ (*wrapperspb.UInt32Value)(nil), // 4: google.protobuf.UInt32Value
+ (*v31.Percent)(nil), // 5: envoy.type.v3.Percent
+}
+var file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = []int32{
+ 1, // 0: envoy.config.cluster.v3.CircuitBreakers.thresholds:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds
+ 1, // 1: envoy.config.cluster.v3.CircuitBreakers.per_host_thresholds:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds
+ 3, // 2: envoy.config.cluster.v3.CircuitBreakers.Thresholds.priority:type_name -> envoy.config.core.v3.RoutingPriority
+ 4, // 3: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_connections:type_name -> google.protobuf.UInt32Value
+ 4, // 4: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_pending_requests:type_name -> google.protobuf.UInt32Value
+ 4, // 5: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_requests:type_name -> google.protobuf.UInt32Value
+ 4, // 6: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_retries:type_name -> google.protobuf.UInt32Value
+ 2, // 7: envoy.config.cluster.v3.CircuitBreakers.Thresholds.retry_budget:type_name -> envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget
+ 4, // 8: envoy.config.cluster.v3.CircuitBreakers.Thresholds.max_connection_pools:type_name -> google.protobuf.UInt32Value
+ 5, // 9: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.budget_percent:type_name -> envoy.type.v3.Percent
+ 4, // 10: envoy.config.cluster.v3.CircuitBreakers.Thresholds.RetryBudget.min_retry_concurrency:type_name -> google.protobuf.UInt32Value
+ 11, // [11:11] is the sub-list for method output_type
+ 11, // [11:11] is the sub-list for method input_type
+ 11, // [11:11] is the sub-list for extension type_name
+ 11, // [11:11] is the sub-list for extension extendee
+ 0, // [0:11] is the sub-list for field type_name
+}
+
+func init() { file_envoy_config_cluster_v3_circuit_breaker_proto_init() }
+func file_envoy_config_cluster_v3_circuit_breaker_proto_init() {
+ if File_envoy_config_cluster_v3_circuit_breaker_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CircuitBreakers); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CircuitBreakers_Thresholds); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*CircuitBreakers_Thresholds_RetryBudget); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes,
+ DependencyIndexes: file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs,
+ MessageInfos: file_envoy_config_cluster_v3_circuit_breaker_proto_msgTypes,
+ }.Build()
+ File_envoy_config_cluster_v3_circuit_breaker_proto = out.File
+ file_envoy_config_cluster_v3_circuit_breaker_proto_rawDesc = nil
+ file_envoy_config_cluster_v3_circuit_breaker_proto_goTypes = nil
+ file_envoy_config_cluster_v3_circuit_breaker_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go
new file mode 100644
index 000000000..8bf3373be
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker.pb.validate.go
@@ -0,0 +1,662 @@
+//go:build !disable_pgv
+// Code generated by protoc-gen-validate. DO NOT EDIT.
+// source: envoy/config/cluster/v3/circuit_breaker.proto
+
+package clusterv3
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "net"
+ "net/mail"
+ "net/url"
+ "regexp"
+ "sort"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/protobuf/types/known/anypb"
+
+ v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+)
+
+// ensure the imports are used
+var (
+ _ = bytes.MinRead
+ _ = errors.New("")
+ _ = fmt.Print
+ _ = utf8.UTFMax
+ _ = (*regexp.Regexp)(nil)
+ _ = (*strings.Reader)(nil)
+ _ = net.IPv4len
+ _ = time.Duration(0)
+ _ = (*url.URL)(nil)
+ _ = (*mail.Address)(nil)
+ _ = anypb.Any{}
+ _ = sort.Sort
+
+ _ = v3.RoutingPriority(0)
+)
+
+// Validate checks the field values on CircuitBreakers with the rules defined
+// in the proto definition for this message. If any rules are violated, the
+// first error encountered is returned, or nil if there are no violations.
+func (m *CircuitBreakers) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CircuitBreakers with the rules
+// defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CircuitBreakersMultiError, or nil if none found.
+func (m *CircuitBreakers) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CircuitBreakers) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ for idx, item := range m.GetThresholds() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakersValidationError{
+ field: fmt.Sprintf("Thresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakersValidationError{
+ field: fmt.Sprintf("Thresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakersValidationError{
+ field: fmt.Sprintf("Thresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ for idx, item := range m.GetPerHostThresholds() {
+ _, _ = idx, item
+
+ if all {
+ switch v := interface{}(item).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakersValidationError{
+ field: fmt.Sprintf("PerHostThresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakersValidationError{
+ field: fmt.Sprintf("PerHostThresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakersValidationError{
+ field: fmt.Sprintf("PerHostThresholds[%v]", idx),
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ }
+
+ if len(errors) > 0 {
+ return CircuitBreakersMultiError(errors)
+ }
+
+ return nil
+}
+
+// CircuitBreakersMultiError is an error wrapping multiple validation errors
+// returned by CircuitBreakers.ValidateAll() if the designated constraints
+// aren't met.
+type CircuitBreakersMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CircuitBreakersMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CircuitBreakersMultiError) AllErrors() []error { return m }
+
+// CircuitBreakersValidationError is the validation error returned by
+// CircuitBreakers.Validate if the designated constraints aren't met.
+type CircuitBreakersValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CircuitBreakersValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CircuitBreakersValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CircuitBreakersValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CircuitBreakersValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CircuitBreakersValidationError) ErrorName() string { return "CircuitBreakersValidationError" }
+
+// Error satisfies the builtin error interface
+func (e CircuitBreakersValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCircuitBreakers.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CircuitBreakersValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CircuitBreakersValidationError{}
+
+// Validate checks the field values on CircuitBreakers_Thresholds with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *CircuitBreakers_Thresholds) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on CircuitBreakers_Thresholds with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the result is a list of violation errors wrapped in
+// CircuitBreakers_ThresholdsMultiError, or nil if none found.
+func (m *CircuitBreakers_Thresholds) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CircuitBreakers_Thresholds) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if _, ok := v3.RoutingPriority_name[int32(m.GetPriority())]; !ok {
+ err := CircuitBreakers_ThresholdsValidationError{
+ field: "Priority",
+ reason: "value must be one of the defined enum values",
+ }
+ if !all {
+ return err
+ }
+ errors = append(errors, err)
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxConnections()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnections",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnections",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxConnections()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnections",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxPendingRequests()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxPendingRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxPendingRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxPendingRequests()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxPendingRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxRequests()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxRequests()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRequests",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMaxRetries()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxRetries()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxRetries",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetRetryBudget()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "RetryBudget",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "RetryBudget",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetRetryBudget()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "RetryBudget",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ // no validation rules for TrackRemaining
+
+ if all {
+ switch v := interface{}(m.GetMaxConnectionPools()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnectionPools",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnectionPools",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMaxConnectionPools()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_ThresholdsValidationError{
+ field: "MaxConnectionPools",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CircuitBreakers_ThresholdsMultiError(errors)
+ }
+
+ return nil
+}
+
+// CircuitBreakers_ThresholdsMultiError is an error wrapping multiple
+// validation errors returned by CircuitBreakers_Thresholds.ValidateAll() if
+// the designated constraints aren't met.
+type CircuitBreakers_ThresholdsMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CircuitBreakers_ThresholdsMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CircuitBreakers_ThresholdsMultiError) AllErrors() []error { return m }
+
+// CircuitBreakers_ThresholdsValidationError is the validation error returned
+// by CircuitBreakers_Thresholds.Validate if the designated constraints aren't met.
+type CircuitBreakers_ThresholdsValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CircuitBreakers_ThresholdsValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CircuitBreakers_ThresholdsValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CircuitBreakers_ThresholdsValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CircuitBreakers_ThresholdsValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CircuitBreakers_ThresholdsValidationError) ErrorName() string {
+ return "CircuitBreakers_ThresholdsValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CircuitBreakers_ThresholdsValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCircuitBreakers_Thresholds.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CircuitBreakers_ThresholdsValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CircuitBreakers_ThresholdsValidationError{}
+
+// Validate checks the field values on CircuitBreakers_Thresholds_RetryBudget
+// with the rules defined in the proto definition for this message. If any
+// rules are violated, the first error encountered is returned, or nil if
+// there are no violations.
+func (m *CircuitBreakers_Thresholds_RetryBudget) Validate() error {
+ return m.validate(false)
+}
+
+// ValidateAll checks the field values on
+// CircuitBreakers_Thresholds_RetryBudget with the rules defined in the proto
+// definition for this message. If any rules are violated, the result is a
+// list of violation errors wrapped in
+// CircuitBreakers_Thresholds_RetryBudgetMultiError, or nil if none found.
+func (m *CircuitBreakers_Thresholds_RetryBudget) ValidateAll() error {
+ return m.validate(true)
+}
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) validate(all bool) error {
+ if m == nil {
+ return nil
+ }
+
+ var errors []error
+
+ if all {
+ switch v := interface{}(m.GetBudgetPercent()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "BudgetPercent",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "BudgetPercent",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetBudgetPercent()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "BudgetPercent",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if all {
+ switch v := interface{}(m.GetMinRetryConcurrency()).(type) {
+ case interface{ ValidateAll() error }:
+ if err := v.ValidateAll(); err != nil {
+ errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "MinRetryConcurrency",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ case interface{ Validate() error }:
+ if err := v.Validate(); err != nil {
+ errors = append(errors, CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "MinRetryConcurrency",
+ reason: "embedded message failed validation",
+ cause: err,
+ })
+ }
+ }
+ } else if v, ok := interface{}(m.GetMinRetryConcurrency()).(interface{ Validate() error }); ok {
+ if err := v.Validate(); err != nil {
+ return CircuitBreakers_Thresholds_RetryBudgetValidationError{
+ field: "MinRetryConcurrency",
+ reason: "embedded message failed validation",
+ cause: err,
+ }
+ }
+ }
+
+ if len(errors) > 0 {
+ return CircuitBreakers_Thresholds_RetryBudgetMultiError(errors)
+ }
+
+ return nil
+}
+
+// CircuitBreakers_Thresholds_RetryBudgetMultiError is an error wrapping
+// multiple validation errors returned by
+// CircuitBreakers_Thresholds_RetryBudget.ValidateAll() if the designated
+// constraints aren't met.
+type CircuitBreakers_Thresholds_RetryBudgetMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m CircuitBreakers_Thresholds_RetryBudgetMultiError) Error() string {
+ var msgs []string
+ for _, err := range m {
+ msgs = append(msgs, err.Error())
+ }
+ return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m CircuitBreakers_Thresholds_RetryBudgetMultiError) AllErrors() []error { return m }
+
+// CircuitBreakers_Thresholds_RetryBudgetValidationError is the validation
+// error returned by CircuitBreakers_Thresholds_RetryBudget.Validate if the
+// designated constraints aren't met.
+type CircuitBreakers_Thresholds_RetryBudgetValidationError struct {
+ field string
+ reason string
+ cause error
+ key bool
+}
+
+// Field function returns field value.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) ErrorName() string {
+ return "CircuitBreakers_Thresholds_RetryBudgetValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e CircuitBreakers_Thresholds_RetryBudgetValidationError) Error() string {
+ cause := ""
+ if e.cause != nil {
+ cause = fmt.Sprintf(" | caused by: %v", e.cause)
+ }
+
+ key := ""
+ if e.key {
+ key = "key for "
+ }
+
+ return fmt.Sprintf(
+ "invalid %sCircuitBreakers_Thresholds_RetryBudget.%s: %s%s",
+ key,
+ e.field,
+ e.reason,
+ cause)
+}
+
+var _ error = CircuitBreakers_Thresholds_RetryBudgetValidationError{}
+
+var _ interface {
+ Field() string
+ Reason() string
+ Key() bool
+ Cause() error
+ ErrorName() string
+} = CircuitBreakers_Thresholds_RetryBudgetValidationError{}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go
new file mode 100644
index 000000000..14ca0a1f1
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/circuit_breaker_vtproto.pb.go
@@ -0,0 +1,337 @@
+//go:build vtprotobuf
+// +build vtprotobuf
+
+// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
+// source: envoy/config/cluster/v3/circuit_breaker.proto
+
+package clusterv3
+
+import (
+ protohelpers "github.com/planetscale/vtprotobuf/protohelpers"
+ wrapperspb "github.com/planetscale/vtprotobuf/types/known/wrapperspb"
+ proto "google.golang.org/protobuf/proto"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.MinRetryConcurrency != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.BudgetPercent != nil {
+ if vtmsg, ok := interface{}(m.BudgetPercent).(interface {
+ MarshalToSizedBufferVTStrict([]byte) (int, error)
+ }); ok {
+ size, err := vtmsg.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ } else {
+ encoded, err := proto.Marshal(m.BudgetPercent)
+ if err != nil {
+ return 0, err
+ }
+ i -= len(encoded)
+ copy(dAtA[i:], encoded)
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(len(encoded)))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CircuitBreakers_Thresholds) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CircuitBreakers_Thresholds) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CircuitBreakers_Thresholds) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if m.RetryBudget != nil {
+ size, err := m.RetryBudget.MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.MaxConnectionPools != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxConnectionPools).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.TrackRemaining {
+ i--
+ if m.TrackRemaining {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.MaxRetries != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxRetries).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.MaxRequests != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxRequests).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.MaxPendingRequests != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxPendingRequests).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.MaxConnections != nil {
+ size, err := (*wrapperspb.UInt32Value)(m.MaxConnections).MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Priority != 0 {
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Priority))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CircuitBreakers) MarshalVTStrict() (dAtA []byte, err error) {
+ if m == nil {
+ return nil, nil
+ }
+ size := m.SizeVT()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBufferVTStrict(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CircuitBreakers) MarshalToVTStrict(dAtA []byte) (int, error) {
+ size := m.SizeVT()
+ return m.MarshalToSizedBufferVTStrict(dAtA[:size])
+}
+
+func (m *CircuitBreakers) MarshalToSizedBufferVTStrict(dAtA []byte) (int, error) {
+ if m == nil {
+ return 0, nil
+ }
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.unknownFields != nil {
+ i -= len(m.unknownFields)
+ copy(dAtA[i:], m.unknownFields)
+ }
+ if len(m.PerHostThresholds) > 0 {
+ for iNdEx := len(m.PerHostThresholds) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.PerHostThresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Thresholds) > 0 {
+ for iNdEx := len(m.Thresholds) - 1; iNdEx >= 0; iNdEx-- {
+ size, err := m.Thresholds[iNdEx].MarshalToSizedBufferVTStrict(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = protohelpers.EncodeVarint(dAtA, i, uint64(size))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CircuitBreakers_Thresholds_RetryBudget) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.BudgetPercent != nil {
+ if size, ok := interface{}(m.BudgetPercent).(interface {
+ SizeVT() int
+ }); ok {
+ l = size.SizeVT()
+ } else {
+ l = proto.Size(m.BudgetPercent)
+ }
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MinRetryConcurrency != nil {
+ l = (*wrapperspb.UInt32Value)(m.MinRetryConcurrency).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CircuitBreakers_Thresholds) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Priority != 0 {
+ n += 1 + protohelpers.SizeOfVarint(uint64(m.Priority))
+ }
+ if m.MaxConnections != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxConnections).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MaxPendingRequests != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxPendingRequests).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MaxRequests != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxRequests).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.MaxRetries != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxRetries).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.TrackRemaining {
+ n += 2
+ }
+ if m.MaxConnectionPools != nil {
+ l = (*wrapperspb.UInt32Value)(m.MaxConnectionPools).SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ if m.RetryBudget != nil {
+ l = m.RetryBudget.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ n += len(m.unknownFields)
+ return n
+}
+
+func (m *CircuitBreakers) SizeVT() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Thresholds) > 0 {
+ for _, e := range m.Thresholds {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ if len(m.PerHostThresholds) > 0 {
+ for _, e := range m.PerHostThresholds {
+ l = e.SizeVT()
+ n += 1 + l + protohelpers.SizeOfVarint(uint64(l))
+ }
+ }
+ n += len(m.unknownFields)
+ return n
+}
diff --git a/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go
new file mode 100644
index 000000000..a2fb08949
--- /dev/null
+++ b/vendor/github.com/envoyproxy/go-control-plane/envoy/config/cluster/v3/cluster.pb.go
@@ -0,0 +1,4698 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v5.29.3
+// source: envoy/config/cluster/v3/cluster.proto
+
+package clusterv3
+
+import (
+ _ "github.com/cncf/xds/go/udpa/annotations"
+ v3 "github.com/cncf/xds/go/xds/core/v3"
+ _ "github.com/envoyproxy/go-control-plane/envoy/annotations"
+ v32 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
+ v31 "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
+ v34 "github.com/envoyproxy/go-control-plane/envoy/type/metadata/v3"
+ v33 "github.com/envoyproxy/go-control-plane/envoy/type/v3"
+ _ "github.com/envoyproxy/protoc-gen-validate/validate"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Refer to :ref:`service discovery type `
+// for an explanation on each type.
+type Cluster_DiscoveryType int32
+
+const (
+ // Refer to the :ref:`static discovery type`
+ // for an explanation.
+ Cluster_STATIC Cluster_DiscoveryType = 0
+ // Refer to the :ref:`strict DNS discovery
+ // type